| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def construct_fid_mask(catalog):
"""
Constructs the fidelity mask based off my results, not Robertos
:param catalog:
:return:
"""
line_widths = [i for i in range(3, 21, 2)]
fid_catalog = load_table("fidelity_snr.out", start=0)
fid_limit = 0.4
six_fids = []
for width in line_widths:
f = interp1d(fid_catalog["fbin"], fid_catalog["pure{}".format(width)], kind='slinear')
xdata = np.linspace(5.85, 7.85, 10000)
six_fids.append(xdata[np.argmax(f(xdata) >= fid_limit)])
masks = []
line_widths = [i for i in range(3, 21, 2)]
#six_fids = [6.3, 6.2, 6.1, 6.15, 6.1, 6.20, 6.1, 6.20, 6.05]
# six_fids = [6.35, 6.25, 6.15, 6.15, 6.15, 6.25, 6.15, 6.25, 6.05]
# six_fids = [6.25, 6.2, 6.1, 6.1, 6.1, 6.15, 6.1, 6.15, 6.05]
for index, width in enumerate(line_widths):
print(six_fids[index])
masks.append(catalog[((catalog['width'] == width) & (catalog['rsnrrbin'] >= six_fids[index]))])
total = masks[0]
t_sum = 0
for mask in masks[1:]:
t_sum += len(mask)
total = vstack((total, mask))
print("Total One: {}".format(len(total)))
return total
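# Usage sketch of the SNR-cut search above, on synthetic data. The column names
# ("fbin", "pure3") mirror fidelity_snr.out, but the values here are invented;
# the real table comes from load_table().
import numpy as np
from scipy.interpolate import interp1d

fbin = np.linspace(5.85, 7.85, 9)
pure3 = np.clip((fbin - 5.85) / 2.0, 0.0, 1.0)   # toy purity curve for line width 3

f = interp1d(fbin, pure3, kind='slinear')
xdata = np.linspace(5.85, 7.85, 10000)
snr_cut = xdata[np.argmax(f(xdata) >= 0.4)]      # first SNR bin reaching the 0.4 fidelity limit
print(snr_cut)                                   # ~6.65 for this toy curve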
| 5,346,700 |
def get_template(filename):
"""
return html mail template
"""
current_dir = os.path.dirname(__file__)
tpl = read_file(os.path.join(current_dir,'templates',filename))
if not tpl:
_log('Mailer error: could not load file "%s"'%filename)
sys.exit(1)
return tpl
| 5,346,701 |
def _run_job(tgt, fun, arg, kwarg, tgt_type, timeout, retry):
"""
Helper function to send execution module command using ``client.run_job``
method and collect results using ``client.get_event_iter_returns``. Implements
basic retry mechanism.
If ``client.get_event_iter_returns`` return no results, ``_run_job`` will retry
the command until minions return results or ``retry`` threshold reached, in
latter case ``CommandExecutionError`` raised with job details
"""
ret = {}
attempt = 1
while attempt <= retry:
# publish job command
pub_data = client.run_job(
tgt=tgt, fun=fun, arg=arg, kwarg=kwarg, tgt_type=tgt_type, timeout=timeout
)
# collect job results
job_results = client.get_event_iter_returns(timeout=timeout, **pub_data)
for item in job_results:
ret.update(item)
if not set(pub_data["minions"]) == set(ret.keys()):
minions_no_return = set(pub_data["minions"]) - set(ret.keys())
log.warning(
"Nornir-runner:_run_job - {}s timeout; no results from {}; returned {}; jid {}; attempt: {}".format(
timeout,
list(minions_no_return),
list(ret.keys()),
pub_data["jid"],
attempt,
)
)
if ret:
break
attempt += 1
else:
raise CommandExecutionError(
"Nornir-runner:_run_job - no results from minions; tgt: {}; fun: {}; tgt_type: {}; timeout: {}; retry: {}; kwarg: {}".format(
tgt, fun, tgt_type, timeout, retry, kwarg
)
)
return ret
| 5,346,702 |
def _set_rank_colorbar(ax, img, norm):
""" Set color bar for rankshow on the right of the ax
"""
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(img, cax=cax)
y_tick_values = cax.get_yticks()
boundary_means = [np.mean((y_tick_values[ii],y_tick_values[ii-1]))
for ii in range(1, len(y_tick_values))]
print(norm.boundaries)
category_names = [(str(norm.boundaries[ii-1])+'~'+
str(norm.boundaries[ii]))
for ii in range(1, len(norm.boundaries))]
# category_names[0] = '<='+str(norm.boundaries[1])
category_names[-1] = '>'+str(norm.boundaries[-2])
cax.yaxis.set_ticks(boundary_means)
cax.yaxis.set_ticklabels(category_names,rotation=0)
return cax
| 5,346,703 |
def yield_checksumfiles(queryset: Union[QuerySet, List[ChecksumFile]], directory: str):
"""Checkout a queryset of ChecksumFile records under a single directory.
This will use the `name` field of each of the files as their relative path
under the temporary directory.
Please note that this uses a contextmanager to acquire a lock on the
directory to make sure the files are not automatically cleaned up by
other threads or processes.
"""
files = list(queryset) if isinstance(queryset, QuerySet) else queryset
# Touch the directory to update the mtime
directory = Path(directory)
directory.touch()
# Acquire a lock on the directory so that it isn't cleaned up
lock = get_file_lock(directory)
lock.acquire()
# Download each file to the directory and yield it so that the lock can be released when done
try:
names = set()
# TODO: implement a FUSE interface
for file in files:
if file.name in names:
# NOTE: caller's responsibility to handle duplicate names
logger.error(
f'Duplicate `name` for ChecksumFile ({file.pk}: {file.name}). Overwriting...'
)
names.add(file.name)
file.download_to_local_path(directory=directory)
yield directory
finally:
lock.release()
| 5,346,704 |
def destination(stub: str) -> Optional[Path]:
"""Determine stub path
Only handle micropython stubs, ignoring
any cPython stdlib equivalents.
"""
prefix, _, suffix = stub.partition(".")
if importlib.util.find_spec(prefix): # type: ignore
return # in cPython stdlib, skip
prefix = Path(prefix)
if suffix in ("py", "pyi"): # module
return prefix / f"__init__.{suffix}"
return prefix / suffix
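# Illustrative calls (relying on the module's importlib and pathlib.Path imports);
# "machine" and "umqtt" are MicroPython-only names, so find_spec() fails for them on CPython.
print(destination("machine.py"))    # machine/__init__.py
print(destination("os.py"))         # None: "os" resolves in the CPython stdlib, so it is skipped
print(destination("umqtt.simple"))  # umqtt/simple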
| 5,346,705 |
def is_gafqmc_result_dir(D, files=None, dirs=None,
file_pattern=None, parse_file=True):
"""Tests whether the directory D, containing `files' (and softlinks)
and directories `dirs' is a result directory for a GAFQMC-type
calculation.
Returns the score of the test, where higher score means more
reliability.
Input arguments `files' and `dirs' are not necessary (in fact, not
recommended) unless you use this in conjunction with os.walk,
where the files and dirs would have been gathered during the
iteration cycle.
Return flag: an integer or-ed
1 = output file name exists
2 = AND output file does exists as a regular file
4 = AND output file is indeed a GAFQMC output file
8 = AND input filename exists that matches the output
"""
from os.path import join, isdir, isfile
from wpylib.sugar import is_iterable
from wpylib.file.file_utils import list_dir_entries
from pyqmc.results.gafqmc_info import is_gafqmc_info
if files is None or dirs is None:
dirs, files = list_dir_entries(D)[:2]
rslt = 0
if file_pattern is None:
file_pattern = gafqmc_out_file_patterns
if isinstance(file_pattern, (set, tuple, list)) or is_iterable(file_pattern):
if not isinstance(file_pattern, set):
file_pattern = set(file_pattern)
fset = set(files)
fset_good = file_pattern & fset
if len(fset_good) > 0:
# WARNING: This will create uncertainty if there are more than one file
# matching the pattern. BE WARNED!
info_file = sorted(list(fset_good))[0]
rslt |= 1
else:
raise NotImplementedError
if rslt:
# At least the filename is found:
info_path = join(D, info_file)
if isfile(info_path):
rslt |= 2
if parse_file and is_gafqmc_info(info_path):
rslt |= 4
# the next if's are TO BE IMPLEMENTED LATER
return rslt
| 5,346,706 |
def train_IPCA(X,n_dims,batch_size,model='ipca'):
"""
train_IPCA: linear dimensionality reduction using Singular Value Decomposition
of centered data, keeping only the most significant singular vectors to
project the data to a lower-dimensional space.
returns: the fitted transformer model
"""
estimator=transformer[model].set_params(pca__n_components=n_dims,pca__batch_size=batch_size)
estimator.fit(X)
return estimator
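# A possible shape for the module-level `transformer` registry assumed above
# (an assumption; only the 'pca' step name is implied by the pca__* parameters).
import numpy as np
from sklearn.decomposition import IncrementalPCA
from sklearn.pipeline import Pipeline

transformer = {'ipca': Pipeline([('pca', IncrementalPCA())])}

X = np.random.rand(200, 50)
model = train_IPCA(X, n_dims=10, batch_size=50)
X_low = model.transform(X)   # reduced to shape (200, 10)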
| 5,346,707 |
def _str_cell(cell: Cell) -> str:
"""Строковое представление клетки.
Данной строкой клетка будет выводится на экран.
"""
if cell.is_open:
if cell.is_empty:
return " "
elif cell.value:
return f" {cell.value} "
elif cell.is_flagged:
return "[F]"
else:
return "[ ]"
| 5,346,708 |
def mvw_ledoit_wolf(prices,
weight_bounds=(0.,1.),
rf = 0.,
options = None):
"""
Calculates the mean-variance weights given a DataFrame of returns.
Wraps mean_var_weights with ledoit_wolf covariance calculation method
Args:
* prices (DataFrame): Prices for multiple securities.
* weight_bounds ((low, high)): Weight limits for optimization.
* rf (float): `Risk-free rate <https://www.investopedia.com/terms/r/risk-freerate.asp>`_ used in utility calculation
* options (dict): options for minimizing, e.g. {'maxiter': 10000 }
Returns:
Series {col_name: weight}
"""
r = prices.to_returns().dropna()
covar = ledoit_wolf(r)[0]
return covar
| 5,346,709 |
def _PadLabels3d(logits, labels):
"""Pads or slices 3-d labels to match logits.
Covers the case of 2-d softmax output, when labels is [batch, height, width]
and logits is [batch, height, width, onehot]
Args:
logits: 4-d Pre-softmax fully-connected output.
labels: 3-d, but not necessarily matching in size.
Returns:
labels: Resized by padding or clipping to match logits.
"""
logits_shape = shapes.tensor_shape(logits)
labels_shape = shapes.tensor_shape(labels)
labels = tf.reshape(labels, [-1, labels_shape[2]])
labels = _PadLabels2d(logits_shape[2], labels)
labels = tf.reshape(labels, [labels_shape[0], -1])
labels = _PadLabels2d(logits_shape[1] * logits_shape[2], labels)
return tf.reshape(labels, [labels_shape[0], logits_shape[1], logits_shape[2]])
| 5,346,710 |
def intx():
"""Returns the default int type, as a string.
(e.g. 'int16', 'int32', 'int64').
# Returns
String, the current default int type.
"""
return _INTX
| 5,346,711 |
def dev_end_hardware_script() -> Response:
"""Designate the end of a hardware script in flask log.
Can be invoked by: curl http://localhost:4567/development/end_hardware_script
"""
return Response(json.dumps({}), mimetype="application/json")
| 5,346,712 |
def heappush(heap, item): # real signature unknown; restored from __doc__
""" heappush(heap, item) -> None. Push item onto heap, maintaining the heap invariant. """
pass
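# The stub above only mirrors the C implementation's docstring; usage of the real
# heapq.heappush from the standard library:
import heapq

heap = []
for value in (5, 1, 4, 3):
    heapq.heappush(heap, value)
print(heap[0])   # 1 -- the smallest element is always at index 0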
| 5,346,713 |
def _init_train(opt):
"""Common initilization stuff for all training process."""
ArgumentParser.validate_prepare_opts(opt)
if opt.train_from:
# Load checkpoint if we resume from a previous training.
checkpoint = load_checkpoint(ckpt_path=opt.train_from)
fields = load_fields(opt.save_data, checkpoint)
transforms_cls = get_transforms_cls(opt._all_transform)
if (hasattr(checkpoint["opt"], '_all_transform') and
len(opt._all_transform.symmetric_difference(
checkpoint["opt"]._all_transform)) != 0):
_msg = "configured transforms is different from checkpoint:"
new_transf = opt._all_transform.difference(
checkpoint["opt"]._all_transform)
old_transf = checkpoint["opt"]._all_transform.difference(
opt._all_transform)
if len(new_transf) != 0:
_msg += f" +{new_transf}"
if len(old_transf) != 0:
_msg += f" -{old_transf}."
logger.warning(_msg)
if opt.update_vocab:
logger.info("Updating checkpoint vocabulary with new vocabulary")
fields, transforms_cls = prepare_fields_transforms(opt)
else:
checkpoint = None
# Data preprocessing stage: convert the data into torchtext.field format
fields, transforms_cls = prepare_fields_transforms(opt)
# Report src and tgt vocab sizes
for side in ['src', 'tgt']:
f = fields[side]
try:
f_iter = iter(f)
except TypeError:
f_iter = [(side, f)]
for sn, sf in f_iter:
if sf.use_vocab:
logger.info(' * %s vocab size = %d' % (sn, len(sf.vocab)))
return checkpoint, fields, transforms_cls
| 5,346,714 |
def main(wf):
"""Run workflow script."""
opts = docopt.docopt(__doc__, argv=wf.args, version=wf.version)
if opts['list']:
return list_actions(opts)
dry_run = opts['--nothing']
log.info('=' * 50)
log.debug('opts=%r', opts)
log.info('looking for workflows using an outdated version '
'of Alfred-Workflow...')
# subprocess.call(['open', '-a', 'Console', wf.logfile])
root = get_workflow_directory()
if not root:
log.critical('could not find your workflow directory')
print('ERROR: could not find workflow directory')
return 1
log.info('workflow directory: %r', root)
blacklisted = load_blacklist()
updated = 0
failed = 0
# loop through subdirectories of workflow directory
# 1. ignore symlinks
# 2. ignore files
# 3. ignore blacklisted workflows
# 4. identify AW workflows
# 5. check version of AW the workflow has
# 6. if AW is outdated, backup the existing copy and replace
# it with an up-to-date version of AW
for dn in os.listdir(root):
p = os.path.join(root, dn)
if os.path.islink(p):
log.info('ignoring symlink: %s', dn)
continue
if not os.path.isdir(p):
log.debug('ignoring non-directory: %s', dn)
continue
try:
info = get_workflow_info(p)
except Exception as err:
log.error('could not read workflow: %s: %s', dn, err)
continue
if not info or not info.aw.dir:
log.debug('not an AW workflow: %s', dn)
continue
if info.id == wf.bundleid:
log.debug('ignoring self')
continue
ok = True
for pat in blacklisted:
if fnmatch(info.id, pat):
log.debug('blacklisted: "%s" matches "%s"', info.id, pat)
log.info('skipping blacklisted workflow: %s', dn)
ok = False
break
if not ok:
continue
log.info('')
log.info('found AW workflow: %s', dn)
log.info(' name: %s', info.name)
log.info(' bundle ID: %s', info.id)
log.info(' AW version: %s', info.aw.version)
if info.aw.version >= MIN_VERSION:
log.info('[OK] workflow "%s" has current version of '
'Alfred-Workflow', info.name)
log.info('')
continue
log.info('[!!] workflow "%s" is using outdated version '
'(%s) of Alfred-Workflow', info.name, info.aw.version)
if not dry_run:
try:
update_workflow(info)
except Exception as err:
failed += 1
log.error('failed to update workflow "%s" (%s): %s',
info.name, info.aw.dir, err, exc_info=True)
log.info('')
continue
log.info('')
updated += 1
if dry_run:
log.info('[DONE] would update %d workflow(s) with a newer version of '
'Alfred-Workflow', updated)
print('Would update {} workflow(s)'.format(updated))
return
else:
if failed:
log.info('[DONE] failed to update %d/%d workflow(s) with a '
'newer version of Alfred-Workflow',
failed, failed + updated)
print('ERROR: Failed to update {}/{} workflow(s)'.format(
failed, failed + updated))
return 1
else:
log.info('[DONE] updated %d workflow(s) with a newer version of '
'Alfred-Workflow', updated)
print('Updated {} workflow(s)'.format(updated))
return
| 5,346,715 |
def compare(isamAppliance1, isamAppliance2):
"""
Compare Update Servers between two appliances
"""
ret_obj1 = get_all(isamAppliance1)
ret_obj2 = get_all(isamAppliance2)
for obj in ret_obj1['data']:
del obj['uuid']
for obj in ret_obj2['data']:
del obj['uuid']
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=['uuid'])
| 5,346,716 |
def rasterize(feature, grid, id_column=None,
include_ids=None,
crs=None, epsg=None, proj4=None,
dtype=np.float32, **kwargs):
"""Rasterize a feature onto the model grid, using
the rasterio.features.rasterize method. Features are intersected
if they contain the cell center.
Parameters
----------
feature : str (shapefile path), list of shapely objects,
or dataframe with geometry column
id_column : str
Column with unique integer identifying each feature; values
from this column will be assigned to the output raster.
grid : grid.StructuredGrid instance
crs : obj
A Python int, dict, str, or pyproj.crs.CRS instance
passed to :meth:`pyproj.crs.CRS.from_user_input`
Can be any of:
- PROJ string
- Dictionary of PROJ parameters
- PROJ keyword arguments for parameters
- JSON string with PROJ parameters
- CRS WKT string
- An authority string [i.e. 'epsg:4326']
- An EPSG integer code [i.e. 4326]
- A tuple of ("auth_name": "auth_code") [i.e ('epsg', '4326')]
- An object with a `to_wkt` method.
- A :class:`pyproj.crs.CRS` class
dtype : dtype
Datatype for the output array
**kwargs : keyword arguments to rasterio.features.rasterize()
https://rasterio.readthedocs.io/en/stable/api/rasterio.features.html
Returns
-------
2D numpy array with intersected values
"""
try:
from rasterio import Affine, features
except ImportError:
print('This method requires rasterio.')
return
if epsg is not None:
warnings.warn("The epsg argument is deprecated. Use crs instead, "
"which requires gisutils >= 0.2",
DeprecationWarning)
if proj4 is not None:
warnings.warn("The epsg argument is deprecated. Use crs instead, "
"which requires gisutils >= 0.2",
DeprecationWarning)
if crs is not None:
if version.parse(gisutils.__version__) < version.parse('0.2.0'):
raise ValueError("The crs argument requires gisutils >= 0.2")
from gisutils import get_authority_crs
crs = get_authority_crs(crs)
trans = grid.transform
kwargs = {}
if isinstance(feature, str):
proj4 = get_proj_str(feature)
kwargs = {'dest_crs': grid.crs}
kwargs = get_input_arguments(kwargs, shp2df)
df = shp2df(feature, **kwargs)
elif isinstance(feature, pd.DataFrame):
df = feature.copy()
elif isinstance(feature, collections.abc.Iterable):
# list of shapefiles
if isinstance(feature[0], str):
proj4 = get_proj_str(feature[0])
kwargs = {'dest_crs': grid.crs}
kwargs = get_input_arguments(kwargs, shp2df)
df = shp2df(feature, **kwargs)
else:
df = pd.DataFrame({'geometry': feature})
elif not isinstance(feature, collections.abc.Iterable):
df = pd.DataFrame({'geometry': [feature]})
else:
print('unrecognized feature input')
return
# handle shapefiles in different CRS than model grid
if 'dest_crs' not in kwargs:
reproject = False
# todo: consolidate rasterize reprojection to just use crs
if crs is not None:
if crs != grid.crs:
df['geometry'] = project(df.geometry.values, crs, grid.crs)
if proj4 is not None:
if proj4 != grid.proj_str:
reproject = True
elif epsg is not None and grid.epsg is not None:
if epsg != grid.epsg:
reproject = True
from fiona.crs import from_epsg, to_string
proj4 = to_string(from_epsg(epsg))
if reproject:
df['geometry'] = project(df.geometry.values, proj4, grid.proj_str)
# subset to include_ids
if id_column is not None and include_ids is not None:
df = df.loc[df[id_column].isin(include_ids)].copy()
# create list of GeoJSON features, with unique value for each feature
if id_column is None:
numbers = range(1, len(df)+1)
# if IDs are strings, get a number for each one
# pd.DataFrame.unique() generally preserves order
elif df[id_column].dtype == object:
unique_values = df[id_column].unique()
values = dict(zip(unique_values, range(1, len(unique_values) + 1)))
numbers = [values[n] for n in df[id_column]]
else:
numbers = df[id_column].tolist()
geoms = list(zip(df.geometry, numbers))
result = features.rasterize(geoms,
out_shape=(grid.nrow, grid.ncol),
transform=trans)
assert result.sum(axis=(0, 1)) != 0, "Nothing was intersected!"
return result.astype(dtype)
| 5,346,717 |
def get_random():
"""
Retrieves the current issue of XKCD, chooses a random issue between 1 and the current issue number, and returns it as a JSON object.
Returns None if a requests error occurs.
"""
return get_issue(random.randint(1, int(get_current()["num"])))
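# One possible shape of the helpers assumed by get_random(); the helper names are
# this module's, while the JSON endpoints below are xkcd's public ones.
import requests

def get_current():
    return requests.get("https://xkcd.com/info.0.json", timeout=10).json()

def get_issue(num):
    return requests.get("https://xkcd.com/{}/info.0.json".format(num), timeout=10).json()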
| 5,346,718 |
def get_elements():
#elements = driver.find_elements(By.XPATH, '//div[@class="view-items-wrp"]/a')
""" driver.find_elements(By.XPATH, '//div[@class="view-items-wrp"]/a') """
heading3 = driver.find_elements_by_tag_name("h3")
elements = driver.find_elements_by_tag_name("a")
updatedLength = len(heading3)
filteredList = []
for item in heading3:
print(item.get_attribute("textContent"))
filteredHREF = []
for item in elements:
filteredHREF.append(item.get_attribute("href"))
""" substring = ["news", "video"]
filteredList = [word for word in filteredHREF if substring[0] or substring[1] in word]
for item in filteredList:
print(item) """
newsList = []
for item in filteredHREF:
if "https://www.ufc.com/news/" or "https://www.ufc.com/video/" or "https://www.ufc.com/gallery/" in item:
newsList.append(item)
print(f'This is news: {item}')
| 5,346,719 |
def model_to_model_2x_wide(model_from: tf.Module,
model_to: tf.Module,
epsilon: float = 0.1):
"""Expands a model to a wider version.
Also makes sure that the output of the model is not changed after expanding.
For example:
```
model_narrow = tf.keras.Sequential()
model_narrow.add(tf.keras.Input(shape=(3,)))
model_narrow.add(tf.keras.layers.Dense(4))
model_narrow.add(tf.keras.layers.Dense(1))
model_wide = tf.keras.Sequential()
model_wide.add(tf.keras.Input(shape=(6,)))
model_wide.add(tf.keras.layers.Dense(8))
model_wide.add(tf.keras.layers.Dense(1))
model_to_model_2x_wide(model_narrow, model_wide)
assert model_narrow([[1, 2, 3]]) == model_wide([[1, 1, 2, 2, 3, 3]])
```
We assume that `model_from` and `model_to` has the same architecture and only
widths of them differ.
Args:
model_from: input model to expand.
model_to: output model whose variables will be assigned expanded values
according to `model_from`.
epsilon: the noise ratio that will be added, when splitting `var_from`.
"""
for w_from, w_to in zip(model_from.trainable_variables,
model_to.trainable_variables):
logging.info("expanding %s %s to %s %s",
w_from.name, w_from.shape, w_to.name, w_to.shape)
var_to_var(w_from, w_to, epsilon=epsilon)
| 5,346,720 |
def run_script(args, script):
""" Run the script with the arguments provided """
# check for visualization script
visualization_script = utilities.get_package_file(script, "Rscript")
if not visualization_script:
sys.exit("ERROR: Unable to find script "+script)
try:
command = [visualization_script]+args[2:]
subprocess.call(command)
except ( subprocess.CalledProcessError, EnvironmentError):
sys.exit("Error: Unable to run: " +" ".join(command))
| 5,346,721 |
def get_fasta_readlengths(fasta_file):
"""
Get a sorted list of contig lengths
:return: sorted list of read lengths
"""
lens = []
with open_fasta_reader(fasta_file) as f:
for record in f:
lens.append(len(record.sequence))
lens.sort()
return lens
| 5,346,722 |
def postprocess(p, gt, width_and_height, p_binary, false_positives=False, false_negatives=False):
"""
This function does matching and then postprocessing of p's and gt's
:param p: the objects given from rcnn
:param gt: the objects we get from the ground truth
:param width_and_height: the width and height of the image
:param p_binary: binary (object vs. background) scores for the rcnn detections
:param false_positives: if True, keep unmatched detections and pair them with a background ground truth
:param false_negatives: if True, add unmatched ground truth objects as if they were detections
:return: info_image: a list which contains the postprocessed p, rectangles for p, postprocessed gt, rectangles
for gt, width and height, and the binary scores for p and gt
"""
len_p = len(p)
len_gt = len(gt)
elements_in_p = [i for i in range(len_p)]
elements_in_gt = [i for i in range(len_gt)]
matching_table = create_matching_table(p, gt)
max_number_of_matches = min(matching_table.shape[0], matching_table.shape[1])
new_p = []
new_gt = []
new_rects_p = []
new_rects_gt = []
new_p_binary = []
new_gt_binary = []
threshold = 0.5
# on this part we create the real matches between p and gt
for _ in range(max_number_of_matches):
best_match = unravel_index(matching_table.argmax(), matching_table.shape)
if matching_table[best_match[0], best_match[1]] > threshold: # check if it is a different value from 0
matching_table[best_match[0], :] = 0.
matching_table[:, best_match[1]] = 0.
new_p.append(p[best_match[0], :21])
new_p_binary.append(p_binary[best_match[0]])
new_gt_binary.append(np.array([1., 0.]))
new_rects_p.append(p[best_match[0], 21:])
new_gt.append(gt[best_match[1], :21])
new_rects_gt.append(gt[best_match[1], 21:])
elements_in_p.remove(best_match[0])
elements_in_gt.remove(best_match[1])
# here we add the matches of false positives by inserting background class on the given rectangles on the ground
# truth
if false_positives:
for element in elements_in_p:
new_p.append(p[element, :21])
new_p_binary.append(p_binary[element])
new_rects_p.append(p[element, 21:])
new_gt.append(create_background_peak_array())
new_gt_binary.append(np.array([0., 1.])) # 0 - not background; 1 - background
new_rects_gt.append(p[element, 21:])
# here we deal with false negatives, by adding them as r-cnn outputs equal to the ground truth
if false_negatives:
for element in elements_in_gt:
new_p.append(gt[element, :21])
new_p_binary.append(np.array([1., 0.]))
new_rects_p.append(gt[element, 21:])
new_gt.append(gt[element, :21])
new_gt_binary.append((np.array([1., 0.])))
new_rects_gt.append(gt[element, 21:])
# convert all the lists to numpy arrays
new_p = np.asarray(new_p)
new_rects_p = np.asarray(new_rects_p)
new_gt = np.asarray(new_gt)
new_rects_gt = np.asarray(new_rects_gt)
# add all the postprocessed information to a list
info_image = [new_p, new_gt, new_rects_p, new_rects_gt, width_and_height, new_p_binary, new_gt_binary]
return info_image
| 5,346,723 |
def deg(x):
"""
Convert an array of torsion angles in radians to torsion degrees
ranging from -180 to 180.
@param x: array of angles
@type x: numpy array
@rtype: numpy array
"""
from csb.bio.structure import TorsionAngles
func = numpy.vectorize(TorsionAngles.deg)
return func(x)
| 5,346,724 |
def run_median_trial():
"""Generate table for Median Trial."""
tbl = DataTable([10,15,15],['N', 'median_time', 'sort_median'])
trials = [2**k+1 for k in range(8,20)]
for n in trials:
t_med = 1000*min(timeit.repeat(stmt='assert(linear_median(a) == {}//2)'.format(n),
setup='''
import random
from ch01.challenge import linear_median
a = list(range({}))
random.shuffle(a)
'''.format(n), repeat=10, number=5))/5
t_sort = 1000*min(timeit.repeat(stmt='assert(median_from_sorted_list(a) == {0}//2)'.format(n),
setup='''
import random
from ch01.challenge import median_from_sorted_list
a = list(range({}))
random.shuffle(a)
'''.format(n), repeat=10, number=5))/5
tbl.row([n, t_med, t_sort])
return tbl
| 5,346,725 |
def regroup(X, N):
"""
Regroups the rows and columns of X such that rows/cols
that are N apart in X, are adjacent in Y. If N is a
2 element vector, N[0] is used for rows and N[1] is used
for columns.
Parameters:
X: m by n matrix to be regrouped.
N: Integer or two element vector.
Returns:
Y: Regrouped matrix.
"""
m, n = X.shape
if isinstance(N, int):
N = [N, N]
if m % N[0] != 0 or n % N[1] != 0:
raise ValueError('X dimensions need to be multiple\
of elements in N')
row_ind = np.ravel(
[[i + k for i in np.arange(0, m, N[0])] for k in range(N[0])])
col_ind = np.ravel(
[[i + k for i in np.arange(0, n, N[1])] for k in range(N[1])])
Y = X[row_ind, :]
Y = Y[:, col_ind]
return Y
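# Example: for a 4x4 matrix and N=2, rows/columns that are 2 apart become adjacent.
import numpy as np

X = np.arange(16).reshape(4, 4)
Y = regroup(X, 2)
print(Y[0])   # [0 2 1 3] -- columns 0 and 2 of the original first row are now adjacent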
| 5,346,726 |
def return_estimators(n_components):
"""Returns all of the estimators that can be used to generate models.
A larger selection of possible estimators has been commented out, but
could be uncommented."""
estimators = [
('PCArandom',
decomposition.PCA(n_components=n_components, svd_solver='randomized',
whiten=True))
]
# estimators = [
# ('PCArandom',
# decomposition.PCA(n_components=n_components,
# svd_solver='randomized',
# whiten=True)),
# ('PCAfull',
# decomposition.PCA(n_components=n_components,
# svd_solver='full',
# whiten=True)),
# ('PCAarpack',
# decomposition.PCA(n_components=n_components,
# svd_solver='arpack',
# whiten=True)),
# ('PCAauto',
# decomposition.PCA(n_components=n_components,
# svd_solver='auto',
# whiten=True))
# ]
return estimators
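# Example: fit each configured estimator on random data (assumes the module-level
# `from sklearn import decomposition` import used inside return_estimators).
import numpy as np

X = np.random.rand(100, 20)
for name, estimator in return_estimators(n_components=5):
    estimator.fit(X)
    print(name, estimator.explained_variance_ratio_.shape)   # PCArandom (5,)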
| 5,346,727 |
def _check_type_picks(picks):
"""helper to guarantee type integrity of picks"""
err_msg = 'picks must be None, a list or an array of integers'
if picks is None:
pass
elif isinstance(picks, list):
if not all(isinstance(i, int) for i in picks):
raise ValueError(err_msg)
picks = np.array(picks)
elif isinstance(picks, np.ndarray):
if not picks.dtype.kind == 'i':
raise ValueError(err_msg)
else:
raise ValueError(err_msg)
return picks
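# Example behaviour: integer lists are converted to arrays, integer arrays and None
# pass through, and anything else raises ValueError.
import numpy as np

print(_check_type_picks([0, 2, 3]))      # [0 2 3]
print(_check_type_picks(np.arange(4)))   # [0 1 2 3] (returned unchanged)
print(_check_type_picks(None))           # None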
| 5,346,728 |
def get_test_standard_scaler_str():
"""
Get a scikit-learn StandardScaler code str
"""
test_code = cleandoc("""
standard_scaler = StandardScaler()
encoded_data = standard_scaler.fit_transform(df)
""")
return test_code
| 5,346,729 |
def add_command(
command_list: List[Tuple[re.Pattern, callable]], func: callable, command_str: str
) -> List[Tuple[re.Pattern, callable]]:
"""Add a function and the command pattern to the command list.
Args:
command_list: list of (pattern, function) pairs to extend
func: function that will be called when the pattern matches
command_str: command string that specifies the pattern
Returns:
The extended command list
"""
command_pattern = build_command_pattern(command_str)
command_list.append((command_pattern, func))
return command_list
| 5,346,730 |
def get_clusters_and_critical_nodes(G, k, rho_star, phi_in):
"""
The implementation of the main body of the partitioning Algorithm.
The main while-loop of the algorithm is executed as long as a refinement is still possible.
:param phi_in: An algorithm parameter used to lower bound the inner conductance of each cluster
:param rho_star: A technical parameter of the algorithm
:param G: A networkx graph
:param k: The (supposed) number of clusters
:return: a list containing an l-wise partitioning of the nodes of G, for some l <= k
"""
# A list of vertices in the graph G
vertices = list(G.nodes())
# Initially the graph contains one cluster P_1 = V with core set core_1 = P_1.
P_1 = vertices[:]
core_1 = P_1[:]
# num_clusters is the variable denoting the current number of clusters
num_clusters = 1
# clusters is a list storing the current cluster structure of G (i.e. P_1, ..., P_l)
clusters = [P_1]
# core_sets is a list containing the current core_subsets of each cluster.
# (i.e. core_1, ..., core_(num_clusters) with core_i being a subset of P_i)
core_sets = [core_1]
# A list of lists, where each element grouped_critical_nodes[i] is a list of critical nodes from the tree T_i of
# cluster clusters[i]
grouped_critical_nodes = []
# The main loop of the algorithm. We continue as long as an update is possible
overall_update_is_found = True
while overall_update_is_found:
# At the beginning of the loop there is no update found
overall_update_is_found = False
# The main loop of the Partition Algorithm. We continue as long as a GT_update is possible
GT_update_is_found = True
while GT_update_is_found:
# First we check if a GT_update is possible
GT_update_is_found, index_cluster_to_update = check_if_GT_update_is_possible(G, clusters, core_sets,
phi_in)
if GT_update_is_found:
GT_update_is_done = False
# Notation of the corresponding sets of vertices
P_i = clusters[index_cluster_to_update]
core_i = core_sets[index_cluster_to_update]
S = cheeger_cut.cheeger_cut(G.subgraph(P_i))
S_complement = diff(vertices, S)
S_plus = intersect(S, core_i)
S_plus_bar = intersect(S_complement, core_i)
S_minus = intersect(diff(P_i, core_i), S)
S_minus_bar = intersect(diff(P_i, core_i), S_complement)
# Without loss of generality we assume vol(S_plus) < vol(core_i) / 2
if vol(G, S_plus) > vol(G, S_plus_bar):
S_plus, S_plus_bar = S_plus_bar, S_plus
S_minus, S_minus_bar = S_minus_bar, S_minus
# First "if" in the algorithm
if is_first_if_condition_satisfied(G, S_plus, S_plus_bar, k, num_clusters, rho_star):
make_new_cluster_with_subset_T_bar_of_core_i(
S_plus, S_plus_bar, clusters, core_sets, index_cluster_to_update)
num_clusters += 1
# A sanity check update
num_clusters = min(num_clusters, k)
GT_update_is_done = True
# Second "if" in the algorithm
if not GT_update_is_done and is_second_if_condition_satisfied(G, S_plus, S_plus_bar, core_i, k):
update_core_to_subset_T_or_T_bar(G, S_plus, S_plus_bar, core_sets, index_cluster_to_update)
GT_update_is_done = True
# Third "if" in the algorithm
if not GT_update_is_done and is_third_if_condition_satisfied(G, S_minus, k, num_clusters, rho_star):
make_new_cluster_with_subset_T_of_P_i(S_minus, clusters, core_sets, index_cluster_to_update)
num_clusters += 1
# A sanity check update
num_clusters = min(num_clusters, k)
GT_update_is_done = True
# At this point only a refinement of the partition is possible
if not GT_update_is_done:
# If there is a cluster P_j s.t. w(P_i - core_i -> P_i) < w(P_i - core_i -> P_j),
# then merge (P_i - core_i) with argmax_(P_j){w(P_i - core_i -> P_j)}
P_i_minus_core_i = diff(P_i, core_i)
# Find the index j of argmax_(P_j){w(P_i - core_i -> P_j)}.
best_cluster_index = find_cluster_P_j_that_maximises_weight_from_T_to_P_j(G, P_i_minus_core_i,
clusters)
# Forth "if" in the algorithm.
if best_cluster_index != index_cluster_to_update:
move_subset_T_from_P_i_to_P_j(P_i_minus_core_i, clusters, index_cluster_to_update,
best_cluster_index)
GT_update_is_done = True
if not GT_update_is_done:
# If there is a cluster P_j s.t. w(S_minus -> P_i) < w(S_minus -> P_j),
# then merge S_minus with argmax_(P_j){w(S_minus -> P_j)}
# Find the index j of argmax_(P_j){w(S_minus -> P_j)}.
best_cluster_index = find_cluster_P_j_that_maximises_weight_from_T_to_P_j(G, S_minus, clusters)
# Fifth "if" in the algorithm
if best_cluster_index != index_cluster_to_update:
move_subset_T_from_P_i_to_P_j(S_minus, clusters, index_cluster_to_update,
best_cluster_index)
GT_update_is_done = True
if not GT_update_is_done:
raise Exception('No GT_update performed in iteration')
grouped_critical_nodes = []
# Check if critical nodes need refinements
for i in range(len(clusters)):
# Get the list of critical nodes in the degree based construction of the graph G_i = G[P_i]
P_i = clusters[i]
core_i = core_sets[i]
G_i = G.subgraph(P_i)
T_i = tree.Tree()
T_i.make_tree(G_i, "degree")
critical_nodes_of_T_i = T_i.get_critical_nodes()
grouped_critical_nodes = grouped_critical_nodes + [critical_nodes_of_T_i]
for node in critical_nodes_of_T_i:
# Notation
N = node.vertices
N_complement = diff(vertices, N)
N_plus = intersect(N, core_i)
N_plus_bar = intersect(N_complement, core_i)
N_minus = intersect(diff(P_i, core_i), N)
N_minus_bar = intersect(diff(P_i, core_i), N_complement)
# Sixth "if" of the algorithm, first "if" of the refinement of the nodes,
if is_sixth_if_condition_satisfied(G, N_plus, N_plus_bar, k, num_clusters, rho_star):
make_new_cluster_with_subset_T_bar_of_core_i(
N_plus, N_plus_bar, clusters, core_sets, i)
num_clusters += 1
# A sanity check update
num_clusters = min(num_clusters, k)
overall_update_is_found = True
break
# Seventh "if" of the algorithm, second if of the refinement of the nodes
if not overall_update_is_found and is_seventh_if_condition_satisfied(G, N_plus, core_i, k):
update_core_to_subset_T_or_T_bar(G, N_plus, N_plus_bar, core_sets, i)
overall_update_is_found = True
break
# We attempt to move N_minus to the cluster P_j that maximises w(N_minus -> P_j)
if not overall_update_is_found and vol(G, N_minus) <= vol(G, P_i) / 2:
# Find the index j of argmax_(P_j){w(N_minus -> P_j)}.
# If best_cluster_index = i, then the eighth "if" is not satisfied
best_cluster_index = find_cluster_P_j_that_maximises_weight_from_T_to_P_j(G, N_minus, clusters)
# Eighth "if" of the algorithm, third if of the refinement of the nodes.
if weight(G, N_minus, P_i) < weight(G, N_minus, clusters[best_cluster_index]):
move_subset_T_from_P_i_to_P_j(N_minus, clusters, i,
best_cluster_index)
overall_update_is_found = True
break
if overall_update_is_found:
break
return clusters, grouped_critical_nodes
| 5,346,731 |
def scatter_row_inplace(data, row_index, value):
"""Write the value into the data tensor using the row index inplace.
This is an inplace write so it will break the autograd.
Parameters
----------
data : Tensor
The data tensor to be updated.
row_index : Tensor
A 1-D integer tensor containing which rows to be updated.
value : Tensor
The new value.
"""
pass
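# A minimal PyTorch sketch of the semantics described in the docstring (an assumption
# for illustration; the real backend dispatches to whichever framework is in use).
import torch

def _scatter_row_inplace_torch(data, row_index, value):
    data[row_index.long()] = value   # in-place row write; breaks the autograd history

x = torch.zeros(4, 3)
_scatter_row_inplace_torch(x, torch.tensor([0, 2]), torch.ones(2, 3))
print(x[0], x[2])   # rows 0 and 2 are now all ones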
| 5,346,732 |
def get_edges_out_for_vertex(edges: list, vertex: int) -> list:
"""Get a sublist of edges that have the specified vertex as first element
:param edges: edges of the graph
:param vertex: vertex of which we want to find the corresponding edges
:return: selected edges
"""
return [e for e in edges if e[0] == vertex]
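# Example with edges given as (source, target) tuples:
edges = [(0, 1), (1, 2), (0, 3), (2, 0)]
print(get_edges_out_for_vertex(edges, 0))   # [(0, 1), (0, 3)]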
| 5,346,733 |
def pellet_plot_multi_unaligned(FEDs, shade_dark, lights_on,
lights_off,**kwargs):
"""
FED3 Viz: Plot cumulaive pellet retrieval for multiple FEDs, keeping the
x-axis to show absolute time.
Parameters
----------
FEDs : list of FED3_File objects
FED3 files (loaded by load.FED3_File)
shade_dark : bool
Whether to shade lights-off periods
lights_on : int
Integer between 0 and 23 denoting the start of the light cycle.
lights_off : int
Integer between 0 and 23 denoting the end of the light cycle.
**kwargs :
ax : matplotlib.axes.Axes
Axes to plot on, a new Figure and Axes are
created if not passed
date_filter : array
A two-element array of datetimes (start, end) used to filter
the data
**kwargs also allows FED3 Viz to pass all settings to all functions.
Returns
-------
fig : matplotlib.figure.Figure
"""
if not isinstance(FEDs, list):
FEDs = [FEDs]
for file in FEDs:
assert isinstance(file, FED3_File),'Non FED3_File passed to pellet_plot_multi()'
if 'ax' not in kwargs:
fig, ax = plt.subplots(figsize=(7,3.5), dpi=150)
else:
ax = kwargs['ax']
min_date = np.datetime64('2100')
max_date = np.datetime64('1970')
for file in FEDs:
df = file.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
x = df.index
y = df['Pellet_Count']
ax.plot(x, y, label=file.filename, alpha=.6, lw=1)
if max(x) > max_date:
max_date = max(x)
if min(x) < min_date:
min_date = min(x)
ax.set_xlabel('Time (h)')
date_format_x(ax, min_date, max_date)
ax.set_ylabel('Cumulative Pellets')
title = ('Pellets Retrieved for Multiple FEDs')
ax.set_title(title)
if shade_dark:
shade_darkness(ax, min_date, max_date,
lights_on=lights_on,
lights_off=lights_off)
if len(FEDs) < 10:
ax.legend(bbox_to_anchor=(1,1), loc='upper left')
plt.tight_layout()
return fig if 'ax' not in kwargs else None
| 5,346,734 |
def print_results(results, num_tests):
"""
Prints the results of regression tests.
results -- a list of all testing results
num_tests -- the total number of tests that were run
"""
if par.format_for_diff:
failed = sum([item.split(" ")[0] == par.format_for_diff.split(" ")[0] for item in results])
else:
failed = sum([item.split(":")[0] == "Error" for item in results])
output = "Test Results: " + str(num_tests - failed) + "/" + str(num_tests) + " passed."
line = ('=' * len(output))
print(line)
print(output)
print(line)
print(*results, sep = '\n') if results else print("All tests pass!!!")
print(line)
print(output)
print(line)
| 5,346,735 |
def generateMostProbesPerDay(database):
"""Generate the Top 20 most probes per day from the MySQL Database"""
try:
conn = pymysql.connect(host=conf.MYSQL_HOST, port=conf.MYSQL_PORT, user=conf.MYSQL_USER, passwd=conf.MYSQL_PWD, db=database)
except pymysql.MySQLError as e:
print(e.args[1])
sys.exit(1)
cur = conn.cursor()
sql = "SELECT COUNT(session), timestamp " \
"FROM auth " \
"GROUP BY DAYOFYEAR(timestamp) " \
"ORDER BY COUNT(session) DESC " \
"LIMIT 20"
cur.execute(sql)
if cur.rowcount != 0:
timestamp = []
countSession = []
for (countSess, dt) in cur.fetchall():
timestamp.append(dt.strftime('%m-%d-%Y'))
countSession.append(countSess)
cur.close()
conn.close()
title = 'Most Probes per day (Top 20)'
generateBarGraph(timestamp, countSession, title, 'most_probes_per_day_' + database)
else:
noGraphGenerated('most_probes_per_day_' + database)
| 5,346,736 |
def collect_ocs_logs(dir_name, ocp=True, ocs=True, mcg=False):
"""
Collects OCS logs
Args:
dir_name (str): directory name to store OCS logs. Logs will be stored
in dir_name suffix with _ocs_logs.
ocp (bool): Whether to gather OCP logs
ocs (bool): Whether to gather OCS logs
mcg (bool): True for collecting MCG logs (noobaa db dump)
"""
if not (
'KUBECONFIG' in os.environ
or os.path.exists(os.path.expanduser('~/.kube/config'))
):
log.warning(
"Cannot find $KUBECONFIG or ~/.kube/config; "
"skipping log collection"
)
return
log_dir_path = os.path.join(
os.path.expanduser(ocsci_config.RUN['log_dir']),
f"failed_testcase_ocs_logs_{ocsci_config.RUN['run_id']}",
f"{dir_name}_ocs_logs"
)
if ocs:
latest_tag = ocsci_config.REPORTING.get(
'ocs_must_gather_latest_tag',
ocsci_config.REPORTING.get(
'default_ocs_must_gather_latest_tag', ocsci_config.DEPLOYMENT[
'default_latest_tag'
]
)
)
ocs_log_dir_path = os.path.join(log_dir_path, 'ocs_must_gather')
ocs_must_gather_image = ocsci_config.REPORTING['ocs_must_gather_image']
ocs_must_gather_image_and_tag = f"{ocs_must_gather_image}:{latest_tag}"
if ocsci_config.DEPLOYMENT.get('disconnected'):
ocs_must_gather_image_and_tag = (
mirror_image(ocs_must_gather_image_and_tag)
)
run_must_gather(ocs_log_dir_path, ocs_must_gather_image_and_tag)
if ocp:
ocp_log_dir_path = os.path.join(log_dir_path, 'ocp_must_gather')
ocp_must_gather_image = ocsci_config.REPORTING['ocp_must_gather_image']
if ocsci_config.DEPLOYMENT.get('disconnected'):
ocp_must_gather_image = (
mirror_image(ocp_must_gather_image)
)
run_must_gather(ocp_log_dir_path, ocp_must_gather_image)
run_must_gather(
ocp_log_dir_path, ocp_must_gather_image,
'/usr/bin/gather_service_logs worker'
)
if mcg:
counter = 0
while counter < 5:
counter += 1
try:
collect_noobaa_db_dump(log_dir_path)
break
except CommandFailed as ex:
log.error(f"Failed to dump noobaa DB! Error: {ex}")
sleep(30)
| 5,346,737 |
def sync_via_mrmsdtw(f_chroma1: np.ndarray,
f_chroma2: np.ndarray,
f_DLNCO1: np.ndarray = None,
f_DLNCO2: np.ndarray = None,
input_feature_rate: float = 50,
step_sizes: np.ndarray = np.array([[1, 0], [0, 1], [1, 1]], np.int32),
step_weights: np.ndarray = np.array([1.0, 1.0, 1.0], np.float64),
threshold_rec: int = 10000, win_len_smooth: np.ndarray = np.array([201, 101, 21, 1]),
downsamp_smooth: np.ndarray = np.array([50, 25, 5, 1]),
verbose: bool = False,
dtw_implementation: str = 'synctoolbox',
normalize_chroma: bool = True,
chroma_norm_ord: int = 2,
chroma_norm_threshold: float = 0.001):
"""Compute memory-restricted multi-scale DTW (MrMsDTW) using chroma and (optionally) DLNCO features.
MrMsDTW is performed on multiple levels that get progressively finer, with rectangular constraint
regions defined by the alignment found on the previous, coarser level.
If DLNCO features are provided, these are used on the finest level in addition to chroma
to provide higher synchronization accuracy.
Parameters
----------
f_chroma1 : np.ndarray [shape=(12, N)]
Chroma feature matrix of the first sequence
f_chroma2 : np.ndarray [shape=(12, M)]
Chroma feature matrix of the second sequence
f_DLNCO1 : np.ndarray [shape=(12, N)]
DLNCO feature matrix of the first sequence (optional, default: None)
f_DLNCO2 : np.ndarray [shape=(12, M)]
DLNCO feature matrix of the second sequence (optional, default: None)
input_feature_rate: float
Input feature rate of the chroma features (default: 50)
step_sizes: np.ndarray
DTW step sizes (default: np.array([[1, 0], [0, 1], [1, 1]]))
step_weights: np.ndarray
DTW step weights (np.array([1.0, 1.0, 1.0]))
threshold_rec: int
Defines the maximum area that is spanned by the rectangle of two
consecutive elements in the alignment (default: 10000)
win_len_smooth : np.ndarray
Window lengths for chroma feature smoothing (default: np.array([201, 101, 21, 1]))
downsamp_smooth : np.ndarray
Downsampling factors (default: np.array([50, 25, 5, 1]))
verbose : bool
Set `True` for visualization (default: False)
dtw_implementation : str
DTW implementation, librosa or synctoolbox (default: synctoolbox)
normalize_chroma : bool
Set `True` to normalize input chroma features after each downsampling
and smoothing operation.
chroma_norm_ord: int
Order of chroma normalization, relevant if ``normalize_chroma`` is True.
(default: 2)
chroma_norm_threshold: float
If the norm falls below threshold for a feature vector, then the
normalized feature vector is set to be the unit vector. Relevant, if
``normalize_chroma`` is True (default: 0.001)
Returns
-------
alignment : np.ndarray [shape=(2, T)]
Resulting warping path
"""
# If DLNCO features are given as input, high resolution MrMsDTW is activated.
high_res = False
if f_DLNCO1 is not None and f_DLNCO2 is not None:
high_res = True
if high_res and (f_chroma1.shape[1] != f_DLNCO1.shape[1] or f_chroma2.shape[1] != f_DLNCO2.shape[1]):
raise ValueError('Chroma and DLNCO features must be of the same length.')
if downsamp_smooth[-1] != 1 or win_len_smooth[-1] != 1:
raise ValueError('The downsampling factor of the last iteration must be equal to 1, i.e.'
'at the last iteration, it is computed at the input feature rate!')
num_iterations = win_len_smooth.shape[0]
cost_matrix_size_old = tuple()
feature_rate_old = input_feature_rate / downsamp_smooth[0]
alignment = None
total_computation_time = 0.0
for it in range(num_iterations):
tic1 = time.perf_counter()
# Smooth and downsample given raw features
f_chroma1_cur, _ = smooth_downsample_feature(f_chroma1,
input_feature_rate=input_feature_rate,
win_len_smooth=win_len_smooth[it],
downsamp_smooth=downsamp_smooth[it])
f_chroma2_cur, feature_rate_new = smooth_downsample_feature(f_chroma2,
input_feature_rate=input_feature_rate,
win_len_smooth=win_len_smooth[it],
downsamp_smooth=downsamp_smooth[it])
if normalize_chroma:
f_chroma1_cur = normalize_feature(f_chroma1_cur,
norm_ord=chroma_norm_ord,
threshold=chroma_norm_threshold)
f_chroma2_cur = normalize_feature(f_chroma2_cur,
norm_ord=chroma_norm_ord,
threshold=chroma_norm_threshold)
# Project path onto new resolution
cost_matrix_size_new = (f_chroma1_cur.shape[1], f_chroma2_cur.shape[1])
if alignment is None:
# Initialize the alignment with the start and end frames of the feature sequence
anchors = np.array([[0, f_chroma1_cur.shape[1] - 1], [0, f_chroma2_cur.shape[1] - 1]])
else:
projected_alignment = project_alignment_on_a_new_feature_rate(alignment=alignment,
feature_rate_old=feature_rate_old,
feature_rate_new=feature_rate_new,
cost_matrix_size_old=cost_matrix_size_old,
cost_matrix_size_new=cost_matrix_size_new)
anchors = derive_anchors_from_projected_alignment(projected_alignment=projected_alignment,
threshold=threshold_rec)
# Cost matrix and warping path computation
if high_res and it == num_iterations - 1:
# Compute cost considering chroma and pitch onset features and alignment only in the last iteration,
# where the features are at the finest level.
cost_matrices_step1 = compute_cost_matrices_between_anchors(f_chroma1=f_chroma1_cur,
f_chroma2=f_chroma2_cur,
f_DLNCO1=f_DLNCO1,
f_DLNCO2=f_DLNCO2,
anchors=anchors)
else:
cost_matrices_step1 = compute_cost_matrices_between_anchors(f_chroma1=f_chroma1_cur,
f_chroma2=f_chroma2_cur,
anchors=anchors)
wp_list = compute_warping_paths_from_cost_matrices(cost_matrices_step1,
step_sizes=step_sizes,
step_weights=step_weights,
implementation=dtw_implementation)
# Concatenate warping paths
wp = build_path_from_warping_paths(warping_paths=wp_list,
anchors=anchors)
anchors_step1 = None
wp_step1 = None
num_rows_step1 = 0
num_cols_step1 = 0
ax = None
toc1 = time.perf_counter()
if verbose and cost_matrices_step1 is not None:
anchors_step1 = np.array(anchors, copy=True)
wp_step1 = np.array(wp, copy=True)
num_rows_step1, num_cols_step1 = np.sum(np.array([dtw_mat.shape for dtw_mat in cost_matrices_step1], int),
axis=0)
fig, ax = sync_visualize_step1(cost_matrices_step1,
num_rows_step1,
num_cols_step1,
anchors,
wp)
tic2 = time.perf_counter()
# Compute neighboring anchors and refine alignment using local path between neighboring anchors
anchor_indices_in_warping_path = find_anchor_indices_in_warping_path(wp, anchors=anchors)
# Compute neighboring anchors for refinement
neighboring_anchors, neighboring_anchor_indices = \
derive_neighboring_anchors(wp, anchor_indices=anchor_indices_in_warping_path)
if neighboring_anchor_indices.shape[0] > 1 \
and it == num_iterations - 1 and high_res:
cost_matrices_step2 = compute_cost_matrices_between_anchors(f_chroma1=f_chroma1_cur,
f_chroma2=f_chroma2_cur,
f_DLNCO1=f_DLNCO1,
f_DLNCO2=f_DLNCO2,
anchors=neighboring_anchors)
else:
cost_matrices_step2 = compute_cost_matrices_between_anchors(f_chroma1=f_chroma1_cur,
f_chroma2=f_chroma2_cur,
anchors=neighboring_anchors)
wp_list_refine = compute_warping_paths_from_cost_matrices(cost_matrices=cost_matrices_step2,
step_sizes=step_sizes,
step_weights=step_weights,
implementation=dtw_implementation)
wp = __refine_wp(wp, anchors, wp_list_refine, neighboring_anchors, neighboring_anchor_indices)
toc2 = time.perf_counter()
computation_time_it = toc2 - tic2 + toc1 - tic1
total_computation_time += computation_time_it
alignment = wp
feature_rate_old = feature_rate_new
cost_matrix_size_old = cost_matrix_size_new
if verbose and cost_matrices_step2 is not None:
sync_visualize_step2(ax,
cost_matrices_step2,
wp,
wp_step1,
num_rows_step1,
num_cols_step1,
anchors_step1,
neighboring_anchors)
print('Level {} computation time: {:.2f} seconds'.format(it, computation_time_it))
if verbose:
print('Computation time of MrMsDTW: {:.2f} seconds'.format(total_computation_time))
return alignment
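# Hedged usage sketch: the audio paths are placeholders, and the chroma features are
# taken from librosa here; the hop length must match input_feature_rate (50 Hz).
import librosa

y1, sr = librosa.load("performance1.wav", sr=22050)
y2, _ = librosa.load("performance2.wav", sr=22050)
hop = sr // 50   # 441 samples -> 50 feature frames per second
f_chroma1 = librosa.feature.chroma_stft(y=y1, sr=sr, hop_length=hop)
f_chroma2 = librosa.feature.chroma_stft(y=y2, sr=sr, hop_length=hop)
wp = sync_via_mrmsdtw(f_chroma1=f_chroma1, f_chroma2=f_chroma2, input_feature_rate=50)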
| 5,346,738 |
def test_key_usage(crypto_protocol_name: str) -> None:
"""Generate keys, sign and verify previously created signature."""
private_key = generate_private_key(crypto_protocol_name)
public_key = private_key.public_key()
data = b"Some randome data we would like to sign"
signature = private_key.sign(data)
try:
# Passing the method without error means the signature is valid.
public_key.verify_signature(signature, data)
except InvalidSignature:
pytest.fail("Should not raise InvalidSignature")
| 5,346,739 |
def simulate_beta_binomial(
K, D, sigma2, theta, mu=0, invlink=logistic, seed=None):
"""Simulates from binomial Gaussian process with Beta latent noise.
Args:
K: Cell-state kernel, for example as generated by create_linear_kernel
or create_rbf_kernel.
D: Array of total counts.
sigma2: Kernel variance component.
theta: Dispersion parameter. If zero, sample from a regular Binomial
distribution instead.
mu: Optional fixed effects on a logit scale. Defaults to zero, which
corresponds to a binomial mean of 0.5.
invlink: Inverse link function. Defaults to invlogit.
seed: Random seed.
Returns:
List with alternative counts, latent rates as well as sampled binomial
means.
"""
D = atleast_2d_column(D)
n, p = D.shape
rng = np.random.default_rng(seed)
if sigma2 == 0:
latent = mu * np.ones((n, p))
else:
mu = mu * np.ones((n, 1))
latent = _sample_normal(p, mu, sigma2*K, rng)
beta_mean = invlink(latent)
if theta > 0:
binomial_mean = rng.beta(a=beta_mean / theta, b=(1-beta_mean) / theta)
else:
binomial_mean = beta_mean
a = rng.binomial(n=D, p=binomial_mean)
return {'A': a, 'beta_mean': beta_mean, 'binomial_mean': binomial_mean}
| 5,346,740 |
def convert_to_floats(tsi):
"""
A helper function that takes all of the fields of a TaxSaveInputs model
and converts them to floats, or list of floats
"""
def numberfy_one(x):
if isinstance(x, float):
return x
else:
return float(x)
def numberfy(x):
if isinstance(x, list):
return [numberfy_one(i) for i in x]
else:
return numberfy_one(x)
attrs = vars(tsi)
return {k: numberfy(v) for k, v in list(attrs.items()) if v}
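# Illustrative call with a stand-in object (the real argument is a TaxSaveInputs model):
class _FakeInputs:
    pass

tsi = _FakeInputs()
tsi.rate = "0.35"
tsi.brackets = ["1", "2.5"]
print(convert_to_floats(tsi))   # {'rate': 0.35, 'brackets': [1.0, 2.5]}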
| 5,346,741 |
def properties_filter(mol):
"""
Filters a molecule by drug-like property ranges: MW, logP, HBA+HBD, TPSA and NRB
"""
#frag = Chem.rdmolops.GetMolFrags(mol) # remove '.'
#if len(frag) > 1:
#return False
MW_s = Descriptors.MolWt(mol) # MW
if MW_s < 250 or MW_s > 750:
return False
ALOGP_s = Descriptors.MolLogP(mol) # ALOGP
if ALOGP_s < -2 or ALOGP_s > 7:
return False
HBA_s = 0
for hba in Acceptors: # HBA
if mol.HasSubstructMatch(hba):
matches = mol.GetSubstructMatches(hba)
HBA_s += len(matches)
HBD_s = Descriptors.NumHDonors(mol) # HBD
if HBA_s + HBD_s >= 10:
return False
TPSA_s = Descriptors.TPSA(mol) # TPSA
if TPSA_s >= 150:
return False
NRB_s = Descriptors.NumRotatableBonds(mol) # NRB
if NRB_s >= 10:
return False
return True
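# Hedged usage sketch: `Acceptors` is assumed to be a module-level list of RDKit SMARTS
# queries for H-bond acceptors (a single generic pattern stands in here), and the
# module's `from rdkit.Chem import Descriptors` import is assumed.
from rdkit import Chem

Acceptors = [Chem.MolFromSmarts('[O,N]')]
mol = Chem.MolFromSmiles('CC(=O)Nc1ccc(O)cc1')   # paracetamol, MW ~151
print(properties_filter(mol))                     # False: MW is below the 250 cutoff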
| 5,346,742 |
def _create_certificate_chain():
"""
Construct and return a chain of certificates.
1. A new self-signed certificate authority certificate (cacert)
2. A new intermediate certificate signed by cacert (icert)
3. A new server certificate signed by icert (scert)
"""
caext = X509Extension(b('basicConstraints'), False, b('CA:true'))
# Step 1
cakey = PKey()
cakey.generate_key(TYPE_RSA, 512)
cacert = X509()
cacert.get_subject().commonName = "Authority Certificate"
cacert.set_issuer(cacert.get_subject())
cacert.set_pubkey(cakey)
cacert.set_notBefore(b("20000101000000Z"))
cacert.set_notAfter(b("20200101000000Z"))
cacert.add_extensions([caext])
cacert.set_serial_number(0)
cacert.sign(cakey, "sha1")
# Step 2
ikey = PKey()
ikey.generate_key(TYPE_RSA, 512)
icert = X509()
icert.get_subject().commonName = "Intermediate Certificate"
icert.set_issuer(cacert.get_subject())
icert.set_pubkey(ikey)
icert.set_notBefore(b("20000101000000Z"))
icert.set_notAfter(b("20200101000000Z"))
icert.add_extensions([caext])
icert.set_serial_number(0)
icert.sign(cakey, "sha1")
# Step 3
skey = PKey()
skey.generate_key(TYPE_RSA, 512)
scert = X509()
scert.get_subject().commonName = "Server Certificate"
scert.set_issuer(icert.get_subject())
scert.set_pubkey(skey)
scert.set_notBefore(b("20000101000000Z"))
scert.set_notAfter(b("20200101000000Z"))
scert.add_extensions([
X509Extension(b('basicConstraints'), True, b('CA:false'))])
scert.set_serial_number(0)
scert.sign(ikey, "sha1")
return [(cakey, cacert), (ikey, icert), (skey, scert)]
| 5,346,743 |
async def async_recorder_block_till_done(
opp: OpenPeerPower,
instance: recorder.Recorder,
) -> None:
"""Non blocking version of recorder.block_till_done()."""
await opp.async_add_executor_job(instance.block_till_done)
| 5,346,744 |
def make_constant_raster_from_base_uri(
base_dataset_uri, constant_value, out_uri, nodata_value=None,
dataset_type=gdal.GDT_Float32):
"""Create new gdal raster filled with uniform values.
A helper function that creates a new gdal raster from base, and fills
it with the constant value provided.
Args:
base_dataset_uri (string): the gdal base raster
constant_value: the value to set the new base raster to
out_uri (string): the uri of the output raster
Keyword Args:
nodata_value: the value to set the constant raster's nodata
value to. If not specified, it will be set to constant_value - 1.0
dataset_type: the datatype to set the dataset to, default
will be a float 32 value.
Returns:
None
"""
if nodata_value is None:
nodata_value = constant_value - 1.0
new_raster_from_base_uri(
base_dataset_uri, out_uri, 'GTiff', nodata_value,
dataset_type)
base_dataset = gdal.OpenEx(out_uri, gdal.GA_Update)
base_band = base_dataset.GetRasterBand(1)
base_band.Fill(constant_value)
base_band = None
gdal.Dataset.__swig_destroy__(base_dataset)
base_dataset = None
| 5,346,745 |
async def test_simple_properties(hass: HomeAssistant):
"""Test that simple properties work as intended."""
state = hass.states.get(VAC_ENTITY_ID)
registry = await hass.helpers.entity_registry.async_get_registry()
entity = registry.async_get(VAC_ENTITY_ID)
assert entity
assert state
assert state.state == STATE_CLEANING
assert entity.unique_id == "AC000Wxxxxxxxxx"
| 5,346,746 |
def build_job_spec_name(file_name, version="develop"):
"""
:param file_name: dotted name; the component after the last dot becomes the job name
:param version: version tag appended after the colon
:return: str, ex. job-hello_world:develop
"""
name = file_name.split('.')[-1]
job_name = 'job-%s:%s' % (name, version)
return job_name
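# Example: the job name is taken from the last dot-separated component of file_name.
print(build_job_spec_name("specs.hello_world"))            # job-hello_world:develop
print(build_job_spec_name("hello_world", version="v1.0"))  # job-hello_world:v1.0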
| 5,346,747 |
def draw_mask(im: torch.Tensor, mask: torch.Tensor, t=0.2, color=(255, 255, 255), visualize_instances=True):
"""
Visualize mask where mask = 0.
Supports multiple instances.
mask shape: [N, C, H, W], where C is different instances in same image.
"""
assert len(mask.shape) in (3, 4), mask.shape
mask = mask.view(-1, *mask.shape[-3:])
im = im.view(-1, *im.shape[-3:])
assert im.dtype == torch.uint8, im.dtype
assert 0 <= t <= 1
if not visualize_instances:
mask = mask.any(dim=1, keepdim=True)
mask = mask.float()
kernel = torch.ones((3, 3), dtype=mask.dtype, device=mask.device)
outer_border = dilation(mask, kernel).logical_xor(mask)
outer_border = outer_border.any(dim=1, keepdim=True).repeat(1, 3, 1, 1) > 0
inner_border = erosion(mask, kernel).logical_xor(mask)
inner_border = inner_border.any(dim=1, keepdim=True).repeat(1, 3, 1, 1) > 0
mask = (mask == 0).any(dim=1, keepdim=True).repeat(1, 3, 1, 1)
color = torch.tensor(color).to(im.device).byte().view(1, 3, 1, 1)#.repeat(1, *im.shape[1:])
color = color.repeat(im.shape[0], 1, *im.shape[-2:])
im[mask] = (im[mask] * (1-t) + t * color[mask]).byte()
im[outer_border] = 255
im[inner_border] = 0
return im
| 5,346,748 |
def view_about():
"""
Shows the about page.
:return: the rendered 'about.html' template
:rtype: str
"""
return render_template('about.html', title="About Flask AWS Template")
| 5,346,749 |
def validate_is_mergeable(tc, *python_schema):
"""
Raises an error if the column names in the given schema conflict
"""
scala_schema_list = []
for schema in python_schema:
if not isinstance(schema, list):
schema = [schema]
scala_schema_list.append(schema_to_scala(tc.sc, schema))
jvm_scala_schema(tc.sc).validateIsMergeable(tc.jutils.convert.to_scala_list(scala_schema_list))
| 5,346,750 |
def viterbi(observed_values,
transition_probabilities,
emission_probabilities,
initial_distribution,
file_name,
log=True):
"""Calculates the viterbi-path for a given hidden-markov-model, heavily
inspired by Abhisek Janas Blogpost "Implement Viterbi Algorithm in Hidden
Markov Model using Python and R" at February 21, 2019.
The Blog as well as the original source-code can be found under http://www.adeveloperdiary.com/data-science/machine-learning/implement-viterbi-algorithm-in-hidden-markov-model-using-python-and-r/ #noqa
Args:
observed_values (np.array): visible part of the hidden-markov-model
transition_probabilities (np.array): transition probabilities for the
hidden part of the hidden-markov-model
emission_probabilities (np.array): transition probabilities for the
visible part of the hidden-markov-model
initial_distribution (np.array): probabilities for the initial status
log (bool) = True: The results are calculated using the logarithmic
projection
Returns:
(np.array): the viterbi-path for the given hidden-markov-model
"""
# Amount of steps
epochs = observed_values.shape[0]
# Amount of states
states = transition_probabilities.shape[0]
# Highest probability of ending in a specific state
omega = numpy.zeros((epochs, states), dtype=numpy.longdouble)
prev = numpy.zeros((epochs - 1, states), dtype=numpy.longdouble)
# Two Dimensional Array, which holds all forward probability for every
# state and epoch
forward_probs = numpy.zeros((epochs, states), dtype=numpy.longdouble)
# Two Dimensional Array, which holds all backward probability for every
# state and epoch
backward_probs = numpy.zeros((epochs, states), dtype=numpy.longdouble)
# Since we start at the pack of the list we need to init it with a one,
# instead of a zero
backward_probs[epochs - 1] = numpy.ones((states))
# Two Dimensional Array, which holds all posteriori probability for every
# state and epoch
posteriori_probs = numpy.zeros((epochs, states), dtype=numpy.longdouble)
# Calculation of the probability for the observed initial state
if log:
omega[0, :] = numpy.log(initial_distribution * emission_probabilities[:, observed_values[0]-1]) #noqa
else:
omega[0, :] = initial_distribution * emission_probabilities[:, observed_values[0]-1] #noqa
forward_probs[0, :] = initial_distribution * emission_probabilities[:, observed_values[0]-1] #noqa
for epoch in range(1, epochs):
for state in range(1, -1, -1):
# Calculate the probability of obtaining the observed value for
# each possible transition.
if log:
probability = omega[epoch - 1] + \
numpy.log(transition_probabilities[:, state]) + \
numpy.log(emission_probabilities[state, observed_values[epoch]-1]) #noqa
else:
probability = omega[epoch - 1] * \
transition_probabilities[:, state] * \
emission_probabilities[state, observed_values[epoch]-1]
# This is our most probable state given previous state at epoch
prev[epoch - 1, state] = numpy.argmax(probability)
# save probability of the most probable state
omega[epoch, state] = numpy.max(probability)
            # Calculate forward probabilities for Posteriori-Decoding
            # The sum of the equations is calculated with matrix
            # multiplication (.dot), since that way a generic implementation
            # is provided!
if not log:
forward_probs[epoch, state] = emission_probabilities[state, observed_values[epoch]-1] * forward_probs[epoch - 1].dot(transition_probabilities[:, state]) #noqa
# Path Array
path = numpy.zeros(epochs)
# Find the most probable last hidden state
last_state = numpy.argmax(omega[epochs - 1, :]).astype(int)
# Start building the path
path[0] = last_state
# Start backtracking
backtrack_index = 1
for i in range(epochs - 2, -1, -1):
# Calculate the next hidden state based on its successor
next_hidden = prev[i, last_state]
# Add state to the path
path[backtrack_index] = next_hidden
# Save state for the next backtracking step
last_state = next_hidden.astype(int)
backtrack_index += 1
    # Posteriori-Decoding, calculate backward probabilities.
    # The sum of the equations is calculated with matrix
    # multiplication (.dot), since that way a generic implementation is
    # provided!
    # The results are at this point in reversed order, since we started
    # calculating them from the end!
if not log:
for state in range(states):
backward_probs[i, state] = (backward_probs[i+1]*emission_probabilities[:, observed_values[i]-1]).dot(transition_probabilities[state, :]) #noqa
# Flip the path array since we were backtracking
path = numpy.flip(path, axis=0)
# Convert numeric values to actual hidden states
result = ""
for element in path:
if element == 0:
result = result + "F"
else:
result = result + "L"
    # Posteriori-Decoding, calculate posteriori probabilities.
    if not log:
        # Flip the backward probabilities to provide them in
        # the correct order
backward_probs = numpy.flip(backward_probs, axis=0)
increase = 1
for i in range(epochs):
# A counter to manage the constant multiplication used
if(i % 20 == 0):
# increase the multiplication factor
increase *= numpy.longdouble(10**5)
# Calculate the posteriori probability based on the given algorithm
posteriori_probs[i, :] = ((forward_probs[i, :]*increase) * (backward_probs[i, :]*increase)) / (numpy.max(omega[epochs-1, :])*increase) #noqa
# Remove the constant factor and override the current posteriori
# probability, to give a correct value
posteriori_probs[i, :] = posteriori_probs[i, :] / increase
numpy.savetxt("results\\posteriori-decoding"+file_name, posteriori_probs) #noqa
dirName = "results\\viterbi-Path"+file_name
text_file = open(dirName, "w")
text_file.write(result)
text_file.close()
return result
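# Illustrative usage sketch (not part of the original snippet): a made-up
# two-state (fair/loaded die) HMM with 1-based observation symbols. Note that
# the function writes its result files under a "results" directory using
# Windows-style paths, so that directory is created first.
import os
import numpy

os.makedirs("results", exist_ok=True)
observations = numpy.array([1, 6, 6, 2, 6, 6, 6, 3])              # die faces
transitions = numpy.array([[0.95, 0.05], [0.10, 0.90]])           # F->.., L->..
emissions = numpy.array([[1 / 6] * 6,                             # fair die
                         [0.1, 0.1, 0.1, 0.1, 0.1, 0.5]])         # loaded die
initial = numpy.array([0.5, 0.5])
print(viterbi(observations, transitions, emissions, initial, "toy.txt"))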
| 5,346,751 |
def extract_psf_fitting_names(psf):
"""
Determine the names of the x coordinate, y coordinate, and flux from
a model. Returns (xname, yname, fluxname)
"""
if hasattr(psf, 'xname'):
xname = psf.xname
elif 'x_0' in psf.param_names:
xname = 'x_0'
else:
raise ValueError('Could not determine x coordinate name for '
'psf_photometry.')
if hasattr(psf, 'yname'):
yname = psf.yname
elif 'y_0' in psf.param_names:
yname = 'y_0'
else:
raise ValueError('Could not determine y coordinate name for '
'psf_photometry.')
if hasattr(psf, 'fluxname'):
fluxname = psf.fluxname
elif 'flux' in psf.param_names:
fluxname = 'flux'
else:
raise ValueError('Could not determine flux name for psf_photometry.')
return xname, yname, fluxname
| 5,346,752 |
def primes():
"""
Generate prime numbers
"""
primes = []
for n in count(2):
found_prime = True
for p in primes:
if p*p > n:
break
if n % p == 0:
found_prime = False
break
if found_prime:
primes.append(n)
yield n
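# Illustrative usage sketch (not part of the original snippet); `count` above is
# assumed to come from itertools, as the generator requires.
from itertools import count, islice

print(list(islice(primes(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]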
| 5,346,753 |
def parseargs(p):
"""
Add arguments and `func` to `p`.
:param p: ArgumentParser
:return: ArgumentParser
"""
# TODO: Implement --date, --time and -t
p.set_defaults(func=func)
    p.description = (
        "Update the access and modification times of each "
        + "FILE to the current time. A FILE argument that does "
        + "not exist is created empty. A FILE argument string "
        + "of - is handled specially and causes touch to "
        + "change the times of the file associated with standard output."
    )
p.add_argument('FILE', nargs='*')
p.add_argument(
"-a", action="store_true", dest="accessonly", help="change only the access time"
)
p.add_argument(
"-c",
"--no-create",
action="store_true",
dest="nocreate",
help="do not create any files",
)
p.add_argument(
"-f", action="store_true", dest="thisoptionshouldbeignored", help="(ignored)"
)
p.add_argument(
"-m",
action="store_true",
dest="modonly",
help="change only the modification time",
)
p.add_argument(
"-r",
"--reference",
dest="reference",
help="use this file's times instead of current time",
)
return p
| 5,346,754 |
def config_check_conformance(cookie, dn):
""" Auto-generated UCS XML API Method. """
method = ExternalMethod("ConfigCheckConformance")
method.cookie = cookie
method.dn = dn
xml_request = method.to_xml(option=WriteXmlOption.DIRTY)
return xml_request
| 5,346,755 |
def get_dist_to_port(geotiff):
"""
Extract "truth" dist_to_port from geotiff
"""
with Geotiff(geotiff) as tif:
dist_to_port = tif.values
return dist_to_port
| 5,346,756 |
def build_dataset_exporter(
dataset_type, strip_none=True, warn_unused=True, **kwargs
):
"""Builds the :class:`DatasetExporter` instance for the given parameters.
Args:
dataset_type: the :class:`fiftyone.types.dataset_types.Dataset` type
strip_none (True): whether to exclude None-valued items from ``kwargs``
warn_unused (True): whether to issue warnings for any non-None unused
parameters encountered
**kwargs: keyword arguments to pass to the dataset exporter's
constructor via ``DatasetExporter(**kwargs)``
Returns:
a tuple of:
- the :class:`DatasetExporter` instance
- a dict of unused keyword arguments
"""
if dataset_type is None:
raise ValueError(
"You must provide a `dataset_type` in order to build a dataset "
"exporter"
)
if inspect.isclass(dataset_type):
dataset_type = dataset_type()
dataset_exporter_cls = dataset_type.get_dataset_exporter_cls()
if strip_none:
kwargs = {k: v for k, v in kwargs.items() if v is not None}
kwargs, unused_kwargs = fou.extract_kwargs_for_class(
dataset_exporter_cls, kwargs
)
try:
dataset_exporter = dataset_exporter_cls(**kwargs)
except Exception as e:
raise ValueError(
"Failed to construct exporter of type %s using the provided "
"parameters. See above for the error. You may need to supply "
"additional mandatory arguments. Please consult the documentation "
"of %s to learn more"
% (dataset_exporter_cls, dataset_exporter_cls)
) from e
if warn_unused:
for key, value in unused_kwargs.items():
if value is not None:
logger.warning(
"Ignoring unsupported parameter '%s' for exporter type %s",
key,
dataset_exporter_cls,
)
return dataset_exporter, unused_kwargs
| 5,346,757 |
def conditions(x):
"""
This function will check whether the constraints that apply to
our optimization are met or not.
"""
if ( (10/x[0]) > 66.0 ):
return False
elif ( (10/x[0] + 12/x[1]) > 88.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2]) > 107.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3]) > 128.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4]) > 157.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5]) > 192.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5] + 10/x[6]) > 222.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5] + 10/x[6] + 10/x[7]) > 242.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5] + 10/x[6] + 10/x[7] + 16/x[8]) > 268.0 ):
return False
elif ( (10/x[0] + 12/x[1] + 7/x[2] + 14/x[3] + 15/x[4] + 20/x[5] + 10/x[6] + 10/x[7] + 16/x[8] + 8/x[9]) > 292.0 ):
return False
return True
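# Illustrative usage sketch (not part of the original snippet); x holds made-up
# per-task rates, so each term t/x[i] is the time spent on task i and the checks
# compare cumulative times against deadlines.
print(conditions([0.5] * 10))  # True  - every cumulative time stays within its limit
print(conditions([0.1] * 10))  # False - already 10/0.1 = 100 > 66 for the first check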
| 5,346,758 |
def _write_report(
report_filename,
supervision_modes,
counters,
):
"""Creates the report_filename file containing statistics about conversion."""
contents = []
contents.append('\t'.join(('Total', 'Valid', 'Failed', 'File')))
for dataset, counter in counters.items():
failed = counter.pop('failed', 0)
valid = counter.pop('valid', 0)
total = failed + valid
contents.append('\t'.join(map(str, (total, valid, failed, dataset))))
# Prints an overview to the stdout.
logging.info('\n'.join(contents))
with tf.io.gfile.GFile(report_filename, 'w') as report_file:
for dataset, supervision_mode in sorted(supervision_modes.items()):
report_file.write('# Dataset: {} supervision_mode: {}\n'.format(
dataset, supervision_mode))
report_file.write('\n')
report_file.write('\n'.join(contents))
report_file.write('\n')
for dataset, counter in counters.items():
if not counter:
continue
report_file.write('# Detailed error statistics for {}:\n'.format(dataset))
for key, value in sorted(counter.items()):
report_file.write('# {}\t{}\n'.format(key, value))
| 5,346,759 |
def clear_SystemInfo() -> None: # noqa
"""Clear OsInfo singleton."""
SystemInfo.clear_singleton()
| 5,346,760 |
def step_impl(context):
"""
Now we call the different internal methods and save their values
internally in the world parameter
"""
with mock.patch('requests.get') as requests_get:
requests_get.return_value = APIHelperCallAPI(ASSET_LIST_GET)
world.client_assets = world.client.assets()
world.first_asset = world.client_assets.first()
with mock.patch('requests.get') as requests_get:
requests_get.return_value = APIHelperCallAPI(TAG_LIST_MULTI_GET)
world.first_asset_tags = world.first_asset.get_tags()
world.tag_list.add(world.first_asset_tags.get("id", 1))
world.tag_list.add(world.first_asset_tags.get("id", 2))
with mock.patch('requests.post') as requests_post:
requests_post.return_value = APIHelperCallAPI(TAG_LIST_DATA_POST)
world.tag_list_data_pd = world.tag_list.data(start_time=1, stop_time=2)
world.tag_list_data_json = world.tag_list.data(
start_time=1, stop_time=2, return_type=Constants.RETURN_JSON)
with mock.patch('requests.post') as requests_post:
requests_post.return_value = APIHelperCallAPI(TAG_LIST_DATA_POST)
test_transformation1 = [{
"transformation_type": "interpolation",
"column": "1",
"method": "linear"
}, {
"transformation_type": "interpolation",
"column": "2",
"method": "cubic",
"order": 2
}]
world.first_asset_data_with_correct_transformation = world.tag_list.data(
start_time=1, stop_time=2, transformations=test_transformation1)
| 5,346,761 |
def Tcolloc2X1S(outf, (prefixes,topics,segments), IFflag=False):
"""non-linguistic context topics
topics can generate exactly one topical collocation
topical collocations contain exactly one topical word"""
outf.write("1 1 Colloc2s --> Colloc2s Colloc2\n")
for topic in topics:
if topic != "None":
outf.write("1 1 Colloc2s --> Topic_%s Colloc2_%s\n"%(topic,topic))
for prefix in prefixes:
outf.write("1 1 Colloc2s --> T_%s\n"%prefix)
outf.write("Colloc2 --> Collocs\n")
outf.write("1 1 Collocs --> Colloc\n")
outf.write("1 1 Collocs --> Collocs Colloc\n")
outf.write("Colloc --> Words\n")
outf.write("1 1 Words --> Words Word\n")
outf.write("1 1 Words --> Word\n")
for topic in topics:
if topic != "None":
outf.write("1 1 Topic_%s --> Topic_%s Colloc2\n"%(topic,topic))
for prefix in prefixes:
if topic in prefix.split('|'):
outf.write("1 1 Topic_%s --> T_%s\n"%(topic,prefix))
for topic in topics:
if topic != "None":
outf.write("Colloc2_%s --> Collocs_%s\n"%(topic,topic))
outf.write("1 1 Collocs_%s --> Colloc_%s\n"%(topic,topic))
outf.write("1 1 Collocs_%s --> Collocs Colloc_%s\n"%(topic,topic))
outf.write("1 1 Collocs_%s --> Collocs_%s Colloc\n"%(topic,topic))
outf.write("Colloc_%s --> Words_%s\n"%(topic,topic))
outf.write("1 1 Words_%s --> Word_%s\n"%(topic,topic))
outf.write("1 1 Words_%s --> Words Word_%s\n"%(topic,topic))
outf.write("1 1 Words_%s --> Words_%s Word\n"%(topic,topic))
WordSyllables(outf, ['Word']+['Word_'+t for t in topics], segments, IFflag=IFflag)
| 5,346,762 |
def load_reco_param(source):
"""Load reco parameterisation (energy-dependent) from file or dictionary.
Parameters
----------
source : string or mapping
Source of the parameterization. If string, treat as file path or
resource location and load from the file; this must yield a mapping. If
`source` is a mapping, it is used directly. See notes below on format.
Returns
-------
reco_params : OrderedDict
Keys are stringified flavintgroups and values are dicts of strings
representing the different reco dimensions and lists of distribution
properties. These latter have a 'fraction', a 'dist' and a 'kwargs' key.
The former two hold callables, while the latter holds a dict of
key-callable pairs ('loc', 'scale'), which can be evaluated at the desired
energies and passed into the respective `scipy.stats` distribution.
The distributions for a given dimension will be superimposed according
to their relative weights to form the reco kernels (via integration)
when called with energy values (parameterisations are functions of
energy only!).
Notes
-----
The mapping passed via `source` or loaded therefrom must have the format:
{
<flavintgroup_string>:
{
<dimension_string>:[
{
"dist": dist_id,
"fraction": val,
"kwargs": {
"loc": val,
"scale": val,
...
}
},
...
]
},
<flavintgroup_string>:
...
}
`flavintgroup_string`s must be parsable by
pisa.utils.flavInt.NuFlavIntGroup. Note that the `transform_groups` defined
in a pipeline config file using this must match the groupings defined
above.
`dimension_string`s denote the observables/dimensions whose reco error
distribution is parameterised (`"energy"` or `"coszen"`).
`dist_id` needs to be a string identifying a probability distribution/statistical
function provided by `scipy.stats`. No implicit assumptions about the
distribution will be made if the `"dist"` key is missing.
`"fraction"` holds the relative weight of the distribution. For a given
dimension, the sum of all fractions present must be 1.
Valid kwargs for distributions must at least include `"loc"` and `"scale"` -
these will be passed into the respective `scipy.stats` function.
`val`s can be one of the following:
- Callable with one argument
- String such that `eval(val)` yields a callable with one argument
"""
if not (source is None or isinstance(source, (basestring, Mapping))):
raise TypeError('`source` must be string, mapping, or None')
if isinstance(source, basestring):
orig_dict = from_file(source)
elif isinstance(source, Mapping):
orig_dict = source
else:
raise TypeError('Cannot load reco parameterizations from a %s'
% type(source))
valid_dimensions = ('coszen', 'energy')
required_keys = ('dist', 'fraction', 'kwargs')
# Build dict of parameterizations (each a callable) per flavintgroup
reco_params = OrderedDict()
for flavint_key, dim_dict in orig_dict.iteritems():
flavintgroup = NuFlavIntGroup(flavint_key)
reco_params[flavintgroup] = {}
for dimension in dim_dict.iterkeys():
dim_dist_list = []
if not isinstance(dimension, basestring):
raise TypeError("The dimension needs to be given as a string!"
" Allowed: %s."%valid_dimensions)
if dimension not in valid_dimensions:
raise ValueError("Dimension '%s' not recognised!"%dimension)
for dist_dict in dim_dict[dimension]:
dist_spec_dict = {}
# allow reading in even if kwargs not present - computation of
# transform will fail because "loc" and "scale" hard-coded
# requirement
for required in required_keys:
if required not in dist_dict:
raise ValueError("Found distribution property dict "
"without required '%s' key for "
"%s - %s!"
%(required, flavintgroup, dimension))
for k in dist_dict.iterkeys():
if k not in required_keys:
logging.warn("Unrecognised key in distribution"
" property dict: '%s'"%k)
dist_spec = dist_dict['dist']
if not isinstance(dist_spec, basestring):
raise TypeError(" The resolution function needs to be"
" given as a string!")
if not dist_spec:
raise ValueError("Empty string found for resolution"
" function!")
try:
dist = getattr(stats, dist_spec.lower())
except AttributeError:
try:
import scipy
sp_ver_str = scipy.__version__
except:
sp_ver_str = "N/A"
raise AttributeError("'%s' is not a valid distribution"
" from scipy.stats (your scipy"
" version: '%s')."
%(dist_spec.lower(), sp_ver_str))
logging.debug("Found %s - %s resolution function: '%s'"
%(flavintgroup, dimension, dist.name))
dist_spec_dict['dist'] = dist
frac = dist_dict['fraction']
if isinstance(frac, basestring):
frac_func = eval(frac)
elif callable(frac):
frac_func = frac
else:
raise TypeError(
"Expected 'fraction' to be either a string"
" that can be interpreted by eval or a callable."
" Got '%s'." % type(frac)
)
dist_spec_dict['fraction'] = frac_func
kwargs = dist_dict['kwargs']
if not isinstance(kwargs, dict):
raise TypeError(
"'kwargs' must hold a dictionary. Got '%s' instead."
% type(kwargs)
)
dist_spec_dict['kwargs'] = kwargs
for kwarg, kwarg_spec in kwargs.iteritems():
if isinstance(kwarg_spec, basestring):
kwarg_eval = eval(kwarg_spec)
elif callable(kwarg_spec) or isscalar(kwarg_spec):
kwarg_eval = kwarg_spec
else:
raise TypeError(
"Expected kwarg '%s' spec to be either a string"
" that can be interpreted by eval, a callable or"
" a scalar. Got '%s'." % type(kwarg_spec)
)
dist_spec_dict['kwargs'][kwarg] = kwarg_eval
dim_dist_list.append(dist_spec_dict)
reco_params[flavintgroup][dimension] = dim_dist_list
return reco_params
| 5,346,763 |
def face_palm(text, message):
"""<nick> - Expresses your frustration with <Nick>. Code located in reactions.py"""
face_palmer = text.strip()
message('Dammit {} {}'.format(face_palmer, random.choice(reaction_macros['facepalm_macros'])))
| 5,346,764 |
def get_commit_ancestors_graph(refenv, starting_commit):
"""returns a DAG of all commits starting at some hash pointing to the repo root.
Parameters
----------
refenv : lmdb.Environment
lmdb environment where the commit refs are stored
starting_commit : string
commit hash to start creating the DAG from
Returns
-------
dict
a dictionary where each key is a commit hash encountered along the way,
and it's value is a list containing either one or two elements which
identify the child commits of that parent hash.
"""
parent_commit = starting_commit
commit_graph = {}
    seen = {starting_commit}  # a set containing the hash itself, not its characters
more_work = []
end_commit = False
if parent_commit == '':
end_commit = True
while end_commit is not True:
childCommit = get_commit_ancestors(refenv, parent_commit)
if ((childCommit.master_ancestor == '') or (childCommit.master_ancestor in seen)):
end_commit = True
commit_graph[parent_commit] = [childCommit.master_ancestor]
if len(more_work) != 0:
master_commit = more_work.pop(0)
end_commit = False
else:
continue
elif childCommit.is_merge_commit is True:
master_commit = childCommit.master_ancestor
dev_commit = childCommit.dev_ancestor
more_work.append(dev_commit)
commit_graph[parent_commit] = [master_commit, dev_commit]
seen.add(master_commit)
seen.add(dev_commit)
else:
master_commit = childCommit.master_ancestor
commit_graph[parent_commit] = [master_commit]
seen.add(master_commit)
parent_commit = master_commit
return commit_graph
| 5,346,765 |
def is_verification_handshake(rjson):
"""
Determines if the request is the Slack application APIs verification handshake
:rtype: bool
"""
# Check body contains the right keys
for x in ['token', 'challenge', 'type']:
if x not in rjson:
return False
# Check type is correct
if rjson['type'] != "url_verification":
return False
# Note: no need to check the token, we check the request is signed
# before this code is ever run.
# It's a verification request
log.info("Received URL verification handshake request")
return True
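# Illustrative usage sketch (not part of the original snippet); it assumes the
# module-level `log` logger the function uses, and the payload mirrors the JSON
# body Slack sends for a url_verification event.
import logging
log = logging.getLogger(__name__)

handshake = {
    "token": "Jhj5dZrVaK7ZwHHjRyZWjbDl",
    "challenge": "3eZbrw1aBm2rZgRNFdxV2595E9CY3gmdALWMmHkvFXO7tYXAYM8P",
    "type": "url_verification",
}
print(is_verification_handshake(handshake))  # True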
| 5,346,766 |
def template_node(scope_key):
""" Create and return a new template node.
Parameters
----------
scope_key : object
The key for the local scope in the local storage maps.
Returns
-------
result : TemplateNode
A new compiler template node.
"""
node = TemplateNode()
node.scope_key = scope_key
return node
| 5,346,767 |
def _add_unit_and_content(unit, result):
"""Adds the score dimensions for units and its lessons and questions."""
# The content of an assessment is indicated by a lesson_id of None.
# Inside that lesson we can find all the questions added directly
# to the assessment.
unit_dict = {
DIM_TYPE: DIM_TYPE_UNIT, # Unit or assessment
DIM_ID: unit['unit_id'],
'name': unit['title']} # Name won't be saved in ClusterEntity
result.append(unit_dict)
unit_scored_lessons = 0
for item in unit['contents']:
lesson_id = item.get('lesson_id')
# A unit may have a pre or post assessment, in that case the item
# has unit_id, not a lesson_id.
included_assessment_id = item.get('unit_id')
lesson_title = item.get('title')
if lesson_title and lesson_id and item.get('tallied'):
result.append({
DIM_TYPE: DIM_TYPE_LESSON,
DIM_ID: lesson_id,
'name': lesson_title})
unit_scored_lessons += 1
elif included_assessment_id and lesson_title:
result.append({
DIM_TYPE: DIM_TYPE_UNIT,
DIM_ID: included_assessment_id,
'name': lesson_title})
unit_scored_lessons += 1
# If lesson is not tallied (graded) is not considered a dimension
for question in item['questions']:
if included_assessment_id:
question_id = pack_question_dimid(
included_assessment_id, None, question['id'])
else:
question_id = pack_question_dimid(
unit['unit_id'], lesson_id, question['id'])
result.append({
DIM_TYPE: DIM_TYPE_QUESTION,
DIM_ID: question_id,
'name': question['description']})
# This should affect the result list as well.
unit_dict[DIM_EXTRA_INFO] = transforms.dumps(
{'unit_scored_lessons': unit_scored_lessons})
| 5,346,768 |
def alarm(state_doc, rds_session):
""""alarm: something went wrong we are going to scream about it."""
logger.error(datadog_dbsnap_verify_status_check(state_doc, "CRITICAL"))
logger.info(datadog_dbsnap_verify_set_count(state_doc, "dbsnap_verify.failed"))
| 5,346,769 |
def run_command(args):
"""Calls the program using the specified command."""
if args.project_id is None:
print('You must specify a project ID or set the '
'"GOOGLE_CLOUD_PROJECT" environment variable.')
return
elif args.command == 'create-fhir-store':
create_fhir_store(
args.service_account_json,
args.api_key,
args.project_id,
args.cloud_region,
args.dataset_id,
args.fhir_store_id)
elif args.command == 'delete-fhir-store':
delete_fhir_store(
args.service_account_json,
args.api_key,
args.project_id,
args.cloud_region,
args.dataset_id,
args.fhir_store_id)
elif args.command == 'get-fhir-store':
get_fhir_store(
args.service_account_json,
args.api_key,
args.project_id,
args.cloud_region,
args.dataset_id,
args.fhir_store_id)
elif args.command == 'list-fhir-stores':
list_fhir_stores(
args.service_account_json,
args.api_key,
args.project_id,
args.cloud_region,
args.dataset_id)
elif args.command == 'patch-fhir-store':
patch_fhir_store(
args.service_account_json,
args.api_key,
args.project_id,
args.cloud_region,
args.dataset_id,
args.fhir_store_id,
args.pubsub_topic)
elif args.command == 'export-fhir-resource':
patch_fhir_store(
args.service_account_json,
args.api_key,
args.project_id,
args.cloud_region,
args.dataset_id,
args.fhir_store_id,
args.uri_prefix)
elif args.command == 'import-fhir-resource':
patch_fhir_store(
args.service_account_json,
args.api_key,
args.project_id,
args.cloud_region,
args.dataset_id,
args.fhir_store_id,
args.content_uri)
| 5,346,770 |
def test_get_id_info_invalid_arg_in_str():
"""
Test to see if get_id_info raises the correct exception when an incorrect type for the in_str arg is passed.
"""
sa_id_book = SAIDBook()
with pytest.raises(TypeError):
sa_id_book.get_id_info(['not legit'])
| 5,346,771 |
def GetAutoResult(chroot_path, buildbucket_id):
"""Returns the conversion of the result of 'cros buildresult'."""
# Calls 'cros buildresult' to get the status of the tryjob.
build_result = GetStatusFromCrosBuildResult(chroot_path, buildbucket_id)
# The string returned by 'cros buildresult' might not be in the mapping.
if build_result not in builder_status_mapping:
raise ValueError(
'"cros buildresult" return value is invalid: %s' % build_result)
return builder_status_mapping[build_result]
| 5,346,772 |
def abort(message: str) -> typing.NoReturn:
"""Print an error message and raise an Exit exception"""
sprint(f"[error]{message}")
raise typer.Exit(1)
| 5,346,773 |
def rft(x):
"""
Real Fourier Transform
"""
# XXX figure out what exactly this is doing...
s = x.shape[-1]
xp = np.zeros(x.shape,dtype="complex64")
xp[...,1:s/2] = x[...,1:-1:2]+x[...,2::2]*1.j
xp[...,0] = x[...,0]/2.
xp[...,s/2] = x[...,-1]/2.
return np.array(nmr_reorder(np.fft.fft(2*xp,axis=-1).real),dtype="float32")
| 5,346,774 |
def yam_path(manifestsdir):
"""Bundletracker manifest."""
return join(manifestsdir, 'yam.json')
| 5,346,775 |
def single_from(iterable):
"""Check that an iterable contains one unique value, and return it."""
unique_vals = set(iterable)
if len(unique_vals) != 1:
raise ValueError('multiple unique values found')
return unique_vals.pop()
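# Illustrative usage sketch (not part of the original snippet).
print(single_from([7, 7, 7]))  # 7
# single_from([1, 2]) raises ValueError('multiple unique values found')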
| 5,346,776 |
def create(engine):
"""Create new dB"""
Base.metadata.create_all(bind=engine)
| 5,346,777 |
def vthash(filehash: str):
"""Returns the analysis data class for a file in VirusTotal's database"""
endpoint_path = f'/files/{filehash}'
endpoint = f"{api_base_url}{endpoint_path}"
r = requests.get(endpoint, headers=header)
if r.status_code == 404 and r.json()['error']['code'] == 'NotFoundError':
return None
elif r.status_code == 200:
return analysisdata(r)
| 5,346,778 |
def add(ctx, file_, directory, es, username, password, ignore_certs):
"""add data to system"""
if all([file_ is None, directory is None]):
raise click.ClickException('Missing --file/-f or --dir/-d option')
conn_config = configure_es_connection(es, username, password, ignore_certs)
files_to_process = []
if file_ is not None:
files_to_process = [file_]
elif directory is not None:
for root, dirs, files in os.walk(directory):
for f in [file for file in files if file.endswith('.xml')]:
files_to_process.append(os.path.join(root, f))
files_to_process.sort(key=os.path.getmtime)
for file_to_process in files_to_process:
loader = MarineWeatherRealtimeLoader(conn_config)
result = loader.load_data(file_to_process)
if not result:
click.echo('features not generated')
| 5,346,779 |
def non_credibility_index3(sol, ref_sol, evalgrid):
"""Compute a variant of the non-credibility index."""
raise NotImplementedError
| 5,346,780 |
def prod(x, axis=None, keepdims=False):
"""
product of all element in the array
Parameters
----------
x : tensor_like
input array
axis : int, tuple of ints
axis or axes along which a product is performed
keepdims : bool
keep dimensionality or not
Returns
-------
product : tensor_like
product of all element
"""
return Product(axis=axis, keepdims=keepdims).forward(x)
| 5,346,781 |
def menu(queue: List[str] = None):
"""Fred Menu"""
fred_controller = FredController(queue)
an_input = "HELP_ME"
while True:
# There is a command in the queue
if fred_controller.queue and len(fred_controller.queue) > 0:
# If the command is quitting the menu we want to return in here
if fred_controller.queue[0] in ("q", "..", "quit"):
print("")
if len(fred_controller.queue) > 1:
return fred_controller.queue[1:]
return []
# Consume 1 element from the queue
an_input = fred_controller.queue[0]
fred_controller.queue = fred_controller.queue[1:]
# Print the current location because this was an instruction and we want user to know what was the action
if an_input and an_input.split(" ")[0] in fred_controller.CHOICES_COMMANDS:
print(f"{get_flair()} /economy/fred/ $ {an_input}")
# Get input command from user
else:
# Display help menu when entering on this menu from a level above
if an_input == "HELP_ME":
fred_controller.print_help()
# Get input from user using auto-completion
if session and gtff.USE_PROMPT_TOOLKIT and fred_controller.completer:
an_input = session.prompt(
f"{get_flair()} /economy/fred/ $ ",
completer=fred_controller.completer,
search_ignore_case=True,
)
# Get input from user without auto-completion
else:
an_input = input(f"{get_flair()} /economy/fred/ $ ")
try:
# Process the input command
fred_controller.queue = fred_controller.switch(an_input)
except SystemExit:
print(
f"\nThe command '{an_input}' doesn't exist on the /economy/fred menu.",
end="",
)
similar_cmd = difflib.get_close_matches(
an_input.split(" ")[0] if " " in an_input else an_input,
fred_controller.CHOICES,
n=1,
cutoff=0.7,
)
if similar_cmd:
if " " in an_input:
candidate_input = (
f"{similar_cmd[0]} {' '.join(an_input.split(' ')[1:])}"
)
if candidate_input == an_input:
an_input = ""
fred_controller.queue = []
print("\n")
continue
an_input = candidate_input
else:
an_input = similar_cmd[0]
print(f" Replacing by '{an_input}'.")
fred_controller.queue.insert(0, an_input)
else:
print("\n")
| 5,346,782 |
def load_line(
file: str,
separator: Union[None, str] = None,
max_split: int = -1,
deduplication: bool = False,
line_processor: Callable = repeat,
open_method: str = 'open',
) -> Iterator:
"""
按行读入文件,会去掉每行末尾的换行符
:param file: 文件路径
:param separator: 用separator切分每行内容,None表示不做切分
:param max_split: 控制separator的切分次数,-1表示不限制次数
:param line_processor: 一个函数,对separator的结果做处理
:param deduplication: 若为True,将不输出重复的行
:param open_method: 指定打开文件的方法
:return: 文件每行的内容
"""
cache = Deduplication()
def inner_line_process(_file_iterator):
for line in _file_iterator:
if deduplication and cache.is_duplication(line):
continue
item = line.rstrip('\n\r')
if separator:
item = item.split(separator, max_split)
yield line_processor(item)
with Accessor(file, open_method=open_method) as file_iterator:
yield from inner_line_process(file_iterator)
| 5,346,783 |
def generic_repr(name, obj, deferred):
"""
Generic pretty printer for NDTable and NDArray.
Output is of the form::
Array(3, int32)
values := [Numpy(ptr=60597776, dtype=int64, shape=(3,))];
metadata := [contigious]
layout := Identity;
[1 2 3]
"""
if deferred:
if _show_details:
header = "%s\n" % (name)
header += " datashape := %s \n" % str(obj._datashape)
header += " metadata := %s \n" % obj._metadata
else:
header = ''
else:
if _show_details:
header = "%s\n" % (name)
header += " datashape := %s \n" % str(obj._datashape)
header += " values := %s \n" % list(obj.space)
header += " metadata := %s \n" % obj._metadata
header += " layout := %s \n" % obj._layout.desc
else:
header = ''
# Show the data below
fullrepr = header + generic_str(obj, deferred)
return fullrepr
| 5,346,784 |
def build_model(X, y, ann_hidden_dim, num_passes=20000):
"""
:param ann_hidden_dim: Number of nodes in the hidden layer
:param num_passes: Number of passes through the training data for gradient descent
:return: returns the parameters of artificial neural network for prediction using forward propagation of the parameters
"""
model = {}
# Initialize the parameters to random values.
np.random.seed(0)
w1 = np.random.randn(ann_input_dim, ann_hidden_dim) / np.sqrt(ann_input_dim)
c1 = np.zeros((1, ann_hidden_dim))
w2 = np.random.randn(ann_hidden_dim, ann_output_dim) / np.sqrt(ann_hidden_dim)
c2 = np.zeros((1, ann_output_dim))
# Batch gradient descent
for i in range(0, num_passes):
# Forward propagation
z1 = X.dot(w1) + c1
a1 = np.tanh(z1)
z2 = a1.dot(w2) + c2
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
# Back propagation
delta3 = probs
delta3[range(len(X)), y] -= 1
dw2 = (a1.T).dot(delta3)
dc2 = np.sum(delta3, axis=0, keepdims=True)
delta2 = delta3.dot(w2.T) * (1 - np.power(a1, 2))
dw1 = np.dot(X.T, delta2)
dc1 = np.sum(delta2, axis=0)
# Add regularization terms (c1 and c2 don't have regularization terms)
dw2 += REG_LAMBDA * w2
dw1 += REG_LAMBDA * w1
# Gradient descent parameter update
w1 += -EPSILON * dw1
c1 += -EPSILON * dc1
w2 += -EPSILON * dw2
c2 += -EPSILON * dc2
# Assign new parameters to the model
model = {'w1': w1, 'c1': c1, 'w2': w2, 'c2': c2}
return model
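# Illustrative usage sketch (not part of the original snippet); the module-level
# constants below (ann_input_dim, ann_output_dim, REG_LAMBDA, EPSILON) stand in
# for whatever values the original module defines, and the XOR data is made up.
import numpy as np

ann_input_dim, ann_output_dim = 2, 2
REG_LAMBDA, EPSILON = 0.01, 0.01
X = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
y = np.array([0, 1, 1, 0])
model = build_model(X, y, ann_hidden_dim=4, num_passes=1000)
print(sorted(model.keys()))  # ['c1', 'c2', 'w1', 'w2']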
| 5,346,785 |
def g_square_dis(dm, x, y, s):
"""G square test for discrete data.
Args:
dm: the data matrix to be used (as a numpy.ndarray).
x: the first node (as an integer).
y: the second node (as an integer).
        s: the set of neighbouring nodes of x and y (as a set()).
            (The levels of each column are computed internally from dm.)
Returns:
p_val: the p-value of conditional independence.
"""
levels = np.amax(dm, axis=0) + 1
def _calculate_tlog(x, y, s, dof, levels, dm):
prod_levels = np.prod(list(map(lambda x: levels[x], s)))
nijk = np.zeros((levels[x], levels[y], prod_levels))
s_size = len(s)
z = []
for z_index in range(s_size):
z.append(s.pop())
pass
for row_index in range(dm.shape[0]):
i = dm[row_index, x]
j = dm[row_index, y]
k = []
k_index = 0
for s_index in range(s_size):
if s_index == 0:
k_index += dm[row_index, z[s_index]]
else:
lprod = np.prod(list(map(lambda x: levels[x], z[:s_index])))
k_index += (dm[row_index, z[s_index]] * lprod)
pass
pass
nijk[i, j, k_index] += 1
pass
nik = np.ndarray((levels[x], prod_levels))
njk = np.ndarray((levels[y], prod_levels))
for k_index in range(prod_levels):
nik[:, k_index] = nijk[:, :, k_index].sum(axis = 1)
njk[:, k_index] = nijk[:, :, k_index].sum(axis = 0)
pass
nk = njk.sum(axis = 0)
tlog = np.zeros((levels[x], levels[y], prod_levels))
tlog.fill(np.nan)
for k in range(prod_levels):
tx = np.array([nik[:, k]]).T
ty = np.array([njk[:, k]])
tdijk = tx.dot(ty)
tlog[:, :, k] = nijk[:, :, k] * nk[k] / tdijk
pass
return (nijk, tlog)
row_size = dm.shape[0]
s_size = len(s)
dof = ((levels[x] - 1) * (levels[y] - 1)
* np.prod(list(map(lambda x: levels[x], s))))
row_size_required = 10 * dof
nijk = None
if s_size < 5:
if s_size == 0:
nijk = np.zeros((levels[x], levels[y]))
for row_index in range(row_size):
i = dm[row_index, x]
j = dm[row_index, y]
nijk[i, j] += 1
pass
tx = np.array([nijk.sum(axis = 1)]).T
ty = np.array([nijk.sum(axis = 0)])
tdij = tx.dot(ty)
tlog = nijk * row_size / tdij
pass
if s_size > 0:
nijk, tlog = _calculate_tlog(x, y, s, dof, levels, dm)
pass
pass
else:
nijk = np.zeros((levels[x], levels[y], 1))
i = dm[0, x]
j = dm[0, y]
k = []
for z in s:
k.append(dm[:, z])
pass
k = np.array(k).T
parents_count = 1
parents_val = np.array([k[0, :]])
nijk[i, j, parents_count - 1] = 1
for it_sample in range(1, row_size):
is_new = True
i = dm[it_sample, x]
j = dm[it_sample, y]
tcomp = parents_val[:parents_count, :] == k[it_sample, :]
for it_parents in range(parents_count):
if np.all(tcomp[it_parents, :]):
nijk[i, j, it_parents] += 1
is_new = False
break
pass
if is_new is True:
parents_count += 1
parents_val = np.r_[parents_val, [k[it_sample, :]]]
nnijk = np.zeros((levels[x], levels[y], parents_count))
for p in range(parents_count - 1):
nnijk[:, :, p] = nijk[:, :, p]
pass
nnijk[i, j, parents_count - 1] = 1
nijk = nnijk
pass
pass
nik = np.ndarray((levels[x], parents_count))
njk = np.ndarray((levels[y], parents_count))
for k_index in range(parents_count):
nik[:, k_index] = nijk[:, :, k_index].sum(axis = 1)
njk[:, k_index] = nijk[:, :, k_index].sum(axis = 0)
pass
nk = njk.sum(axis = 0)
tlog = np.zeros((levels[x], levels[y], parents_count))
tlog.fill(np.nan)
for k in range(parents_count):
tx = np.array([nik[:, k]]).T
ty = np.array([njk[:, k]])
tdijk = tx.dot(ty)
tlog[:, :, k] = nijk[:, :, k] * nk[k] / tdijk
pass
pass
log_tlog = np.log(tlog)
G2 = np.nansum(2 * nijk * log_tlog)
if dof == 0:
p_val = 1
else:
p_val = chi2.sf(G2, dof)
if s_size == 0:
nijk = nijk.reshape((nijk.shape[0], nijk.shape[1], 1))
log_tlog = log_tlog.reshape((log_tlog.shape[0], log_tlog.shape[1], 1))
return G2, p_val, nijk, log_tlog
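# Illustrative usage sketch (not part of the original snippet); it assumes numpy
# and scipy.stats.chi2 as used above, and dm is a made-up binary data matrix with
# three variables. Here we test independence of columns 0 and 1 given column 2.
import numpy as np
from scipy.stats import chi2

rng = np.random.RandomState(0)
dm = rng.randint(0, 2, size=(200, 3))
G2, p_val, nijk, log_tlog = g_square_dis(dm, 0, 1, {2})
print(G2 >= 0, 0.0 <= p_val <= 1.0)  # True True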
| 5,346,786 |
def start(name):
"""
Start the specified service
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
"""
cmd = "/usr/sbin/svcadm enable -s -t {0}".format(name)
retcode = __salt__["cmd.retcode"](cmd, python_shell=False)
if not retcode:
return True
if retcode == 3:
# Return code 3 means there was a problem with the service
# A common case is being in the 'maintenance' state
# Attempt a clear and try one more time
clear_cmd = "/usr/sbin/svcadm clear {0}".format(name)
__salt__["cmd.retcode"](clear_cmd, python_shell=False)
return not __salt__["cmd.retcode"](cmd, python_shell=False)
return False
| 5,346,787 |
def line_coloring(num_vertices) -> Dict:
"""
Creates an edge coloring of the line graph, corresponding to the optimal
line swap strategy, given as a dictionary where the keys
correspond to the different colors and the values are lists of edges (where edges
are specified as tuples). The graph coloring consists of one color for all even-numbered
edges and one color for all odd-numbered edges.
Args:
num_vertices: The number of vertices in the line graph
Returns:
Graph coloring as a dictionary of edge lists
"""
    coloring = {}  # local dict; avoids shadowing the function name
    for i in range(num_vertices - 1):
        coloring[(i, i + 1)] = i % 2
        coloring[(i + 1, i)] = i % 2
    return coloring
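# Illustrative usage sketch (not part of the original snippet): on a 4-vertex
# line graph the two colors alternate along the path.
print(line_coloring(4))
# {(0, 1): 0, (1, 0): 0, (1, 2): 1, (2, 1): 1, (2, 3): 0, (3, 2): 0}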
| 5,346,788 |
def flatten_outputs(predictions, number_of_classes):
"""Flatten the prediction batch except the prediction dimensions"""
logits_permuted = predictions.permute(0, 2, 3, 1)
logits_permuted_cont = logits_permuted.contiguous()
outputs_flatten = logits_permuted_cont.view(-1, number_of_classes)
return outputs_flatten
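# Illustrative usage sketch (not part of the original snippet); it assumes torch
# is available and uses a made-up batch of 4 three-class score maps of size 8x8.
import torch

preds = torch.randn(4, 3, 8, 8)
print(flatten_outputs(preds, number_of_classes=3).shape)  # torch.Size([256, 3])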
| 5,346,789 |
def pytest_sessionstart(session):
"""
Called after the Session object has been created and
before performing collection and entering the run test loop.
"""
"session start"
if session.config.getoption("--cache-requests"):
requests_cache.install_cache(
os.path.join(tempfile.gettempdir(), "requests.cache"),
backend="sqlite",
allowable_methods=("GET", "POST"),
)
ensure_example_script()
urllib3.disable_warnings()
if IS_WINDOWS:
if sys.version_info >= (3, 8):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
| 5,346,790 |
def get_r_vals(cell_obj):
"""Get radial distances for inner and outer membranes for the cell object"""
r_i = cell_obj.coords.calc_rc(cell_obj.data.data_dict['storm_inner']['x'],
cell_obj.data.data_dict['storm_inner']['y'])
r_o = cell_obj.coords.calc_rc(cell_obj.data.data_dict['storm_outer']['x'],
cell_obj.data.data_dict['storm_outer']['y'])
return r_i, r_o
| 5,346,791 |
def test_patch_accessor(api_rf, km_user_accessor_factory):
"""
Sending a PATCH request to the view should update the accessor with
the given ID.
"""
accessor = km_user_accessor_factory(is_admin=False)
data = {"is_admin": True}
api_rf.user = accessor.km_user.user
request = api_rf.patch("/", data)
response = accessor_detail_view(request, pk=accessor.pk)
assert response.status_code == status.HTTP_200_OK
accessor.refresh_from_db()
assert accessor.is_admin == data["is_admin"]
| 5,346,792 |
def get_all_migrations(ctxt, inactive=0):
"""Get all non-deleted source hypervisors.
Pass true as argument if you want deleted sources returned also.
"""
return db.migration_get_all(ctxt, inactive)
| 5,346,793 |
def extract(input_data: str) -> tuple:
"""take input data and return the appropriate data structure"""
rules = input_data.split('\n')
graph = dict()
reverse_graph = dict()
for rule in rules:
container, contents = rule.split('contain')
container = ' '.join(container.split()[:2])
content_graph = dict()
for content in contents.split(','):
if content == " no other bags.":
break
parts = content.split()
amount = int(parts[0])
color = ' '.join(parts[1:3])
content_graph[color] = amount
if color in reverse_graph.keys():
reverse_graph[color].append(container)
else:
reverse_graph[color] = [container]
graph[container] = content_graph
return (graph, reverse_graph)
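# Illustrative usage sketch (not part of the original snippet); the rule text
# follows the puzzle's "X bags contain N Y bags" input format.
rules_text = ("light red bags contain 1 bright white bag, 2 muted yellow bags.\n"
              "bright white bags contain 1 shiny gold bag.\n"
              "faded blue bags contain no other bags.")
graph, reverse_graph = extract(rules_text)
print(graph["light red"])           # {'bright white': 1, 'muted yellow': 2}
print(reverse_graph["shiny gold"])  # ['bright white']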
| 5,346,794 |
def test_send_control_c_windows():
"""Test send control c on Windows."""
process = Popen( # nosec
["timeout" if platform.system() == "Windows" else "sleep", "5"]
)
time.sleep(0.001)
pid = process.pid
with patch("aea.helpers.base.signal") as mock_signal:
mock_signal.CTRL_C_EVENT = "mock"
with patch("platform.system", return_value="Windows"):
with patch("os.kill") as mock_kill:
send_control_c(process)
mock_kill.assert_called_with(pid, mock_signal.CTRL_C_EVENT)
| 5,346,795 |
def get_total_indemnity(date_of_joining, to_date):
"""To Calculate the total Indemnity of an employee based on employee's Joining date.
Args:
date_of_joining ([date]): Employee's Joining Date
        to_date ([date]): date up to which the indemnity is calculated
Returns:
total_allocation: Total Indemnity Allocation calculated from joining date till 'to_date'.
"""
#get no. of year and days employee has worked.
total_working_year = relativedelta(to_date, date_of_joining ).years
total_working_days = (to_date - date_of_joining).days
    #reason: any days after completing 5 years use a different calculation.
five_year_in_days = 5*365
# up until 5 years of working year, the monthly calculation takes "15 days" salary in to consideration.
if total_working_year < 5 or (total_working_year == 5 and total_working_days == 5*365):
#15 days salary is divided over a year and that becomes each day's allocation.
return 15 / 365 * total_working_days
elif total_working_year >= 5 and total_working_days > 5*365:
#calculation takes 15 days salary for 5 years and 30 days salary after 5 years
return (15 / 365 * five_year_in_days) + (30 / 365 * (total_working_days-five_year_in_days))
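# Illustrative usage sketch (not part of the original snippet); relativedelta is
# assumed to come from dateutil, as the function requires, and the dates are made up.
from datetime import date
from dateutil.relativedelta import relativedelta

print(round(get_total_indemnity(date(2018, 1, 1), date(2020, 1, 1)), 2))
# 30.0 -> 730 days accruing 15/365 days of salary per day during the first five years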
| 5,346,796 |
def run_species_phylogeny_iqtree(roary_folder, collection_dir, threads=8, overwrite=False, timing_log=None):
"""
Run iqtree to create phylogeny tree from core gene alignment. If the list of samples has
not changed, and none of the samples has changed, the existing tree will be kept unless
overwrite is set to True
Parameters
----------
    roary_folder: str
        Roary output folder containing the core gene alignment
collection_dir: str
working directory of the collection
threads: int
number of threads to use
overwrite: bool
whether to overwrite existing result even if input did not change
timing_log: str
file to log timing
    Returns
    -------
    phylogeny_folder: str
        path of the folder holding the phylogeny results
"""
phylogeny_folder = os.path.join(collection_dir, 'phylogeny')
if not os.path.exists(phylogeny_folder):
os.makedirs(phylogeny_folder)
#report['phylogeny'] = phylogeny_folder
phylogeny_file = os.path.join(phylogeny_folder, 'core_gene_alignment.treefile')
if os.path.isfile(phylogeny_file) and (not overwrite):
logger.info('phylogeny tree exists and input has not changed, skip phylogeny analysis')
return phylogeny_folder
aln_file = os.path.join(phylogeny_folder, 'core_gene_alignment.aln.gz')
if not os.path.isfile(aln_file):
        aln_file = os.path.join(roary_folder, 'core_gene_alignment.aln.gz')
cmd = 'iqtree -s {alignment} --prefix {prefix} -B 1000 -T {threads} -czb -keep-ident'.format(
alignment=aln_file, prefix=phylogeny_folder+'/core_gene_alignment', threads=threads)
ret = run_command(cmd, timing_log)
if ret != 0:
raise Exception('iqtree fail to create phylogeny tree from core gene alignment!')
return phylogeny_folder
| 5,346,797 |
def api_2_gamma_oil(value):
"""
converts density in API(American Petroleum Institute gravity) to gamma_oil (oil relative density by water)
:param value: density in API(American Petroleum Institute gravity)
:return: oil relative density by water
"""
    return 141.5 / (value + 131.5)
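# Illustrative usage sketch (not part of the original snippet), using the standard
# relation gamma_oil = 141.5 / (131.5 + API): oil with API 10 has the density of water.
print(api_2_gamma_oil(10))            # 1.0
print(round(api_2_gamma_oil(30), 3))  # 0.876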
| 5,346,798 |
def compute_metrics(pred, label):
"""Compute metrics like True/False Positive, True/False Negative.`
MUST HAVE ONLY 2 CLASSES: BACKGROUND, OBJECT.
Args:
pred (numpy.ndarray): Prediction, one-hot encoded. Shape: [2, H, W], dtype: uint8
label (numpy.ndarray): Ground Truth, one-hot encoded. Shape: [H, W], dtype: uint8
Returns:
float: IOU, TP, TN, FP, FN
"""
if len(pred.shape) > 3:
raise ValueError("pred should have shape [2, H, W], got: {}".format(pred.shape))
if len(label.shape) > 2:
raise ValueError("label should have shape [H, W], got: {}".format(label.shape))
total_pixels = pred.shape[0] * pred.shape[1]
tp = np.sum(np.logical_and(pred == 1, label > 0))
tn = np.sum(np.logical_and(pred == 0, label == 0))
fp = np.sum(np.logical_and(pred == 1, label == 0))
fn = np.sum(np.logical_and(pred == 0, label > 0))
if (tp + tn + fp + fn) != total_pixels:
raise ValueError('The number of total pixels ({}) and sum of tp,fp,tn,fn ({}) is not equal'.format(
total_pixels, (tp + tn + fp + fn)))
iou = tp / (tp + fp + fn)
_tp = tp / np.sum(label == 1)
tp_rate = (tp / (tp + fn)) * 100
fp_rate = (fp / (fp + tn)) * 100
tn_rate = (tn / (tn + fp)) * 100
fn_rate = (fn / (fn + tp)) * 100
return iou, tp_rate, tn_rate, fp_rate, fn_rate
| 5,346,799 |