def batch_process_image_to_spots(dax_filename, sel_channels,
save_filename,
data_type, region_ids,
ref_filename,
load_file_lock=None,
warp_image=True, correction_args={},
save_image=True, empty_value=0,
fov_savefile_lock=None,
overwrite_image=False,
drift_args={}, save_drift=True,
drift_filename=None,
drift_file_lock=None,
overwrite_drift=False,
fit_spots=True,
fit_in_mask=False,
fitting_args={},
save_spots=True,
spot_file_lock=None,
overwrite_spot=False,
verbose=False):
"""run by multi-processing to batch process images to spots
Inputs:
Outputs:
_spots: fitted spots for this image
"""
## check inputs
# dax_filename
    if not isinstance(dax_filename, str) or dax_filename[-4:] != '.dax':
        raise IOError(f"Dax file: {dax_filename} has wrong data type, exit!")
    if not os.path.isfile(dax_filename):
        raise IOError(f"Dax file: {dax_filename} is not a file, exit!")
# selected channels
sel_channels = [str(ch) for ch in sel_channels]
if verbose:
print(f"+ batch process image: {dax_filename} for channels:{sel_channels}")
# save filename
    if not isinstance(save_filename, str) or save_filename[-5:] != '.hdf5':
        raise IOError(f"HDF5 file: {save_filename} has wrong data type, exit!")
    if not os.path.isfile(save_filename):
        raise IOError(f"HDF5 file: {save_filename} is not a file, exit!")
    # ref_filename
if isinstance(ref_filename, str):
if not os.path.isfile(ref_filename):
raise IOError(f"Dax file: {ref_filename} is not a file, exit!")
elif ref_filename[-4:] != '.dax':
raise IOError(f"Dax file: {ref_filename} has wrong data type, exit!")
elif isinstance(ref_filename, np.ndarray):
pass
else:
raise TypeError(f"ref_filename should be np.ndarray or string of path, but {type(ref_filename)} is given")
# region ids
if len(region_ids) != len(sel_channels):
raise ValueError(f"Wrong input region_ids:{region_ids}, should of same length as sel_channels:{sel_channels}.")
region_ids = [int(_id) for _id in region_ids] # convert to ints
# judge if images exist
# initiate lock
    if fov_savefile_lock is not None:
        fov_savefile_lock.acquire()
    _ims, _warp_flags, _drifts = load_image_from_fov_file(save_filename,
                                                          data_type, region_ids,
                                                          load_drift=True,
                                                          verbose=verbose)
    # release lock
    if fov_savefile_lock is not None:
        fov_savefile_lock.release()
# determine which image should be processed
# initialize processing images and channels
_process_flags = []
_process_sel_channels = []
    # initialize carried-over images and channels
_carryover_ims = []
_carryover_sel_channels = []
for _im, _flg, _drift, _rid, _ch in zip(_ims, _warp_flags, _drifts, region_ids, sel_channels):
# if decided to overwrite image or overwrite drift, proceed
if overwrite_image or overwrite_drift:
_process_flags.append(True)
_process_sel_channels.append(_ch)
else:
            # note: no requirement on drift here, because a drift of zero is legitimate
            if (_im != empty_value).any() and _flg - 1 == int(warp_image):
                # image exists, no need to process from the beginning
_process_flags.append(False)
_carryover_ims.append(_im.copy() )
_carryover_sel_channels.append(_ch)
else:
_process_flags.append(True)
_process_sel_channels.append(_ch)
# release RAM
    del _ims
    # collect the unique drifts recorded for these images
    _process_drift = list(set(tuple(_dft) for _dft in _drifts))
    # exactly one unique non-zero drift exists: use it directly
    if len(_process_drift) == 1 and np.array(_process_drift[0]).any() and not overwrite_drift:
        _process_drift = np.array(_process_drift[0])
        _corr_drift = False
    # otherwise the drift is unknown (or should be overwritten): recompute it
    else:
        _process_drift = np.zeros(len(_process_drift[0]))
        _corr_drift = True
## if any image to be processed:
if np.sum(_process_flags) > 0:
if verbose:
print(f"-- {_process_sel_channels} images are required to process, {_carryover_sel_channels} images are loaded from save file: {save_filename}")
## correct images
if warp_image:
_processed_ims, _drift = correct_fov_image(dax_filename,
_process_sel_channels,
load_file_lock=load_file_lock,
calculate_drift=_corr_drift,
drift=_process_drift,
ref_filename=ref_filename,
warp_image=warp_image,
return_drift=True, verbose=verbose,
**correction_args, **drift_args)
else:
_processed_ims, _processed_warp_funcs, _drift = correct_fov_image(
dax_filename,
_process_sel_channels,
load_file_lock=load_file_lock,
calculate_drift=_corr_drift,
drift=_process_drift,
ref_filename=ref_filename,
warp_image=warp_image,
return_drift=True, verbose=verbose,
**correction_args, **drift_args)
# nothing processed, create empty list
else:
_processed_ims = []
if not warp_image:
_processed_warp_funcs = []
_drift = np.array(_process_drift) # use old drift
## merge processed and carryover images
_sel_ims = []
for _ch, _flg in zip(sel_channels, _process_flags):
if not _flg:
_sel_ims.append(_carryover_ims.pop(0))
else:
_sel_ims.append(_processed_ims.pop(0))
if not warp_image:
_warp_funcs = []
for _ch, _flg in zip(sel_channels, _process_flags):
if not _flg:
from ..correction_tools.chromatic import generate_chromatic_function
_warp_funcs.append(
generate_chromatic_function(correction_args['chromatic_profile'][str(_ch)], _drift)
)
else:
_warp_funcs.append(
_processed_warp_funcs.pop(0)
)
## save image if specified
if save_image:
# initiate lock
        if fov_savefile_lock is not None:
            fov_savefile_lock.acquire()
# run saving
_save_img_success = save_image_to_fov_file(
save_filename, _sel_ims, data_type, region_ids,
warp_image, _drift, overwrite_image, verbose)
# release lock
        if fov_savefile_lock is not None:
            fov_savefile_lock.release()
## save drift if specified
if save_drift:
        # construct a default drift filename if none was given
        if drift_filename is None:
drift_folder = os.path.join(os.path.dirname(os.path.dirname(dax_filename)),
'Analysis', 'drift')
if not os.path.exists(drift_folder):
print(f'* Create drift folder: {drift_folder}')
os.makedirs(drift_folder)
drift_filename = os.path.join(drift_folder,
os.path.basename(dax_filename).replace('.dax', '_current_cor.pkl'))
_key = os.path.join(os.path.basename(os.path.dirname(dax_filename)),
os.path.basename(dax_filename))
# initiate lock
        if drift_file_lock is not None:
            drift_file_lock.acquire()
# run saving
_save_drift_success = save_drift_to_file(drift_filename,
dax_filename, _drift,
overwrite_drift, verbose)
# release lock
        if drift_file_lock is not None:
            drift_file_lock.release()
## multi-fitting
if fit_spots:
# check fit_in_mask
if fit_in_mask:
            if 'seed_mask' not in fitting_args or fitting_args['seed_mask'] is None:
                raise KeyError("seed_mask must be given when fit_in_mask is specified")
            # work on a copy so the caller's fitting_args (and the shared
            # mutable default argument) are not modified across calls
            fitting_args = dict(fitting_args)
            # translate this mask according to drift
            if verbose:
                print(f"-- start translating seed_mask by drift: {_drift}", end=' ')
                _translate_start = time.time()
            _shifted_mask = ndimage.shift(fitting_args['seed_mask'],
                                          -_drift,
                                          mode='constant',
                                          cval=0)
            fitting_args['seed_mask'] = _shifted_mask
if verbose:
print(f"-- in {time.time()-_translate_start:.2f}s.")
_translate_start = time.time()
_raw_spot_list = []
_spot_list = []
for _ich, (_im, _ch) in enumerate(zip(_sel_ims, sel_channels)):
_raw_spots = fit_fov_image(
_im, _ch, verbose=verbose,
**fitting_args,
)
if not warp_image:
                # update spot coordinates with the warp functions, because the image was not warped
_func = _warp_funcs[_ich]
_spots = _func(_raw_spots)
#print(f"type: {type(_spots)} for {dax_filename}, region {region_ids[_ich]} channel {_ch}, {_func}")
else:
_spots = _raw_spots.copy()
# append
_spot_list.append(_spots)
_raw_spot_list.append(_raw_spots)
## save fitted_spots if specified
if save_spots:
# initiate lock
if spot_file_lock is not None:
spot_file_lock.acquire()
# run saving
_save_spt_success = save_spots_to_fov_file(
save_filename, _spot_list, data_type, region_ids,
raw_spot_list=_raw_spot_list,
overwrite=overwrite_spot, verbose=verbose)
# release lock
if spot_file_lock is not None:
spot_file_lock.release()
else:
_spot_list = np.array([])
    return _spot_list
| 5,348,500 |
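# The sketch below (entirely hypothetical paths, channels, region ids and data_type)
# shows one way the batch function above could be dispatched over several fields of
# view with a process pool; a multiprocessing.Manager lock stands in for the
# shared-file locks the function accepts. It is an illustration, not part of the
# original pipeline.
import multiprocessing as mp

def _run_one_fov(kwargs):
    # thin wrapper so keyword arguments can be mapped through the pool
    return batch_process_image_to_spots(**kwargs)

def _example_dispatch():
    dax_files = ['/data/fov_0.dax', '/data/fov_1.dax']   # hypothetical raw images
    manager = mp.Manager()
    save_lock = manager.Lock()      # serializes access to the shared hdf5 save files
    jobs = [dict(
        dax_filename=dax,
        sel_channels=['750', '647'],                  # hypothetical color channels
        save_filename=dax.replace('.dax', '.hdf5'),   # assumed to already exist
        data_type='unique',                           # guessed data_type label
        region_ids=[0, 1],                            # one region id per channel
        ref_filename='/data/ref_fov.dax',             # hypothetical reference frame
        fov_savefile_lock=save_lock,
        verbose=True,
    ) for dax in dax_files]
    with mp.Pool(processes=2) as pool:
        return pool.map(_run_one_fov, jobs)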
def upload(target='local'):
""" Release to a given pypi server ('local' by default). """
sysmsg("Uploading to pypi server \033[33m{}".format(target))
local('python setup.py sdist register -r "{}"'.format(target))
local('python setup.py sdist upload -r "{}"'.format(target))
| 5,348,501 |
def timer(jarvis, s):
"""
Set a timer
R Reset
SPACE Pause
Q Quit
Usages:
timer 10
timer 1h5m30s
"""
k = s.split(' ', 1)
if k[0] == '':
jarvis.say("Please specify duration")
return
timer_cmd = "python -m termdown " + k[0]
system(timer_cmd)
| 5,348,502 |
def inner_xml(xml_text):
"""
Get the inner xml of an element.
>>> inner_xml('<div>This is some <i><b>really</b> silly</i> text!</div>')
u'This is some <i><b>really</b> silly</i> text!'
"""
return unicode(INNER_XML_RE.match(xml_text).groupdict()['body'])
| 5,348,503 |
def store_tags():
"""Routing: Stores the (updated) tag data for the image."""
data = {
"id": request.form.get("id"),
"tag": request.form.get('tags'),
"SHOWN": 0
}
loader.store(data)
next_image = loader.next_data()
if next_image is None:
return redirect("/finished")
target = "/"
if next_image:
target = f"/?image_id={next_image['id']}"
return redirect(location=target)
| 5,348,504 |
def getAssets(public_key: str) -> list:
"""
Get all the balances an account has.
"""
balances = server.accounts().account_id(public_key).call()['balances']
balances_to_return = [ {"asset_code": elem.get("asset_code"), "issuer": elem.get("asset_issuer"), "balance": elem.get("balance")} for elem in balances ]
balances_to_return[-1]["asset_code"] = "XLM"
return balances_to_return
| 5,348,505 |
def parse_pattern(format_string, env, wrapper=lambda x, y: y):
""" Parse the format_string and return prepared data according to the env.
Pick each field found in the format_string from the env(ironment), apply
the wrapper on each data and return a mapping between field-to-replace and
values for each.
"""
formatter = Formatter()
fields = [x[1] for x in formatter.parse(format_string) if x[1] is not None]
prepared_env = {}
# Create a prepared environment with only used fields, all as list:
for field in fields:
# Search for a movie attribute for each alternative field separated
# by a pipe sign:
for field_alt in (x.strip() for x in field.split('|')):
# Handle default values (enclosed by quotes):
if field_alt[0] in '\'"' and field_alt[-1] in '\'"':
field_values = field_alt[1:-1]
else:
field_values = env.get(field_alt)
if field_values is not None:
break
else:
field_values = []
if not isinstance(field_values, list):
field_values = [field_values]
prepared_env[field] = wrapper(field_alt, field_values)
return prepared_env
| 5,348,506 |
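# A small illustrative call to parse_pattern above, with a made-up environment; note
# that a field keeps its full text (including pipe-separated alternatives) as the key
# of the returned mapping.
env = {'title': 'Alien', 'year': 1979}
print(parse_pattern("{title} ({year|'unknown'})", env,
                    wrapper=lambda field, values: values))
# -> {'title': ['Alien'], "year|'unknown'": [1979]}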
def test_minimize_score_with_worsened_symptom(integration_db):
"""
Minimize ingredients contained in Recipe[0] are:
saury fish: 10 (ref. 10)
cabbage: 200 (ref. 30|200 )
fish: 30 (ref: 20 )
this time the formula would be:
saury-fish (ignored) - (60 * 1.5) - (30 * 1.5)
"""
from datetime import datetime
integration_db.patient_symptoms.insert_one({
'symptom_id': TAGS[0]['tag_id'],
'patient_id': PATIENT['_id'],
'created_at': datetime(2019, 10, 15),
'updated_at': datetime(2019, 10, 15),
'symptoms_scale': 7
})
assert_equal_objects(
MinimizedScore(RECIPES[0]['_id'], PATIENT['_id']).worsen_ingredients,
['saury fish', 'cabbage', 'fish', 'komatsuna', 'pak choi']
)
assert MinimizedScore(RECIPES[0]['_id'], PATIENT['_id']).value == -135
| 5,348,507 |
def u1_series_summation(xarg, a, kmax):
"""
5.3.2 ROUTINE - U1 Series Summation
PLATE 5-10 (p32)
:param xarg:
:param a:
:param kmax:
:return: u1
"""
du1 = 0.25*xarg
u1 = du1
f7 = -a*du1**2
k = 3
while k < kmax:
du1 = f7*du1 / (k*(k-1))
u1old = u1
u1 = u1+du1
if u1 == u1old:
break
k = k+2
return u1
| 5,348,508 |
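# Sanity check for u1_series_summation above (my reading, not from the source plate):
# the loop sums chi - a*chi**3/3! + a**2*chi**5/5! - ... with chi = xarg/4, so for
# a > 0 the result should approach sin(sqrt(a)*chi)/sqrt(a).
import math
a, xarg = 1.0, 2.0
chi = 0.25 * xarg
print(u1_series_summation(xarg, a, kmax=25))          # ~0.479426
print(math.sin(math.sqrt(a) * chi) / math.sqrt(a))    # ~0.479426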
def mask_iou(masks_a, masks_b, iscrowd=False):
"""
    Computes the pairwise mask IoU between two sets of masks of size [a, h, w] and [b, h, w].
The output is of size [a, b].
Wait I thought this was "box_utils", why am I putting this in here?
"""
masks_a = masks_a.view(masks_a.size(0), -1)
masks_b = masks_b.view(masks_b.size(0), -1)
matmul = nn.MatMul()
intersection = matmul(masks_a, masks_b.T)
mask_iou_sum = P.ReduceSum()
expand_dims = P.ExpandDims()
area_a = expand_dims(mask_iou_sum(masks_a, 1), 1)
area_b = expand_dims(mask_iou_sum(masks_b, 1), 0)
return intersection / (area_a + area_b - intersection) if not iscrowd else intersection / area_a
| 5,348,509 |
def async_worker_handler(event: Dict[str, Any], _: Any):
"""Process the tickets"""
_logger.info(event)
job_id = event.get('job_id')
try:
db_table.get(job_id)
tickets = inventory_parser.from_tsv(storage.get(job_id))
total_value = sum([ticket.value for ticket in tickets])
db_table.put({
'job_id': job_id,
'status': STATUSES.SUCCEEDED,
'total_value': total_value
})
except (S3StorageError, DynamoDBError, InvalidInventoryDataFormatError) as e:
_logger.error(e)
db_table.put({
'job_id': job_id,
'status': STATUSES.FAILED
})
        raise AsyncWorkerError(f'Unable to process job with "job_id":{job_id}')
| 5,348,510 |
def normalized_grid_coords(height, width, aspect=True, device="cuda"):
"""Return the normalized [-1, 1] grid coordinates given height and width.
Args:
height (int) : height of the grid.
width (int) : width of the grid.
aspect (bool) : if True, use the aspect ratio to scale the coordinates, in which case the
                        coords will not be normalized to [-1, 1]. (Default: True)
device : the device the tensors will be created on.
"""
aspect_ratio = width/height if aspect else 1.0
window_x = torch.linspace(-1, 1, steps=width, device=device) * aspect_ratio
window_y = torch.linspace(1, -1, steps=height, device=device)
coord = torch.stack(torch.meshgrid(window_x, window_y, indexing='ij')).permute(2,1,0)
return coord
| 5,348,511 |
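# Quick CPU shape check for normalized_grid_coords above (only assumes torch is
# installed): the result is a (height, width, 2) tensor of (x, y) coordinates.
import torch
coords = normalized_grid_coords(height=2, width=3, aspect=False, device="cpu")
print(coords.shape)     # torch.Size([2, 3, 2])
print(coords[0, 0])     # tensor([-1., 1.])  top-left corner (x, y)
print(coords[-1, -1])   # tensor([1., -1.])  bottom-right corner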
def test_copy_object_modified_since(log_entry):
"""Test copy_object() with modified since condition."""
# Get a unique bucket_name and object_name
bucket_name = _gen_bucket_name()
object_name = "{0}".format(uuid4())
object_source = object_name + "-source"
object_copy = object_name + "-copy"
log_entry["args"] = {
"bucket_name": bucket_name,
"object_source": object_source,
"object_name": object_copy,
}
try:
_CLIENT.make_bucket(bucket_name)
# Upload a streaming object of 1 KiB
size = 1 * KB
reader = LimitedRandomReader(size)
_CLIENT.put_object(bucket_name, object_source, reader, size)
# Set up the 'modified_since' copy condition
copy_conditions = CopyConditions()
mod_since = datetime(2014, 4, 1, tzinfo=utc)
copy_conditions.set_modified_since(mod_since)
log_entry["args"]["conditions"] = {
'set_modified_since': mod_since.strftime('%c')}
# Perform a server side copy of an object
# and expect the copy to complete successfully
_CLIENT.copy_object(bucket_name, object_copy,
'/' + bucket_name + '/' + object_source,
copy_conditions)
finally:
_CLIENT.remove_object(bucket_name, object_source)
_CLIENT.remove_object(bucket_name, object_copy)
_CLIENT.remove_bucket(bucket_name)
| 5,348,512 |
def _verify_env_variables():
"""
    Verifies that the required env variable(s) exist and are valid.
:return: None
"""
if os.getenv('remote_repo_path') is None and os.getenv('local_repo_path') is None:
complain('One of: remote_repo_path or local_repo_path is required. Aborting.')
if os.getenv('action') not in actions:
complain('\'action\' should be one of: \'build\', \'deploy\'')
required_vars = ['action', 'branch', 'build_from', 's3_bucket', 's3_bucket_prefix', 'default_stack_name']
missing_vars = []
for var in required_vars:
if os.getenv(var) is None:
missing_vars.append(var)
if len(missing_vars) > 0:
complain('Required env variable(s): {} not found. Aborting.'.format(missing_vars))
| 5,348,513 |
def ray_map(task: Task, *item_lists: Iterable[List[Any]], log_dir: Optional[Path] = None) -> List[Any]:
"""
Initialize ray, align item lists and map each item of a list of arguments to a callable and executes in parallel.
:param task: callable to be run
:param item_lists: items to be parallelized
:param log_dir: directory to store worker logs
:return: list of outputs
"""
try:
results = _ray_map_items(task, *item_lists, log_dir=log_dir)
return results
except (RayTaskError, Exception) as exc:
ray.shutdown()
traceback.print_exc()
raise RuntimeError(exc)
| 5,348,514 |
def consensus_kmeans(data=None,
k=0,
linkage='average',
nensemble=100,
kmin=None,
kmax=None):
"""Perform clustering based on an ensemble of k-means partitions.
Parameters
----------
data : array
An m by n array of m data samples in an n-dimensional space.
k : int, optional
Number of clusters to extract; if 0 uses the life-time criterion.
linkage : str, optional
Linkage criterion for final partition extraction; one of 'average',
'centroid', 'complete', 'median', 'single', 'ward', or 'weighted'.
nensemble : int, optional
Number of partitions in the ensemble.
kmin : int, optional
Minimum k for the k-means partitions; defaults to :math:`\\sqrt{m}/2`.
kmax : int, optional
Maximum k for the k-means partitions; defaults to :math:`\\sqrt{m}`.
Returns
-------
clusters : dict
Dictionary with the sample indices (rows from 'data') for each found
cluster; outliers have key -1; clusters are assigned integer keys
starting at 0.
"""
# check inputs
if data is None:
raise TypeError("Please specify input data.")
N = len(data)
if kmin is None:
kmin = int(round(np.sqrt(N) / 2.))
if kmax is None:
kmax = int(round(np.sqrt(N)))
# initialization grid
grid = {
        # np.random.randint excludes the high end, hence kmax + 1 (random_integers is deprecated)
        'k': np.random.randint(low=kmin, high=kmax + 1, size=nensemble)
}
# run consensus
clusters, = consensus(data=data,
k=k,
linkage=linkage,
fcn=kmeans,
grid=grid)
return utils.ReturnTuple((clusters,), ('clusters',))
| 5,348,515 |
def save_api_dataset(process_df, raw_df, path, query_type, param_class,
data_period):
"""Save processed datasets at regular monthly intervals.
Args:
process_df (pandas DataFrame):
An SDFS formatted dataset for query data returned by the API
service.
raw_df (pandas DataFrame):
A dataset containing unmodified data returned by the API service.
path (str):
            The project path where the ``/data/reference_data`` subdirectory
            is housed. Data are saved in the reference data subdirectory
            structure within this parent directory.
        query_type (str):
            The name of the API service used to retrieve query data.
        param_class (str):
A term for sorting the parameter into one of three environmental
parameter classifications, either ‘PM’ for particulate matter
pollutants, ‘Gases’ for gaseous pollutants, or ‘Met’ for
meteorological environmental parameters.
data_period (list):
A list of length 2, containing the start date for the monthly
period (index position 0), and end date (index position 1). Each
element is a string with date format 'YYYYMMDD'.
Returns:
None.
"""
# Use the site name and AQS ID to name subfolder containing
# site data
try:
site_name = process_df['Site_Name'].mode()[0]
site_list = site_name.title().split(None)
site_name = '_'.join(site_list)
except KeyError:
site_name = 'Unspecified_Site_Name'
try:
site_aqs = process_df['Site_AQS'].mode()[0]
site_aqs = site_aqs.replace('-', '').replace(' ', '')
except KeyError:
site_aqs = 'Unspecified_Site_ID'
folder = '{0}_{1}'.format(site_name, site_aqs)
data_path = os.path.join(path,
'data',
'reference_data',
query_type.lower())
process_path = os.path.join(data_path, 'processed', folder)
raw_path = os.path.join(data_path, 'raw', folder)
if not os.path.exists(process_path):
os.makedirs(process_path)
if not os.path.exists(raw_path):
os.makedirs(raw_path)
year_month = pd.to_datetime(data_period[0]).strftime('%Y%m')
filename = f'H_{year_month}_{param_class}.csv'
process_df.to_csv(os.path.join(process_path, filename))
raw_df.to_csv(os.path.join(raw_path, filename))
| 5,348,516 |
def to_cftime(date, calendar="gregorian"):
"""Convert datetime object to cftime object.
Parameters
----------
date : datetime object
Datetime object.
calendar : str
Calendar of the cftime object.
Returns
-------
cftime : cftime object
        Cftime object.
"""
if type(date) == dt.date:
date = dt.datetime.combine(date, dt.time())
elif isinstance(date, cfdt.datetime):
# do nothing
return date
return cfdt.datetime(
date.year,
date.month,
date.day,
date.hour,
date.minute,
date.second,
date.microsecond,
calendar=calendar,
)
| 5,348,517 |
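# Brief usage sketch for to_cftime above (assuming the module aliases dt = datetime
# and cfdt = cftime): a plain date is promoted to midnight in the requested calendar,
# and an existing cftime datetime is returned unchanged.
import datetime as dt
print(to_cftime(dt.date(2000, 3, 1), calendar="noleap"))
# -> a cftime datetime at 2000-03-01 00:00:00 in the 'noleap' calendar
print(to_cftime(dt.datetime(2000, 3, 1, 12), calendar="360_day"))
# -> a cftime datetime at 2000-03-01 12:00:00 in the '360_day' calendar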
def _test_cross_zernikes(testj=4, nterms=10, npix=500):
"""Verify the functions are orthogonal, by taking the
integrals of a given Zernike times N other ones.
Parameters :
--------------
testj : int
Index of the Zernike polynomial to test against the others
nterms : int
Test that polynomial against those from 1 to this N
npix : int
Size of array to use for this test
"""
zj = zernike.zernike1(testj, npix=npix)
assert np.sum(np.isfinite(zj)) > 0, "Zernike calculation failure; all NaNs."
zbasis = zernike.zernike_basis(nterms=nterms, npix=npix)
for idx, z in enumerate(zbasis):
j = idx + 1
if j == testj or j == 1:
continue # discard piston term and self
prod = z * zj
wg = np.where(np.isfinite(prod))
cross_sum = np.abs(prod[wg].sum())
assert cross_sum < 1e-9, (
"orthogonality failure, Sum[Zernike(j={}) * Zernike(j={})] = {} (> 1e-9)".format(
j, testj, cross_sum)
)
| 5,348,518 |
def poly_to_mask(mask_shape, vertices):
"""Converts a polygon to a boolean mask with `True` for points
lying inside the shape. Uses the bounding box of the vertices to reduce
computation time.
Parameters
----------
mask_shape : np.ndarray | tuple
1x2 array of shape of mask to be generated.
vertices : np.ndarray
Nx2 array of the vertices of the polygon.
Returns
-------
mask : np.ndarray
Boolean array with `True` for points inside the polygon
"""
return polygon2mask(mask_shape, vertices)
| 5,348,519 |
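# Small self-contained example of poly_to_mask above; vertices are given in
# (row, column) order, matching skimage.draw.polygon2mask.
import numpy as np
vertices = np.array([[1, 1], [1, 4], [4, 1]])   # a triangle inside a 5x5 grid
mask = poly_to_mask((5, 5), vertices)
print(mask.dtype, mask.shape)   # bool (5, 5)
print(int(mask.sum()))          # number of pixels inside the triangle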
def get_nn_edges(
basis_vectors,
extent,
site_offsets,
pbc,
distance_atol,
order,
):
"""For :code:`order == k`, generates all edges between up to :math:`k`-nearest
neighbor sites (measured by their Euclidean distance). Edges are colored by length
with colors between 0 and `order - 1` in order of increasing length."""
positions, ids = create_padded_sites(
basis_vectors, extent, site_offsets, pbc, order
)
naive_edges_by_order = get_naive_edges(
positions,
order * np.linalg.norm(basis_vectors, axis=1).max() + distance_atol,
order,
)
colored_edges = []
for k, naive_edges in enumerate(naive_edges_by_order):
true_edges = set()
for node1, node2 in naive_edges:
# switch to real node indices
node1 = ids[node1]
node2 = ids[node2]
if node1 == node2:
raise RuntimeError(
f"Lattice contains self-referential edge {(node1, node2)} of order {k}"
)
elif node1 > node2:
node1, node2 = node2, node1
true_edges.add((node1, node2))
for edge in true_edges:
colored_edges.append((*edge, k))
return colored_edges
| 5,348,520 |
def expand(vevent, default_tz, href=''):
"""
:param vevent: vevent to be expanded
:type vevent: icalendar.cal.Event
:param default_tz: the default timezone used when we (icalendar)
don't understand the embedded timezone
:type default_tz: pytz.timezone
:param href: the href of the vevent, used for more informative logging
:type href: str
:returns: list of start and end (date)times of the expanded event
    :rtype: list(tuple(datetime, datetime))
"""
    # we do this now and then never care about the "real" end time again
if 'DURATION' in vevent:
duration = vevent['DURATION'].dt
else:
duration = vevent['DTEND'].dt - vevent['DTSTART'].dt
# dateutil.rrule converts everything to datetime
allday = not isinstance(vevent['DTSTART'].dt, datetime)
# icalendar did not understand the defined timezone
if (not allday and 'TZID' in vevent['DTSTART'].params and
vevent['DTSTART'].dt.tzinfo is None):
vevent['DTSTART'].dt = default_tz.localize(vevent['DTSTART'].dt)
if 'RRULE' not in vevent.keys():
return [(vevent['DTSTART'].dt, vevent['DTSTART'].dt + duration)]
events_tz = None
if getattr(vevent['DTSTART'].dt, 'tzinfo', False):
events_tz = vevent['DTSTART'].dt.tzinfo
vevent['DTSTART'].dt = vevent['DTSTART'].dt.astimezone(pytz.UTC)
rrulestr = vevent['RRULE'].to_ical()
rrule = dateutil.rrule.rrulestr(rrulestr, dtstart=vevent['DTSTART'].dt)
if not set(['UNTIL', 'COUNT']).intersection(vevent['RRULE'].keys()):
        # rrule really doesn't like to calculate all recurrences until
        # eternity, so we only do it 15 years into the future
dtstart = vevent['DTSTART'].dt
if isinstance(dtstart, date):
dtstart = datetime(*list(dtstart.timetuple())[:-3])
rrule._until = dtstart + timedelta(days=15 * 365)
if ((not getattr(rrule._until, 'tzinfo', True)) and
(getattr(vevent['DTSTART'].dt, 'tzinfo', False))):
rrule._until = vevent['DTSTART'].dt.tzinfo \
.localize(rrule._until)
logger.debug('calculating recurrence dates for {0}, '
'this might take some time.'.format(href))
dtstartl = list(rrule)
if len(dtstartl) == 0:
raise UnsupportedRecursion
if events_tz is not None:
dtstartl = [start.astimezone(events_tz) for start in dtstartl]
elif allday:
dtstartl = [start.date() for start in dtstartl]
dtstartend = [(start, start + duration) for start in dtstartl]
return dtstartend
| 5,348,521 |
def fsapi(session, stream, env, args):
"""Handle FS API requests.
Args:
string of the form <imsi>|<True if for dest_imsi (default is False)>
Subscriber State can be:
active (unblocked), -active (blocked),first_expired (validity expired)
"""
args = args.split('|')
imsi = args[0]
dest_imsi = False
if len(args) > 1:
dest_imsi = True
if len(imsi) < 4: # Toll Free Numbers don't have imsis
subscriber_state = 'active'
else:
subscriber_state = str(
subscriber.status().get_account_status(imsi)).lower()
else:
subscriber_state = str(
subscriber.status().get_account_status(imsi)).lower()
try:
account_status = False
if not dest_imsi:
if 'active' == subscriber_state:
account_status = True
else:
# incoming number status
allowed_states = ['active', 'active*',
'first_expired', 'first_expired*']
if subscriber_state in allowed_states:
account_status = True
except SubscriberNotFound:
account_status = False
consoleLog('info', "Returned FSAPI: " + str(account_status) + "\n")
stream.write(str(account_status))
| 5,348,522 |
def setup_transition_list():
"""
Creates and returns a list of Transition() objects to represent state
transitions for a biased random walk, in which the rate of downward
motion is greater than the rate in the other three directions.
Parameters
----------
(none)
Returns
-------
xn_list : list of Transition objects
List of objects that encode information about the link-state transitions.
Notes
-----
State 0 represents fluid and state 1 represents a particle (such as a
sediment grain or dissolved heavy particle).
The states and transitions are as follows:
Pair state Transition to Process Rate
========== ============= ======= ====
0 (0-0) (none) - -
1 (0-1) 2 (1-0) left motion 1.0
2 (1-0) 1 (0-1) right motion 1.0
3 (1-1) (none) - -
4 (0/0) (none) - -
5 (0/1) 6 (1/0) down motion 1.1
6 (1/0) 5 (0/1) up motion 0.9
7 (1/1) (none) - -
"""
xn_list = []
xn_list.append( Transition((0,1,0), (1,0,0), 1., 'left motion') )
xn_list.append( Transition((1,0,0), (0,1,0), 1., 'right motion') )
xn_list.append( Transition((0,1,1), (1,0,1), 1.1, 'down motion') )
xn_list.append( Transition((1,0,1), (0,1,1), 0.9, 'up motion') )
if _DEBUG:
print()
print('setup_transition_list(): list has',len(xn_list),'transitions:')
for t in xn_list:
print(' From state',t.from_state,'to state',t.to_state,'at rate',t.rate,'called',t.name)
return xn_list
| 5,348,523 |
def unit_vector(data, axis=None, out=None):
"""Return ndarray normalized by length, i.e. eucledian norm, along axis.
>>> v0 = numpy.random.random(3)
>>> v1 = unit_vector(v0)
>>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))
True
>>> v0 = numpy.random.rand(5, 4, 3)
>>> v1 = unit_vector(v0, axis=-1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2)
>>> numpy.allclose(v1, v2)
True
>>> v1 = unit_vector(v0, axis=1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1)
>>> numpy.allclose(v1, v2)
True
>>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float64)
>>> unit_vector(v0, axis=1, out=v1)
>>> numpy.allclose(v1, v2)
True
>>> list(unit_vector([]))
[]
>>> list(unit_vector([1.0]))
[1.0]
see: https://github.com/ros/geometry/blob/hydro-devel/tf/src/tf/transformations.py
"""
if out is None:
data = np.array(data, dtype=np.float64, copy=True)
if data.ndim == 1:
data /= math.sqrt(np.dot(data, data))
return data
else:
if out is not data:
out[:] = np.array(data, copy=False)
data = out
length = np.atleast_1d(np.sum(data*data, axis))
np.sqrt(length, length)
if axis is not None:
length = np.expand_dims(length, axis)
data /= length
if out is None:
return data
| 5,348,524 |
def negative_f1_score(probs, labels):
"""
Computes the f1 score between output and labels for k classes.
args:
probs (tensor) (size, k)
labels (tensor) (size, 1)
"""
probs = torch.nn.functional.softmax(probs, dim=1)
probs = probs.numpy()
labels = labels.numpy()
pred = np.argmax(probs, axis=1)
return skl.f1_score(labels, pred, pos_label=0)
| 5,348,525 |
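# Tiny check of negative_f1_score above (assuming skl is sklearn.metrics and np is
# numpy): logits whose argmax matches the labels should give F1 = 1.0 for class 0.
import torch
logits = torch.tensor([[2.0, 0.0],
                       [2.0, 0.0],
                       [0.0, 2.0],
                       [0.0, 2.0]])
labels = torch.tensor([0, 0, 1, 1])
print(negative_f1_score(logits, labels))   # -> 1.0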
def validate_dataset_path(value):
"""Validates the path for input dataset"""
try:
storage_type = get_storage_type(value)
if storage_type == 'local':
dataset_path = Path(value)
if not dataset_path.is_dir():
raise Exception("Directory doesn't exist")
    except Exception:
raise ValidationError(
_('Enter a valid storage path!'),
code='invalid'
)
| 5,348,526 |
def search_usb_devices_facets():
"""Facet USB Devices"""
data = {"terms": {"fields": ["status"]}}
usb_url = USB_DEVICES_FACETS.format(HOSTNAME, ORG_KEY)
return requests.post(usb_url, json=data, headers=HEADERS)
| 5,348,527 |
def __casestudy_gen(
ctx: click.Context, project: str, override: bool, version: int,
ignore_blocked: bool, merge_stage: tp.Optional[str], new_stage: bool,
update: bool
) -> None:
"""Generate or extend a CaseStudy Sub commands can be chained to for example
sample revisions but also add the latest."""
ctx.ensure_object(dict)
ctx.obj['project'] = project
ctx.obj['ignore_blocked'] = ignore_blocked
ctx.obj['version'] = version
paper_config = vara_cfg()["paper_config"]["current_config"].value
if not paper_config:
click.echo(
"You need to create a paper config first"
" using vara-pc create"
)
raise click.Abort()
ctx.obj['path'] = Path(
vara_cfg()["paper_config"]["folder"].value
) / (paper_config + f"/{project}_{version}.case_study")
ctx.obj['git_path'] = get_local_project_git_path(project)
if update:
pull_current_branch(ctx.obj['git_path'])
if override or not ctx.obj['path'].exists():
case_study = CaseStudy(ctx.obj['project'], version)
if merge_stage:
case_study.insert_empty_stage(0)
case_study.name_stage(0, merge_stage)
ctx.obj["merge_stage"] = 0
else:
case_study = load_case_study_from_file(ctx.obj['path'])
ctx.obj['custom_stage'] = bool(merge_stage)
if merge_stage:
if new_stage:
stage_index = case_study.num_stages
case_study.insert_empty_stage(stage_index)
case_study.name_stage(stage_index, merge_stage)
else:
stage_index_opt = case_study \
.get_stage_index_by_name(merge_stage)
if not stage_index_opt:
selected_stage = CSStage(merge_stage)
def set_merge_stage(stage: CSStage) -> None:
nonlocal selected_stage
selected_stage = stage
stage_choices = [selected_stage]
stage_choices.extend([
stage for stage in case_study.stages if stage.name
])
cli_list_choice(
f"The given stage({merge_stage}) does not exist,"
f" do you want to create it or select an existing one",
stage_choices, lambda x: x.name
if x.name else "", set_merge_stage
)
if selected_stage.name == merge_stage:
stage_index = case_study.num_stages
case_study.insert_empty_stage(stage_index)
case_study.name_stage(stage_index, selected_stage.name)
else:
stage_index = case_study.stages.index(selected_stage)
else:
stage_index = stage_index_opt
ctx.obj['merge_stage'] = stage_index
else:
if new_stage:
ctx.obj['merge_stage'] = max(case_study.num_stages, 0)
else:
ctx.obj['merge_stage'] = max(case_study.num_stages - 1, 0)
ctx.obj['case_study'] = case_study
| 5,348,528 |
def test_coord_init_representation():
"""
    Spherical or Cartesian representation input coordinates.
"""
coord = SphericalRepresentation(lon=8 * u.deg, lat=5 * u.deg, distance=1 * u.kpc)
sc = SkyCoord(coord, frame='icrs')
assert allclose(sc.ra, coord.lon)
assert allclose(sc.dec, coord.lat)
assert allclose(sc.distance, coord.distance)
with pytest.raises(ValueError) as err:
SkyCoord(coord, frame='icrs', ra='1d')
assert "conflicts with keyword argument 'ra'" in str(err)
coord = CartesianRepresentation(1 * u.one, 2 * u.one, 3 * u.one)
sc = SkyCoord(coord, frame='icrs')
sc_cart = sc.represent_as(CartesianRepresentation)
assert allclose(sc_cart.x, 1.0)
assert allclose(sc_cart.y, 2.0)
assert allclose(sc_cart.z, 3.0)
| 5,348,529 |
def chunked(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i+n]
| 5,348,530 |
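# Usage example for chunked above; the final chunk simply carries whatever remains.
print(list(chunked([1, 2, 3, 4, 5], 2)))   # -> [[1, 2], [3, 4], [5]]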
def debug(*args):
""" Handy for figuring out what's going on in a template. Usage: {% debug "print" some_var "stuff" %}. """
print(*args)
| 5,348,531 |
def pack4(v):
"""
Takes a 32 bit integer and returns a 4 byte string representing the
number in little endian.
"""
assert 0 <= v <= 0xffffffff
# The < is for little endian, the I is for a 4 byte unsigned int.
# See https://docs.python.org/2/library/struct.html for more info.
return struct.pack('<I', v)
| 5,348,532 |
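# Quick demonstration of pack4 above; 1 becomes the low byte and 0x12345678 is
# written byte-reversed (little endian).
print(pack4(1))            # b'\x01\x00\x00\x00'
print(pack4(0x12345678))   # b'xV4\x12'  i.e. bytes 0x78 0x56 0x34 0x12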
def index():
"""
"""
category = Category.get_categories()
pitch = Pitch.get_all_pitches()
title = "Welcome to Pitch Hub"
return render_template('index.html', title = title, category = category, pitch =pitch)
| 5,348,533 |
def maximum_sum_increasing_subsequence(numbers, size):
"""
Given an array of n positive integers. Write a program to find the sum of
maximum sum subsequence of the given array such that the integers in the
subsequence are sorted in increasing order.
"""
results = [numbers[i] for i in range(size)]
for i in range(1, size):
for j in range(i):
if numbers[i] > numbers[j] and results[i] < results[j] + numbers[i]:
results[i] = results[j] + numbers[i]
return max(results)
| 5,348,534 |
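# Worked example for maximum_sum_increasing_subsequence above: in [1, 101, 2, 3, 100, 4, 5]
# the best increasing subsequence by sum is 1 + 2 + 3 + 100 = 106.
nums = [1, 101, 2, 3, 100, 4, 5]
print(maximum_sum_increasing_subsequence(nums, len(nums)))   # -> 106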
def lstsqb(a, b):
"""
    Return least-squares solution x to x @ b = a.
    Similar to the MATLAB / operator for rectangular matrices.
    If b is invertible then the solution is la.solve(b.T, a.T).T
"""
return la.lstsq(b.T, a.T, rcond=None)[0].T
| 5,348,535 |
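# Small check of lstsqb above (under the reading that it solves x @ b ~= a,
# i.e. MATLAB's a / b): recover a known row vector x from a = x @ b.
import numpy as np
b = np.array([[1.0, 0.0],
              [1.0, 1.0]])
x_true = np.array([[2.0, 3.0]])
a = x_true @ b                 # [[5., 3.]]
print(lstsqb(a, b))            # -> approximately [[2., 3.]]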
def test_subset_4D_data_all_argument_permutations(load_esgf_test_data, tmpdir):
"""Tests clisops subset function with:
- no args (collection only)
- time only
- level only
- bbox only
- time + level
- time + bbox
- level + bbox
- time + level + bbox
On completion:
- Check the shape of the response
"""
# Found in file:
# times = ("2015-01-16 12", "MANY MORE", "2024-12-16 12") [120]
# plevs = (100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000, 25000,
# 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000, 500, 100) [19]
# lats = (-88.9277353522959, -25.9141861518467, 37.1202943109788) [3]
# lons = (0, 63.28125, 126.5625, 189.84375, 253.125, 316.40625) [6]
# Requested subset
time_input = time_interval("2022-01-01", "2022-06-01")
level_input = level_interval(1000, 1000)
bbox_input = (0.0, -80, 170.0, 65.0)
# Define a set of inputs and the resulting shape expected
test_inputs = [
["coll only", (None, None, None)],
["time only", (time_input, None, None)],
["level only", (None, level_input, None)],
["bbox only", (None, None, bbox_input)],
["time & level", (time_input, level_input, None)],
["time & bbox", (time_input, None, bbox_input)],
["level & bbox", (None, level_input, bbox_input)],
["time, level & bbox", (time_input, level_input, bbox_input)],
]
# Full data shape
initial_shape = [120, 19, 3, 6]
# Test each set of inputs, check the output shape (slice) is correct
for _, inputs in test_inputs:
expected_shape = initial_shape[:]
tm, level, bbox = inputs
if tm:
expected_shape[0] = 5
if level:
expected_shape[1] = 1
if bbox:
expected_shape[2:4] = 2, 3
outputs = subset(
ds=CMIP6_TA,
time=tm,
area=bbox,
level=level,
output_dir=tmpdir,
output_type="xarray",
)
ds = outputs[0]
assert ds.ta.shape == tuple(expected_shape)
| 5,348,536 |
def multivariateGaussian(X, mu, sigma2):
"""
    Multivariate Gaussian probability density.
:param X:
:param mu:
:param sigma2:
:return:
"""
k = len(mu)
if sigma2.shape[0] > 1:
sigma2 = np.diag(sigma2)
X = X - mu
argu = (2 * np.pi) ** (-k / 2) * np.linalg.det(sigma2) ** (-0.5)
p = argu * np.exp(-0.5 * np.sum(np.dot(X, np.linalg.inv(sigma2)) * X, axis=1))
return p
| 5,348,537 |
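# Quick numeric check of multivariateGaussian above: a standard 2-D Gaussian
# evaluated at its mean gives 1/(2*pi) ~= 0.159.
import numpy as np
X = np.array([[0.0, 0.0],
              [1.0, 1.0]])
mu = np.zeros(2)
sigma2 = np.ones(2)            # per-dimension variances (turned into a diagonal covariance)
print(multivariateGaussian(X, mu, sigma2))
# -> [0.15915494 0.05854983]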
def test_open_notebook_in_non_ascii_dir(notebook, qtbot, tmpdir):
"""Test that a notebook can be opened from a non-ascii directory."""
# Move the test file to non-ascii directory
test_notebook = osp.join(LOCATION, 'test.ipynb')
test_notebook_non_ascii = osp.join(str(tmpdir), u'äöüß', 'test.ipynb')
os.mkdir(os.path.join(str(tmpdir), u'äöüß'))
shutil.copyfile(test_notebook, test_notebook_non_ascii)
# Wait for prompt
notebook.open_notebook(filenames=[test_notebook_non_ascii])
nbwidget = notebook.tabwidget.currentWidget().notebookwidget
qtbot.waitUntil(lambda: prompt_present(nbwidget, qtbot),
timeout=NOTEBOOK_UP)
# Assert that the In prompt has "Test" in it
# and the client has the correct name
qtbot.waitUntil(lambda: text_present(nbwidget, qtbot),
timeout=NOTEBOOK_UP)
assert text_present(nbwidget, qtbot)
assert notebook.tabwidget.currentWidget().get_short_name() == "test"
| 5,348,538 |
def get_day(input):
"""
Convert input to a datetime object and extract the Day part
"""
if isinstance(input, str):
input = parse_iso(input)
if isinstance(input, (datetime.date, datetime.datetime)):
return input.day
return None
| 5,348,539 |
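# Example behaviour of get_day above; the ISO-string branch relies on the project's
# parse_iso helper, so only the date/datetime branch is shown here.
import datetime
print(get_day(datetime.date(2021, 3, 14)))             # -> 14
print(get_day(datetime.datetime(2021, 3, 14, 9, 26)))  # -> 14
print(get_day(12345))                                  # -> None (unsupported type)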
def run():
"""Create Conan promote instance and run
Collect user arguments as
"""
promote = ConanPromote()
promote.run(sys.argv[1:])
| 5,348,540 |
def update_organization(instance, language):
"""
Update Elasticsearch indices when an organization was modified and published:
- update the organization document in the Elasticsearch organizations index for the
organization and its direct parent (because the parent ID may change from Parent to Leaf),
- update the course documents in the Elasticsearch courses index for all courses linked to
this organization.
Returns None if the page was related to an organization and the Elasticsearch update is done.
Raises ObjectDoesNotExist if the page instance is not related to an organization.
"""
organization = Organization.objects.get(draft_extension__extended_object=instance)
actions = [
ES_INDICES.courses.get_es_document_for_course(course)
for course in organization.get_courses(language)
if not course.is_snapshot
]
actions.append(
ES_INDICES.organizations.get_es_document_for_organization(organization)
)
# Update the organization's parent only if it exists
try:
parent = organization.extended_object.get_parent_page().organization
except AttributeError:
pass
else:
actions.append(
ES_INDICES.organizations.get_es_document_for_organization(parent)
)
richie_bulk(actions)
| 5,348,541 |
def read_ds(tier, pos_source=None):
"""
Like read_pt above, given a DS tier, return the DepTree object
:param tier:
:type tier: RGTier
"""
# First, assert that the type we're looking at is correct.
assert tier.type == DS_TIER_TYPE
# --1) Root the tree.
root = DepTree.root()
# --2) We will build up a list of edges, then attach the edges to the tree.
edges = []
# --2b) Retrieve the POS tier, if it exists, in advance.
pos_tier = tier.igt.get_pos_tags(tier.attributes.get(DS_DEP_ATTRIBUTE), tag_method=pos_source)
for item in tier:
dep = item.attributes.get(DS_DEP_ATTRIBUTE)
head = item.attributes.get(DS_HEAD_ATTRIBUTE)
# Get the POS tag if it exists
pos = None
if pos_tier:
pos_item = pos_tier.find(alignment=dep)
if pos_item:
pos = pos_item.value()
# Get the word value...
dep_w = tier.igt.find(id=dep)
dep_t = Terminal(dep_w.value(), dep_w.index)
if head is not None:
head_w = tier.igt.find(id=head)
head_t = Terminal(head_w.value(), head_w.index)
else:
head_t = Terminal('ROOT', 0)
e = DepEdge(head=head_t, dep=dep_t, type=item.value(), pos=pos)
edges.append(e)
dt = build_dep_edges(edges)
return dt
| 5,348,542 |
def get_local_ontology_from_file(ontology_file):
""" return ontology class from a local OWL file """
return ow.get_ontology("file://" + ontology_file).load()
| 5,348,543 |
def get_wolframalpha_imagetag(searchterm):
""" Used to get the first image tag from the Wolfram Alpha API. The return value is a dictionary
with keys that can go directly into html.
Takes in:
searchterm: the term to search with in the Wolfram Alpha API
"""
base_url = 'http://api.wolframalpha.com/v2/query?'
app_id = credentials['wolframkey'] # api key
url_params = {'input': searchterm, 'appid': app_id}
headers = {'User-Agent': None}
data = urllib.urlencode(url_params)
req = urllib2.Request(base_url, data, headers)
xml = urllib2.urlopen(req).read()
tree = ET.fromstring(xml)
for e in tree.findall('pod'):
for item in [ef for ef in list(e) if ef.tag == 'subpod']:
for it in [i for i in list(item) if i.tag == 'img']:
if it.tag == 'img':
if float(it.attrib['width']) > 50 and float(it.attrib['height']) > 50:
return it.attrib['src']
| 5,348,544 |
def get_synset_definitions(word):
"""Return all possible definitions for synsets in a word synset ring.
:param word (str): The word to lookup.
:rtype definitions (list): The synset definitions list.
"""
definitions = []
synsets = get_word_synsets(word)
for _synset in synsets:
definitions.append(_synset.definition().split())
return definitions
| 5,348,545 |
def test_edit_write_with_modifications_two_colision():
""" two modifications with same position """
reader = StringIO("{}")
writer = StringIO()
mods = Modifications()
mods.add(0,2, "XX")
mods.add(0,2, "YY")
write_with_modifications(reader, mods, writer)
writer.seek(0)
ret = writer.read()
assert ret == "XX"
| 5,348,546 |
def getResourceDefUsingSession(url, session, resourceName, sensitiveOptions=False):
"""
get the resource definition - given a resource name (and catalog url)
catalog url should stop at port (e.g. not have ldmadmin, ldmcatalog etc...
or have v2 anywhere
since we are using v1 api's
returns rc=200 (valid) & other rc's from the get
resourceDef (json)
"""
print(
"getting resource for catalog:-"
+ url
+ " resource="
+ resourceName
)
apiURL = url + "/access/1/catalog/resources/" + resourceName
if sensitiveOptions:
apiURL += "?sensitiveOptions=true"
# print("\turl=" + apiURL)
header = {"Accept": "application/json"}
tResp = session.get(apiURL, params={}, headers=header, )
print("\tresponse=" + str(tResp.status_code))
if tResp.status_code == 200:
        # valid - return the json
return tResp.status_code, json.loads(tResp.text)
else:
# not valid
return tResp.status_code, None
| 5,348,547 |
def _merge_sse(sum1, sum2):
"""Merge the partial SSE."""
sum_count = sum1 + sum2
return sum_count
| 5,348,548 |
def earliest_deadline_first(evs, iface):
""" Sort EVs by departure time in increasing order.
Args:
evs (List[EV]): List of EVs to be sorted.
iface (Interface): Interface object. (not used in this case)
Returns:
List[EV]: List of EVs sorted by departure time in increasing order.
"""
return sorted(evs, key=lambda x: x.departure)
| 5,348,549 |
def auto_load(filename):
"""Load any supported raw battery cycler file to the correct Datapath automatically.
Matches raw file patterns to the correct datapath and returns the datapath object.
Example:
auto_load("2017-05-09_test-TC-contact_CH33.csv")
>>> <ArbinDatapath object>
auto_load("PreDiag_000287_000128short.092")
>>> <MaccorDatapath object>
Args:
filename (str, Pathlike): string corresponding to battery cycler file filename.
Returns:
(beep.structure.base.BEEPDatapath): The datapath child class corresponding to this file.
"""
if re.match(ARBIN_CONFIG["file_pattern"], filename) or re.match(FastCharge_CONFIG["file_pattern"], filename):
return ArbinDatapath.from_file(filename)
elif re.match(MACCOR_CONFIG["file_pattern"], filename) or re.match(xTesladiag_CONFIG["file_pattern"], filename):
return MaccorDatapath.from_file(filename)
elif re.match(INDIGO_CONFIG["file_pattern"], filename):
return IndigoDatapath.from_file(filename)
elif re.match(BIOLOGIC_CONFIG["file_pattern"], filename):
return BiologicDatapath.from_file(filename)
elif re.match(NEWARE_CONFIG["file_pattern"], filename):
return NewareDatapath.from_file(filename)
else:
raise ValueError("{} does not match any known file pattern".format(filename))
| 5,348,550 |
def set_stream_color(stream, disabled):
"""
Remember what our original streams were so that we
can colorize them separately, which colorama doesn't
seem to natively support.
"""
original_stdout = sys.stdout
original_stderr = sys.stderr
init(strip=disabled)
if stream != original_stdout:
sys.stdout = original_stdout
sys.stderr = BinaryStreamWrapper(stream, sys.stderr)
if stream != original_stderr:
sys.stderr = original_stderr
sys.stdout = BinaryStreamWrapper(stream, sys.stdout)
| 5,348,551 |
def show_scatter_plot(selected_species_df: pd.DataFrame):
"""
    Draw a scatter plot of two selected features for the chosen species.
"""
st.subheader("Scatter plot")
feature_x = st.selectbox("Which feature on x?", selected_species_df.columns[0:4])
feature_y = st.selectbox("Which feature on y?", selected_species_df.columns[0:4])
fig = px.scatter(selected_species_df, x=feature_x, y=feature_y, color="variety")
st.plotly_chart(fig)
| 5,348,552 |
def print_param_list(param_list, result, decimal_place=2, unit=''):
"""
Return a result string with parameter data appended. The input `param_list` is a list of a tuple
(param_value, param_name), where `param_value` is a float and `param_name` is a string. If `param_value`
is None, it writes 'N/A'.
"""
for param_value, param_name in param_list:
result += '<tr>'
result += r' <td class = "key"><span>{0}</span></td>'.format(param_name)
result += r' <td class="equals">=</td>'
if param_value is None:
result += r' <td class="value">N/A</td>'
else:
param_value = '%.*f' % (decimal_place, param_value)
result += r' <td class="value"><script type="math/tex">{0} \ \mathrm{{ {1!s} }}</script></td>'.format(
param_value, unit)
result += '</tr>\n'
return result
| 5,348,553 |
def test_comments_should_never_be_moved_between_imports_issue_1427():
"""isort should never move comments to different import statement.
See: https://github.com/PyCQA/isort/issues/1427
"""
assert isort.check_code(
"""from package import CONSTANT
from package import * # noqa
""",
force_single_line=True,
show_diff=True,
)
| 5,348,554 |
def get_veh_id(gb_data):
"""
Mapping function for vehicle id
"""
veh_ref = gb_data['Vehicle_Reference']
acc_id = get_acc_id_from_data(gb_data)
veh_id = common.get_gb_veh_id(acc_id, int(veh_ref))
return veh_id
| 5,348,555 |
def linreg_qr_gramschmidt_unencrypted(clientMap, coordinator, encryLv=3, colTrunc=False):
"""
Compute vertical federated linear regression using QR.
QR decomposition is computed by means of Numpy/Scipy builtin algorithm and Gram-Schmidt method.
Parameters
----------
clientMap : List
The list of qrClient objects.
clientInfos : List
The list of machine information of the corresponding qrClient objects.
encryLv : int
The least number of columns the feature matrix of a single client should have to protect its privacy.
colTrunc : bool
Do the column pivoting and truncation or not.
Returns
-------
numpy.array
The computed weights of all the clients. The weights corresponding to the constant term is at the last position.
"""
preprocessing_wo_constaint(clientMap, coordinator.machine_info_client, encryLv, colTrunc)
compute_qr_gramschmidt_unencrypted(clientMap, coordinator.machine_info_client)
apply_q_unencrypted(clientMap, coordinator.machine_info_client)
weights = apply_back_solve_wo_constraint(clientMap, coordinator.machine_info_client)
return weights
| 5,348,556 |
def register_classes():
"""Register these classes with the `LinkFactory` """
AnalyzeExtension.register_class()
AnalyzeExtension_SG.register_class()
| 5,348,557 |
def has_soa_perm(user_level, obj, ctnr, action):
"""
Permissions for SOAs
SOAs are global, related to domains and reverse domains
"""
return {
'cyder_admin': True, #?
'ctnr_admin': action == 'view',
'user': action == 'view',
'guest': action == 'view',
}.get(user_level, False)
| 5,348,558 |
def _move(file, folder, new_name, rel, renamer):
"""Simply rename a file (full path) in a directory (folder)."""
os.rename(file, os.path.join(folder, new_name))
renamer.names[new_name] = rel
| 5,348,559 |
def Count_Tiles(colour_pattern,palette):
"""Count the number of tiles used in each shape and colour.
A report is printed to the console.
"""
mask = np.isnan(colour_pattern[8][0])
shapes_used = np.floor(colour_pattern[8][0][~mask])
colours_used = colour_pattern[8][2][~mask]
for i in range (0,len(palette[:,0])):
colour = palette[i,0]
shape = palette[i,1]
amount = palette[i,2]
count = np.where(colours_used==colour)
if len(count[0]) > amount:
print("The number of tiles of colour {}, shape {} required is {}. Only {} available.".format(colour,shape,len(count[0]),amount))
else:
print("The number of tiles of colour {}, shape {} required is {}/{}.".format(colour,shape,len(count[0]),amount))
| 5,348,560 |
def GetMaxImageMemory():
""" """
pass
| 5,348,561 |
def parse_test(project, path):
"""Compares the dynamic graph to the parsed one."""
inputs, outputs, built_by, graph = parse_graph(project.graph)
fuzzed = sorted([f for f in inputs - outputs if project.filter_in(f)])
count = len(fuzzed)
root = project.buildPath
G = defaultdict(list)
with open(path, 'r') as f:
for line in f.readlines():
src, deps = line.strip().split(':')
src = os.path.normpath(os.path.join(root, src))
for dep in (w.strip() for w in deps.split(', ')):
G[os.path.normpath(os.path.join(root, dep))].append(src)
def traverse_graph(node, viz):
if node in viz:
return viz
for next in G[node]:
viz.add(node)
traverse_graph(next, viz)
return viz
for idx, input in zip(range(count), fuzzed):
print('[{0}/{1}] {2}:'.format(idx + 1, count, input))
expected = graph.find_deps(input) & outputs
actual = traverse_graph(input, set())
if actual != expected:
for f in sorted(actual):
if f not in expected:
print(' +', f)
for f in sorted(expected):
if f not in actual:
print(' -', f)
| 5,348,562 |
def upload_artifact(args: Any, file_path: str, org_id: Any = None) -> Dict[str, Any]:
"""
Upload artifact using Pyxis API
Args:
args (Any): CLI arguments
file_path (str): Path to a artifact file
org_id (Any): organization ID - optional
Returns:
Dict[str, Any]: Pyxis response
"""
upload_url = urljoin(
args.pyxis_url, f"v1/projects/certification/id/{args.cert_project_id}/artifacts"
)
file_name = os.path.basename(file_path)
file_size = os.path.getsize(file_path)
with open(file_path, "rb") as artifact:
content = artifact.read()
base64_content = base64.b64encode(content).decode("utf8")
mime = magic.from_file(file_path, mime=True)
artifact_payload = {
"content": base64_content,
"certification_hash": args.certification_hash,
"content_type": mime,
"filename": file_name,
"file_size": file_size,
"operator_package_name": args.operator_package_name,
"version": args.operator_version,
}
if org_id:
artifact_payload["org_id"] = org_id
return pyxis.post(upload_url, artifact_payload)
| 5,348,563 |
def balance_command(chat, message, args):
"""Show your token balance"""
try:
# push txn
asyncio.get_event_loop().run_until_complete(balance(chat.id, chat))
except EosRpcException as e:
e = str(e).replace("\'", "\"")
code_idx = e.find('code')
code_val = int(e[code_idx+7:(code_idx+14)])
# print(code_idx)
# print(code_val)
# print(type(code_val))
if code_idx != -1: # found "code" key
if code_val == 3010001: # Case-1: invalid name
chat.send("Sorry! Your EOSIO account name doesn\'t exist on this chain.")
elif code_val == 3050003: # Case-1: incorrect quantity or symbol
chat.send("Sorry! Your EOSIO account doesn\'t have any balances corresponding to parsed quantity or symbol on this chain.")
elif code_val == 3080004:
chat.send("Sorry! The contract account \'tippertipper\' doesn\'t have enough CPU to handle this activity on this chain. Please contact the Bot owner {bot.owner}.")
else:
chat.send("Sorry! Some other Exception occured. Please contact the Bot owner {bot.owner}.")
else: # NOT found "code" key
chat.send("Sorry! No code no. is present in the error. Please contact the Bot owner {bot.owner}.")
except EosAccountDoesntExistException:
chat.send(f'Your EOSIO account name doesn\'t exist on this chain.')
except EosAssertMessageException as e:
e = str(e).replace("\'", "\"") # replace single quotes (') with double quotes (") to make it as valid JSON & then extract the 'message' value.
# chat.send(f"{str(e)}", syntax="plain") # print full error dict
chat.send(f"Assertion Error msg --> {json.loads(e)['details'][0]['message']}") # print the message
except EosDeadlineException:
chat.send(f'Transaction timed out. Please try again.')
| 5,348,564 |
def show_absolute(signal, kind, unshuffled=False, unshuffle=False, map_backward=None, vmin=-4, vmax=4):
"""
Plot the absolute values of the given signal matrix.
Parameters
----------
signal : numpy.ndarray, shape=(n_samples, n_features)
True signal matrix.
kind : str, values=('Bias', 'Signal')
Type of absolute value matrix to be shown (used as annotation on plot).
unshuffled : bool
If the input data is unshuffled.
unshuffle : bool
If to unshuffle the input data.
map_backward : dict, value=('feature', 'sample'), values=dict
        Map from new annotation to old annotation.
vmin : int
Minimum absolute value on color scale.
vmax : int
Maximum absolute value on color scale.
"""
cmap = sb.diverging_palette(
250, 15, s=75, l=40, as_cmap=True, center="dark")
indices_x = np.arange(signal.shape[0], dtype=int)
indices_y = np.arange(signal.shape[1], dtype=int)
fig = pl.figure(figsize=(7 * (signal.shape[1] / signal.shape[0]), 7))
ax = fig.add_subplot(111)
if unshuffle:
ax.set_title('{} (unshuffled)'.format(kind))
indices_x = np.asarray([map_backward['sample'][i] for i in indices_x])
indices_y = np.asarray([map_backward['feature'][i] for i in indices_y])
signal = signal[indices_x]
signal = signal[:, indices_y]
if unshuffled:
ax.set_title('{} (unshuffled)'.format(kind))
indices_x = np.asarray([map_backward['sample'][i] for i in indices_x])
indices_y = np.asarray([map_backward['feature'][i] for i in indices_y])
else:
ax.set_title('{}'.format(kind))
ax_seaborn = sb.heatmap(signal, vmin=vmin, vmax=vmax, cmap=cmap, ax=ax, cbar_kws={
'shrink': 0.5}, xticklabels=indices_y, yticklabels=indices_x)
ax.tick_params(axis='both', which='both', length=0)
ax.set_xlabel('Features')
ax.set_ylabel('Samples')
| 5,348,565 |
def main(base_dir,
out_dir,
use_interpenetration=True,
n_betas=10,
flength=5000.,
pix_thsh=25.,
use_neutral=False,
viz=True):
"""Set up paths to image and joint data, saves results.
:param base_dir: folder containing LSP images and data
:param out_dir: output folder
:param use_interpenetration: boolean, if True enables the interpenetration term
:param n_betas: number of shape coefficients considered during optimization
:param flength: camera focal length (an estimate)
:param pix_thsh: threshold (in pixel), if the distance between shoulder joints in 2D
is lower than pix_thsh, the body orientation as ambiguous (so a fit is run on both
the estimated one and its flip)
:param use_neutral: boolean, if True enables uses the neutral gender SMPL model
:param viz: boolean, if True enables visualization during optimization
"""
img_dir = join(abspath(base_dir), 'images/lsp')
data_dir = join(abspath(base_dir), 'results/lsp')
if not exists(out_dir):
makedirs(out_dir)
# Render degrees: List of degrees in azimuth to render the final fit.
# Note that rendering many views can take a while.
do_degrees = [0.]
sph_regs = None
if not use_neutral:
_LOGGER.info("Reading genders...")
# File storing information about gender in LSP
with open(join(data_dir, 'lsp_gender.csv')) as f:
genders = f.readlines()
model_female = load_model(MODEL_FEMALE_PATH)
model_male = load_model(MODEL_MALE_PATH)
if use_interpenetration:
sph_regs_male = np.load(SPH_REGS_MALE_PATH)
sph_regs_female = np.load(SPH_REGS_FEMALE_PATH)
else:
gender = 'neutral'
model = load_model(MODEL_NEUTRAL_PATH)
if use_interpenetration:
sph_regs = np.load(SPH_REGS_NEUTRAL_PATH)
# Load joints
est = np.load(join(data_dir, 'est_joints.npz'))['est_joints']
# Load images
img_paths = sorted(glob(join(img_dir, '*[0-9].jpg')))
for ind, img_path in enumerate(img_paths):
out_path = '%s/%04d.pkl' % (out_dir, ind)
if not exists(out_path):
_LOGGER.info('Fitting 3D body on `%s` (saving to `%s`).', img_path,
out_path)
img = cv2.imread(img_path)
if img.ndim == 2:
_LOGGER.warn("The image is grayscale!")
img = np.dstack((img, img, img))
joints = est[:2, :, ind].T
conf = est[2, :, ind]
if not use_neutral:
gender = 'male' if int(genders[ind]) == 0 else 'female'
if gender == 'female':
model = model_female
if use_interpenetration:
sph_regs = sph_regs_female
elif gender == 'male':
model = model_male
if use_interpenetration:
sph_regs = sph_regs_male
params, vis = run_single_fit(
img,
joints,
conf,
model,
regs=sph_regs,
n_betas=n_betas,
flength=flength,
pix_thsh=pix_thsh,
scale_factor=2,
viz=viz,
do_degrees=do_degrees)
if viz:
import matplotlib.pyplot as plt
plt.ion()
plt.show()
plt.subplot(121)
plt.imshow(img[:, :, ::-1])
if do_degrees is not None:
for di, deg in enumerate(do_degrees):
plt.subplot(122)
plt.cla()
plt.imshow(vis[di])
plt.draw()
plt.title('%d deg' % deg)
plt.pause(1)
raw_input('Press any key to continue...')
with open(out_path, 'w') as outf:
pickle.dump(params, outf)
# This only saves the first rendering.
if do_degrees is not None:
cv2.imwrite(out_path.replace('.pkl', '.png'), vis[0])
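# A hedged, minimal invocation sketch; the directory paths below are illustrative
# placeholders, not part of the original script.
if __name__ == '__main__':
    main(base_dir='/path/to/smplify_data',   # folder containing images/lsp and results/lsp
         out_dir='/tmp/smplify_lsp_output',
         use_interpenetration=True,
         n_betas=10,
         viz=False)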
| 5,348,566 |
def check_skyscrapers(input_path: str) -> bool:
"""
    Main function to check the status of a skyscraper game board.
Return True if the board status is compliant with the rules,
False otherwise.
"""
board = read_input(input_path)
return check_not_finished_board(board) and check_uniqueness_in_rows(board) and \
check_horizontal_visibility(board) and check_columns(board)
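# Hedged usage sketch; the board file name is illustrative.
if __name__ == "__main__":
    print(check_skyscrapers("check.txt"))  # True when the board satisfies every rule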
| 5,348,567 |
def test_invalid_tri():
"""Invalid triangle yields false"""
check50.run("./is_valid_tri").stdin("4").stdin("2").stdin("7").stdout("false\n", "false\n").exit(0)
| 5,348,568 |
def test_pidfile_is_absolute_path():
"""Test that the pidfile is converted to an absolute path."""
pidfile = "~/test.pid"
user = getpass.getuser()
m = simple.SimplePidManager(pidfile=pidfile)
assert "~" not in m.pidfile
assert m.pidfile == "{0}/test.pid".format(os.path.expanduser("~"))
assert user in m.pidfile
| 5,348,569 |
async def get_station(station: avwx.Station, token: Optional[Token]) -> dict:
"""Log and returns station data as dict"""
await app.station.add(station.lookup_code, "station")
return await station_data_for(station, token=token) or {}
| 5,348,570 |
def add_check_numerics_ops():
"""Connect a `check_numerics` to every floating point tensor.
`check_numerics` operations themselves are added for each `half`, `float`,
or `double` tensor in the graph. For all ops in the graph, the
`check_numerics` op for all of its (`half`, `float`, or `double`) inputs
is guaranteed to run before the `check_numerics` op on any of its outputs.
Note: This API is not compatible with the use of `tf.cond` or
`tf.while_loop`, and will raise a `ValueError` if you attempt to call it
in such a graph.
Returns:
A `group` op depending on all `check_numerics` ops added.
Raises:
ValueError: If the graph contains any numeric operations in a control flow
structure.
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
Not compatible with eager execution. To check for `Inf`s and `NaN`s under
eager execution, call tfe.seterr(inf_or_nan='raise') once before executing
the checked operations.
  @end_compatibility
"""
if context.executing_eagerly():
raise RuntimeError(
"add_check_numerics_ops() is not compatible with eager execution. "
"To check for Inf's and NaN's under eager execution, call "
"tfe.seterr(inf_or_nan='raise') once before executing the "
"checked operations.")
check_op = []
# This code relies on the ordering of ops in get_operations().
# The producer of a tensor always comes before that tensor's consumer in
# this list. This is true because get_operations() returns ops in the order
# added, and an op can only be added after its inputs are added.
for op in ops.get_default_graph().get_operations():
for output in op.outputs:
if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
if op._get_control_flow_context() is not None: # pylint: disable=protected-access
raise ValueError("`tf.add_check_numerics_ops() is not compatible "
"with TensorFlow control flow operations such as "
"`tf.cond()` or `tf.while_loop()`.")
message = op.name + ":" + str(output.value_index)
with ops.control_dependencies(check_op):
check_op = [array_ops.check_numerics(output, message=message)]
return control_flow_ops.group(*check_op)
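# Hedged usage sketch for TF 1.x graph mode; the tensor and session names are
# illustrative and this is not part of the documented API above.
#
#     import tensorflow as tf
#     tf.compat.v1.disable_eager_execution()
#     x = tf.compat.v1.placeholder(tf.float32, shape=[None])
#     y = tf.math.log(x)                          # -inf for a zero input
#     check = tf.compat.v1.add_check_numerics_ops()
#     with tf.compat.v1.Session() as sess:
#         # Raises InvalidArgumentError because log(0.0) produces -inf.
#         sess.run([y, check], feed_dict={x: [1.0, 0.0]})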
| 5,348,571 |
def get_text(part):
"""Gmailの本文をdecode"""
if not part['filename'] and \
part['body']['size'] > 0 and \
'data' in part['body'].keys():
content_type = header(part['headers'], 'Content-Type')
encode_type = header(part['headers'], 'Content-Transfer-Encoding')
data = decode_data(content_type, encode_type, part['filename'], part['body']['data'])
if data["data_type"]=="text":
return data['data']
return ''
| 5,348,572 |
def clear():
"""
Clear PlaceOrder screen variables and assign new values.
"""
global price_thread1, price_thread2, price_thread3, price_thread4,\
lsize_thread1, lsize_thread2, lsize_thread3, lsize_thread4
ls.set(-1)
cp.set(-1)
st1.set(0)
st2.set(0)
st3.set(0)
st4.set(0)
# instrument.set("")
instu.set("")
lots.set(1)
expiry.set("")
Bid_label['text'] = "None"
Ask_label['text'] = "None"
inable_all()
price_thread1 = 0
price_thread2 = 0
price_thread3 = 0
price_thread4 = 0
lsize_thread1 = 0
lsize_thread2 = 0
lsize_thread3 = 0
lsize_thread4 = 0
| 5,348,573 |
def serve_forever(host, port):
"""
Start mail services.
:param host: Host
:param port: Port
"""
print("Starting mail-in/out on {}:{}".format(host, port))
inbox.serve(address=host, port=port)
| 5,348,574 |
def test_create_run_action_with_missing_id(
decoy: Decoy,
run_store: RunStore,
unique_id: str,
current_time: datetime,
client: TestClient,
) -> None:
"""It should 404 if the run ID does not exist."""
not_found_error = RunNotFoundError(run_id="run-id")
decoy.when(run_store.get(run_id="run-id")).then_raise(not_found_error)
response = client.post(
"/runs/run-id/actions",
json={"data": {"actionType": "play"}},
)
verify_response(
response,
expected_status=404,
expected_errors=RunNotFound(detail=str(not_found_error)),
)
| 5,348,575 |
def run(ex: "interactivity.Execution"):
"""Specify the target function(s) and/or layer(s) to target."""
selection: "definitions.Selection" = ex.shell.selection
is_exact = ex.args.get("exact", False)
functions = ex.args.get("functions", False)
layers = ex.args.get("layers", False)
both = not functions and not layers
names = _get_names(ex)
if both and names == ["*"]:
status = "ALL"
message = "Selection has been cleared. All items are now selected."
ex.shell.selection = dataclasses.replace(
selection,
function_needles=["*"],
layer_needles=["*"],
bundle_all=True,
)
elif is_exact:
status = "EXACT"
message = "Exact selection has been applied."
ex.shell.selection = _update_exact_selection(
names=names,
functions=functions,
layers=layers,
selection=selection,
)
else:
status = "MATCH"
message = "Matching items have been selected."
ex.shell.selection = _update_fuzzy_selection(
names=names,
functions=functions,
layers=layers,
selection=selection,
)
targets = ex.shell.context.get_selected_targets(ex.shell.selection)
return ex.finalize(
status=status,
message=message,
echo=True,
info={
"functions": _to_names(targets.function_targets),
"layers": _to_names(targets.layer_targets),
},
)
| 5,348,576 |
def main():
"""Shows basic usage of the Apps Script API.
    Creates an Apps Script API service object and uses it to call an
Apps Script function to print out a list of folders in the user's root
directory.
"""
SCRIPT_ID = 'ENTER_YOUR_SCRIPT_ID_HERE'
# Authorize and create a service object.
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('script', 'v1', http=http)
# Create an execution request object.
request = {"function": "getFoldersUnderRoot"}
try:
# Make the API request.
response = service.scripts().run(body=request,
scriptId=SCRIPT_ID).execute()
if 'error' in response:
# The API executed, but the script returned an error.
# Extract the first (and only) set of error details. The values of
# this object are the script's 'errorMessage' and 'errorType', and
            # a list of stack trace elements.
error = response['error']['details'][0]
print("Script error message: {0}".format(error['errorMessage']))
if 'scriptStackTraceElements' in error:
# There may not be a stacktrace if the script didn't start
# executing.
print("Script error stacktrace:")
for trace in error['scriptStackTraceElements']:
print("\t{0}: {1}".format(trace['function'],
trace['lineNumber']))
else:
# The structure of the result depends upon what the Apps Script
# function returns. Here, the function returns an Apps Script Object
# with String keys and values, and so the result is treated as a
# Python dictionary (folderSet).
folderSet = response['response'].get('result', {})
if not folderSet:
print('No folders returned!')
else:
print('Folders under your root folder:')
                for (folderId, folder) in folderSet.items():
print("\t{0} ({1})".format(folder, folderId))
except errors.HttpError as e:
# The API encountered a problem before the script started executing.
print(e.content)
| 5,348,577 |
def get_mixture_mse_accuracy(output_dim, num_mixes):
"""Construct an MSE accuracy function for the MDN layer
that takes one sample and compares to the true value."""
# Construct a loss function with the right number of mixtures and outputs
def mse_func(y_true, y_pred):
        # Reshape inputs in case this is used in a TimeDistributed layer
y_pred = tf.reshape(y_pred, [-1, (2 * num_mixes * output_dim) + num_mixes], name='reshape_ypreds')
y_true = tf.reshape(y_true, [-1, output_dim], name='reshape_ytrue')
out_mu, out_sigma, out_pi = tf.split(y_pred, num_or_size_splits=[num_mixes * output_dim,
num_mixes * output_dim,
num_mixes],
axis=1, name='mdn_coef_split')
cat = tfd.Categorical(logits=out_pi)
component_splits = [output_dim] * num_mixes
mus = tf.split(out_mu, num_or_size_splits=component_splits, axis=1)
sigs = tf.split(out_sigma, num_or_size_splits=component_splits, axis=1)
coll = [tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale) for loc, scale
in zip(mus, sigs)]
mixture = tfd.Mixture(cat=cat, components=coll)
samp = mixture.sample()
mse = tf.reduce_mean(tf.square(samp - y_true), axis=-1)
        # Todo: temperature adjustment for sampling function.
return mse
# Actually return the loss_func
with tf.name_scope('MDNLayer'):
return mse_func
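# Hedged usage sketch: attach this metric when compiling a Keras model whose final
# layer is an MDN with OUTPUT_DIM outputs and N_MIXES mixtures. The `model` object
# and `get_mixture_loss_func` are assumptions about the surrounding library.
#
#     model.compile(loss=get_mixture_loss_func(OUTPUT_DIM, N_MIXES),
#                   optimizer='adam',
#                   metrics=[get_mixture_mse_accuracy(OUTPUT_DIM, N_MIXES)])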
| 5,348,578 |
def ByName(breakdown_metric_name):
"""Return a BreakdownMetric class by name."""
breakdown_mapping = {
'distance': ByDistance,
'num_points': ByNumPoints,
'rotation': ByRotation,
'difficulty': ByDifficulty
}
if breakdown_metric_name not in breakdown_mapping:
raise ValueError('Invalid breakdown name: %s, valid names are %s' %
(breakdown_metric_name, list(breakdown_mapping.keys())))
return breakdown_mapping[breakdown_metric_name]
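# Hedged usage sketch: resolve a breakdown metric class by name.
#     metric_cls = ByName('distance')   # returns the ByDistance class
#     ByName('speed')                   # raises ValueError listing the valid names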
| 5,348,579 |
def test_ap_wpa_psk_ext_eapol(dev, apdev):
"""WPA2-PSK AP using external EAPOL supplicant"""
(bssid,ssid,hapd,snonce,pmk,addr,wpae) = eapol_test(apdev[0], dev[0],
wpa2=False)
msg = recv_eapol(hapd)
anonce = msg['rsn_key_nonce']
logger.info("Replay same data back")
send_eapol(hapd, addr, build_eapol(msg))
logger.info("Too short data")
send_eapol(hapd, addr, build_eapol(msg)[0:98])
(ptk, kck, kek) = pmk_to_ptk(pmk, addr, bssid, snonce, anonce)
msg['descr_type'] = 2
reply_eapol("2/4(invalid type)", hapd, addr, msg, 0x010a, snonce, wpae, kck)
msg['descr_type'] = 254
reply_eapol("2/4", hapd, addr, msg, 0x010a, snonce, wpae, kck)
msg = recv_eapol(hapd)
if anonce != msg['rsn_key_nonce']:
raise Exception("ANonce changed")
logger.info("Replay same data back")
send_eapol(hapd, addr, build_eapol(msg))
reply_eapol("4/4", hapd, addr, msg, 0x030a, None, None, kck)
hapd_connected(hapd)
| 5,348,580 |
def deserialize_structure(serialized_structure, dtype=np.int32):
"""Converts a string to a structure.
Args:
serialized_structure: A structure produced by `serialize_structure`.
dtype: The data type of the output numpy array.
Returns:
A numpy array with `dtype`.
"""
  return np.asarray(
      serialized_structure.split(domains.SEP_TOKEN), dtype=dtype)
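# Hedged example, assuming domains.SEP_TOKEN is ',' and the tokens are integers:
#     deserialize_structure('1,2,3')   # -> array([1, 2, 3], dtype=int32)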
| 5,348,581 |
def get_all_text_elements(dataset_name: str) -> List[TextElement]:
"""
get all the text elements of the given dataset
:param dataset_name:
"""
return data_access.get_all_text_elements(dataset_name=dataset_name)
| 5,348,582 |
def form_x(form_file,*args):
"""
    Same as above, except it assumes all tags in the form are numbers and uses the additional arguments in *args to fill in those tag values.
:param form_file: file which we use for replacements
:param *args: optional arguments which contain the form entries for the file in question, by number.
"""
    form_dict = {}
    for count, arg in enumerate(args, 1):
        form_dict[str(count)] = str(arg)
return form(form_file,form_dict)
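# Hedged example; the template filename is illustrative. The call below builds
# {'1': 'alpha', '2': 'beta'} and delegates to form():
#     form_x('template.txt', 'alpha', 'beta')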
| 5,348,583 |
def init():
"""Manage IAM users."""
formatter = cli.make_formatter('aws_user')
@click.group()
def user():
"""Manage IAM users."""
pass
@user.command()
@click.option('--create',
is_flag=True,
default=False,
help='Create if it does not exist')
@click.option('--path',
default='/',
help='Path for user name.')
@click.option('--inline-policy',
type=cli.LIST,
required=False,
help='Inline user policy name:file')
@click.option('--attached-policy',
type=cli.LIST,
required=False,
help='global:PolicyName or local:PolicyName')
@click.argument('user-name',
required=True,
callback=aws_cli.sanitize_user_name)
@cli.admin.ON_EXCEPTIONS
def configure(create,
path,
inline_policy,
attached_policy,
user_name):
"""Create/configure/get IAM user."""
iam_conn = awscontext.GLOBAL.iam
try:
user = iamclient.get_user(iam_conn, user_name)
except exc.NotFoundError:
if not create:
raise
user = None
if not user:
user = iamclient.create_user(iam_conn, user_name, path)
if inline_policy:
_set_user_policy(iam_conn, user_name, inline_policy)
if attached_policy:
_set_attached_policy(iam_conn, user_name, attached_policy)
user['UserPolicies'] = iamclient.list_user_policies(iam_conn,
user_name)
user['AttachedPolicies'] = iamclient.list_attached_user_policies(
iam_conn,
user_name)
cli.out(formatter(user))
@user.command(name='list')
@cli.admin.ON_EXCEPTIONS
@click.option('--path',
default='/',
help='Path for user name.')
def list_users(path):
"""List IAM users.
"""
iam_conn = awscontext.GLOBAL.iam
users = iamclient.list_users(iam_conn, path)
cli.out(formatter(users))
@user.command()
@click.option('--force',
is_flag=True,
default=False,
help='Delete user, even is user has policies attached.')
@click.argument('user-name')
@cli.admin.ON_EXCEPTIONS
def delete(force, user_name):
"""Delete IAM user."""
iam_conn = awscontext.GLOBAL.iam
if force:
user_policies = iamclient.list_user_policies(iam_conn,
user_name)
for policy in user_policies:
_LOGGER.info('deleting inline policy: %s', policy)
iamclient.delete_user_policy(iam_conn, user_name, policy)
attached_pols = iamclient.list_attached_user_policies(iam_conn,
user_name)
for policy in attached_pols:
_LOGGER.info('detaching policy: %s', policy['PolicyArn'])
iamclient.detach_user_policy(iam_conn,
user_name,
policy['PolicyArn'])
groups = iamclient.list_groups_for_user(iam_conn,
user_name)
for group in groups:
_LOGGER.info('removing user from group: %s', group)
iamclient.remove_user_from_group(iam_conn,
user_name,
group)
try:
iamclient.delete_user(iam_conn=iam_conn, user_name=user_name)
except iam_conn.exceptions.DeleteConflictException:
raise click.UsageError('User [%s] has inline or attached '
'policies, or is a member of one or '
'more group, use --force to force '
'delete.' % user_name)
del configure
del list_users
del delete
return user
| 5,348,584 |
def fix_units(dims):
"""Fill in missing units."""
default = [d.get("units") for d in dims][-1]
for dim in dims:
dim["units"] = dim.get("units", default)
return dims
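# Worked example: the last dim's units act as the default for any dim missing them.
#     fix_units([{"name": "x"}, {"name": "y", "units": "mm"}])
#     # -> [{"name": "x", "units": "mm"}, {"name": "y", "units": "mm"}]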
| 5,348,585 |
def annotate_movement(raw, pos, rotation_velocity_limit=None,
translation_velocity_limit=None,
mean_distance_limit=None, use_dev_head_trans='average'):
"""Detect segments with movement.
    Detects periods in which head movement exceeds rotation_velocity_limit,
    translation_velocity_limit, or mean_distance_limit, and returns an
    annotation marking the bad segments.
Parameters
----------
raw : instance of Raw
Data to compute head position.
pos : array, shape (N, 10)
The position and quaternion parameters from cHPI fitting. Obtained
with `mne.chpi` functions.
rotation_velocity_limit : float
        Head rotation velocity limit in degrees per second.
translation_velocity_limit : float
        Head translation velocity limit in meters per second.
mean_distance_limit : float
Head position limit from mean recording in meters.
use_dev_head_trans : 'average' (default) | 'info'
Identify the device to head transform used to define the
fixed HPI locations for computing moving distances.
If ``average`` the average device to head transform is
computed using ``compute_average_dev_head_t``.
If ``info``, ``raw.info['dev_head_t']`` is used.
Returns
-------
annot : mne.Annotations
Periods with head motion.
hpi_disp : array
Head position over time with respect to the mean head pos.
See Also
--------
compute_average_dev_head_t
"""
sfreq = raw.info['sfreq']
hp_ts = pos[:, 0].copy() - raw.first_time
dt = np.diff(hp_ts)
hp_ts = np.concatenate([hp_ts, [hp_ts[-1] + 1. / sfreq]])
orig_time = raw.info['meas_date']
annot = Annotations([], [], [], orig_time=orig_time)
# Annotate based on rotational velocity
t_tot = raw.times[-1]
if rotation_velocity_limit is not None:
assert rotation_velocity_limit > 0
# Rotational velocity (radians / sec)
r = _angle_between_quats(pos[:-1, 1:4], pos[1:, 1:4])
r /= dt
bad_mask = (r >= np.deg2rad(rotation_velocity_limit))
onsets, offsets = _mask_to_onsets_offsets(bad_mask)
onsets, offsets = hp_ts[onsets], hp_ts[offsets]
bad_pct = 100 * (offsets - onsets).sum() / t_tot
logger.info(u'Omitting %5.1f%% (%3d segments): '
u'ω >= %5.1f°/s (max: %0.1f°/s)'
% (bad_pct, len(onsets), rotation_velocity_limit,
np.rad2deg(r.max())))
annot += _annotations_from_mask(
hp_ts, bad_mask, 'BAD_mov_rotat_vel', orig_time=orig_time)
# Annotate based on translational velocity limit
if translation_velocity_limit is not None:
assert translation_velocity_limit > 0
v = np.linalg.norm(np.diff(pos[:, 4:7], axis=0), axis=-1)
v /= dt
bad_mask = (v >= translation_velocity_limit)
onsets, offsets = _mask_to_onsets_offsets(bad_mask)
onsets, offsets = hp_ts[onsets], hp_ts[offsets]
bad_pct = 100 * (offsets - onsets).sum() / t_tot
logger.info(u'Omitting %5.1f%% (%3d segments): '
u'v >= %5.4fm/s (max: %5.4fm/s)'
% (bad_pct, len(onsets), translation_velocity_limit,
v.max()))
annot += _annotations_from_mask(
hp_ts, bad_mask, 'BAD_mov_trans_vel', orig_time=orig_time)
# Annotate based on displacement from mean head position
disp = []
if mean_distance_limit is not None:
assert mean_distance_limit > 0
# compute dev to head transform for fixed points
use_dev_head_trans = use_dev_head_trans.lower()
if use_dev_head_trans not in ['average', 'info']:
raise ValueError('use_dev_head_trans must be either' +
' \'average\' or \'info\': got \'%s\''
% (use_dev_head_trans,))
if use_dev_head_trans == 'average':
fixed_dev_head_t = compute_average_dev_head_t(raw, pos)
elif use_dev_head_trans == 'info':
fixed_dev_head_t = raw.info['dev_head_t']
# Get static head pos from file, used to convert quat to cartesian
chpi_pos = sorted([d for d in raw.info['hpi_results'][-1]
['dig_points']], key=lambda x: x['ident'])
chpi_pos = np.array([d['r'] for d in chpi_pos])
# Get head pos changes during recording
chpi_pos_mov = np.array([apply_trans(_quat_to_affine(quat), chpi_pos)
for quat in pos[:, 1:7]])
# get fixed position
chpi_pos_fix = apply_trans(fixed_dev_head_t, chpi_pos)
# get movement displacement from mean pos
hpi_disp = chpi_pos_mov - np.tile(chpi_pos_fix, (pos.shape[0], 1, 1))
# get positions above threshold distance
disp = np.sqrt((hpi_disp ** 2).sum(axis=2))
bad_mask = np.any(disp > mean_distance_limit, axis=1)
onsets, offsets = _mask_to_onsets_offsets(bad_mask)
onsets, offsets = hp_ts[onsets], hp_ts[offsets]
bad_pct = 100 * (offsets - onsets).sum() / t_tot
logger.info(u'Omitting %5.1f%% (%3d segments): '
u'disp >= %5.4fm (max: %5.4fm)'
% (bad_pct, len(onsets), mean_distance_limit, disp.max()))
annot += _annotations_from_mask(
hp_ts, bad_mask, 'BAD_mov_dist', orig_time=orig_time)
_adjust_onset_meas_date(annot, raw)
return annot, disp
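# Hedged usage sketch (MNE-Python); the file names and limits below are illustrative.
#
#     import mne
#     raw = mne.io.read_raw_fif('sample_raw.fif', allow_maxshield=True)
#     pos = mne.chpi.read_head_pos('sample_head_pos.pos')
#     annot, disp = annotate_movement(raw, pos,
#                                     rotation_velocity_limit=30.,      # deg/s
#                                     translation_velocity_limit=0.05,  # m/s
#                                     mean_distance_limit=0.005)        # m
#     raw.set_annotations(raw.annotations + annot)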
| 5,348,586 |
def set_weight_send_next(request, responder):
"""
When the user provides their weight, save the answer and move to the next question.
"""
process_answer_with_entity(request, responder, pd.Q_WEIGHT)
| 5,348,587 |
def run_in_executor(
func: F,
executor: ThreadPoolExecutor = None,
args: Any = (),
kwargs: Any = MappingProxyType({}),
) -> Future:
"""将耗时函数加入到线程池 ."""
loop = get_event_loop()
# noinspection PyTypeChecker
return loop.run_in_executor( # type: ignore
executor, context_partial(func, *args, **kwargs),
)
| 5,348,588 |
def validate_params(canvas_size, border_width):
"""validate_params(canvas_size, border_width) -> None
Assert that canvas and border size are both non-negative ints,
    canvas size is not zero, and canvas size is at least as large as the border"""
assert_is_int('size', canvas_size)
assert_is_int('border', border_width)
if canvas_size == 0:
raise InterpreterFailureException('Invalid size: cannot be 0\n')
if border_width > canvas_size:
raise InterpreterFailureException(
'Invalid border %dpx: cannot be bigger than size (%dpx)\n'
% (border_width, canvas_size)
)
| 5,348,589 |
def reset_slave(server):
"""Function: reset_slave
Description: Clear replication configuration in a slave.
Arguments:
(input) server -> Server instance.
"""
# Semantic change in MySQL 8.0.22
slave = "replica" if server.version >= (8, 0, 22) else "slave"
server.cmd_sql("reset " + slave + " all")
| 5,348,590 |
def find_entry_with_minimal_scale_at_prime(self, p):
"""
Finds the entry of the quadratic form with minimal scale at the
prime p, preferring diagonal entries in case of a tie. (I.e. If
we write the quadratic form as a symmetric matrix M, then this
entry M[i,j] has the minimal valuation at the prime p.)
Note: This answer is independent of the kind of matrix (Gram or
Hessian) associated to the form.
INPUT:
`p` -- a prime number > 0
OUTPUT:
a pair of integers >= 0
EXAMPLES::
sage: Q = QuadraticForm(ZZ, 2, [6, 2, 20]); Q
Quadratic form in 2 variables over Integer Ring with coefficients:
[ 6 2 ]
[ * 20 ]
sage: Q.find_entry_with_minimal_scale_at_prime(2)
(0, 1)
sage: Q.find_entry_with_minimal_scale_at_prime(3)
(1, 1)
sage: Q.find_entry_with_minimal_scale_at_prime(5)
(0, 0)
"""
n = self.dim()
min_val = Infinity
ij_index = None
val_2 = valuation(2, p)
for d in range(n): ## d = difference j-i
for e in range(n - d): ## e is the length of the diagonal with value d.
## Compute the valuation of the entry
if d == 0:
tmp_val = valuation(self[e, e+d], p)
else:
tmp_val = valuation(self[e, e+d], p) - val_2
## Check if it's any smaller than what we have
if tmp_val < min_val:
ij_index = (e,e+d)
min_val = tmp_val
## Return the result
return ij_index
| 5,348,591 |
def _gcs_get(url: str, temp_filename: str) -> None:
"""Pull a file directly from GCS."""
blob = _get_gcs_blob(url)
blob.download_to_filename(temp_filename)
| 5,348,592 |
def from_arrow(array, highlevel=True, behavior=None):
"""
Args:
array (`pyarrow.Array`, `pyarrow.ChunkedArray`, `pyarrow.RecordBatch`,
or `pyarrow.Table`): Apache Arrow array to convert into an
Awkward Array.
highlevel (bool): If True, return an #ak.Array; otherwise, return
a low-level #ak.layout.Content subclass.
behavior (None or dict): Custom #ak.behavior for the output array, if
high-level.
"""
import awkward._v2._connect.pyarrow
out = awkward._v2._connect.pyarrow.handle_arrow(array, pass_empty_field=True)
return ak._v2._util.wrap(out, behavior, highlevel)
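# Hedged usage sketch with pyarrow:
#
#     import pyarrow as pa
#     arr = pa.array([[1, 2, 3], [], [4, 5]])
#     ak_array = from_arrow(arr)   # jagged ak.Array backed by the Arrow buffers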
| 5,348,593 |
def _basis_search(equiv_lib, source_basis, target_basis, heuristic):
"""Search for a set of transformations from source_basis to target_basis.
Args:
equiv_lib (EquivalenceLibrary): Source of valid translations
source_basis (Set[Tuple[gate_name: str, gate_num_qubits: int]]): Starting basis.
target_basis (Set[gate_name: str]): Target basis.
heuristic (Callable[[source_basis, target_basis], int]): distance heuristic.
Returns:
Optional[List[Tuple[gate, equiv_params, equiv_circuit]]]: List of (gate,
        equiv_params, equiv_circuit) tuples which, if applied in order,
will map from source_basis to target_basis. Returns None if no path
was found.
"""
source_basis = frozenset(source_basis)
target_basis = frozenset(target_basis)
open_set = set() # Bases found but not yet inspected.
closed_set = set() # Bases found and inspected.
# Priority queue for inspection order of open_set. Contains Tuple[priority, count, basis]
open_heap = []
# Map from bases in closed_set to predecessor with lowest cost_from_source.
# Values are Tuple[prev_basis, gate_name, params, circuit].
came_from = {}
basis_count = iter_count() # Used to break ties in priority.
open_set.add(source_basis)
heappush(open_heap, (0, next(basis_count), source_basis))
# Map from basis to lowest found cost from source.
cost_from_source = defaultdict(lambda: np.inf)
cost_from_source[source_basis] = 0
# Map from basis to cost_from_source + heuristic.
est_total_cost = defaultdict(lambda: np.inf)
est_total_cost[source_basis] = heuristic(source_basis, target_basis)
    logger.debug('Beginning basis search from %s to %s.',
source_basis, target_basis)
while open_set:
_, _, current_basis = heappop(open_heap)
if current_basis in closed_set:
# When we close a node, we don't remove it from the heap,
# so skip here.
continue
if {gate_name for gate_name, gate_num_qubits in current_basis}.issubset(target_basis):
# Found target basis. Construct transform path.
rtn = []
last_basis = current_basis
while last_basis != source_basis:
prev_basis, gate_name, gate_num_qubits, params, equiv = came_from[last_basis]
rtn.append((gate_name, gate_num_qubits, params, equiv))
last_basis = prev_basis
rtn.reverse()
logger.debug('Transformation path:')
for gate_name, gate_num_qubits, params, equiv in rtn:
logger.debug('%s/%s => %s\n%s', gate_name, gate_num_qubits, params, equiv)
return rtn
logger.debug('Inspecting basis %s.', current_basis)
open_set.remove(current_basis)
closed_set.add(current_basis)
for gate_name, gate_num_qubits in current_basis:
equivs = equiv_lib._get_equivalences((gate_name, gate_num_qubits))
basis_remain = current_basis - {(gate_name, gate_num_qubits)}
neighbors = [
(frozenset(basis_remain | {(inst.name, inst.num_qubits)
for inst, qargs, cargs in equiv.data}),
params,
equiv)
for params, equiv in equivs]
# Weight total path length of transformation weakly.
tentative_cost_from_source = cost_from_source[current_basis] + 1e-3
for neighbor, params, equiv in neighbors:
if neighbor in closed_set:
continue
if tentative_cost_from_source >= cost_from_source[neighbor]:
continue
open_set.add(neighbor)
came_from[neighbor] = (current_basis, gate_name, gate_num_qubits, params, equiv)
cost_from_source[neighbor] = tentative_cost_from_source
est_total_cost[neighbor] = tentative_cost_from_source \
+ heuristic(neighbor, target_basis)
heappush(open_heap, (est_total_cost[neighbor],
next(basis_count),
neighbor))
return None
| 5,348,594 |
def Get_EstimatedRedshifts( scenario={} ):
""" obtain estimated source redshifts written to npy file """
return np.genfromtxt( FilenameEstimatedRedshift( scenario ), dtype=None, delimiter=',', names=True, encoding='UTF-8')
| 5,348,595 |
def request_init(c, options, server, request, task):
"""`RequestManager` callback to initialise URL of the connection."""
print server + urllib.quote(request)
c.setopt(pycurl.URL, server + urllib.quote(request))
| 5,348,596 |
def get_national_museums(db_connection, export_to_csv, export_path):
"""
Get national museum data from DB
"""
df = pd.read_sql('select * from optourism.state_national_museum_visits', con=db_connection)
if export_to_csv:
df.to_csv(f"{export_path}_nationalmuseums_raw.csv", index=False)
return df
| 5,348,597 |
def new_database(uri):
"""Drop the database at ``uri`` and create a brand new one."""
destroy_database(uri)
create_database(uri)
| 5,348,598 |
def hrm_configure_pr_group_membership():
"""
Configures the labels and CRUD Strings of pr_group_membership
"""
T = current.T
s3db = current.s3db
settings = current.deployment_settings
request = current.request
function = request.function
table = s3db.pr_group_membership
if settings.get_hrm_teams() == "Team":
table.group_id.label = T("Team Name")
table.group_head.label = T("Team Leader")
if function == "group":
current.response.s3.crud_strings["pr_group_membership"] = Storage(
title_create = T("Add Member"),
title_display = T("Membership Details"),
title_list = T("Team Members"),
title_update = T("Edit Membership"),
title_search = T("Search Members"),
subtitle_create = T("Add New Team Member"),
label_list_button = T("List Members"),
label_create_button = T("Add Team Member"),
label_delete_button = T("Delete Membership"),
msg_record_created = T("Team Member added"),
msg_record_modified = T("Membership updated"),
msg_record_deleted = T("Membership deleted"),
msg_list_empty = T("No Members currently registered"))
else:
table.group_head.label = T("Group Leader")
phone_label = settings.get_ui_label_mobile_phone()
site_label = settings.get_org_site_label()
if function == "group":
db = current.db
ptable = db.pr_person
controller = request.controller
def hrm_person_represent(id, row=None):
if row:
id = row.id
elif id:
row = db(ptable.id == id).select(ptable.first_name,
limitby=(0, 1)
).first()
else:
return current.messages["NONE"]
return A(row.first_name,
_href=URL(c=controller, f="person", args=id))
table.person_id.represent = hrm_person_represent
list_fields = ["id",
(T("First Name"), "person_id"),
"person_id$middle_name",
"person_id$last_name",
"group_head",
(T("Email"), "person_id$email.value"),
(phone_label, "person_id$phone.value"),
(current.messages.ORGANISATION,
"person_id$human_resource.organisation_id"),
(site_label, "person_id$human_resource.site_id"),
]
orderby = "pr_person.first_name"
else:
list_fields = ["id",
"group_id",
"group_head",
"group_id$description",
]
orderby = table.group_id
s3db.configure("pr_group_membership",
list_fields=list_fields,
orderby=orderby)
| 5,348,599 |