def unset(config, field):
"""Unset a field of the configuration.
Parameters
----------
config : Configurable
The configuration to manipulate.
field : list of str
The series of field names.
Raises
------
ValueError
If there is no such field.
"""
type(config).get_field_hint(field)
    for name in field[:-1]:
        config = config.__dict__.get(name)
    delattr(config, field[-1])
| 5,353,800 |
def get_trigger_function(trigger_message, waiter):
"""Función auxiliar que genera un activador
Args:
trigger_message: mensaje o instruccion para continuar.
waiter: función que pausa el flujo de instrucciones.
"""
def trigger_function():
        # Print the instruction that fires the trigger
print(trigger_message)
waiter()
        # Play an audio clip confirming that the trigger was fired.
reproducir_audio(TRIGGER_AUDIO_PATH)
return trigger_function
| 5,353,801 |
def find_peaks(amplitude):
"""
    A value is considered to be a peak if it is at least as high as its four
    closest neighbours.
    """
    # Pad the array with -1 at both ends so the first and last values have
    # neighbours to compare against (assumes amplitude values are above -1).
padded = np.concatenate((-np.ones(2), amplitude, -np.ones(2)))
# Shift the array by one/two values to the left/right
shifted_l2 = padded[:-4]
shifted_l1 = padded[1:-3]
shifted_r1 = padded[3:-1]
shifted_r2 = padded[4:]
# Compare the original array with the shifted versions.
peaks = ((amplitude >= shifted_l2) & (amplitude >= shifted_l1) &
(amplitude >= shifted_r1) & (amplitude >= shifted_r2))
return peaks
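# Usage sketch (not part of the original snippet): indices 2 and 6 are at least
# as high as their four nearest neighbours, so they are flagged as peaks.
# Assumes numpy is available as np, as the function body already requires.
import numpy as np

amplitude = np.array([0.1, 0.3, 0.9, 0.3, 0.1, 0.2, 0.8, 0.2])
mask = find_peaks(amplitude)
print(np.nonzero(mask)[0])  # -> [2 6]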
| 5,353,802 |
def sync(
*,
client: Client,
json_body: CustomFieldOptionsCreateRequestBody,
) -> Optional[CustomFieldOptionsCreateResponseBody]:
"""Create Custom Field Options
Create a custom field option. If the sort key is not supplied, it'll default to 1000, so the option
appears near the end of the list.
Args:
json_body (CustomFieldOptionsCreateRequestBody): Example: {'custom_field_id':
'01FCNDV6P870EA6S7TK1DSYDG0', 'sort_key': 10, 'value': 'Product'}.
Returns:
        Optional[CustomFieldOptionsCreateResponseBody]
"""
return sync_detailed(
client=client,
json_body=json_body,
).parsed
| 5,353,803 |
def decrypt_with_private_key(data, private_key):
"""Decrypts the PKCS#1 padded shared secret using the private RSA key"""
return _pkcs1_unpad(private_key.decrypt(data))
| 5,353,804 |
def loadvars(builddir):
"""if builddir does not exist or does not have a cache, returns an
empty odict"""
v = odict()
if builddir is None or not os.path.exists(builddir):
return v
c = os.path.join(builddir, 'CMakeCache.txt')
if os.path.exists(c):
with open(c, 'r') as f:
for line in f:
# logdbg("loadvars0", line.strip())
if not re.match(_cache_entry, line):
continue
ls = line.strip()
name = re.sub(_cache_entry, r'\1', ls)
vartype = re.sub(_cache_entry, r'\2', ls)[1:]
value = re.sub(_cache_entry, r'\3', ls)
# logdbg("loadvars1", name, vartype, value)
v[name] = CMakeCacheVar(name, value, vartype)
return v
| 5,353,805 |
def deprecated (func):
"""
This is a decorator which can be used to mark functions as deprecated. It
will result in a warning being emitted when the function is used.
:param func: original function
:type func: :any:`collections.Callable`
:return: decorated func
:rtype: :any:`collections.Callable`
"""
@wraps(func)
def newFunc (*args, **kwargs):
warnings.warn("Call to deprecated function %s." % func.__name__,
category=DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
return newFunc
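# Usage sketch (not part of the original snippet): decorating a function makes
# every call emit a DeprecationWarning while still returning the normal result.
# Assumes `warnings` and `functools.wraps` are imported, as the decorator body requires.
import warnings

@deprecated
def old_add(x, y):
    return x + y

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert old_add(1, 2) == 3
    assert issubclass(caught[0].category, DeprecationWarning)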
| 5,353,806 |
def prediction():
"""
    A view that takes form data with two fields: "paragraphs" and "maxlen".
Returns: the summarized text of the paragraphs.
"""
print(request.form.values())
paragraphs = request.form.get("paragraphs")
    paragraphs = re.sub(r"\d+", "", paragraphs)
maxlen = int(request.form.get("maxlen"))
summary = summarizer(paragraphs, max_length=maxlen, min_length=49, do_sample=False)
return render_template('index.html', prediction_text = '" {} "'.format(summary[0]["summary_text"])), 200
| 5,353,807 |
def check_relation_equivalence(
record: tinydb.database.Document,
r_type: str
) -> None:
""" Tests if specified record is self-consistent and enforces hierarchy
downstream. The "relations" field only exists when a record is obtained
through a query (i.e. .read(...), .read_all(...))
# C1: Check hierarchy-enforcing field "relations" exist
# C2: Check that all downstream relations have been captured
"""
# Ensure that a cloned record is no different from its original
cloned_record = copy.deepcopy(record)
assert cloned_record == record
# C1
assert 'relations' in cloned_record.keys()
# C2
relations = cloned_record.pop('relations')
assert (set(relations.keys()) == set(RELATIONS_MAPPINGS[r_type]))
| 5,353,808 |
def is_reviewer(user):
"""Return True if this user is a financial aid reviewer"""
# no need to cache here, all the DB lookups used during has_perm
# are already cached
return user.has_perm("finaid.review_financial_aid")
| 5,353,809 |
def getLogMessage(commitSHA):
"""Get the log message for a given commit hash"""
output = check_output(["git","log","--format=%B","-n","1",commitSHA])
return output.strip()
| 5,353,810 |
def get_old_stacks(cfn, old_instances, debug=True):
""" Gets all of the stacks for the old RDS instances """
old_stacks = get_cfn_stack_for_rds(cfn, old_instances, debug)
if debug:
print("DEBUG: Old stacks found: %s" % len(old_stacks))
return old_stacks
| 5,353,811 |
def set_initialized(initialized):
"""
set the initialization state of the local node
@param initialized: True if node initialized
@type initialized: bool
"""
global _client_ready
_client_ready = initialized
| 5,353,812 |
def sup(content, accesskey:str ="", class_: str ="", contenteditable: str ="",
data_key: str="", data_value: str="", dir_: str="", draggable: str="",
hidden: str="", id_: str="", lang: str="", spellcheck: str="",
style: str="", tabindex: str="", title: str="", translate: str=""):
"""
Returns superscript.\n
`content`: Contents of the superscript.\n
"""
g_args = global_args(accesskey, class_, contenteditable, data_key, data_value,
dir_, draggable, hidden, id_, lang, spellcheck, style,
tabindex, title, translate)
return f"<sup {g_args}>{content}</sup>\n"
| 5,353,813 |
def isnonempty(value):
"""
Return whether the value is not empty
Examples::
>>> isnonempty('a')
True
>>> isnonempty('')
False
:param value: string to validate whether value is not empty
"""
return value != ''
| 5,353,814 |
def InstallSystem(config, deployment, options):
"""Install the local host from the sysync deployment configuration files."""
installed = {}
# Create fresh temporary directory
Log('Clearing temporary deployment path: %s' % config['deploy_temp_path'])
run.Run('/bin/rm -rf %s' % config['deploy_temp_path'])
run.Run('/bin/mkdir -p %s' % config['deploy_temp_path'])
# Install the packages
result = InstallPackagesLocally(config, deployment, options)
return result
| 5,353,815 |
def preprocess_input(x):
"""前処理。"""
return tf.keras.applications.imagenet_utils.preprocess_input(x, mode="torch")
| 5,353,816 |
def attribute_to_partner_strict(partner, partner_string_or_spec, amount):
"""Return the amount attributable to the given partner."""
spec = (
partner_string_or_spec
if isinstance(partner_string_or_spec, dict)
else parse_partner_string(partner_string_or_spec)
)
if partner not in spec:
raise ValueError("Partner not found in partner string: %s" % partner)
v100 = spec[partner] * float(amount.abs())
f_floor = round if isclose(v100, round(v100)) else floor
v = amount.sign() * 0.01 * f_floor(v100)
return Amount(str(v)).with_commodity(amount.commodity)
| 5,353,817 |
def _set_op_arguments(mx_operators):
"""Fetch and set operator arguments - nargs, arg_names, arg_types
"""
for op_name in mx_operators:
operator_arguments = mx.operator.get_operator_arguments(op_name)
mx_operators[op_name]["params"] = {"narg": operator_arguments.narg,
"arg_names": operator_arguments.names,
"arg_types": operator_arguments.types}
| 5,353,818 |
def drawLines(img, lines, color=(255,0,0)):
"""
Draw lines on an image
"""
centroids = list()
r_xs = list()
r_ys = list()
for line_ in lines:
for rho,theta in line_:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
            # Guard against perfectly vertical lines (x1 == x0) to avoid a division by zero.
            if x1 == x0:
                angle = 90.0
            else:
                slope = (y1 - y0) / float(x1 - x0)
                angle = math.degrees(math.atan(slope))
if abs(angle) > 80:
# print(img.shape[1])
h_layout = line((0, 0), (img.shape[1], 0))
h_layout_lower = line((0, img.shape[0]), (img.shape[1], img.shape[0]))
r = intersection2(h_layout, line((x1, y1), (x2, y2)))
r_lower = intersection2(h_layout_lower, line((x1, y1), (x2, y2)))
# cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), color, 2)
# cv2.line(img, (int(r[0]), int(r[1])), (int(r_lower[0]), int(r_lower[1])), color, 2)
# print('min(r, r_lower), max(r, r_lower) :', np.min(np.array([r, r_lower])), np.max(np.array([r, r_lower])))
                # The min/max of the intersection points must be kept within a valid range. #
if np.min(np.array([r, r_lower])) >= 0 and np.max(np.array([r, r_lower])) < max(img.shape):
center_p = (int((r[0] + r_lower[0]) / 2), int((r[1] + r_lower[1])/ 2))
centroids.append(center_p)
r_xs.append((r[0], r_lower[0]))
r_ys.append((r[1], r_lower[1]))
# cv2.circle(img, center_p, 10, (255, 0, 255), -1)
# cv2.line(img, (int(0), int(0)), (int(0), int(img.shape[0])), color, 2)
# cv2.line(img, (int(img.shape[1]), int(0)), (int(img.shape[1]), int(img.shape[0])), color, 2)
# cv2.circle(img, (0, int(img.shape[0] / 2)), 10, (255, 0, 255), -1)
# cv2.circle(img, (img.shape[1], int(img.shape[0] / 2)), 10, (255, 0, 255), -1)
centroids.append((0, int(img.shape[0] / 2)))
centroids.append((img.shape[1], int(img.shape[0] / 2)))
return r_xs, r_ys, centroids
| 5,353,819 |
def moments_of_inertia(geo, amu=True):
""" principal inertial axes (atomic units if amu=False)
"""
ine = inertia_tensor(geo, amu=amu)
moms, _ = numpy.linalg.eigh(ine)
moms = tuple(moms)
return moms
| 5,353,820 |
def greenblatt_earnings_yield(stock, date=None, lookback_period=timedelta(days=0), period='FY'):
"""
:param stock: ticker(s) in question. Can be a string (i.e. 'AAPL') or a list of strings (i.e. ['AAPL', 'BA']).
:param date: Can be a datetime (i.e. datetime(2019, 1, 1)) or list of datetimes. The most recent date of reporting from that date will be used. By default, date=None.
:param lookback_period: lookback from date (used to compare against previous year or quarter etc.) i.e. timedelta(days=90).
:param period: 'FY' for fiscal year, 'Q' for quarter, 'YTD' for calendar year to date, 'TTM' for trailing twelve months.
:return: .. math:: \\text{Greenblatt Earnings Yield} = \\frac{\\text{EBIT}}{\\text{EV}}
"""
return earnings_before_interest_and_taxes(stock=stock, date=date, lookback_period=lookback_period, period=period) \
/ enterprise_value(stock=stock, date=date, lookback_period=lookback_period, period=period)
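# Worked example of the ratio itself (hypothetical numbers, not tied to any
# ticker or data source): EBIT of 10M against an enterprise value of 125M
# gives an earnings yield of 0.08, i.e. 8%.
ebit, enterprise_val = 10_000_000, 125_000_000
print(ebit / enterprise_val)  # -> 0.08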
| 5,353,821 |
def today(context=None):
"""Overwrite <today>.json.gz with all data since 00:00 UTC"""
start = last_midnight_utc()
end = now_utc()
data = getdata(end=end, start=start, context=context)
prettydate = f"{end.date().isoformat()}"
log_head_tail(data)
with gzip.open(prettydate + ".json.gz", "wt", encoding="ascii") as f:
f.write(json.dumps(data))
| 5,353,822 |
def remove_directory(dir_path):
"""Delete a directory"""
if isdir(dir_path):
try:
shutil.rmtree(dir_path)
return ok_resp(f'Directory removed {dir_path}')
        except TypeError as err_obj:
            return err_resp(f'Failed to remove directory. {err_obj}')
        except FileNotFoundError as err_obj:
            return err_resp(f'Directory not found: {err_obj}')
        except PermissionError as err_obj:
            return err_resp(f'Failed to delete directory: {err_obj}')
        except OSError as err_obj:
            # OSError must come after its subclasses (FileNotFoundError, PermissionError),
            # otherwise those handlers are unreachable.
            return err_resp(f'Failed to delete directory: {err_obj}')
return ok_resp(f'Not a directory {dir_path}')
| 5,353,823 |
def deploy(c, rebuild_=False, stack=False, prod=False, ngrok=False):
"""
Deploy the airflow instance.
Args:
c: invoke context
rebuild_: rebuild the images prior to deployment
stack: use docker swarm mode
prod: deploy to production
ngrok: deploy locally, but expose to internet via ngrok
"""
configure_prod_or_local(c, prod)
if ngrok:
if rebuild_:
rebuild(c)
atexit.register(c.run, "docker-compose down")
c.run("docker-compose up -d")
c.run("ngrok http 8080", pty=True)
elif prod or stack:
if prod or rebuild_:
rebuild(c)
c.run(
f"docker stack deploy -c docker-compose.yaml -c docker-compose.prod.yaml {c.config.stack_name}"
)
else:
if rebuild_:
rebuild(c)
c.run(f"docker-compose up")
| 5,353,824 |
def new_eps_after(since_ep):
"""
:param since_ep: Episode instance
:return: Number of episodes since then
"""
session = Session.object_session(since_ep)
series = since_ep.series
series_eps = session.query(Episode).join(Episode.series).\
filter(Series.id == series.id)
if series.identified_by == 'ep':
if since_ep.season is None or since_ep.number is None:
log.debug('new_eps_after for %s falling back to timestamp because latest dl in non-ep format' %
series.name)
return series_eps.filter(Episode.first_seen > since_ep.first_seen).count()
return series_eps.filter((Episode.identified_by == 'ep') &
(((Episode.season == since_ep.season) & Episode.number > since_ep.number) |
Episode.season > since_ep.season)).count()
elif series.identified_by == 'seq':
return series_eps.filter(Episode.number > since_ep.number).count()
elif series.identified_by == 'id':
return series_eps.filter(Episode.first_seen > since_ep.first_seen).count()
else:
log.debug('unsupported identified_by %s', series.identified_by)
return 0
| 5,353,825 |
def parse_json_main_index(out_dir: Path=OUTPUT_DIR) -> Iterator[Link]:
"""parse an archive index json file and return the list of links"""
index_path = os.path.join(out_dir, JSON_INDEX_FILENAME)
if os.path.exists(index_path):
with open(index_path, 'r', encoding='utf-8') as f:
links = pyjson.load(f)['links']
for link_json in links:
try:
yield Link.from_json(link_json)
except KeyError:
try:
detail_index_path = Path(OUTPUT_DIR) / ARCHIVE_DIR_NAME / link_json['timestamp']
yield parse_json_link_details(str(detail_index_path))
except KeyError:
# as a last effort, try to guess the missing values out of existing ones
try:
yield Link.from_json(link_json, guess=True)
except KeyError:
print(" {lightyellow}! Failed to load the index.json from {}".format(detail_index_path, **ANSI))
continue
return ()
| 5,353,826 |
def commercetools_api(functionapp_env):
"""
Intercept any HTTP requests.
This uses requests mocker, so be careful with mocking other endpoints.
"""
with backend_mocker() as m:
yield m
| 5,353,827 |
def header_maxperdisc(ctx, institution, requirement_id):
"""
header_maxperdisc : maxperdisc label? ;
"""
if DEBUG:
        print(f'*** header_maxperdisc({class_name(ctx)=}, {institution=}, {requirement_id=})',
file=sys.stderr)
return_dict = {'label': get_label(ctx)}
maxperdisc_ctx = ctx.maxperdisc()
return_dict.update(maxperdisc(maxperdisc_ctx, institution, requirement_id))
return {'header_maxperdisc': return_dict}
| 5,353,828 |
def get_solrj_connection_affinity(zk_client):
"""
Get information about where SolrJ clients are connected
    This shows information about SolrJ clients, the IP addresses they are coming from, and the Zookeeper
hosts they are connected to, as well as an overview of their session ids.
Because multiple sessions can be associated with a single ip address, we will group clients by remote IP address.
"""
| 5,353,829 |
async def run_server(server: rptminigameshub.network.ClientsListener, updater: rptminigameshub.checkout.StatusUpdater, dry_run: bool = False):
"""Runs event's main loop for serving, SIGINT listening and updating tasks until SIGINT is handled or until status updater crashes,
will throw if it happens.
If dry_run is `True`, then this function will immediately return by cancelling all tasks before running them."""
# Set to True when finally clause is reach or if it is a dry run, means it is normal if coroutine tasks are cancelled
stopped_gracefully = False
    def graceful_shutdown():
        # Without nonlocal the assignment below would just create a local variable and never update the outer flag.
        nonlocal stopped_gracefully
        stopped_gracefully = True  # Cancelling tasks will cause them to raise a CancelledError; this tells the except clause it is expected
# Gracefully stop each running task
serving_task.cancel()
updater_task.cancel()
wait_for_sigint_task.cancel()
# Stops the server when Ctrl+C is hit
try:
asyncio.get_running_loop().add_signal_handler(signal.SIGINT, require_stop)
except NotImplementedError: # Might fail under Windows for example
logger.warning("SIGINT handling isn't supported on this platform.")
# Runs server until it is stopped by Ctrl+C OR until status checkout crashes
serving_task = asyncio.create_task(server.start())
updater_task = asyncio.create_task(updater.start()) # Must be cancellable if server stops
wait_for_sigint_task = asyncio.create_task(run_until_stopped(serving_task, updater_task)) # Must be cancellable if server stops
try: # Handles case where one of the two tasks stops unexpectedly
try: # Handles case where updater_task is cancelled
if dry_run: # For a dry run, gather() will return immediately because every task will already be cancelled
graceful_shutdown()
await asyncio.gather(wait_for_sigint_task, updater_task, serving_task)
except asyncio.CancelledError:
# If updater_task or serving_task have been cancelled but not from final clause or running task post-await statement,
# then this CancelledError is unexpected and must be propagated
if not stopped_gracefully and (updater_task.cancelled() and serving_task.cancelled()) and not wait_for_sigint_task.done(): # End of running task: updating is cancelled
raise
finally: # Ensures both tasks will be stop before program to avoid destroying them as pending
graceful_shutdown()
| 5,353,830 |
def set_route_queue(path_list,user_position,sudden_id,sudden_xy,pi):
"""
    Route so that the doctor closest to the last (sudden) patient is the one who goes.
"""
minimum_dis = 100
minimum_idx = 0
for i in range(len(path_list)):
dis = np.sqrt((user_position[path_list[i][-2]][0] - sudden_xy[0])**2 + (user_position[path_list[i][-2]][1] - sudden_xy[1])**2)
if(dis < minimum_dis):
minimum_dis = dis
minimum_idx = path_list[i][-2]
pi_idx = [i for i, x in enumerate(pi) if x == minimum_idx]
pi.insert(pi_idx[0]+1,sudden_id)
return pi
| 5,353,831 |
def add(x, y):
"""Add two numbers together."""
return x+y
| 5,353,832 |
def required_values(
*,
schema: types.Schema,
schemas: types.Schemas,
stay_within_model: bool = False,
) -> typing.Iterator[typing.Any]:
"""
Return iterable with all values of the required key of the constructable schema.
Checks for $ref, if it is there resolves to the underlying schema and recursively
processes that schema.
Checks for allOf, if it is there recursively processes each schema.
Otherwise yields the required key value.
Args:
        schema: The constructable schema.
schemas: All defined schemas (not just the constructable ones).
stay_within_model: Ensures that each required value is only returned once. For
both single and joined table inheritance no reference to the parent is
followed.
Returns:
An iterator with all required key values.
"""
skip_name: typing.Optional[str] = None
try:
skip_name = _calculate_skip_name(
schema=schema,
schemas=schemas,
stay_within_tablename=False,
stay_within_model=stay_within_model,
)
except (
exceptions.MalformedSchemaError,
exceptions.InheritanceError,
exceptions.SchemaNotFoundError,
):
return
yield from _any_key(
schema=schema,
schemas=schemas,
skip_name=skip_name,
key=types.OpenApiProperties.REQUIRED,
)
| 5,353,833 |
def calcPerSegmentStatsTiled(imgfile, imgbandnum, segfile,
statsSelection):
"""
Calculate selected per-segment statistics for the given band
of the imgfile, against the given segment raster file.
Calculated statistics are written to the segfile raster
attribute table (RAT), so this file format must support RATs.
Calculations are carried out in a memory-efficient way, allowing
very large rasters to be processed. Raster data is handled in
small tiles, attribute table is handled in fixed-size chunks.
The statsSelection parameter is a list of tuples, one for each
statistics to be included. Each tuple is either 2 or 3 elements,
(columnName, statName) or (columnName, statName, parameter)
The 3-element form is used for any statistic which requires
a parameter, which currently is only the percentile.
The columnName is a string, used to name the column in the
output RAT.
The statName is a string used to identify which statistic
is to be calculated. Available options are:
'min', 'max', 'mean', 'stddev', 'median', 'mode', 'percentile'.
The 'percentile' statistic requires the 3-element form, with
the 3rd element being the percentile to be calculated.
For example
[('Band1_Mean', 'mean'),
('Band1_stdDev', 'stddev'),
('Band1_LQ', 'percentile', 25),
('Band1_UQ', 'percentile', 75)
]
would create 4 columns, for the per-segment mean and
standard deviation of the given band, and the lower and upper
quartiles, with corresponding column names.
"""
segds = segfile
if not isinstance(segds, gdal.Dataset):
segds = gdal.Open(segfile, gdal.GA_Update)
segband = segds.GetRasterBand(1)
imgds = imgfile
if not isinstance(imgds, gdal.Dataset):
imgds = gdal.Open(imgfile, gdal.GA_ReadOnly)
imgband = imgds.GetRasterBand(imgbandnum)
if (imgband.DataType == gdal.GDT_Float32 or
imgband.DataType == gdal.GDT_Float64):
raise PyShepSegTilingError("Float image types not supported")
if segband.XSize != imgband.XSize or segband.YSize != imgband.YSize:
raise PyShepSegTilingError("Images must be same size")
if segds.GetGeoTransform() != imgds.GetGeoTransform():
raise PyShepSegTilingError("Images must have same spatial extent and pixel size")
if not equalProjection(segds.GetProjection(), imgds.GetProjection()):
raise PyShepSegTilingError("Images must be in the same projection")
attrTbl = segband.GetDefaultRAT()
existingColNames = [attrTbl.GetNameOfCol(i)
for i in range(attrTbl.GetColumnCount())]
histColNdx = checkHistColumn(existingColNames)
segSize = attrTbl.ReadAsArray(histColNdx).astype(numpy.uint32)
# Create columns, as required
colIndexList = createStatColumns(statsSelection, attrTbl, existingColNames)
(statsSelection_fast, numIntCols, numFloatCols) = (
makeFastStatsSelection(colIndexList, statsSelection))
# Loop over all tiles in image
tileSize = 1024
(nlines, npix) = (segband.YSize, segband.XSize)
numXtiles = int(numpy.ceil(npix / tileSize))
numYtiles = int(numpy.ceil(nlines / tileSize))
segDict = createSegDict()
pagedRat = createPagedRat()
for tileRow in range(numYtiles):
for tileCol in range(numXtiles):
topLine = tileRow * tileSize
leftPix = tileCol * tileSize
xsize = min(tileSize, npix-leftPix)
ysize = min(tileSize, nlines-topLine)
tileSegments = segband.ReadAsArray(leftPix, topLine, xsize, ysize)
tileImageData = imgband.ReadAsArray(leftPix, topLine, xsize, ysize)
accumulateSegDict(segDict, tileSegments, tileImageData)
calcStatsForCompletedSegs(segDict, pagedRat, statsSelection_fast,
segSize, numIntCols, numFloatCols)
writeCompletePages(pagedRat, attrTbl, statsSelection_fast)
# all pages should now be written. Raise an error if this not the case.
if len(pagedRat) > 0:
raise PyShepSegTilingError('Not all pixels found during processing')
| 5,353,834 |
def _get_thintar_prefix(tarname):
"""
Make sure thintar temporary name is concurrent and secure.
:param tarname: name of the chosen tarball
:return: prefixed tarname
"""
tfd, tmp_tarname = tempfile.mkstemp(
dir=os.path.dirname(tarname),
prefix=".thin-",
suffix=os.path.splitext(tarname)[1],
)
os.close(tfd)
return tmp_tarname
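# Usage sketch (not part of the original snippet): the helper returns a unique
# ".thin-*" sibling path with the same extension, which can be written safely
# and then renamed over the target tarball.
import os
import tempfile

workdir = tempfile.mkdtemp()
tmp_name = _get_thintar_prefix(os.path.join(workdir, "thin.tgz"))
print(os.path.basename(tmp_name).startswith(".thin-"), tmp_name.endswith(".tgz"))  # -> True True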
| 5,353,835 |
def retry_session(tries=2,
backoff_factor=0.1,
status_forcelist=(500, 502, 504),
session=None):
"""
Parameters
----------
    tries : int, number of retries.
backoff_factor : A backoff factor to apply between attempts after the
second try (most errors are resolved immediately by a second try without
a delay). urllib3 will sleep for: {backoff factor} * (2 ^ ({number of
total retries} - 1)) seconds. If the backoff_factor is 0.1, then sleep()
will sleep for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be
longer than Retry.BACKOFF_MAX.
status_forcelist :
Retries are made on any HTTP responses in this list. Default values
include the following:
- 500: Internal Server Error.
- 502: Bad Gateway.
- 504: Gateway Timeout.
    session : requests.Session, optional
        Existing session to configure; a new one is created if not given.
    Returns
    -------
    requests.Session
        The session with a retry-enabled HTTPAdapter mounted for both
        http:// and https://.
"""
session = session or requests.Session()
retry = Retry(
total=tries,
read=tries,
connect=tries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist)
adapter = HTTPAdapter(max_retries=retry, pool_block=True)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
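# Usage sketch (not part of the original snippet); assumes the imports the
# function relies on: requests, urllib3's Retry and requests' HTTPAdapter.
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = retry_session(tries=3, backoff_factor=0.2)
# response = session.get("https://example.com/")  # retried up to 3 times on 500/502/504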
| 5,353,836 |
def vc(t, delta, beta):
"""velocity correlation of locus on rouse polymer. beta = alpha/2."""
return ( np.power(np.abs(t - delta), beta)
+ np.power(np.abs(t + delta), beta)
- 2*np.power(np.abs(t), beta)
)/( 2*np.power(delta, beta) )
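# Quick numerical check (not part of the original snippet): at zero lag the
# normalised velocity correlation is exactly 1, and for beta < 1 it decays,
# dipping below zero once t exceeds delta.
import numpy as np

print(vc(0.0, delta=1.0, beta=0.25))              # -> 1.0
print(vc(np.array([0.5, 5.0, 50.0]), 1.0, 0.25))  # decaying values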
| 5,353,837 |
def algorithm(name):
"""
A function decorator that is used to add an algorithm's Python class to the
algorithm_table.
Args:
        name: A human-readable label for the algorithm that is used to identify
            it in the GUI
"""
def decorator(class_):
algorithm_table[name] = class_
return class_
return decorator
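# Usage sketch (not part of the original snippet). `algorithm_table` is the
# module-level registry the decorator populates; it is declared here only so
# the sketch runs standalone.
algorithm_table = {}

@algorithm("Bubble sort")
class BubbleSort:
    pass

print(algorithm_table["Bubble sort"] is BubbleSort)  # -> True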
| 5,353,838 |
def comp_easy():
"""Get easy components."""
return Components(ewlaps, gi_setting.DEFAULT_EASY)
| 5,353,839 |
def play(player1, player2, rounds=1, verbose=False, symdict=None):
"""Play a number of `rounds` matches between the two players and return
the score $S = sum_j a_j$, where
a_j = 1 if player1 wone --or-- -1 if player2 wone --or-- 0 otherwise.
"""
if player1 is player2:
raise AttributeError("Players match...")
if player1._rules is not player2._rules:
raise AttributeError("Different rules sets...")
if symdict is None:
        symdict = range(len(player1._rules))
score = [0, 0, 0]
results = ["Player1 wins.", "Tie.", "Player 2 wins."]
playiter = xrange(rounds) if verbose else Progress(xrange(rounds))
for i in playiter:
res1, res2 = player1.play(), player2.play()
player1._memory.append((res1, res2))
player2._memory.append((res2, res1))
resind = 1 - player1._rules[res1][res2]
score[resind] += 1
if verbose:
print("{} vs {}: {}".format(symdict[res1], symdict[res2],
results[resind]))
print(score)
return score
| 5,353,840 |
def test_replace_neighbor(test_image):
"""
    Tests the replace_neighbor and replace_constant functions in transform_data
    by asserting that the output shape matches the input shape
"""
img = test_image
new_img = td.replace_neighbor(img, (0, 500, img.shape[1], 600))
assert img.shape == new_img.shape
img = test_image
new_img = td.replace_constant(img, (0, 1, img.shape[1], 101), 0)
assert img.shape == new_img.shape
img = test_image
new_img = td.replace_constant(
img,
(0, img.shape[0] - 101, img.shape[1], img.shape[0] - 1),
0
)
assert img.shape == new_img.shape
| 5,353,841 |
def calc_deltabin_3bpavg(seq, files, bin_freqs, seqtype = "fastq"):
"""
    At each position (starting at i), count the number of sequences where the
    region (i):(i+3) is mutated. This is essentially a rolling average and is
    not critical to the result; it just ends up a bit cleaner than looking at a
    single base pair. We assume that, on average, a mutation disrupts binding,
    although this is not always the case. For example, especially with RNAP,
    there may be a couple of positions that are not at all optimal for DNA binding.
    Parameters
    ----------
seq: wild-type sequence of library region
files: filenames (used to identify bin number, '...bin*.fastq')
bin_freqs: numpy array (np.zeros([# bins, # letters (i.e. 4),
length sequence]) that contained the letter frequences from each
bin.
seqtype: sequence file type (i.e. '.fastq' or '.fasta')
Returns
-------
avgBin_counts: array 1*seqLength; contains counts used to calculate average
of mutated nucleotides at each position.
avgBin-avgbin_WT: average bin of mutated nucleotides at each position
relative to wild-type average bin.
"""
seqLength = len(seq)
avgBin_counts = np.zeros([len(files),seqLength])
avgBin = np.zeros(seqLength)
#filecount = 0
avgbin_WT = 0
for j in range(0,len(files)):
avgbin_WT += ( (j+1)*bin_freqs[j,:,0].sum() )/ bin_freqs[:,:,0].sum()
print('average_bin_WT', avgbin_WT)
for i in range(0,seqLength-2):
for j, fname in enumerate(files):
count = 0
binnumber = int(fname[-7]) - 1
for rec in SeqIO.parse(fname, seqtype):
if (rec.seq[i:(i+2)] != seq[i:(i+2)]):
count += 1
avgBin_counts[binnumber,i] = count
for i in range(0,seqLength-2):
for j in range(0,len(files)):
avgBin[i] += ( (j+1)*avgBin_counts[j,i] )/avgBin_counts[:,i].sum()
return avgBin_counts, (avgBin-avgbin_WT)
| 5,353,842 |
def make_right_handed(l_csl_p1, l_p_po):
"""
The function makes l_csl_p1 right handed.
Parameters
----------------
l_csl_p1: numpy.array
The CSL basis vectors in the primitive reference frame of crystal 1.
l_p_po: numpy.array
The primitive basis vectors of the underlying lattice in the orthogonal
reference frame.
Returns
-----------
t1_array: numpy.array
Right handed array
"""
l_csl_po1 = l_p_po.dot(l_csl_p1)
t1_array = np.array(l_csl_p1, dtype='double')
t2_array = np.array(l_csl_p1, dtype='double')
if (nla.det(l_csl_po1) < 0):
t1_array[:, 0] = t2_array[:, 1]
t1_array[:, 1] = t2_array[:, 0]
return t1_array
| 5,353,843 |
def process_task(f, module_name, class_name, ftype, f_parameters, f_returns,
task_kwargs, num_nodes, replicated, distributed,
on_failure, time_out):
"""
Function that submits a task to the runtime.
:param f: Function or method
:param module_name: Name of the module containing the function/method
(including packages, if any)
:param class_name: Name of the class (if method)
:param ftype: Function type
:param f_parameters: Function parameters (dictionary {'param1':Parameter()}
:param f_returns: Function returns (dictionary {'*return_X':Parameter()}
:param task_kwargs: Decorator arguments
:param num_nodes: Number of nodes that the task must use
:param replicated: Boolean indicating if the task must be replicated
:param distributed: Boolean indicating if the task must be distributed
:param on_failure: Action on failure
:param time_out: Time for a task time out
:return: The future object related to the task return
"""
if __debug__:
logger.debug("TASK: %s of type %s, in module %s, in class %s" %
(f.__name__, ftype, module_name, class_name))
app_id = 0
# Check if the function is an instance method or a class method.
has_target = ftype == FunctionType.INSTANCE_METHOD
    fo = None
    num_returns = 0  # default when the task declares no returns (avoids an unbound name later)
    if f_returns:
        fo = _build_return_objects(f_returns)
        num_returns = len(f_returns)
# Get path
if class_name == '':
path = module_name
else:
path = module_name + '.' + class_name
# Infer COMPSs types from real types, except for files
_serialize_objects(f_parameters)
# Build values and COMPSs types and directions
vtdsc = _build_values_types_directions(ftype,
f_parameters,
f_returns,
f.__code_strings__)
values, names, compss_types, compss_directions, compss_streams, \
compss_prefixes, content_types = vtdsc # noqa
# Get priority
has_priority = task_kwargs['priority']
# Signature and other parameters:
signature = '.'.join([path, f.__name__])
# num_nodes = 1 # default due to not MPI decorator yet
# replicated = False # default due to not replicated tag yet
# distributed = False # default due to not distributed tag yet
if __debug__:
# Log the task submission values for debugging purposes.
if logger.isEnabledFor(logging.DEBUG):
values_str = ' '.join(str(v) for v in values)
types_str = ' '.join(str(t) for t in compss_types)
direct_str = ' '.join(str(d) for d in compss_directions)
streams_str = ' '.join(str(s) for s in compss_streams)
prefixes_str = ' '.join(str(p) for p in compss_prefixes)
names_str = ' '.join(x for x in names)
ct_str = ' '.join(str(x) for x in content_types)
logger.debug("Processing task:")
logger.debug("\t- App id: " + str(app_id))
logger.debug("\t- Path: " + path)
logger.debug("\t- Function name: " + f.__name__)
logger.debug("\t- On failure behavior: " + on_failure)
logger.debug("\t- Task time out: " + str(time_out))
logger.debug("\t- Signature: " + signature)
logger.debug("\t- Priority: " + str(has_priority))
logger.debug("\t- Has target: " + str(has_target))
logger.debug("\t- Num nodes: " + str(num_nodes))
logger.debug("\t- Replicated: " + str(replicated))
logger.debug("\t- Distributed: " + str(distributed))
logger.debug("\t- Values: " + values_str)
logger.debug("\t- Names: " + names_str)
logger.debug("\t- COMPSs types: " + types_str)
logger.debug("\t- COMPSs directions: " + direct_str)
logger.debug("\t- COMPSs streams: " + streams_str)
logger.debug("\t- COMPSs prefixes: " + prefixes_str)
logger.debug("\t- Content Types: " + ct_str)
# Check that there is the same amount of values as their types, as well
# as their directions, streams and prefixes.
assert (len(values) == len(compss_types) == len(compss_directions) ==
len(compss_streams) == len(compss_prefixes) == len(content_types))
# Submit task to the runtime (call to the C extension):
# Parameters:
# 0 - <Integer> - application id (by default always 0 due to it is
# not currently needed for the signature)
# 1 - <String> - path of the module where the task is
#
# 2 - <String> - behavior if the task fails
#
# 3 - <String> - function name of the task (to be called from the
# worker)
# 4 - <String> - priority flag (true|false)
#
# 5 - <String> - has target (true|false). If the task is within an
# object or not.
# 6 - [<String>] - task parameters (basic types or file paths for
# objects)
# 7 - [<Integer>] - parameters types (number corresponding to the type
# of each parameter)
# 8 - [<Integer>] - parameters directions (number corresponding to the
# direction of each parameter)
# 9 - [<Integer>] - parameters streams (number corresponding to the
# stream of each parameter)
# 10 - [<String>] - parameters prefixes (sting corresponding to the
# prefix of each parameter)
compss.process_task(app_id,
signature,
on_failure,
time_out,
has_priority,
num_nodes,
replicated,
distributed,
has_target,
num_returns,
values,
names,
compss_types,
compss_directions,
compss_streams,
compss_prefixes,
content_types)
# Return the future object/s corresponding to the task
# This object will substitute the user expected return from the task and
# will be used later for synchronization or as a task parameter (then the
# runtime will take care of the dependency.
return fo
| 5,353,844 |
def write_point_cloud(name, verts):
"""Write a .obj file for a point cloud.
Parameters
----------
name : str
        Output file name.
verts : array
Spatial coordinates for vertices as returned by skimage.measure.marching_cubes_lewiner().
"""
with open(name, "w") as thefile:
for item in verts:
thefile.write("v {0} {1} {2}\n".format(item[0], item[1], item[2]))
| 5,353,845 |
def upgrade(migrate_engine):
"""Add UUID primary key column to encryption."""
meta = MetaData()
meta.bind = migrate_engine
encryptions = Table('encryption', meta, autoload=True)
    # NOTE: SQLite doesn't support the 'DROP CONSTRAINT' statement
if migrate_engine.name == 'sqlite':
_upgrade_sqlite(meta, encryptions)
else:
encryption_id_column_kwargs = {}
if migrate_engine.name == 'ibm_db_sa':
# NOTE(junxiebj): DB2 10.5 doesn't support primary key
# constraints over nullable columns, so we have to
# make the column non-nullable in the DB2 case.
encryption_id_column_kwargs['nullable'] = False
encryption_id = Column('encryption_id', String(36),
**encryption_id_column_kwargs)
encryptions.create_column(encryption_id)
encryption_items = list(encryptions.select().execute())
for item in encryption_items:
encryptions.update().\
where(encryptions.c.volume_type_id == item['volume_type_id']).\
values(encryption_id=str(uuid.uuid4())).execute()
# NOTE (e0ne): need to drop FK first for MySQL
if migrate_engine.name == 'mysql':
ref_table = Table('volume_types', meta, autoload=True)
params = {'columns': [encryptions.c['volume_type_id']],
'refcolumns': [ref_table.c['id']],
'name': 'encryption_ibfk_1'}
volume_type_fk = ForeignKeyConstraint(**params)
volume_type_fk.drop()
volume_type_pk = PrimaryKeyConstraint('volume_type_id',
table=encryptions)
volume_type_pk.drop()
pkey = PrimaryKeyConstraint(encryptions.columns.encryption_id)
pkey.create()
| 5,353,846 |
def company(anon, obj, field, val):
"""
Generates a random company name
"""
return anon.faker.company(field=field)
| 5,353,847 |
def delete_schedule():
"""
    On a GET request, returns the page for deleting a schedule.
    On a POST request, deletes the selected schedule
    (the deletion request comes from the main page (func index); the function has no template).
"""
if not check_admin_status():
flash(f'У вас нет прав для просмотра данной страницы!', 'error')
app.logger.warning(f"Сотрудник с недостаточным уровнем допуска попытался удалить расписание: {get_user_info()}")
return redirect(url_for('index'))
schedule_id = request.args.get('schedule_id')
ScheduleCleaning.query.filter_by(id=schedule_id).delete()
db.session.commit()
return redirect(url_for('index'))
| 5,353,848 |
def dict_from_JSON(JSON_file: str) -> dict:
"""
Takes a WDL-mapped json file and creates a dict containing the bindings.
:param JSON_file: A required JSON file containing WDL variable bindings.
"""
json_dict = {}
# TODO: Add context support for variables within multiple wdl files
with open(JSON_file) as data_file:
data = json.load(data_file)
for d in data:
if isinstance(data[d], str):
json_dict[d] = f'"{data[d]}"'
else:
json_dict[d] = data[d]
return json_dict
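# Usage sketch (not part of the original snippet): string values come back
# wrapped in literal double quotes (the WDL convention the function targets),
# while other JSON types pass through unchanged.
import json
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
    json.dump({"wf.sample": "NA12878", "wf.threads": 4}, tmp)

print(dict_from_JSON(tmp.name))  # -> {'wf.sample': '"NA12878"', 'wf.threads': 4}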
| 5,353,849 |
def show(id):
"""Renderiza a página de um político específico."""
p = Politico.query.get_or_404(id)
    # Apply the month and year filters and the pagination
mes, ano, tipo, page = (request.args.get("mes"),
request.args.get("ano", 2020, type=int),
request.args.get("tipo"),
request.args.get("page", 1, type=int))
form = form_filtro_despesas(parlamentar=p, ano=ano)
if form.validate_on_submit():
        # Drop empty filters like `mes=""` (all months),
        # keeping only those that are set, e.g. `mes=1`,
        # then redirect to apply the filters
params = {k: v for k, v in form.data.items() if v}
        # Remove the csrf_token before redirecting
params.pop("csrf_token")
return redirect(url_for("pages.show", id=id, **params))
pagination = p.despesas(ano, mes).paginate(page, 40,
error_out=True)
total_gasto = Reembolso.total_gasto(p, ano=ano, mes=mes)
return render_template("pages/show.html",
parlamentar=p,
pagination=pagination,
total_gasto=total_gasto,
form=form)
| 5,353,850 |
def map_datapoint(data_point: DATAPOINT_TYPE) -> SFX_OUTPUT_TYPE:
"""
Create dict value to send to SFX.
:param data_point: Dict with values to send
:type data_point: dict
:return: SignalFx data
:rtype: dict
"""
return {
"metric": data_point["metric"],
"value": data_point["value"],
"dimensions": dict(data_point["dimensions"], **default_dimensions) if "dimensions" in data_point else default_dimensions,
}
| 5,353,851 |
def write_obs(mdict, obslist, flag=0):
"""
"""
# Print epoch
epoch = mdict['epoch']
res = epoch.strftime("> %Y %m %d %H %M %S.") + '{0:06d}0'.format(int(epoch.microsecond))
# Epoch flag
res += " {0:2d}".format(flag)
# Num sats
res += " {0:2d}".format(len(mdict)-1)
res += '\n'
# For each satellite, print obs
for sat in mdict:
if sat == 'epoch':
continue
res += sat
obstypes = obslist[sat[0]]
for o in obstypes:
try:
meas = mdict[sat][o]
except KeyError:
meas = 0.0
# BeiDou satellites can have long ranges if GEO satellites are used
if meas > 40e6:
meas = 0.0
res += '{0:14.3f}00'.format(meas)
res += '\n'
return res
| 5,353,852 |
def open_view(
path: str,
*,
filesystem: Optional[Union[fsspec.AbstractFileSystem, str]] = None,
synchronizer: Optional[sync.Sync] = None,
) -> view.View:
"""Open an existing view.
Args:
path: View storage directory.
filesystem: The file system used to access the view.
synchronizer: The synchronizer used to synchronize the view.
Returns:
The opened view.
Example:
>>> view = open_view("/home/user/myview")
"""
return view.View.from_config(path,
filesystem=filesystem,
synchronizer=synchronizer)
| 5,353,853 |
def smi2xyz(smi, forcefield="mmff94", steps=50):
"""
Example:
utils.smi2xyz("CNC(C(C)(C)F)C(C)(F)F")
returns:
C 1.17813 0.06150 -0.07575
N 0.63662 0.20405 1.27030
C -0.86241 0.13667 1.33270
C -1.46928 -1.21234 0.80597
C -0.94997 -2.44123 1.55282
C -2.99527 -1.22252 0.74860
F -1.08861 -1.36389 -0.50896
C -1.34380 0.44926 2.78365
C -0.84421 1.76433 3.34474
F -2.70109 0.48371 2.84063
F -0.94986 -0.53971 3.63106
H 0.78344 0.82865 -0.74701
H 0.99920 -0.92873 -0.50038
H 2.26559 0.18049 -0.03746
H 1.03185 -0.51750 1.87094
H -1.24335 0.93908 0.68721
H -1.29943 -2.47273 2.58759
H -1.27996 -3.36049 1.05992
H 0.14418 -2.47324 1.55471
H -3.35862 -0.36599 0.16994
H -3.34471 -2.11983 0.22567
H -3.46364 -1.21709 1.73400
H -1.20223 2.60547 2.74528
H -1.22978 1.89248 4.36213
H 0.24662 1.79173 3.40731
"""
mol = pybel.readstring("smi", smi)
mol.addh() # add hydrogens, if this function is not called, pybel will output xyz string with no hydrogens.
mol.make3D(forcefield=forcefield, steps=steps)
# possible forcefields: ['uff', 'mmff94', 'ghemical']
mol.localopt()
return _to_pyscf_atom(mol)
| 5,353,854 |
def pgm_to_pointcloud(
depth_image: np.ndarray, color_image: Optional[np.ndarray],
intrinsics: Tuple[float, float, float, float],
distortion: List[float]) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Fast conversion of opencv images to pointcloud.
Takes ~7 ms per 1280x720 RGBD on my corp laptop (hirak).
Args:
depth_image: OpenCV image.
color_image: Corresponding color image, if colors for each point is desired.
intrinsics: fx, fy, cx, cy.
    distortion: Standard distortion params k1, k2, p1, p2, [k3, [k4, k5, k6]].
Returns:
points: Nx3 array of points in space.
colors: Nx3 array of colors, each row an RGB. None if color_image is None.
"""
# The code below is optimized for speed, further optimizations may also be
# possible.
x_axis, y_axis = np.mgrid[0:depth_image.shape[1], 0:depth_image.shape[0]]
valid = ~np.isnan(depth_image)
x_axis = x_axis.T[valid]
y_axis = y_axis.T[valid]
depth = depth_image[valid] * _DEPTH_SCALE
x_and_y = np.vstack([x_axis, y_axis]).astype(float)
fx, fy, cx, cy = intrinsics
camera_matrix = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
x_and_y = cv2.undistortPoints(x_and_y, camera_matrix, np.array(distortion))
x_and_y = x_and_y.T.reshape(2, -1)
points = np.vstack([x_and_y * depth, depth]).T
colors = None
if color_image is not None:
colors = color_image[valid]
if len(colors.shape) > 1 and colors.shape[1] == 3:
# OpenCV uses BGR. Point cloud libraries like to use RGB.
colors[:, [0, 2]] = colors[:, [2, 0]]
else:
colors = np.vstack([colors, colors, colors]).T
return points, colors
| 5,353,855 |
def plot_runs_with_avg(run_data, only=None):
"""Plot results of simulations sharing a configuration,
with their average results"""
# individual runs
labels_paths = list(enumerate(run_data['runs']))
# output to the run directory + /plots
output_path = os.path.join(run_data['path'], 'plots')
# plot
only = ['general'] + only if only is not None else ['general']
plot(labels_paths, output_path, {}, avg=(run_data['avg_type'], run_data['avg']), only=only)
| 5,353,856 |
def fromfile(file, shape=None):
"""Not supported for ObjectArray"""
raise TypeError("ObjectArray can't be read from a file.")
| 5,353,857 |
def matching_poss(poss_1, poss_2):
"""Count how many rows the possibilities have in common.
Arguments:
poss_1 {np.array} -- possibilities 1
poss_2 {np.array} -- possibilities 2
Returns:
int -- the count/matches
"""
matches = 0
for row_2 in poss_2:
for row_1 in poss_1:
if np.array_equal(row_1, row_2):
matches += 1
return matches
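# Usage sketch (not part of the original snippet): the two possibility matrices
# below share exactly one identical row.
import numpy as np

poss_1 = np.array([[1, 0], [0, 1]])
poss_2 = np.array([[0, 1], [1, 1]])
print(matching_poss(poss_1, poss_2))  # -> 1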
| 5,353,858 |
def p_statscmdcont_nocomma(p):
"""statscmdcont : STATS_FN
| COMMON_FN
| EVAL"""
p[1] = canonicalize(p[1])
p[0] = ParseTreeNode('_STATSCMDCONT')
fn_node = ParseTreeNode('FUNCTION', raw=p[1])
p[0].add_child(fn_node)
| 5,353,859 |
def unk_emb_stats(sentences, emb):
"""Compute some statistics about unknown tokens in sentences
such as "how many sentences contain an unknown token?".
emb can be gensim KeyedVectors or any other object implementing
__contains__
"""
from collections import Counter
stats = {
"sents": 0,
"tokens": 0,
"unk_tokens": 0,
"unk_types": 0,
"unk_tokens_lower": 0,
"unk_types_lower": 0,
"sents_with_unk_token": 0,
"sents_with_unk_token_lower": 0}
all_types = set()
for sent in sentences:
stats["sents"] += 1
any_unk_token = False
any_unk_token_lower = False
types = Counter(sent)
for ty, freq in types.items():
all_types.add(ty)
stats["tokens"] += freq
unk = ty not in emb
if unk:
any_unk_token = True
stats["unk_types"] += 1
stats["unk_tokens"] += freq
if unk and ty.lower() not in emb:
any_unk_token_lower = True
stats["unk_types_lower"] += 1
stats["unk_tokens_lower"] += freq
if any_unk_token:
stats["sents_with_unk_token"] += 1
if any_unk_token_lower:
stats["sents_with_unk_token_lower"] += 1
stats["types"] = len(all_types)
return stats
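# Usage sketch (not part of the original snippet): `emb` only needs to support
# `in`, so a plain set of known words stands in for gensim KeyedVectors here.
emb = {"the", "cat", "sat"}
sentences = [["The", "cat", "sat"], ["the", "dog", "sat"]]
stats = unk_emb_stats(sentences, emb)
print(stats["sents"], stats["unk_tokens"], stats["unk_tokens_lower"])  # -> 2 2 1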
| 5,353,860 |
def basename(path: str) -> str:
"""Returns the basename removing path and extension."""
return os.path.splitext(os.path.basename(path))[0]
| 5,353,861 |
async def search_dcu(
ldap_conn: LDAPConnection, dcu_id: str = None, uid: str = None, fullname: str = None
) -> List[DCUUser]:
"""
    Search DCU AD for a user
    Args:
        ldap_conn: LDAP connection to use for searching
        uid: Username to search for
        dcu_id: DCU student ID number
        fullname: User's full name
    Returns:
        A list of users found in AD matching the search criteria
"""
query = "".join(
filter(
None,
[
f"(displayName={fullname})" if fullname else None,
f"(cn={uid})" if uid else None,
f"(id={dcu_id})" if dcu_id else None,
],
)
)
if not query:
return []
res = await ldap_conn.search("o=ad,o=dcu,o=ie", f"(&{query})", attributes=DCU_ATTR)
return [DCUUser.from_ldap(user) for user in res]
| 5,353,862 |
def jump(inst_ptr, program, direction):
"""Jump the instruction pointer in the program until matching bracket"""
count = direction
while count != 0:
inst_ptr += direction
char = program[inst_ptr]
if char == '[':
count += 1
elif char == ']':
count -= 1
else:
pass
return inst_ptr
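# Usage sketch (not part of the original snippet): starting on the '[' at
# index 2 and scanning forward (direction=+1) lands on the matching ']'.
program = "++[->+<]"
print(jump(2, program, 1))   # -> 7 (index of the matching ']')
print(jump(7, program, -1))  # -> 2 (back to the matching '[')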
| 5,353,863 |
def htmlResponse(environ, start_response=None, checkuser=False):
"""
htmlResponse - return a Html Page
"""
if checkuser and not check_user_permissions(environ):
environ["url"] = justpath(environ["SCRIPT_FILENAME"])+"/back.html"
return htmlResponse(environ, start_response)
url = environ["url"] if "url" in environ else normpath(environ["SCRIPT_FILENAME"])
url = forceext(url, "html")
DOCUMENT_ROOT = environ["DOCUMENT_ROOT"] if "DOCUMENT_ROOT" in environ else ""
#HTTP_COOKIE = getCookies(environ)
if not isfile(url):
return httpResponseNotFound(start_response)
workdir = justpath(url)
index_html = justfname(url)
jss = (DOCUMENT_ROOT + "/lib/js",
justpath(url),)
csss = (DOCUMENT_ROOT + "/lib/css",
DOCUMENT_ROOT + "/lib/js",
DOCUMENT_ROOT + "/lib/images",
justpath(url),)
env = Environment(loader=FileSystemLoader(workdir))
t = env.get_template(index_html)
import gecosistema_core
variables = {
"loadjs": loadlibs(jss,"js"),
"loadcss": loadlibs(csss,"css"),
"APPNAME": juststem(workdir),
"os": os,
"math": math,
"gecosistema_core": gecosistema_core,
"environ":environ,
"__file__":url
}
html = t.render(variables).encode("utf-8","replace")
return httpResponseOK(html, start_response)
| 5,353,864 |
def center_image(IM, method='com', odd_size=True, square=False, axes=(0, 1),
crop='maintain_size', verbose=False, center=_deprecated,
**kwargs):
"""
Center image with the custom value or by several methods provided in
:func:`find_origin()` function.
Parameters
----------
IM : 2D np.array
The image data.
method : tuple or str
either a tuple (float, float), the coordinate of the origin of the
image in the (row, column) format, or a string to specify an automatic
centering method:
``image_center``
the center of the image is used as the origin. The trivial result.
``com``
the origin is found as the center of mass.
``convolution``
the origin is found as the maximum of autoconvolution of the image
projections along each axis.
``gaussian``
the origin is extracted from a fit to a Gaussian function.
This is probably only appropriate if the data resembles a
gaussian.
``slice``
the image is broken into slices, and these slices compared for
symmetry.
odd_size : boolean
if ``True``, the returned image will contain an odd number of columns.
Most of the transform methods require this, so it's best to set this
to ``True`` if the image will subsequently be Abel-transformed.
square : bool
if ``True``, the returned image will have a square shape.
crop : str
determines how the image should be cropped. The options are:
``maintain_size``
return image of the same size. Some regions of the original image
may be lost, and some regions may be filled with zeros.
``valid_region``
return the largest image that can be created without padding.
All of the returned image will correspond to the original image.
However, portions of the original image will be lost.
If you can tolerate clipping the edges of the image, this is
probably the method to choose.
``maintain_data``
the image will be padded with zeros such that none of the original
image will be cropped.
axes : int or tuple
center image with respect to axis ``0`` (vertical), ``1`` (horizontal),
or both axes ``(0, 1)`` (default).
Returns
-------
out : 2D np.array
centered image
"""
if center is not _deprecated:
_deprecate('abel.tools.center.center_image() '
'argument "center" is deprecated, use "method" instead.')
method = center
rows, cols = IM.shape
if odd_size and cols % 2 == 0:
# drop rightside column
IM = IM[:, :-1]
rows, cols = IM.shape
if square and rows != cols:
# make rows == cols, but maintain approx. center
if rows > cols:
diff = rows - cols
trim = diff//2
if trim > 0:
IM = IM[trim: -trim] # remove even number of rows off each end
if diff % 2:
IM = IM[: -1] # remove one additional row
else:
# make rows == cols, check row oddness
if odd_size and rows % 2 == 0:
IM = IM[:-1, :]
rows -= 1
xs = (cols - rows)//2
IM = IM[:, xs:-xs]
rows, cols = IM.shape
# origin is in (row, column) format!
if isinstance(method, string_types):
origin = find_origin(IM, method=method, verbose=verbose, **kwargs)
else:
origin = method
centered_data = set_center(IM, origin=origin, crop=crop, axes=axes,
verbose=verbose)
return centered_data
| 5,353,865 |
def check_header(install_path):
"""Method to check the final genomics headers have a header or not
check_header
============
This method is going to go through each of the files that were created by the recipe,
and it will check if the those files have a header or not.
sam/bam/cram, vcf/bcf, gtf/gff/gff3, bed/bedGraph, csv, txt files require a header and if no header is provided
check-recipe will fail.
Other files that don't have header will be given a warning. GGD expects most files to have
a header. Some files are okay not to have headers, but if a header can be added it should be.
For each file, the file header and first 5 lines of the file body will be provided to stdout.
Parameters:
-----------
1) install_path: (str) The path to the directory where the files have been installed into.
Returns:
+++++++
(bool) True or False.
- True if a header exist or if only a warning was given
- False if a header does not exists and is required
"""
print(
":ggd:check-recipe: Checking that the final files have headers if appropriate\n"
)
installed_files = os.listdir(install_path)
for file_name in [
x for x in installed_files if os.path.isfile(os.path.join(install_path, x))
]:
f_path = os.path.join(install_path, file_name)
## Check for an index file
if file_name.strip().split(".")[-1] in set(
["tbi", "bai", "crai", "fai", "tar", "bz2", "bw", "csi", "gzi"]
):
continue
## Skip fasta or fastq files
if any(x in file_name for x in [".fasta", ".fa", ".fastq", ".fq"]):
continue
## Check for sam/bam/cram files
if any(x in file_name for x in [".sam", ".bam", ".cram"]):
import pysam
try:
samfile = pysam.AlignmentFile(f_path, check_sq=False)
header = samfile.header
if any(header.lengths):
print(
":ggd:check-recipe: Header found in file {name}\n".format(
name=file_name
)
)
print("Head of file:")
print("---------------------------")
print(str(header).strip())
for i, read in enumerate(samfile):
print(read)
if i >= 4:
break
print("---------------------------\n")
else:
print(
":ggd:check-recipe: !!ERROR!! No header found for file {name}\n".format(
name=file_name
)
)
print(
":ggd:check-recipe: !!ERROR!! A header is required for sam/bam/cram files\n"
)
return False
except (ValueError, IOError, Exception) as e:
print(str(e))
print(
":ggd:check-recipe: !!ERROR!! No header found for file {name}\n".format(
name=file_name
)
)
print(
":ggd:check-recipe: !!ERROR!! A header is required for sam/bam/cram files\n"
)
return False
## Check vcf/bcf files
elif any(x in file_name for x in [".vcf", ".bcf"]):
from cyvcf2 import VCF
try:
vcffile = VCF(f_path)
header = str(vcffile.raw_header)
if header:
print(
":ggd:check-recipe: Header found in file {name}\n".format(
name=file_name
)
)
print("Head of file:")
print("---------------------------")
print(str(header).strip())
for i, var in enumerate(vcffile):
print(var)
if i >= 4:
break
print("---------------------------\n")
else:
print(
":ggd:check-recipe: !!ERROR!! No header found for file {name}\n".format(
name=file_name
)
)
print(
":ggd:check-recipe: !!ERROR!! A header is required for vcf/bcf files\n"
)
return False
except IOError as e:
print(str(e))
print(
":ggd:check-recipe: !!ERROR!! No header found for file {name}\n".format(
name=file_name
)
)
print(
":ggd:check-recipe: !!ERROR!! A header is required for vcf/bcf files\n"
)
return False
## Check other files
else:
import gzip
try:
file_handler = (
gzip.open(f_path) if f_path.endswith(".gz") else open(f_path)
)
header = []
body = []
try:
for line in file_handler:
if type(line) != str:
line = line.strip().decode("utf-8")
if len(line) > 0 and str(line)[0] in set(["#", "!", "^"]):
header.append(str(line).strip())
else:
body.append(str(line).strip())
if len(body) > 4:
break
except UnicodeDecodeError:
print(
":ggd:check-recipe: Cannot decode file contents into unicode.\n"
)
pass
if header:
print(
":ggd:check-recipe: Header found in file {name}\n".format(
name=file_name
)
)
print("Head of file:")
print("---------------------------")
print("\n".join(header))
print("\n".join(body))
print("---------------------------\n")
elif any(
x in file_name
for x in [
".gtf",
".gff",
".gff3",
".bed",
".bedGraph",
".csv",
".txt",
]
):
print(
":ggd:check-recipe: !!ERROR!! No header found for file {name}\n".format(
name=file_name
)
)
print(
":ggd:check-recipe: !!ERROR!! A header is required for this type of file\n"
)
print("First 5 lines of file body:")
print("---------------------------")
print("\n".join(body))
print("---------------------------\n")
return False
else:
print(
":ggd:check-recipe: !!WARNING!! No header found for file {name}\n".format(
name=file_name
)
)
print("First 5 lines of file body:")
print("---------------------------")
print("\n".join(body))
print("---------------------------\n")
print(
":ggd:check-recipe: !!WARNING!! GGD requires that any file that can have a header should. Please either add a header or if the file cannot have a header move forward.\n"
)
print(
":ggd:check-recipe: !!WARNING!! IF you move forward without adding a header when one should be added, this recipe will be rejected until a header is added.\n"
)
except IOError as e:
print(":ggd:check-recipe: !!ERROR!!")
print(str(e))
return False
return True
| 5,353,866 |
def poc_plot(*args, **kwds):
# based on Ignacio Serrano-Pedraza Excel spreadsheet
# "sdt_serranopedraza (version 1).xls"
"""
    Probability of Occurrence Curves (POC)
args:
1 argument:
sdt_metrics.SDT object
2 arguments:
pHI
pFA
4 arguments:
hit count
miss count
correction rejection count
false alarm count
kwds:
xmax: sets maximum x-limit
fname: outputname
dpi: resolution of plot
"""
# process keyword arguments
fname = kwds.get('fname','poc_plot.png')
dpi = kwds.get('dpi',150)
# xmax handled later (need dprime)
# assume arg is an sdt object
if len(args) == 1:
sdt_obj = args[0]
hi,mi,cr,fa = sdt_obj['HI'],sdt_obj['MI'],sdt_obj['CR'],sdt_obj['FA']
dprime = sdt_obj.dprime()
criterion = ltqnorm(1.-sdt_obj.p('HI')) + dprime
# assume args are hit and false alarm rates
elif len(args) == 2:
pHI,pFA = args
dprime = sdt_metrics.dprime(*args)
criterion = ltqnorm(1.-pHI) + dprime
# assume args hit, miss, cr, and fa counts
elif len(args) == 4:
hi,mi,cr,fa = args
sdt_obj = sdt_metrics.SDT(HI=hi,MI=mi,CR=cr,FA=fa)
dprime = sdt_obj.dprime()
criterion = ltqnorm(1.-sdt_obj.p('HI')) + dprime
# This represents the x-axis for the normal curves
# in practice it is much longer than we need it, but we
# want to be sure the end of the tail doesn't show with
# high dprime values
Z = np.linspace(-10,10,512)
# this is a normal distribution -10 < Z < 10
fxn = _normdist(Z)
# initialize the figure (16/9 aspect ratio)
pylab.figure(figsize=(8,4.5))
## pylab.figure(figsize=(7,3))
pylab.subplots_adjust(left=.08,right=.98)
# plot the noise distribution
pylab.plot(Z, fxn, 'r', lw=2., label = r'$f(x|n)$')#,alpha=.4)
pylab.fill(Z, fxn, 'r', lw=0., alpha=.25)
# plot the signal distribution
pylab.plot(Z + dprime, fxn, 'b--', lw=2., label = r'$f(x|s)$')#,alpha=.4)
pylab.fill(Z + dprime, fxn, 'b', lw=0., alpha=.25)
# annotate the peak of the signal distribution
pylab.text(dprime, .41, r'$%.3f$'%dprime, horizontalalignment='center')
# plot the criterion
pylab.axvline(criterion, color='g', ls=':', lw=2., label=r'$criterion$')
# annotate the criterion
arrow_props = dict(facecolor='k', shrink=0.05, width=.25, headwidth=4., frac=.2)
pylab.annotate(r'$%.3f$'%criterion, xy=(criterion, .44), xytext=(criterion-1.1, .46),
arrowprops=arrow_props, verticalalignment='bottom')
## pylab.text(-2.5,.1, r'$sdt\_metrics$', fontsize=60) # used for Sphinx doc logo
# format the y-axis
pylab.ylim([0.0, 0.5])
pylab.yticks([.0,.1,.2,.3,.4,.5])
# add counts if available
if len(args) != 2:
pylab.plot([-3.6,-2.0],[.4,.4],'k')
pylab.plot([-2.8,-2.8],[.35,.45],'k')
pylab.text(-2.9, .41, '$%i$'%hi, horizontalalignment='right', verticalalignment='bottom')
pylab.text(-2.9, .39, '$%i$'%fa, horizontalalignment='right', verticalalignment='top')
pylab.text(-2.7, .41, '$%i$'%mi, horizontalalignment='left', verticalalignment='bottom')
pylab.text(-2.7, .39, '$%i$'%cr, horizontalalignment='left', verticalalignment='top')
# format the x-axis
if 'xmax' in kwds:
pylab.xlim([-4., kwds['xmax']])
else:
pylab.xlim([-4., 4.6+dprime])
# show the legend
# by default it is located in the upper right corner
pylab.legend()
# save and close figure
pylab.savefig(fname,dpi=dpi)
pylab.close()
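# Usage sketch (added for illustration): the three supported call signatures.
# Assumes the sdt_metrics package and matplotlib are installed and that the module
# helpers used above (ltqnorm, _normdist) are defined; file names are placeholders.
import sdt_metrics

example_sdt = sdt_metrics.SDT(HI=80, MI=20, CR=70, FA=30)
poc_plot(example_sdt, fname='poc_from_sdt.png')      # 1 argument: SDT object
poc_plot(0.8, 0.3, fname='poc_from_rates.png')       # 2 arguments: hit / false-alarm rates
poc_plot(80, 20, 70, 30, xmax=5.0, dpi=300)          # 4 arguments: HI, MI, CR, FA counts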
| 5,353,867 |
async def test__json_request(aresponses):
"""Test JSON response is handled correctly."""
aresponses.add(
"example.com",
"/api/test",
"GET",
aresponses.Response(
status=200,
headers={"Content-Type": "application/json"},
text='{"status": "ok"}',
),
)
async with aiohttp.ClientSession() as session:
p1monitor = P1Monitor("example.com", session=session)
await p1monitor.request("test")
await p1monitor.close()
| 5,353,868 |
def get_mapping(mapping_name):
"""
    Reads in the given mapping and returns a dictionary of letters to keys. If the given mapping is already a dictionary,
    does nothing and returns the mapping
    mapping_name can be a path to different file formats
"""
# read in mapping
    if type(mapping_name) == str:
        extension = mapping_name.split(".")[-1]
        if extension == "mst":
            mapping = create_map_from_reformulation(mapping_name)
        elif extension == "txt":
            mapping = create_map_from_txt(mapping_name)
        else:
            raise ValueError("Unsupported mapping file format: .%s" % extension)
        return mapping
else:
return mapping_name
| 5,353,869 |
def evaluate(model, reward_gen, n_steps=1000000, delta=1):
"""Evaulate the regrets and rewards of a given model based on a given reward
generator
Args:
model (TYPE): Description
n_steps (int, optional): Description
delta (int, optional): Number of steps for feedback delay
reward_gen (TYPE, optional): Description
Returns:
regrets (list): List of regrets for each round. Regret is the maximum
reward minus the selected action's reward for the round
rewards (list): List of rewards for actions taken
"""
regrets = []
rewards = []
last_rewards = []
last_changes = []
last_selected_actions = []
    # initialize successes and failures to 0 for all items
successes = np.zeros(model.n_items)
failures = np.zeros(model.n_items)
for step in range(1, n_steps + 1):
reward_vector, item_changed = reward_gen.get_rewards()
# reinitialize the successes and failures if the item has changed
if item_changed:
successes = np.zeros(model.n_items)
failures = np.zeros(model.n_items)
selected_action = model.get_action(item_changed, successes, failures)
regret = (
np.max(reward_gen.reward_probs) - reward_gen.reward_probs[selected_action]
)
regrets.append(regret)
rewards.append(reward_vector[selected_action])
last_rewards.append(reward_vector[selected_action])
last_changes.append(item_changed)
last_selected_actions.append(selected_action)
# record success or failure of action at appropriate index in
        # successes or failures
if reward_vector[selected_action] == 1:
successes[selected_action] += 1
else:
failures[selected_action] += 1
# Feedback if delta steps have passed
if step % delta == 0:
model.update(last_selected_actions, last_rewards, last_changes)
last_rewards = []
last_changes = []
last_selected_actions = []
return regrets, rewards
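# Hedged usage sketch of the interfaces evaluate() expects. BernoulliRewards and
# RandomModel are hypothetical stand-ins for a reward generator and a bandit model;
# they are not part of the original code base.
import numpy as np

class BernoulliRewards:
    """Stationary Bernoulli arms; get_rewards() returns (reward_vector, item_changed)."""
    def __init__(self, probs, seed=0):
        self.reward_probs = np.asarray(probs)
        self.rng = np.random.default_rng(seed)
    def get_rewards(self):
        draws = self.rng.random(len(self.reward_probs)) < self.reward_probs
        return draws.astype(int), False

class RandomModel:
    """Picks arms uniformly at random and ignores feedback."""
    def __init__(self, n_items, seed=0):
        self.n_items = n_items
        self.rng = np.random.default_rng(seed)
    def get_action(self, item_changed, successes, failures):
        return self.rng.integers(self.n_items)
    def update(self, actions, rewards, changes):
        pass

regrets, rewards = evaluate(RandomModel(n_items=3), BernoulliRewards([0.2, 0.5, 0.8]),
                            n_steps=1000, delta=10)
print(sum(regrets), sum(rewards))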
| 5,353,870 |
def get_photo_from_response(response: dict):
"""
    parse json response and return a Photo
Keyword arguments:
response -- meetup api response in a dict
return -> get or create Photo
"""
photo, create = Photo.objects.get_or_create(meetup_id=response["id"])
# add optional fields
if "highres_link" in response:
photo.highres_link = response["highres_link"]
if "base_url" in response:
photo.base_url = response["base_url"]
if "photo_link" in response:
photo.photo_link = response["photo_link"]
if "thumb_link" in response:
photo.thumb_link = response["thumb_link"]
if "type" in response:
photo.photo_type = response["type"]
photo.save()
return photo
| 5,353,871 |
def composition_plot(adata: AnnData, by: str, condition: str, stacked: bool = True, normalize: bool = True,
condition_sort_by: str = None, cmap: Union[str, List[str], Tuple[str]] = None,
**kwds) -> hv.core.element.Element:
"""
Generate a composition plot, which shows the percentage of observations from every condition within each cluster (by).
Args:
adata: Annotated data matrix.
by: Key for accessing variables of adata.var_names or a field of adata.obs used to group the data.
condition: Key for accessing variables of adata.var_names or a field of adata.obs used to compute counts within a group.
stacked: Whether bars are stacked.
normalize: Normalize counts within each group to sum to one.
condition_sort_by: Sort condition within each group by max, mean, natsorted, or None.
cmap: Color map name (hv.plotting.list_cmaps()) or a list of hex colors. See http://holoviews.org/user_guide/Styling_Plots.html for more information.
"""
adata_raw = __get_raw(adata, False)
keys = [by, condition]
adata_df = __get_df(adata, adata_raw, keys)
for column in adata_df:
if not pd.api.types.is_categorical_dtype(adata_df[column]):
adata_df[column] = adata_df[column].astype(str).astype('category')
cmap = __get_category_cmap(adata_raw, adata_df, condition) if cmap is None else __fix_cmap(adata_df, condition,
cmap)
keywords = dict(stacked=stacked, group_label=condition)
keywords.update(kwds)
invert = keywords.get('invert', False)
if not invert and 'rot' not in keywords:
keywords['rot'] = 90
dummy_df = pd.get_dummies(adata_df[condition])
df = pd.concat([adata_df, dummy_df], axis=1)
df = df.groupby(by).agg(np.sum)
if normalize:
df = df.T.div(df.sum(axis=1)).T
if not (pd.api.types.is_categorical_dtype(df.index) and df.index.dtype.ordered):
df = df.loc[natsorted(df.index)]
secondary = dummy_df.columns.values
if condition_sort_by == 'max' or condition_sort_by == 'mean':
secondary_sort = df.values.max(axis=0) if condition_sort_by == 'max' else df.values.mean(axis=0)
index = np.flip(np.argsort(secondary_sort))
secondary = secondary[index]
elif condition_sort_by == 'natsorted':
secondary = natsorted(secondary)
secondary = list(secondary)
p = df.hvplot.bar(by, secondary, cmap=cmap, **keywords)
p.df = df
return p
| 5,353,872 |
def node_clone_for_pipeline(graph, orig_op, micro_batch_idx, device):
"""Clone a operation to 'device' from 'orig_op' for pipeline."""
micro_batch_prefix = common.get_micro_batch_prefix(micro_batch_idx)
# get node def
node_def = copy.deepcopy(orig_op.node_def)
node_def.name = micro_batch_prefix + node_def.name
frame_name = node_def.attr.get('frame_name')
if frame_name:
node_def.attr.get('frame_name').s = compat.as_bytes(micro_batch_prefix, constant.ENCODING) + frame_name.s
op_def = copy.deepcopy(orig_op.op_def)
output_types = orig_op.output_types[:]
input_types = orig_op.input_types[:]
graph.unready_inputs_cache[node_def.name] = dict()
graph.unready_control_inputs_cache[node_def.name] = dict()
# get inputs
inputs = []
for inp_idx, inp in enumerate(orig_op.inputs):
if graph.is_dataset_type(orig_op) or \
graph.is_dataset_related(inp.producer) or \
graph.is_vars_related(inp.producer) or \
graph.is_global_step_related(inp):
name = inp.name
else:
name = micro_batch_prefix + inp.name
if name in graph.tensors:
inputs.append(graph.get_tensor_by_name(name).primitive_obj)
else:
tensor = graph.get_tensor_by_name(inp.name)
if tensor.producer.get_control_flow_context() is not None:
graph.original_context_cache[tensor.producer.name] = tensor.producer.get_control_flow_context()
tensor.producer.set_control_flow_context(None)
inputs.append(tensor.primitive_obj)
graph.unready_inputs_cache[node_def.name][inp_idx] = name
# get control inputs
control_inputs = []
old_control_inputs = list(orig_op.control_inputs)
for c_inp in old_control_inputs:
c_inp = c_inp if isinstance(c_inp, ops.Operation) else c_inp.producer
c_inp = graph.get_operation_by_name(c_inp.name)
if graph.is_dataset_related(orig_op) or \
graph.is_dataset_related(c_inp) or \
graph.is_vars_related(c_inp) or \
graph.is_global_step_related(c_inp):
name = c_inp.name
else:
name = micro_batch_prefix + c_inp.name
if name in graph.operations:
control_inputs.append(graph.get_operation_by_name(name).primitive_obj)
else:
op = graph.get_operation_by_name(c_inp.name)
if op.get_control_flow_context() is not None:
graph.original_context_cache[op.name] = op.get_control_flow_context()
op.set_control_flow_context(None)
control_inputs.append(op.primitive_obj)
graph.unready_control_inputs_cache[node_def.name][op.name] = name
if not graph.unready_inputs_cache[node_def.name]:
del graph.unready_inputs_cache[node_def.name]
if not graph.unready_control_inputs_cache[node_def.name]:
del graph.unready_control_inputs_cache[node_def.name]
with ModelPhase(orig_op.phase):
graph.current_cloned_taskgraph = orig_op.taskgraph
new_op = ops.Operation(node_def,
ops.get_default_graph(),
inputs,
output_types,
control_inputs,
input_types,
None,
op_def=op_def)
new_op._set_device(device) # pylint: disable=protected-access
| 5,353,873 |
def threadbased():
"""threadbased-session
    This is a thread-based session test
"""
port = get_free_port()
print('free port', port)
run_as_function(target())
| 5,353,874 |
def test_read_cwd_file(config_file, config, tmpdir):
"""
:GIVEN: Nothing.
:WHEN: Loading the config file from the cwd.
:THEN: Verify the correct file is loaded.
"""
with tmpdir.as_cwd():
assert sut.Config._read_cwd_file() == config
| 5,353,875 |
def region_of_province(province_in: str) -> str:
"""
Return the corresponding key in ITALY_MAP whose value contains province_in
:param province_in: str
:return: str
"""
region = None
for r in ITALY_MAP:
for p in ITALY_MAP[r]:
if province_in == p:
region = r
return region
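# Illustration only: a hypothetical excerpt of the ITALY_MAP structure the function
# expects (region name -> iterable of its provinces). The real module defines the full map.
ITALY_MAP = {
    "Lombardia": ["Milano", "Bergamo", "Brescia"],
    "Lazio": ["Roma", "Latina"],
}

print(region_of_province("Bergamo"))   # Lombardia
print(region_of_province("Torino"))    # None (not present in this excerpt)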
| 5,353,876 |
def filter_stopwords(words:list)->iter:
"""
Filter the stop words
"""
words = filter(is_not_stopword, words)
return words
| 5,353,877 |
def numpy_jaccard(box_a, box_b):
"""计算两组矩形两两之间的iou
Args:
box_a: (tensor) bounding boxes, Shape: [A, 4].
box_b: (tensor) bounding boxes, Shape: [B, 4].
Return:
ious: (tensor) Shape: [A, B]
"""
A = box_a.shape[0]
B = box_b.shape[0]
box_a_x1y1 = np.reshape(box_a[:, 2:], (A, 1, 2))
box_a_x1y1 = np.tile(box_a_x1y1, (1, B, 1))
box_b_x1y1 = np.reshape(box_b[:, 2:], (1, B, 2))
box_b_x1y1 = np.tile(box_b_x1y1, (A, 1, 1))
box_a_x0y0 = np.reshape(box_a[:, :2], (A, 1, 2))
box_a_x0y0 = np.tile(box_a_x0y0, (1, B, 1))
box_b_x0y0 = np.reshape(box_b[:, :2], (1, B, 2))
box_b_x0y0 = np.tile(box_b_x0y0, (A, 1, 1))
max_xy = np.minimum(box_a_x1y1, box_b_x1y1)
min_xy = np.maximum(box_a_x0y0, box_b_x0y0)
inter = np.clip((max_xy - min_xy), 0.0, np.inf)
inter = inter[:, :, 0] * inter[:, :, 1]
area_a = ((box_a[:, 2]-box_a[:, 0]) * (box_a[:, 3]-box_a[:, 1]))
area_a = np.reshape(area_a, (A, 1))
area_a = np.tile(area_a, (1, B))
area_b = ((box_b[:, 2]-box_b[:, 0]) * (box_b[:, 3]-box_b[:, 1]))
area_b = np.reshape(area_b, (1, B))
area_b = np.tile(area_b, (A, 1))
union = area_a + area_b - inter
return inter / union
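# Small self-contained check with hand-made [x0, y0, x1, y1] boxes.
import numpy as np

box_a = np.array([[0.0, 0.0, 2.0, 2.0],
                  [1.0, 1.0, 3.0, 3.0]])
box_b = np.array([[0.0, 0.0, 2.0, 2.0],
                  [2.0, 2.0, 4.0, 4.0]])
ious = numpy_jaccard(box_a, box_b)
print(ious.shape)    # (2, 2)
print(ious[0, 0])    # 1.0 (identical boxes)
print(ious[1, 0])    # ~0.1429 (intersection 1, union 7)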
| 5,353,878 |
def test_get_rule(client_rule_factory, client_response_factory, registered_rule):
"""Check request data that client uses to get a rule.
1. Create a subclass of the abstract client.
2. Implement send request so that it checks the request parameters.
3. Invoke the get_rule method.
4. Check the rule, returned by the method call.
"""
rule_id = str(uuid.uuid4())
class _Client(AbstractClient):
def _send_request(self, url, method="GET", json=None):
assert url == "rule/{0}".format(rule_id), "Wrong url"
assert method == "GET", "Wrong method"
assert json is None, "Data has been specified"
response_json = {"rule_id": rule_id}
response_json.update(self._rule_factory.serialize_rule(rule=registered_rule))
return response_json
client = _Client(
configuration_url="/",
rule_factory=client_rule_factory,
response_factory=client_response_factory,
)
obtained_rule = client.get_rule(rule_id=rule_id)
assert obtained_rule.rule_id == rule_id, "Rule ID has not been set"
| 5,353,879 |
def summaryhsl(all_summaries, summary):
"""
Choose a color for the given system summary to distinguish it from other types of systems.
Returns hue, saturation, and luminance for the start of the range, and how much the hue can be randomly varied while staying distinguishable.
"""
lowest_att = min(att for att, ms in all_summaries)
highest_att = max(att for att, ms in all_summaries)
att_range = highest_att - lowest_att + 1
attractors, monotonic_species = summary
lowest_ms = min(ms for att, ms in all_summaries if att == attractors)
highest_ms = max(ms for att, ms in all_summaries if att == attractors)
ms_range = highest_ms - lowest_ms + 1
bin_width = 1 / (ms_range + 1) / att_range
hue = ((highest_att - attractors) / att_range) + (highest_ms - monotonic_species) * bin_width
variability_squeeze = (2 if att_range > 1 else 1) * (2 if ms_range > 1 else 1)
return hue, 1, colorsys.ONE_THIRD, bin_width / variability_squeeze
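# Runnable example: assign distinguishable colors to three (attractors, monotonic_species)
# summaries and convert them to RGB (note that colorsys uses HLS argument order).
import colorsys

all_summaries = [(1, 0), (1, 2), (2, 1)]
for summary in all_summaries:
    hue, sat, lum, hue_spread = summaryhsl(all_summaries, summary)
    r, g, b = colorsys.hls_to_rgb(hue, lum, sat)
    print(summary, round(hue, 3), (round(r, 2), round(g, 2), round(b, 2)))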
| 5,353,880 |
def _get_cached_values(instance, translated_model, language_code, use_fallback=False):
"""
Fetch an cached field.
"""
if not appsettings.PARLER_ENABLE_CACHING or not instance.pk or instance._state.adding:
return None
key = get_translation_cache_key(translated_model, instance.pk, language_code)
values = cache.get(key)
if not values:
return None
# Check for a stored fallback marker
if values.get('__FALLBACK__', False):
# Internal trick, already set the fallback marker, so no query will be performed.
instance._translations_cache[translated_model][language_code] = MISSING
# Allow to return the fallback language instead.
if use_fallback:
lang_dict = get_language_settings(language_code)
# iterate over list of fallback languages, which should be already
# in proper order
for fallback_lang in lang_dict['fallbacks']:
if fallback_lang != language_code:
return _get_cached_values(
instance, translated_model, fallback_lang,
use_fallback=False
)
return None
values['master'] = instance
values['language_code'] = language_code
return values
| 5,353,881 |
def configure(tm_env, app, container_dir):
"""Configures all plugins.
"""
for hook in plugin_manager.load_all(_PLUGINS_NS):
_LOGGER.info('Configuring plugin %r.', hook)
hook(tm_env).configure(app, container_dir)
| 5,353,882 |
def tensor_index_by_list(data, list_index):
"""Tensor getitem by list of int and bool"""
data_shape = F.shape(data)
indexes_types = hyper_map(F.typeof, list_index)
if const_utils.judge_indexes_types(indexes_types, mstype.int_type + (mstype.bool_,)):
sub_tuple_index = const_utils.transform_sequence_index(list_index, data_shape[0], const_utils.TENSOR_GETITEM)
if not sub_tuple_index:
data_rank = len(data_shape)
if data_rank == 1:
return const_utils.make_tensor([], data.dtype, ())
return const_utils.make_tensor([], data.dtype, data_shape[1:])
tensor_index = const_utils.make_tensor(sub_tuple_index, mstype.int64)
return F.gather(data, tensor_index, 0)
tuple_index_new = ()
for index in list_index:
tuple_index_new += (index,)
return tensor_index_by_tuple(data, tuple_index_new)
| 5,353,883 |
def sample_ingridient(user, name='Salt'):
"""Create and return a sample ingridient"""
return Ingridient.objects.create(user=user, name=name)
| 5,353,884 |
def remove_duplicates_from_list(params_list):
"""
Common function to remove duplicates from a list
Author: [email protected]
    :param params_list: list that may contain duplicate entries
    :return: list with duplicates removed, preserving first-occurrence order
"""
if params_list:
return list(dict.fromkeys(params_list))
return list()
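# Quick usage check: elements must be hashable and first-occurrence order is
# preserved (dict.fromkeys keeps insertion order on Python 3.7+).
print(remove_duplicates_from_list([3, 1, 3, 2, 1]))    # [3, 1, 2]
print(remove_duplicates_from_list(["a", "b", "a"]))    # ['a', 'b']
print(remove_duplicates_from_list([]))                 # []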
| 5,353,885 |
def _exec_task(fn, task, d, quieterr):
"""Execute a BB 'task'
Execution of a task involves a bit more setup than executing a function,
running it with its own local metadata, and with some useful variables set.
"""
if not d.getVarFlag(task, 'task', False):
event.fire(TaskInvalid(task, d), d)
logger.error("No such task: %s" % task)
return 1
logger.debug(1, "Executing task %s", task)
localdata = _task_data(fn, task, d)
tempdir = localdata.getVar('T', True)
if not tempdir:
bb.fatal("T variable not set, unable to build")
# Change nice level if we're asked to
nice = localdata.getVar("BB_TASK_NICE_LEVEL", True)
if nice:
curnice = os.nice(0)
nice = int(nice) - curnice
newnice = os.nice(nice)
logger.debug(1, "Renice to %s " % newnice)
ionice = localdata.getVar("BB_TASK_IONICE_LEVEL", True)
if ionice:
try:
cls, prio = ionice.split(".", 1)
bb.utils.ioprio_set(os.getpid(), int(cls), int(prio))
except:
bb.warn("Invalid ionice level %s" % ionice)
bb.utils.mkdirhier(tempdir)
# Determine the logfile to generate
logfmt = localdata.getVar('BB_LOGFMT', True) or 'log.{task}.{pid}'
logbase = logfmt.format(task=task, pid=os.getpid())
# Document the order of the tasks...
logorder = os.path.join(tempdir, 'log.task_order')
try:
with open(logorder, 'a') as logorderfile:
logorderfile.write('{0} ({1}): {2}\n'.format(task, os.getpid(), logbase))
except OSError:
logger.exception("Opening log file '%s'", logorder)
pass
# Setup the courtesy link to the logfn
loglink = os.path.join(tempdir, 'log.{0}'.format(task))
logfn = os.path.join(tempdir, logbase)
if loglink:
bb.utils.remove(loglink)
try:
os.symlink(logbase, loglink)
except OSError:
pass
prefuncs = localdata.getVarFlag(task, 'prefuncs', expand=True)
postfuncs = localdata.getVarFlag(task, 'postfuncs', expand=True)
class ErrorCheckHandler(logging.Handler):
def __init__(self):
self.triggered = False
logging.Handler.__init__(self, logging.ERROR)
def emit(self, record):
if getattr(record, 'forcelog', False):
self.triggered = False
else:
self.triggered = True
# Handle logfiles
si = open('/dev/null', 'r')
try:
bb.utils.mkdirhier(os.path.dirname(logfn))
logfile = open(logfn, 'w')
except OSError:
logger.exception("Opening log file '%s'", logfn)
pass
    # Dup the existing fds so we don't lose them
osi = [os.dup(sys.stdin.fileno()), sys.stdin.fileno()]
oso = [os.dup(sys.stdout.fileno()), sys.stdout.fileno()]
ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()]
# Replace those fds with our own
os.dup2(si.fileno(), osi[1])
os.dup2(logfile.fileno(), oso[1])
os.dup2(logfile.fileno(), ose[1])
# Ensure Python logging goes to the logfile
handler = logging.StreamHandler(logfile)
handler.setFormatter(logformatter)
# Always enable full debug output into task logfiles
handler.setLevel(logging.DEBUG - 2)
bblogger.addHandler(handler)
errchk = ErrorCheckHandler()
bblogger.addHandler(errchk)
localdata.setVar('BB_LOGFILE', logfn)
localdata.setVar('BB_RUNTASK', task)
flags = localdata.getVarFlags(task)
event.fire(TaskStarted(task, logfn, flags, localdata), localdata)
try:
for func in (prefuncs or '').split():
exec_func(func, localdata)
exec_func(task, localdata)
for func in (postfuncs or '').split():
exec_func(func, localdata)
except FuncFailed as exc:
if quieterr:
event.fire(TaskFailedSilent(task, logfn, localdata), localdata)
else:
errprinted = errchk.triggered
logger.error(str(exc))
event.fire(TaskFailed(task, logfn, localdata, errprinted), localdata)
return 1
finally:
sys.stdout.flush()
sys.stderr.flush()
bblogger.removeHandler(handler)
# Restore the backup fds
os.dup2(osi[0], osi[1])
os.dup2(oso[0], oso[1])
os.dup2(ose[0], ose[1])
# Close the backup fds
os.close(osi[0])
os.close(oso[0])
os.close(ose[0])
si.close()
logfile.close()
if os.path.exists(logfn) and os.path.getsize(logfn) == 0:
logger.debug(2, "Zero size logfn %s, removing", logfn)
bb.utils.remove(logfn)
bb.utils.remove(loglink)
event.fire(TaskSucceeded(task, logfn, localdata), localdata)
if not localdata.getVarFlag(task, 'nostamp', False) and not localdata.getVarFlag(task, 'selfstamp', False):
make_stamp(task, localdata)
return 0
| 5,353,886 |
def mediaRecognitionApi():
"""
Retrieve the resource id, name, author
and time index of a sampled media.
"""
#TODO: Improve recognition
if 'file' not in request.files:
abort(400, "No file.")
file = request.files['file']
if file.filename == '':
abort(400, "No selected file")
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
filepath = pth.join(app.config['UPLOAD_FOLDER'], filename)
file.save(filepath)
asynctask = recognizeMedia.delay(filepath)
return jsonify({"uuid": asynctask.task_id}), 202
abort(400, "Bad request")
| 5,353,887 |
def _scale_and_shift(
x: chex.Array,
params: Sequence[chex.Array],
has_scale: bool,
has_shift: bool,
) -> chex.Array:
"""Example of a scale and shift function."""
if has_scale and has_shift:
scale, shift = params
return x * scale + shift
elif has_scale:
assert len(params) == 1
return x * params[0]
elif has_shift:
assert len(params) == 1
return x + params[0]
else:
raise ValueError("You must have either `has_scale` or `has_shift` set "
"to True.")
| 5,353,888 |
def get_distance_metres(aLocation1, aLocation2):
"""
Returns the ground distance in metres between two LocationGlobal objects
:param aLocation1: starting location
:param aLocation2: ending location
:return:
"""
dlat = aLocation2.lat - aLocation1.lat
dlong = aLocation2.lon - aLocation1.lon
dlong_c = dlong*math.cos(math.radians(aLocation1.lat))
return math.sqrt((dlat * dlat) + (dlong_c * dlong_c)) * 1.113195e5
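# Hedged example: a tiny namedtuple with .lat/.lon attributes stands in for
# dronekit's LocationGlobal; the coordinates are arbitrary.
from collections import namedtuple

Location = namedtuple("Location", ["lat", "lon"])
home = Location(lat=52.5200, lon=13.4050)
target = Location(lat=52.5205, lon=13.4060)
print(get_distance_metres(home, target))   # roughly 88 metres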
| 5,353,889 |
def merge_components(a,c,corr_img_all_r,U,V,normalize_factor,num_list,patch_size,merge_corr_thr=0.6,merge_overlap_thr=0.6,plot_en=False):
""" want to merge components whose correlation images are highly overlapped,
and update a and c after merge with region constrain
Parameters:
-----------
a: np.ndarray
matrix of spatial components (d x K)
c: np.ndarray
matrix of temporal components (T x K)
corr_img_all_r: np.ndarray
corr image
U, V: low rank decomposition of Y
normalize_factor: std of Y
num_list: indices of components
patch_size: dimensions for data
merge_corr_thr: scalar between 0 and 1
temporal correlation threshold for truncating corr image (corr(Y,c)) (default 0.6)
merge_overlap_thr: scalar between 0 and 1
overlap ratio threshold for two corr images (default 0.6)
Returns:
--------
a_pri: np.ndarray
matrix of merged spatial components (d x K')
c_pri: np.ndarray
matrix of merged temporal components (T x K')
corr_pri: np.ndarray
matrix of correlation images for the merged components (d x K')
flag: merge or not
"""
f = np.ones([c.shape[0],1]);
############ calculate overlap area ###########
a = csc_matrix(a);
a_corr = scipy.sparse.triu(a.T.dot(a),k=1);
cor = csc_matrix((corr_img_all_r>merge_corr_thr)*1);
temp = cor.sum(axis=0);
cor_corr = scipy.sparse.triu(cor.T.dot(cor),k=1);
cri = np.asarray((cor_corr/(temp.T)) > merge_overlap_thr)*np.asarray((cor_corr/temp) > merge_overlap_thr)*((a_corr>0).toarray());
a = a.toarray();
connect_comps = np.where(cri > 0);
if len(connect_comps[0]) > 0:
flag = 1;
a_pri = a.copy();
c_pri = c.copy();
G = nx.Graph();
G.add_edges_from(list(zip(connect_comps[0], connect_comps[1])))
comps=list(nx.connected_components(G))
merge_idx = np.unique(np.concatenate([connect_comps[0], connect_comps[1]],axis=0));
a_pri = np.delete(a_pri, merge_idx, axis=1);
c_pri = np.delete(c_pri, merge_idx, axis=1);
corr_pri = np.delete(corr_img_all_r, merge_idx, axis=1);
num_pri = np.delete(num_list,merge_idx);
for comp in comps:
comp=list(comp);
print("merge" + str(num_list[comp]+1));
a_zero = np.zeros([a.shape[0],1]);
a_temp = a[:,comp];
if plot_en:
spatial_comp_plot(a_temp, corr_img_all_r[:,comp].reshape(patch_size[0],patch_size[1],-1,order="F"),num_list[comp],ini=False);
mask_temp = np.where(a_temp.sum(axis=1,keepdims=True) > 0)[0];
a_temp = a_temp[mask_temp,:];
y_temp = np.matmul(a_temp, c[:,comp].T);
a_temp = a_temp.mean(axis=1,keepdims=True);
c_temp = c[:,comp].mean(axis=1,keepdims=True);
model = NMF(n_components=1, init='custom')
a_temp = model.fit_transform(y_temp, W=a_temp, H = (c_temp.T));
a_zero[mask_temp] = a_temp;
c_temp = model.components_.T;
corr_temp = vcorrcoef(U/normalize_factor, V.T, c_temp);
a_pri = np.hstack((a_pri,a_zero));
c_pri = np.hstack((c_pri,c_temp));
corr_pri = np.hstack((corr_pri,corr_temp));
num_pri = np.hstack((num_pri,num_list[comp[0]]));
return flag, a_pri, c_pri, corr_pri, num_pri
else:
flag = 0;
return flag
| 5,353,890 |
def RenderSubpassStartInputAttachmentsVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartInputAttachmentsVector(builder, numElems)
| 5,353,891 |
def GetCLInfo(cl_info_str):
"""Gets CL's repo_name and revision."""
return cl_info_str.split('/')
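# Usage note: the input is expected to be "<repo_name>/<revision>", so the result
# can be unpacked directly (the revision shown here is a placeholder).
repo_name, revision = GetCLInfo('chromium/abc123def456')
print(repo_name, revision)   # chromium abc123def456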
| 5,353,892 |
def acyclic_run(pipeline):
"""
    @summary: Reverse the edges that create cycles so the pipeline graph becomes acyclic
    @return: a dict mapping flow id to a copy of the flow before it was reversed
"""
deformed_flows = {'{}.{}'.format(flow[PWE.source], flow[PWE.target]): flow_id
for flow_id, flow in pipeline[PWE.flows].items()}
reversed_flows = {}
while True:
no_circle = validate_graph_without_circle(pipeline)
if no_circle['result']:
break
source = no_circle['error_data'][-2]
target = no_circle['error_data'][-1]
circle_flow_key = '{}.{}'.format(source, target)
flow_id = deformed_flows[circle_flow_key]
reversed_flows[flow_id] = deepcopy(pipeline[PWE.flows][flow_id])
pipeline[PWE.flows][flow_id].update({
PWE.source: target,
PWE.target: source
})
source_node = pipeline['all_nodes'][source]
delete_flow_id_from_node_io(source_node, flow_id, PWE.outgoing)
add_flow_id_to_node_io(source_node, flow_id, PWE.incoming)
target_node = pipeline['all_nodes'][target]
delete_flow_id_from_node_io(target_node, flow_id, PWE.incoming)
add_flow_id_to_node_io(target_node, flow_id, PWE.outgoing)
return reversed_flows
| 5,353,893 |
def TextAreaFieldWidget(field, request): # pylint: disable=invalid-name
"""IFieldWidget factory for TextWidget."""
return FieldWidget(field, TextAreaWidget(request))
| 5,353,894 |
def _cgroup_limit(cpu, memory_size, pid):
"""Modify 'cgroup' files to set resource limits.
Each pod(worker) will have cgroup folders on the host cgroup filesystem,
like '/sys/fs/cgroup/<resource_type>/kubepods/<qos_class>/pod<pod_id>/',
to limit memory and cpu resources that can be used in pod.
For more information about cgroup, please see [1], about sharing PID
namespaces in kubernetes, please see also [2].
Return None if successful otherwise a Flask.Response object.
[1]https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/sec-creating_cgroups
[2]https://github.com/kubernetes/kubernetes/pull/51634
"""
hostname = os.getenv('HOSTNAME')
pod_id = os.getenv('POD_UID')
qos_class = None
if os.getenv('QOS_CLASS') == 'BestEffort':
qos_class = 'besteffort'
elif os.getenv('QOS_CLASS') == 'Burstable':
qos_class = 'burstable'
elif os.getenv('QOS_CLASS') == 'Guaranteed':
qos_class = ''
if not pod_id or qos_class is None:
return make_response("Failed to get current worker information", 500)
memory_base_path = os.path.join('/qinling_cgroup', 'memory', 'kubepods',
qos_class, 'pod%s' % pod_id)
cpu_base_path = os.path.join('/qinling_cgroup', 'cpu', 'kubepods',
qos_class, 'pod%s' % pod_id)
memory_path = os.path.join(memory_base_path, hostname)
cpu_path = os.path.join(cpu_base_path, hostname)
if os.path.isdir(memory_base_path):
if not os.path.isdir(memory_path):
os.makedirs(memory_path)
if os.path.isdir(cpu_base_path):
if not os.path.isdir(cpu_path):
os.makedirs(cpu_path)
try:
# set cpu and memory resource limits
with open('%s/memory.limit_in_bytes' % memory_path, 'w') as f:
f.write('%d' % int(memory_size))
with open('%s/cpu.cfs_period_us' % cpu_path, 'w') as f:
f.write('%d' % PERIOD)
with open('%s/cpu.cfs_quota_us' % cpu_path, 'w') as f:
f.write('%d' % ((int(cpu)*PERIOD/1000)))
# add pid to 'tasks' files
with open('%s/tasks' % memory_path, 'w') as f:
f.write('%d' % pid)
with open('%s/tasks' % cpu_path, 'w') as f:
f.write('%d' % pid)
except Exception as e:
return make_response("Failed to modify cgroup files: %s"
% str(e), 500)
| 5,353,895 |
def s_wexler(T_K):
"""
Calculates slope of saturation vapor pressure curve over water at each temperature
based on Wexler 1976, with coefficients from Hardy 1998 (ITS-90).
Args:
T_K (np.ndarray (dimension<=2), float, list of floats) : Air or Dewpoint Temperatures [K]
Returns:
s : np.ndarray of slopes [Pa / deg C]
"""
powers = np.arange(-3, 4).reshape((1, 1, 7))
pow_coeffs = powers.copy() + 1
T_K = np.atleast_3d(T_K).astype(dtype=np.float64)
temps = np.repeat(T_K, 8, axis=-1)
temps[..., :-1] = pow_coeffs * c.gs[..., :-1] * np.power(temps[..., :-1], powers)
temps[..., -1] = -1. * c.gs[..., -1] * temps[..., -1] ** -1
s = np.squeeze(temps.sum(axis=-1)) * es_wexler(T_K)
return s
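# Hedged call sketch; assumes the module-level coefficient table `c.gs` and the
# es_wexler() function referenced above are available when this module is imported.
import numpy as np

T_K = np.array([273.15, 293.15, 303.15])   # 0, 20 and 30 deg C
slopes = s_wexler(T_K)                      # [Pa / deg C], same shape as T_K
print(slopes)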
| 5,353,896 |
def get_additional_bases():
"""
Looks for additional view bases in settings.REST_EASY_VIEW_BASES.
    :return: a list of resolved view base classes
"""
resolved_bases = []
from importlib import import_module
for base in getattr(settings, 'REST_EASY_VIEW_BASES', []):
mod, cls = base.rsplit('.', 1)
resolved_bases.append(getattr(import_module(mod), cls))
return resolved_bases
| 5,353,897 |
def load_mask_from_shp(shp_file: Path, metad: dict) -> np.ndarray:
"""
Load a mask containing geometries from a shapefile,
using a reference dataset
Parameters
----------
shp_file : str
shapefile containing a polygon
metad : dict
rasterio-style metadata dictionary
Returns
-------
mask_im : numpy.ndarray
mask image
Notes
-----
1) Pixels outside of the polygon are assigned
as nodata in the mask
2) Exception is raised if no Polygon geometry exists
in the shapefile
"""
sf = gpd.read_file(shp_file).to_crs(metad["crs"])
# extract non-empty polygons from the shapefile
geoms = [
g for g in sf.geometry if g.type.lower() == "polygon" and g.is_empty is False
]
nshapes = len(geoms)
if nshapes == 0:
raise Exception("input shapefile does not have any 'Polygon' geometry")
if nshapes > 1:
warnings.warn(
f"{nshapes} Polygons found in shapefile. It is recommended only to have one",
UserWarning,
stacklevel=1,
)
mask_im = rasterio.features.geometry_mask(
geoms,
out_shape=(metad["height"], metad["width"]),
transform=metad["transform"],
all_touched=False,
invert=True,
)
return mask_im
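# Usage sketch, assuming a raster whose grid defines the mask extent and a shapefile
# containing a single polygon; both file paths are placeholders.
from pathlib import Path
import rasterio

with rasterio.open("scene.tif") as src:
    metad = src.meta   # includes "crs", "height", "width" and "transform"

mask = load_mask_from_shp(Path("area_of_interest.shp"), metad)
print(mask.shape, mask.dtype)   # (height, width) bool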
| 5,353,898 |
def est_corner_plot(estimation, settings=None, show=True, save=None):
"""Wrapper to corner plot of `corner <https://corner.readthedocs.io/en/latest/>`_ module;
visualisation of the parameter posterior distribution by all 2-dimensional and
1-dimensional marginals.
Parameters
----------
estimation : memocell.estimation.Estimation
A memocell estimation object.
settings : dict of dict, optional
Optional labels for parameters.
show : bool, optional
Plot is shown if `show=True`.
save : None or str, optional
Provide a path to save the plot.
Returns
-------
fig : matplotlib.figure.Figure
axes : list or array of matplotlib.axes
"""
# if not given, create some default settings
if settings==None:
settings = dict()
for theta_id in estimation.net.net_theta_symbolic:
param = estimation.net.net_rates_identifier[theta_id]
settings[param] = {'label': param}
# get plotting information from estimation instance
samples, labels = estimation._samples_corner_parameters(settings)
# use corner package for this plot
fig = corner.corner(samples, labels=labels)
# save/show figure
if save!=None:
plt.savefig(save, bbox_inches='tight')
if show:
plt.show(fig, block=False)
return fig, fig.axes
| 5,353,899 |