content | id
---|---
def matches_filters(row, field_to_index, transformed_filters):
"""
Validate field name in transformed filter_expressions, return TRUE for rows matching all filters
Parameters
------------
row : str
row in `list` registry table (manager.show())
field_to_index : dict
key = column names, val = column index, in registry table (or manager.show())
transformed_filters : list
transformed/formatted fields for filtering rows
Returns
--------
bool
return TRUE for rows matching all filters
"""
field_to_index_lower = dict(
(k.lower(), v) for k, v in field_to_index.items()
) # to accept case-insensitive comparison
for tfilter in transformed_filters:
[field, op, value] = tfilter
if field not in field_to_index_lower:
raise DSGInvalidParameter(
f"field='{field}' is not a valid column name, valid fields: {list(field_to_index.keys())}"
)
obj_val = row[field_to_index_lower[field]].lower() # to accept case-insensitive comparison
if not matches_filter(val=obj_val, op=op, required_value=value):
return False
return True
| 5,349,700 |
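A minimal, self-contained sketch of how matches_filters above is driven. The matches_filter comparator and the registry row below are hypothetical stand-ins for the project's own objects; only the case-insensitive lookup logic is taken from the function.

def matches_filter(val, op, required_value):
    # Hypothetical comparator standing in for the project's own helper.
    ops = {"==": lambda a, b: a == b, "contains": lambda a, b: b in a}
    return ops[op](val, required_value)

row = ["my_dataset", "1.0.0", "Registered"]            # one row of manager.show()
field_to_index = {"Name": 0, "Version": 1, "Status": 2}
transformed_filters = [["name", "contains", "my"], ["status", "==", "registered"]]

field_to_index_lower = {k.lower(): v for k, v in field_to_index.items()}
print(all(
    matches_filter(val=row[field_to_index_lower[f]].lower(), op=op, required_value=v)
    for f, op, v in transformed_filters
))  # True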
def div(f, other):
"""Element-wise division applied to the `Functional` objects.
# Arguments
f: Functional object.
other: A python number or a tensor or a functional object.
# Returns
A Functional.
"""
validate_functional(f)
inputs = f.inputs.copy()
if is_functional(other):
inputs += to_list(other.inputs)
lmbd = [Lambda(lambda x: x[0]/x[1], name=graph_unique_name("div")) for X in f.outputs]
else:
_warn_for_ndarray(other)
lmbd = [Lambda(lambda x: x/other, name=graph_unique_name("div")) for X in f.outputs]
Functional = f.get_class()
res = Functional(
inputs = unique_tensors(inputs),
outputs = _apply_operation(lmbd, f, other),
layers = lmbd
)
return res
| 5,349,701 |
def select_latest_versions(files):
"""Select only the latest version of files."""
result = []
def same_file(file):
"""Return a versionless identifier for a file."""
# Dataset without the version number
dataset = file.dataset.rsplit('.', 1)[0]
return (dataset, file.name)
files = sorted(files, key=same_file)
for _, versions in itertools.groupby(files, key=same_file):
versions = sorted(versions, reverse=True)
latest_version = versions[0]
result.append(latest_version)
if len(versions) > 1:
logger.debug("Only using the latest version %s, not %s",
latest_version, versions[1:])
return result
| 5,349,702 |
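A small usage sketch for select_latest_versions above, assuming the function and a module-level logger are in scope. The File namedtuple is a hypothetical stand-in for the real file objects: the dataset name ends in a sortable version suffix, so tuple ordering makes the newest version the greatest element.

import itertools
import logging
from collections import namedtuple

logger = logging.getLogger(__name__)  # the function above logs dropped versions here

File = namedtuple("File", ["dataset", "name"])
files = [
    File("obs.tas.v20190101", "tas.nc"),
    File("obs.tas.v20200101", "tas.nc"),
    File("obs.pr.v20180101", "pr.nc"),
]
print(sorted(f.dataset for f in select_latest_versions(files)))
# ['obs.pr.v20180101', 'obs.tas.v20200101']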
def _gen_input(storyline, nsims, mode, site, chunks, current_c, nperc, simlen, swg_dir, fix_leap):
"""
:param storyline: loaded storyline
:param SWG_path: path to the directory with contining the files from the SWG
:param nsims: number of sims to run
:param mode: one of ['irrigated', 'dryland']
:param site: one of ['eyrewell', 'oxford']
:param chunks: the number of chunks
:param current_c: the current chunk (from range(chunks)
:param nperc: number of simulations that can be run per chunk
:return:
"""
# manage chunks
if chunks == 1:
num_to_pull = nsims
elif chunks > 1:
num_to_pull = nperc
if current_c + 1 == chunks:
# manage last chunk
num_to_pull = nsims - (current_c * nperc)
else:
        raise ValueError('chunks must be >= 1, got {}'.format(chunks))
params, doy_irr = get_params_doy_irr(mode, site)
matrix_weathers = []
days_harvests = []
# get restriction data
if mode == 'dryland':
rest_data = np.repeat([None], num_to_pull)
elif mode == 'irrigated':
rest_data = get_irr_data(num_to_pull, storyline, simlen)
else:
raise ValueError('weird arg for mode: {}'.format(mode))
# get weather data
weather_data = _get_weather_data(storyline=storyline, nsims=num_to_pull, simlen=simlen, swg_dir=swg_dir, site=site,
fix_leap=fix_leap)
# make all the other data
for rest, weather in zip(rest_data, weather_data):
if rest is None:
rest_temp = None
else:
rest_temp = pd.DataFrame(data=rest, index=weather.index, columns=['frest'])
matrix_weather = create_matrix_weather(mode, weather_data=weather, restriction_data=rest_temp,
rest_key='frest', fix_leap=fix_leap)
matrix_weathers.append(matrix_weather)
days_harvests.append(create_days_harvest(mode, matrix_weather, site, fix_leap=fix_leap))
return params, doy_irr, matrix_weathers, days_harvests
| 5,349,703 |
def guess_mime_mimedb(filename):
"""Guess MIME type from given filename.
@return: tuple (mime, encoding)
"""
mime, encoding = None, None
if mimedb is not None:
mime, encoding = mimedb.guess_type(filename, strict=False)
if mime not in ArchiveMimetypes and encoding in ArchiveCompressions:
# Files like 't.txt.gz' are recognized with encoding as format, and
# an unsupported mime-type like 'text/plain'. Fix this.
mime = Encoding2Mime[encoding]
encoding = None
return mime, encoding
| 5,349,704 |
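The behaviour guess_mime_mimedb above corrects can be seen with the standard mimetypes module alone (the module-level mimedb is presumably built on it); Encoding2Mime would then remap the 'gzip' encoding to the archive MIME type, e.g. application/gzip.

import mimetypes

# A file like 't.txt.gz' comes back with an unsupported mime type and the
# archive format hidden in the encoding; the helper above swaps that around.
print(mimetypes.guess_type("t.txt.gz", strict=False))
# ('text/plain', 'gzip')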
def get_paths(uast_file, max_length, max_width, token_extractor, split_leaves=True):
"""
Creates a list of all the paths given the max_length and max_width restrictions.
:param uast_file: file containing a bblfsh UAST as string and binary-coded
    :param max_length: maximum allowed path length (distance between two leaves through their lowest common ancestor)
    :param max_width: maximum allowed width (difference between the indices of the two leaves)
:param token_extractor: function to transform a node into a single string token
:param split_leaves: get leaves token as a different node
:return: list(list(str)) list of paths (which are list of strings)
"""
print("Processing file: {}".format(uast_file))
    with open(uast_file, 'rb') as handle:
        uast = Node.FromString(handle.read())
tree, leaves = extend_tree(uast)
paths = []
if len(leaves) > 1:
for i in range(len(leaves)):
for j in range(i + 1, min(i + max_width, len(leaves))):
u, v = leaves[i], leaves[j]
# TODO decide where to filter comments and decouple bblfsh
if not is_noop_line(u) and not is_noop_line(v):
ancestor = lca(u, v)
d = distance(u, v, ancestor)
if d <= max_length:
node_path = get_path(u, v, ancestor, split_leaves=split_leaves)
# convert nodes to its desired representation
paths.append([token_extractor(p) for p in node_path])
return paths
| 5,349,705 |
def get_members():
"""
Get a list of all members in FreeIPA
"""
members = []
ldap_conn = ldap.get_con()
res = ldap_conn.search_s(
"cn=users,cn=accounts,dc=csh,dc=rit,dc=edu",
pyldap.SCOPE_SUBTREE,
"(uid=*)",
["uid", "displayName"],
)
for member in res:
members.append(
{
"value": member[1]["uid"][0].decode("utf-8"),
"display": member[1]
.get("displayName", member[1]["uid"])[0]
.decode("utf-8"),
}
)
return members
| 5,349,706 |
def _unicode_decode_extracted_tb(extracted_tb):
"""Return a traceback with the string elements translated into Unicode."""
return [(_decode(file), line_number, _decode(function), _decode(text))
for file, line_number, function, text in extracted_tb]
| 5,349,707 |
def load_table(source, version):
"""Load synth table from file
"""
filepath = get_table_filepath(source, version=version)
return pd.read_table(filepath, delim_whitespace=True)
| 5,349,708 |
def single_spaces(string: str) -> str:
"""Replaces all instances of whitespace-like chars with single spaces
Args:
string (str): The string to modify
Returns:
str: The cleaned string
"""
return UGLY_SPACES_RE.sub(" ", string)
| 5,349,709 |
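A quick sketch, assuming single_spaces above is in scope and that UGLY_SPACES_RE is a module-level pattern along these lines (the exact character class may differ in the real module):

import re

UGLY_SPACES_RE = re.compile(r"\s+")  # hypothetical definition of the module-level pattern

print(single_spaces("tabs\tand\n\nnewlines  collapsed"))
# 'tabs and newlines collapsed'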
def make_zip(folder_path, output_filename):
"""将目录中除zip之外的文件打包成zip文件(包括子文件夹)
空文件夹不会被打包
example
----------------
make_zip('results','zips//招标信息结果_2017-05-09.zip')
"""
cwd = os.getcwd()
    # collect the list of files to pack
file_lists = []
for root, dirs, files in os.walk(folder_path):
for file in files:
file_1 = os.path.join(root, file).replace(folder_path + '/', '')
if 'zip' not in file_1:
file_lists.append(file_1)
    # pack the collected files into the zip archive
os.chdir(folder_path)
with zipfile.ZipFile(output_filename, 'w') as myzip:
for file in file_lists:
myzip.write(file)
    # switch the working directory back to the original one
os.chdir(cwd)
| 5,349,710 |
def pandas_to_tsv(df, save_file_path, index=False, mode='w', header=True):
"""
Save pre-processed DataFrame as tsv file.
Params
------
df : pandas DataFrame
DataFrame to save as tsv
save_file_path : str
File save path. if path does not exist, it will be
created automatically.
index : bool
write the row names to output.
mode : str
file save mode; 'w' for write and 'a' for append to existing file.
header : bool
write the column names to output.
"""
assert isinstance(save_file_path, str) and\
len(save_file_path) > 0
assert isinstance(df, pd.DataFrame) and not df.empty,\
'df must be a non-empty pandas DataFrame'
assert isinstance(mode, str)
# make a new directory to save the file
try:
import os
os.makedirs(os.path.split(save_file_path)[0])
except OSError as e:
import errno
if e.errno != errno.EEXIST:
raise
# save the file
df.to_csv(
save_file_path, sep='\t', na_rep=r'\N',
index=index, mode=mode, header=header)
| 5,349,711 |
def snmp_count(
address,
oid,
port=161,
community="public",
version=SNMP_v2c,
timeout=10,
bulk=False,
filter=None,
max_repetitions=BULK_MAX_REPETITIONS,
tos=None,
ioloop=None,
udp_socket=None,
):
"""
Perform SNMP get request and returns Future to be used
inside @tornado.gen.coroutine
"""
    def true(x, y):
        return True
logger.debug("[%s] SNMP COUNT %s", address, oid)
if not filter:
filter = true
poid = oid + "."
result = 0
if udp_socket:
sock = udp_socket
prev_timeout = sock.get_timeout()
else:
sock = UDPSocket(ioloop=ioloop, tos=tos)
sock.settimeout(timeout)
while True:
# Get PDU
if bulk:
pdu = getbulk_pdu(community, oid, max_repetitions=max_repetitions, version=version)
else:
pdu = getnext_pdu(community, oid, version=version)
# Send request and wait for response
try:
yield sock.sendto(pdu, (address, port))
data, addr = yield sock.recvfrom(4096)
except socket.timeout:
raise SNMPError(code=TIMED_OUT, oid=oid)
except socket.gaierror as e:
logger.debug("[%s] Cannot resolve address: %s", address, e)
raise SNMPError(code=UNREACHABLE, oid=oid)
except socket.error as e:
logger.debug("[%s] Socket error: %s", address, e)
raise SNMPError(code=UNREACHABLE, oid=oid)
finally:
if udp_socket:
sock.settimeout(prev_timeout)
else:
sock.close()
# Parse response
try:
resp = parse_get_response(data)
except ValueError:
raise SNMPError(code=BER_ERROR, oid=oid)
if resp.error_status == NO_SUCH_NAME:
# NULL result
break
elif resp.error_status != NO_ERROR:
# Error
raise SNMPError(code=resp.error_status, oid=oid)
else:
# Success value
for oid, v in resp.varbinds:
if oid.startswith(poid):
# Next value
if filter(oid, v):
result += 1
else:
logger.debug("[%s] COUNT result: %s", address, result)
sock.close()
raise Return(result)
| 5,349,712 |
def create_group(api_key: str, board_id: str, group_name: str, *args, **kwargs):
"""Creates a new group in a specific board.
__________
Parameters
api_key : `str`
The monday.com v2 API user key.
board_id : `str`
The board's unique identifier.
group_name : `str`
The name of the new group.
args : `tuple`
The list of group return fields.
kwargs : `dict`
Optional arguments for querying assets.
_______
Returns
data : `dict`
A monday.com group in item form.
_____________
Return Fields
archived : `bool`
Is the group archived or not.
color : `str`
The group's color.
deleted : `bool`
Is the group deleted or not.
id : `str`
The group's unique identifier.
items : `list[moncli.entities.Item]`
The items in the group.
position : `str`
The group's position in the board.
title : `str`
The group's title.
"""
args = get_field_list(constants.DEFAULT_GROUP_QUERY_FIELDS, *args)
kwargs = get_method_arguments(constants.CREATE_GROUP_OPTIONAL_PARAMS, **kwargs)
kwargs['board_id'] = util.IntValue(board_id)
kwargs['group_name'] = util.StringValue(group_name)
return execute_mutation(api_key, constants.CREATE_GROUP, *args, **kwargs)
| 5,349,713 |
def test_delete_course(user_drf_client, courses):
"""Test the view that handles a request to delete a Course"""
course = courses[0]
resp = user_drf_client.delete(
reverse("courses_api-detail", kwargs={"pk": course.id})
)
assert resp.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
| 5,349,714 |
def _solve_qp_ik_vel(vel, jac, joint_pos, joint_lims=None, duration=None, margin=0.2):
"""
Solves the IK for a given pusher velocity using a QP solver, imposing joint limits.
If the solution is optimal, it is guaranteed that the resulting joint velocities will not
cause the joints to reach their limits (minus the margin) in the specified duration of time
:param vel: desired EE velocity (6 values)
:param jac: jacobian
:param joint_pos: current joint positions
:param joint_lims: matrix of joint limits; if None, limits are not imposed
:param duration: how long the specified velocity will be kept (in seconds); if None, 2.0 is used
:param margin: maximum absolute distance to be kept from the joint limits
    :return: tuple with the solution (as a numpy array) and with a boolean indicating if the result is optimal or not
:type vel: np.ndarray
:type jac: np.ndarray
:type joint_pos: np.ndarray
:type joint_lims: np.ndarray
:type duration: float
:type margin: float
:rtype: (np.ndarray, bool)
"""
import cvxopt
x_len = len(joint_pos)
P = cvxopt.matrix(np.identity(x_len))
A = cvxopt.matrix(jac)
b = cvxopt.matrix(vel)
q = cvxopt.matrix(np.zeros(x_len))
if duration is None:
duration = 2.
if joint_lims is None:
G, h = None, None
else:
G = duration * np.identity(x_len)
h = np.zeros(x_len)
for i in range(x_len):
dist_up = abs(joint_lims[i, 1] - joint_pos[i])
dist_lo = abs(joint_lims[i, 0] - joint_pos[i])
if dist_up > dist_lo:
# we are closer to the lower limit
# => must bound negative angular velocity, i.e. G_ii < 0
h[i] = dist_lo
G[i, i] *= -1
else:
# we are closer to the upper limit
# => must bound positive angular velocity, i.e. G_ii > 0
h[i] = dist_up
h = cvxopt.matrix(h - margin)
G = cvxopt.matrix(G)
# sol = cvxopt.solvers.qp(P, q, A=A, b=b, G=G, h=h, options={'show_progress': False, 'kktreg': 1e-9}, kktsolver='ldl')
sol = cvxopt.solvers.qp(P, q, A=A, b=b, G=G, h=h, options={'show_progress': False, 'refinement': 5})
x = np.array(sol['x']).reshape(-1)
optimal = sol['status'] == 'optimal'
return x, optimal
| 5,349,715 |
def distance_straight(power, inches, freeze=True):
"""
:param power: range -100 to 100
:param inches: inches
:param freeze: True stops motors at end
"""
clear_motor_position_counter(c.LEFT_MOTOR)
clear_motor_position_counter(c.RIGHT_MOTOR)
blind(power, power)
distance = inches * 180.0
p = 0.25
i = 0.04
l_speed = power
r_speed = power
total_left = 0
total_right = 0
while (total_left + total_right) / 2 < distance:
clear_motor_position_counter(c.LEFT_MOTOR)
clear_motor_position_counter(c.RIGHT_MOTOR)
msleep(50)
l_position = abs(get_motor_position_counter(c.LEFT_MOTOR)) # abs to account for negative power
r_position = abs(get_motor_position_counter(c.RIGHT_MOTOR))
total_left += l_position
total_right += r_position
p_error = (r_position * c.F - l_position)
i_error = total_right * c.F - total_left
if power > 0:
l_speed += int(p * p_error + i * i_error)
r_speed -= int(p * p_error + i * i_error)
else:
l_speed -= int(p * p_error + i * i_error)
r_speed += int(p * p_error + i * i_error)
blind(l_speed, r_speed)
    if freeze:
        u.freeze_bot()
| 5,349,716 |
def channel_will_be_next(crontab: str):
"""Checks if the given notification channel will be activated on the
next channel, in an hour."""
return pycron.is_now(crontab, now + timedelta(hours=1))
| 5,349,717 |
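A usage sketch for channel_will_be_next above, assuming the module-level now it relies on and that pycron's is_now accepts a datetime as its second argument (which is how the helper calls it):

from datetime import datetime, timedelta

import pycron

now = datetime(2024, 1, 1, 8, 30)  # stand-in for the module-level 'now'

# 08:30 plus one hour falls inside the 9 o'clock hour, so a channel whose
# crontab fires during hour 9 will be active on the next run.
print(channel_will_be_next("* 9 * * *"))  # True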
def update_plot_force(force_est, rplt, app, ratio, box=None, muscle_names=None, plot_type="progress_bar"):
    """
    Update the force plot.
    Parameters
    ----------
    force_est: np.ndarray
        array of force estimates (n_muscles x n_samples)
    rplt, app, box:
        values from the init function
    ratio: int
        number of trailing samples averaged for the bar and progress-bar displays
    muscle_names: list, optional
        labels shown on the progress bars
    plot_type: str
        one of "curve", "progress_bar" or "bar"
    """
if plot_type == "curve":
# --- curve --- #
for force in range(force_est.shape[0]):
if box[force].isChecked() is True:
rplt[force].plot(force_est[force, :], clear=True, _callSync='off')
elif plot_type == "progress_bar":
# --- progress bar --- #
for force in range(force_est.shape[0]):
value = np.mean(force_est[force, -ratio:])
rplt[force].setValue(int(value))
names = muscle_names[force] if muscle_names else f"muscle_{force}"
rplt[force].setFormat(f"{names}: {int(value)} N")
elif plot_type == "bar":
# --- bar --- #
y = []
for force in range(force_est.shape[0]):
y.append(np.mean(force_est[force, -ratio:]))
rplt.setOpts(height=y)
app.processEvents()
| 5,349,718 |
def hospital_resident(residents, hospitals, optimal="resident"):
"""Solve an instance of HR using an adapted Gale-Shapley algorithm
:cite:`Rot84`. A unique, stable and optimal matching is found for the given
set of residents and hospitals. The optimality of the matching is found with
respect to one party and is subsequently the worst stable matching for the
other.
Parameters
----------
residents : list of Player
The residents in the game. Each resident must rank a non-empty subset
of the elements of ``hospitals``.
hospitals : list of Hospital
The hospitals in the game. Each hospital must rank all the residents
that have ranked them.
optimal : str, optional
Which party the matching should be optimised for. Must be one of
``"resident"`` and ``"hospital"``. Defaults to the former.
Returns
-------
matching : Matching
A dictionary-like object where the keys are the members of
``hospitals``, and the values are their matches ranked by preference.
"""
if optimal == "resident":
return resident_optimal(residents, hospitals)
if optimal == "hospital":
return hospital_optimal(hospitals)
| 5,349,719 |
def log_request(response):
"""Log request.
:param response:
:return:
"""
ip = request.headers.get('X-Forwarded-For', request.remote_addr)
host = request.host.split(':', 1)[0]
app.logger.info(f"method={request.method}, path={request.path}, "
f"status={response.status_code}, "
f"ip={ip}, host={host}, params={dict(request.args)},"
f"headers={request.headers}, "
f"body={request.data}")
return response
| 5,349,720 |
def reduce(path, n_procs, column, function):
""" Calculate an aggregate value from IMB output.
Args:
path: str, path to file
n_procs: int, number of processes
column: str, column name
function: callable to apply to specified `column` of table for `n_procs` in `path`
"""
tables = read_imb_out(path)
table = tables[n_procs] # separate lines here for more useful KeyError if missing:
col = table[column]
result = function(col)
return result
| 5,349,721 |
def output_file_path(status_id, phase):
"""
"""
BASE_DIR = Path(__file__).resolve().parent.parent
return f"%s/logs/stage/{status_id}-{phase}.txt" %str(BASE_DIR)
| 5,349,722 |
def clean(some_string, uppercase=False):
"""
helper to clean up an input string
"""
if uppercase:
return some_string.strip().upper()
else:
return some_string.strip().lower()
| 5,349,723 |
def to_title(value):
"""Converts a string into titlecase."""
t = re.sub("\s+", ".", value)
t = filter(LETTER_SET.__contains__, t)
t = re.sub("([a-z])'\W([A-Z])", lambda m: m.group(0).lower(), t.title())
return re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
| 5,349,724 |
def parse_config_or_kwargs(config_file, **kwargs):
"""parse_config_or_kwargs
:param config_file: Config file that has parameters, yaml format
:param **kwargs: Other alternative parameters or overwrites for config
"""
with open(config_file) as con_read:
yaml_config = yaml.load(con_read, Loader=yaml.FullLoader)
# values from config file are all possible params
arguments = dict(yaml_config, **kwargs)
# In case some arguments were not passed, replace with default ones
for key, value in DEFAULT_ARGS.items():
arguments.setdefault(key, value)
return arguments
| 5,349,725 |
def sig_generacion(m):
"""Devuelve la matriz resultante de aplicar las reglas del juego a cada celda"""
FILAS = len(m)
COLUMNAS = len(m[0]) if len(m) else 0
new_m = [] # matriz resultado
for i in range(FILAS):
l = [] # Una lista para ir generando una fila
for j in range(COLUMNAS):
vec = num_vecinos(m, j, i)
if vec < 2 or vec > 3:
l.append(0) # muere
elif vec == 3:
l.append(1) # nace
else:
l.append(m[i][j]) # sobrevive si estaba viva
new_m.append(l)
return new_m
| 5,349,726 |
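A worked example for sig_generacion above, under the assumption that num_vecinos(m, x, y) counts the live neighbours of the cell at column x, row y; a hypothetical definition is included so the sketch runs on its own. A horizontal blinker flips to a vertical one.

def num_vecinos(m, x, y):
    # Hypothetical neighbour counter assumed by sig_generacion: live cells in
    # the 8 surrounding positions, out-of-bounds neighbours ignored.
    total = 0
    for dy in (-1, 0, 1):
        for dx in (-1, 0, 1):
            if dx == 0 and dy == 0:
                continue
            ny, nx = y + dy, x + dx
            if 0 <= ny < len(m) and 0 <= nx < len(m[0]):
                total += m[ny][nx]
    return total

blinker = [[0, 0, 0],
           [1, 1, 1],
           [0, 0, 0]]
print(sig_generacion(blinker))
# [[0, 1, 0], [0, 1, 0], [0, 1, 0]]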
def calc_file_signature(data: str, password: str = None) -> str:
"""
Função que calcula o has da assinatura de um arquivo
@param data: string assinada
@param password: senha da assinatura
@return: hash da assinatura
"""
if (password):
digest = hmac.new(bytes(password), msg=bytes(data), digestmod=hashlib.sha256).digest()
res_hash = base64.b64encode(digest).decode()
else:
hash = hashlib.sha256()
hash.update(bytes(data))
res_hash = hash.hexdigest()
return res_hash
| 5,349,727 |
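Usage sketch for calc_file_signature above, assuming hmac, hashlib and base64 are imported at module level: with a password it returns a base64-encoded HMAC-SHA256, without one a hex SHA-256.

import base64
import hashlib
import hmac

print(calc_file_signature("payload", "secret"))  # base64-encoded HMAC-SHA256 digest
print(calc_file_signature("payload"))            # hex-encoded plain SHA-256 digest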
def processIRMovie(measurement, rotation, inputfolder, exportpath,
timeshift=0, doRotation=True, doRenaming=True):
"""Process TMC-files of IR camera into CSV files in movie mode.
Movie mode means, that one frame-sequence (movie) per datapoint was taken
instead of one frame per datapoint (timelapse). In the first step, the data
of a TMC file is exported via ThermoViewer into a CSV file. The second
step takes care of the optional rotation of the image and in the end,
the CSV files are renamed according to their date and time.
Parameters
----------
measurement : str
Relative path to measurement folder
rotation : int/str/float
Degrees about which the image should be rotated.
Valid values are 0, 90, 180, 270
inputfolder : str
Absolute path to the input folder
exportpath : str
Path of the export files
timeshift : int, optional
Timeshift in hours
doRotation : bool, optional
If True, the image is rotated about the given degree
doRenaming : bool, optional
If True, the CSV file is renamed
"""
date = measurement[:8]
foldername = os.path.basename(os.path.normpath(inputfolder))
prefix = "ir_export_" + date + "_" + foldername
# export all IR files from folder to csv files
# ROTATION: note that as long as the rotation feature is not
# implemented in ThermoViewer, it can't be used
ThermoViewer(mode="folder",
inputpath=inputfolder,
exportpath=exportpath,
rotation=0,
prefix=prefix,
frame_start=1,
frame_end=1,
exportformat="csv",
colorpalette="iron",
meta=False,
close=True)
# alternative image rotation
if doRotation:
for ofile in glob.glob(exportpath + prefix + "*.csv"):
if rotation == 180:
rotateCSVFile180(ofile)
print("Rotated file: ", ofile)
# include time of the file into filename
if doRenaming:
for ifile in glob.glob(inputfolder+"/*.TMC"):
timestamp = int(os.path.getmtime(ifile))
            # assumes `from datetime import datetime, timedelta` at module level
            shifted_timestamp = (datetime.fromtimestamp(timestamp) +
                                 timedelta(hours=timeshift))
time = shifted_timestamp.strftime('%H-%M-%S')
filenumber = os.path.basename(os.path.normpath(ifile))[2:-7]
ofilename = prefix + "_" + filenumber
ofile = exportpath + ofilename + "_0001.csv"
ofile_new = exportpath + ofilename + "_" + time + ".csv"
print("Renamed file: ", ofile)
# print("new: ", ofile_new)
os.rename(ofile, ofile_new)
| 5,349,728 |
def get_angle_from_coordinate(lat1, long1, lat2, long2):
"""https://stackoverflow.com/questions/3932502/calculate-angle-between-two-latitude-longitude-points"""
    # note: np.sin/np.cos expect radians, so lat/long must be given in radians here
    dLon = (long2 - long1)
y = np.sin(dLon) * np.cos(lat2)
x = np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * np.cos(dLon)
brng = np.arctan2(y, x)
brng = np.degrees(brng)
brng = (brng + 360) % 360
brng = 360 - brng
return brng
| 5,349,729 |
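As noted in get_angle_from_coordinate above, the trigonometry works in radians, so callers need to convert degrees first. A small check: due north from the origin comes out as 360.0, i.e. 0 degrees modulo 360.

import numpy as np

# Bearing from (0, 0) to a point one degree to the north, inputs in radians.
print(get_angle_from_coordinate(0.0, 0.0, np.radians(1.0), 0.0))  # 360.0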
def get_predictions():
"""Return the list of predications as a json object"""
results = []
conn = None
columns = ("pid", "name", "location", "latitude", "longitude", "type", "modtime")
try:
conn = psycopg2.connect(db_conn)
# create a cursor
cur = conn.cursor()
cur.execute(
"SELECT pid, name, location, latitude, longitude, type, modtime FROM predictions"
)
for row in cur.fetchall():
results.append(dict(zip(columns, row)))
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
print("Database connection closed.")
json_str = json.dumps(results, indent=2, sort_keys=True, default=json_serial)
return Response(json_str, mimetype="application/json")
| 5,349,730 |
def parse_packageset(packageset):
"""
Get "input" or "output" packages and their repositories from each PES event.
:return: set of Package tuples
"""
return {parse_package(p) for p in packageset.get('package', packageset.get('packages', []))}
| 5,349,731 |
def download_nyiso_csv(month, data_type, zone = None):
"""Downloads a NYISO csv dataset for a specific data type, month, and zone.
Args:
month: string denoting the first day of the month to be downloaded in
yyyymmdd format
data_type: string denoting the type of NYISO data to retrieve,
examples include "damlbmp" which stands for "day ahead
market location based marginal price" or "outSched" for
"outage schedule"
zone: string denoting the NYISO geographic zone of the data to be
requested. This is required if data_type == "damlbmp"
Returns:
df: pandas dataframe of the NYISO csv file for the entire month requested
"""
# Build the necessary url to access the NYISO data
url = build_nyiso_url(month, data_type, zone)
# Download the zip folder to a temporary file location,
# then open the zip folder into the object zf
zip_folder_path, headers = urllib.request.urlretrieve(url)
zf = zipfile.ZipFile(zip_folder_path)
#TODO: increase efficiency by only reading the files from NYISO that contain the desired days
# For each file contained in zf, read the csv and concatenate it with
# the other csvs for this month to create a month-long csv
df = pd.DataFrame()
for file in zf.filelist:
temp_df = pd.read_csv(zf.open(file.filename))
df = pd.concat([df,temp_df])
return df
| 5,349,732 |
def pretty_print_large_number(number):
"""Given a large number, it returns a string of the sort: '10.5 Thousand' or '12.3 Billion'. """
s = str(number).ljust(12)
if number > 0 and number < 1e3:
pass
elif number >= 1e3 and number < 1e6:
s = s + " (%3.1f Thousand)" % (number * 1.0 / 1e3)
elif number >= 1e6 and number < 1e9:
s = s + " (%3.1f Million)" % (number * 1.0 / 1e6)
elif number >= 1e9 and number < 1e12:
s = s + " (%3.1f Billion)" % (number * 1.0 / 1e9)
elif number >= 1e12 and number < 1e15:
s = s + " (%3.1f Trillion)" % (number * 1.0 / 1e12)
return s
| 5,349,733 |
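A couple of sample calls to pretty_print_large_number above (the number itself is left-justified to 12 characters before any suffix is appended):

print(pretty_print_large_number(950))      # "950" padded to 12 chars, no suffix
print(pretty_print_large_number(1234567))  # padded number followed by "(1.2 Million)"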
def find_aligning_transformation(skeleton, euler_frames_a, euler_frames_b):
"""
performs alignment of the point clouds based on the poses at the end of
euler_frames_a and the start of euler_frames_b
Returns the rotation around y axis in radians, x offset and z offset
"""
point_cloud_a = convert_euler_frame_to_cartesian_frame(skeleton, euler_frames_a[-1])
point_cloud_b = convert_euler_frame_to_cartesian_frame(skeleton, euler_frames_b[0])
weights = skeleton.get_joint_weights()
theta, offset_x, offset_z = _align_point_clouds_2D(point_cloud_a, point_cloud_b, weights)
return theta, offset_x, offset_z
| 5,349,734 |
def find_all_visit(tx):
"""
Method that queries the database to find all VISIT relationships
:param tx: session
:return: nodes of Person , Location
"""
query = (
"""
MATCH (p:Person)-[r:VISIT]->(l:Location)
RETURN p , ID(p) , r , r.start_hour , r.end_hour , r.date , l , ID(l)
"""
)
result = tx.run(query).data()
return result
| 5,349,735 |
def read_vectors(filename):
"""Reads measurement vectors from a space or comma delimited file.
:param filename: path of the file
:type filename: str
:return: array of vectors
:rtype: numpy.ndarray
:raises: ValueError
"""
vectors = []
data = read_csv(filename)
expected_size = len(data[0])
if expected_size % 3 != 0:
raise ValueError('Column size of vector data must be a multiple of 3')
for row in data:
if len(row) == expected_size:
vectors.append(row)
else:
raise ValueError('Inconsistent column size of vector data')
result = np.array(vectors, np.float32)
if not np.isfinite(result).all():
raise ValueError('Non-finite value present in vector data')
return result
| 5,349,736 |
def get_scoring_algorithm():
""" Base scoring algorithm for index and search """
return scoring.BM25F()
| 5,349,737 |
def _make_augmentation_pipeline(augmentation_list):
"""Buids an sklearn pipeline of augmentations from a tuple of strings.
Parameters
----------
augmentation_list: list of strings, A list of strings that determine the
augmentations to apply, and in which order to apply them (the first
string will be applied first). Possible augmentation strings are
['leadlag', 'ir', 'addtime', 'cumsum', 'basepoint']
Returns
-------
sklearn.Pipeline
The transforms, in order, as an sklearn pipeline.
Examples
--------
    augmentations = ('leadlag', 'ir', 'addtime')
_make_augmentation_pipeline(augmentations)
# Will return
Pipeline([
('leadlag', LeadLag()),
('ir', InvisibilityReset()),
('addtime', AddTime())
])
"""
# Dictionary of augmentations
AUGMENTATIONS = {
"leadlag": _LeadLag(),
"ir": _InvisibilityReset(),
"addtime": _AddTime(),
"cumsum": _CumulativeSum(),
"basepoint": _BasePoint(),
}
# Assertions, check we have a tuple/list
if augmentation_list is not None:
if isinstance(augmentation_list, str):
augmentation_list = (augmentation_list,)
assert all(
[x in list(AUGMENTATIONS.keys()) for x in augmentation_list]
), "augmentation_list must only contain string elements from {}. Given: {}.".format(
list(AUGMENTATIONS.keys()), augmentation_list
)
# Setup pipeline
if augmentation_list is not None:
pipeline = Pipeline(
[(tfm_str, AUGMENTATIONS[tfm_str]) for tfm_str in augmentation_list]
)
else:
pipeline = None
return pipeline
| 5,349,738 |
def ctg_path(event_name,sc_reform,path_cache,var_map,model,prev_events):
"""
    Recursively computes the controllable and contingent events that influence
the schedule of a given event.
"""
if event_name in path_cache:#If solution has been already computed, use it
return path_cache[event_name]
else:
if event_name in sc_reform: #End point of uncontrollable duration
if event_name in prev_events:
                raise RuntimeError('Contingent duration loop detected!')
else:
prev_events.add(event_name)
path_ref = ctg_path(sc_reform[event_name]['ref'],sc_reform,path_cache,var_map,model,prev_events)
path = [event_name]+path_ref
else: #Controllable event
if not event_name in var_map:#1-to-1 mapping between events and variables
var_map[event_name]=model.addVar(vtype=GRB.CONTINUOUS,lb=0.0)
model.update()
path = [event_name]
path_cache[event_name]=path #Caches solution for future use
return path
| 5,349,739 |
def _setup_mock_socket_file(mock_socket_create_conn, resp):
"""Sets up a mock socket file from the mock connection.
Args:
mock_socket_create_conn: The mock method for creating a socket connection.
resp: iterable, the side effect of the `readline` function of the mock
socket file.
Returns:
The mock socket file that will be injected into the code.
"""
fake_file = mock.Mock()
fake_file.readline.side_effect = resp
fake_conn = mock.Mock()
fake_conn.makefile.return_value = fake_file
mock_socket_create_conn.return_value = fake_conn
return fake_file
| 5,349,740 |
def usage_info():
"""
usage info
"""
print("Input params is illegal...")
print("try it again:\n python matcaffe2pycaffe.py -h")
| 5,349,741 |
def get_defense_type(action: int, game_config) -> int:
"""
Utility method for getting the defense type of action-id
:param action: action-id
:param game_config: game configuration
:return: action type
"""
defense_type = action % (game_config.num_attack_types+1) # +1 for detection
return defense_type
| 5,349,742 |
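A quick sketch for get_defense_type above with a hypothetical game configuration: with 3 attack types the action ids wrap onto defense types 0..3, where index 3 is the detection action.

from types import SimpleNamespace

game_config = SimpleNamespace(num_attack_types=3)  # hypothetical stand-in config
print([get_defense_type(a, game_config) for a in range(6)])  # [0, 1, 2, 3, 0, 1]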
def color_box(
colors, border="#000000ff", border2=None, height=32, width=32,
border_size=1, check_size=4, max_colors=5, alpha=False, border_map=0xF
):
"""Color box."""
return colorbox.color_box(
colors, border, border2, height, width,
border_size, check_size, max_colors, alpha, border_map
)
| 5,349,743 |
def get_performance_of_lstm_classifier(X, y, n_epochs, verbose=1, final_score=False):
"""
Reshapes feature matrix X, applies LSTM and returns the performance of the neural network
:param X: List of non-reshaped/original feature matrices (one per logfile)
:param y: labels
:param n_epochs: Number of epochs the model should be trained
:param verbose: verbose mode of keras_model.fit
:param final_score: If final score should be printed, then don't use a validation set
    :return rocs, recalls, specificities, precisions, f1s
"""
X_list, y_list = _get_splitted_up_feature_matrix_and_labels(X, y)
globals()["_maxlen"] = max(len(fm) for fm in X_list)
if final_score:
X_train_list, y_train_list, X_test_list, y_test_list, X_val, y_val = \
_split_into_train_test_val_data(X_list, y_list, size_test_set=3, size_val_set=0)
X_lstm, y_lstm = _get_reshaped_matrices(X_train_list, y_train_list)
model = _generate_lstm_classifier((X_lstm.shape[1], X_lstm.shape[2]))
trained_model = _fit_lstm(model, X_lstm, y_lstm, n_epochs, verbose)
else:
X_train_list, y_train_list, X_test_list, y_test_list, X_val, y_val = \
_split_into_train_test_val_data(X_list, y_list, size_test_set=3, size_val_set=2)
X_lstm, y_lstm = _get_reshaped_matrices(X_train_list, y_train_list)
X_val, y_val = _get_reshaped_matrices(X_val, y_val)
model = _generate_lstm_classifier((X_lstm.shape[1], X_lstm.shape[2]))
trained_model = _fit_lstm(model, X_lstm, y_lstm, n_epochs, verbose, val_set=(X_val, y_val))
print('Performance training set: ')
_calculate_performance(X_lstm, y_lstm, trained_model)
print('Performance test set: ')
    rocs, recalls, specificities, precisions, f1s = _calculate_performance(X_test_list, y_test_list, trained_model)
    return rocs, recalls, specificities, precisions, f1s
| 5,349,744 |
def worker(data):
"""Thread function."""
width, column = data
queen = Queen(width)
queen.run(column)
return queen.solutions
| 5,349,745 |
def eitem(self, key, value):
"""Translate included eitems."""
_eitem = self.get("_eitem", {})
urls = []
for v in force_list(value):
urls.append(
{
"description": "E-book by EbookCentral",
"value": clean_val("u", v, str),
}
)
_eitem.update({"urls": urls})
return _eitem
| 5,349,746 |
def annotate_group(groups, ax=None, label=None, labeloffset=30):
"""Annotates the categories with their parent group and add x-axis label"""
def annotate(ax, name, left, right, y, pad):
"""Draw the group annotation"""
arrow = ax.annotate(name, xy=(left, y), xycoords="data",
xytext=(right, y - pad), textcoords="data",
annotation_clip=False, verticalalignment="top",
horizontalalignment="center", linespacing=2.0,
arrowprops={'arrowstyle': "-", 'shrinkA': 0, 'shrinkB': 0,
'connectionstyle': "angle,angleB=90,angleA=0,rad=5"}
)
return arrow
if ax is None:
ax = plt.gca()
level = 0
for level in range(len(groups)):
grp = groups[level]
for name, coord in list(grp.items()):
ymin = ax.get_ylim()[0] - np.ptp(ax.get_ylim()) * 0.12 - np.ptp(ax.get_ylim()) * 0.05 * (level)
ypad = 0.01 * np.ptp(ax.get_ylim())
xcenter = np.mean(coord)
annotate(ax, name, coord[0], xcenter, ymin, ypad)
annotate(ax, name, coord[1], xcenter, ymin, ypad)
if label is not None:
# Define xlabel and position it according to the number of group levels
ax.annotate(label,
xy=(0.5, 0), xycoords="axes fraction",
xytext=(0, -labeloffset - (level + 1) * 15), textcoords="offset points",
verticalalignment="top", horizontalalignment="center")
return
| 5,349,747 |
def background_schwarzfischer(fluor_chan, bin_chan, div_horiz=7, div_vert=5, mem_lim=None, memmap_dir=None):
"""Perform background correction according to Schwarzfischer et al.
Arguments:
fluor_chan -- (frames x height x width) numpy array; the fluorescence channel to be corrected
bin_chan -- boolean numpy array of same shape as `fluor_chan`; segmentation map (background=False, cell=True)
div_horiz -- int; number of (non-overlapping) tiles in horizontal direction
div_vert -- int; number of (non-overlapping) tiles in vertical direction
mem_lim -- max number of bytes for temporary data before switching to memmap;
if in (0,1], max percentage of free memory to be used;
if non-positive, always use memory; if None, decide automatically
memmap_dir -- str; directory for creating memmap
Returns:
Background-corrected fluorescence channel as numpy array (dtype single) of same shape as `fluor_chan`
"""
n_frames, height, width = fluor_chan.shape
# Allocate arrays
if np.can_cast(fluor_chan, np.float16):
dtype_interp = np.float16
elif np.can_cast(fluor_chan, np.float32):
dtype_interp = np.float32
else:
dtype_interp = np.float64
dtype_interp = np.dtype(dtype_interp)
bg_mean = np.empty((n_frames, 1, 1), dtype=dtype_interp)
# Create large arrays in memory or as memmap
    if mem_lim is None or mem_lim > 0:
        bg_interp, arr_temp, iter_temp = _get_arr(fluor_chan.shape, dtype_interp, mem_lim, memmap_dir)
    else:
        # non-positive mem_lim: always work in memory, processing the full height as a single chunk
        bg_interp = np.empty(shape=fluor_chan.shape, dtype=dtype_interp)
        arr_temp = np.empty_like(bg_interp)
        iter_temp = [(height, slice(0, height))]
# Construct tiles for background interpolation
# Each pair of neighboring tiles is overlapped by a third tile, resulting in a total tile number
# of `2 * div_i - 1` tiles for each direction `i` in {`horiz`, `vert`}.
# Due to integer rounding, the sizes may slightly vary between tiles.
tiles_vert = _make_tiles(height, div_vert)
tiles_horiz = _make_tiles(width, div_horiz)
supp = np.empty((tiles_horiz.size, tiles_vert.size))
# Interpolate background as cubic spline with each tile’s median as support point at the tile center
for t in range(n_frames):
print(f"Interpolating background in frame {t:3d} …")
masked_frame = ma.masked_array(fluor_chan[t, ...], mask=bin_chan[t, ...])
for iy, (y, sy) in enumerate(tiles_vert):
for ix, (x, sx) in enumerate(tiles_horiz):
supp[ix, iy] = ma.median(masked_frame[sy, sx])
bg_spline = scint.RectBivariateSpline(x=tiles_horiz['center'], y=tiles_vert['center'], z=supp)
patch = bg_spline(x=range(width), y=range(height)).T
bg_interp[t, ...] = patch
bg_mean[t, ...] = patch.mean()
# Correct for background using Schwarzfischer’s formula:
# corrected_image = (raw_image - interpolated_background) / gain
# wherein, in opposite to Schwarzfischer, the gain is approximated as
# median(interpolated_background / mean_background)
# This “simple” calculation may consume more memory than available.
# Therefore, a less readable but more memory-efficient command flow is used.
for st, sl in iter_temp:
np.divide(bg_interp[:, sl, :], bg_mean, out=arr_temp[:, :st, :])
np.subtract(fluor_chan[:, sl, :], bg_interp[:, sl, :], out=bg_interp[:, sl, :])
np.divide(bg_interp[:, sl, :], np.median(arr_temp[:, :st, :], axis=0, keepdims=True), out=bg_interp[:, sl, :])
# `bg_interp` now holds the corrected image
return bg_interp
| 5,349,748 |
def _get_eula_date(extract_path: str) -> Optional[str]:
"""Get any EULA accept date in the install script, if any.
:param extract_path: The path to the extracted archive.
:return: The EULA date, if any.
"""
install_script = os.path.join(extract_path, "houdini.install")
if not os.path.exists(install_script):
return None
with open(install_script) as handle:
for line in handle:
if line.startswith("LICENSE_DATE"):
return line.split("=")[1].strip()
return None
| 5,349,749 |
def get_output_data_path(extension, suffix=None):
"""Return full path for data file with extension, generated by a test script"""
name = get_default_test_name(suffix)
return osp.join(TST_PATH[0], f"{name}.{extension}")
| 5,349,750 |
def _plot(self, **kwargs) -> tp.BaseFigure: # pragma: no cover
"""Plot `close` and overlay it with the heatmap of `labels`."""
if self.wrapper.ndim > 1:
raise TypeError("Select a column first. Use indexing.")
return self.close.rename('close').vbt.overlay_with_heatmap(self.labels.rename('labels'), **kwargs)
| 5,349,751 |
def _set_global_vars(metadata):
"""Identify files used multiple times in metadata and replace with global variables
"""
fnames = collections.defaultdict(list)
for sample in metadata.keys():
for k, v in metadata[sample].items():
print k, v
if os.path.isfile(v):
v = _expand_file(v)
metadata[sample][k] = v
fnames[v].append(k)
loc_counts = collections.defaultdict(int)
global_vars = {}
global_var_sub = {}
for fname, locs in fnames.items():
if len(locs) > 1:
loc_counts[locs[0]] += 1
name = "%s%s" % (locs[0], loc_counts[locs[0]])
global_var_sub[fname] = name
global_vars[name] = fname
for sample in metadata.keys():
for k, v in metadata[sample].items():
if v in global_var_sub:
metadata[sample][k] = global_var_sub[v]
return metadata, global_vars
| 5,349,752 |
def user_get(context, id):
"""Get user by id."""
return IMPL.user_get(context, id)
| 5,349,753 |
def animation_template(world):
"""Shows how to animate a robot."""
#first, build a trajectory with 10 random configurations
robot = world.robot(0)
times = list(range(10))
milestones = []
for t in times:
robot.randomizeConfig()
milestones.append(robot.getConfig())
traj = trajectory.RobotTrajectory(robot,times,milestones)
vis.add("world",world)
robotPath = ("world",world.robot(0).getName()) #compound item reference: refers to robot 0 in the world
#we're also going to visualize the end effector trajectory
#eetraj = traj.getLinkTrajectory(robot.numLinks()-1,0.05)
#vis.add("end effector trajectory",eetraj)
#uncomment this to automatically visualize the end effector trajectory
vis.add("robot trajectory",traj)
vis.setAttribute("robot trajectory","endeffectors",[13,20])
vis.setWindowTitle("Animation test")
MANUAL_ANIMATION = False
if not MANUAL_ANIMATION:
#automatic animation, just call vis.animate
vis.animate(robotPath,traj)
if not MULTITHREADED:
#need to set up references to function-local variables manually, and the easiest way is to use a default argument
def callback(robot=robot):
if MANUAL_ANIMATION:
#with manual animation, you just set the robot's configuration based on the current time.
t = vis.animationTime()
q = traj.eval(t,endBehavior='loop')
robot.setConfig(q)
pass
vis.loop(callback=callback,setup=vis.show)
else:
vis.show()
while vis.shown():
vis.lock()
if MANUAL_ANIMATION:
#with manual animation, you just set the robot's configuration based on the current time.
t = vis.animationTime()
q = traj.eval(t,endBehavior='loop')
robot.setConfig(q)
vis.unlock()
time.sleep(0.01)
#quit the visualization thread nicely
vis.kill()
| 5,349,754 |
def unpack_singleton(x):
"""
>>> unpack_singleton([[[[1]]]])
1
>>> unpack_singleton(np.array(np.datetime64('2000-01-01')))
array('2000-01-01', dtype='datetime64[D]')
"""
while isinstance(x, (list, tuple)):
try:
x = x[0]
except (IndexError, TypeError, KeyError):
break
return x
| 5,349,755 |
def run_bincapture(args: List[str]) -> bytes:
"""run is like "subprocess.run(args, capture_out=True, text=False)",
but with helpful settings and obeys "with capture_output(out)".
"""
if _capturing:
try:
return subprocess.run(args, check=True, capture_output=True).stdout
except subprocess.CalledProcessError as err:
raise Exception(f"{err.stderr.decode('UTF-8')}{err}") from err
else:
return subprocess.run(args, check=True, stdout=subprocess.PIPE).stdout
| 5,349,756 |
def _EnumValFromText(fdesc, enum_text_val, log):
"""Convert text version of enum to integer value.
Args:
fdesc: field descriptor containing the text -> int mapping.
enum_text_val: text to convert.
log: logger obj
Returns:
integer value of enum text.
"""
log.debug("converting enum val:" + enum_text_val)
log.debug("possible enum vals:" + str(fdesc.enum_type.values_by_name.keys()))
enum_val = fdesc.enum_type.values_by_name[enum_text_val.upper()].number
log.debug("done enum vals")
return enum_val
| 5,349,757 |
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
# print('correct shape:', correct.shape)
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
if len(res) == 1:
return res[0]
else:
return (res[0], res[1], correct[0], pred[0])
| 5,349,758 |
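A minimal check of the single-k path of accuracy above: two samples, three classes, and only the first arg-max matches its target, so top-1 precision is 50%.

import torch

output = torch.tensor([[0.1, 0.8, 0.1],
                       [0.7, 0.2, 0.1]])
target = torch.tensor([1, 2])
print(accuracy(output, target))  # tensor(50.)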
def yyyydoy2jd(year,doy,hh=0,mm=0,ss=0.0):
"""
yyyydoy2jd Take a year, day-of-year, etc and convert it into a julian day
Usage: jd = yyyydoy2jd(year,doy,hh,mm,ss)
Input: year - 4 digit integer
doy - 3 digit, or less integer, (1 <= doy <= 366)
hh - 2 digit, or less int, (0 <= hh < 24) (not required)
mm - 2 digit, or less int,(0 <= ss < 60) (not required)
ss - float (not required)
Output: 'jd' (float)
"""
#
# need to split seconds into two components
# sec => 2 digit, or less int, (0 <= ss < 60)
# ms => int 0 <= ms < 1,000,000
#
ms,sec = modf(float(ss))
ms = ms * 10e5
    dto = dt.datetime(int(year), 1, 1, int(hh), int(mm), int(sec), int(ms))
dto = dto + dt.timedelta(days=(int(doy) - 1))
mn = dto.month
dy = dto.day
jd = cal2jd(int(year),int(mn),int(dy))
jd = jd + float(hh)/24. + float(mm)/60./24. + float(sec)/3600./24.
return jd - 2400000.5
| 5,349,759 |
def lon2index(lon, coords, corr=True):
"""convert longitude to index for OpenDAP request"""
if corr:
if lon < 0:
lon += 360
lons = coords.lon.values
return np.argmin(np.abs(lons - lon))
| 5,349,760 |
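A sketch for lon2index above with a stand-in for the xarray coordinate object it expects (only a .lon.values attribute is needed): a negative longitude is wrapped into the 0..360 range before the nearest index is looked up.

import numpy as np
from types import SimpleNamespace

coords = SimpleNamespace(lon=SimpleNamespace(values=np.arange(0.0, 360.0, 1.0)))
print(lon2index(-90, coords))  # 270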
def get_dynamic_call_address(ea):
"""Find all dynamic calls e.g call eax"""
dism_addr_list = list(FuncItems(ea))
return [addr for addr in dism_addr_list if print_insn_mnem(addr) == 'call' and get_operand_type(addr, 0)==1]
| 5,349,761 |
def update_pfc_context_to_original(operation: DataOperation) -> None:
"""
Revert the change to pyfileconf currently running context
made by :func:`update_pfc_context_to_pipeline_section_path`
:param operation: The operation which was just executed
:return: None
"""
context.stack.pop_frame()
| 5,349,762 |
def iwbo_nats(model, x, k, kbs=None):
"""Compute the IWBO in nats."""
if kbs: return - iwbo_batched(model, x, k, kbs).mean()
else: return - iwbo(model, x, k).mean()
| 5,349,763 |
def ko_json(queryset, field_names=None, name=None, safe=False):
"""
Given a QuerySet, return just the serialized representation
based on the knockout_fields. Useful for middleware/APIs.
Convenience method around ko_data.
"""
return ko_data(queryset, field_names, name, safe, return_json=True)
| 5,349,764 |
def map(video_features_path, audio_hypothesis, file_uri, ier=False):
"""Maps outputs of pyannote.audio and pyannote.video models
Parameters:
-----------
video_features_path: str
Path to the video features (.npy) file as defined in pyannote.video
audio_hypothesis: Annotation
hypothesis made by the audio model
file_uri: str
uri of the file you're interested in (used to filter out audio_hypothesis)
ier: bool
If True, the mapping will be done using `optimal_mapping_ier`
which may map the same label to several clusters in order to minimize IER
If False (default), pyannote.metrics `optimal_mapping` will be used.
"""
clustering = FaceClustering()
#TODO : move the preprocess (i.e. npy to pyannote) to some other place ?
face_id, _ = clustering.model.preprocess(video_features_path,CLUSTERING_THRESHOLD)
if ier:
optimal_mapping=optimal_mapping_ier(face_id, audio_hypothesis)
else:
der=DiarizationErrorRate()
optimal_mapping=der.optimal_mapping(face_id, audio_hypothesis)
mapped_hypothesis=audio_hypothesis.rename_labels(mapping=optimal_mapping)
return mapped_hypothesis, face_id
| 5,349,765 |
def delete_demo(guid):
"""
Delete a demo object and all its children.
:param guid: The demo's guid
:return:
"""
web_utils.check_null_input((guid, 'demo to delete'))
demo_service.delete_demo_by_guid(guid)
return '', 204
| 5,349,766 |
def return_json():
"""
Sample function that has been given a different name
"""
print("Tooler should render out the JSON value returned")
return {"one": 1, "deep": {"structure": ["example"]}}
| 5,349,767 |
def get(url) -> str:
"""Send an http GET request.
:param str url:
The URL to perform the GET request for.
:rtype: str
:returns:
UTF-8 encoded string of response
"""
return _execute_request(url).read().decode("utf-8")
| 5,349,768 |
def strip_filenames(path, ext='', allowed_chars=None):
"""
Strips the filenames from whitespaces and other 'problematic' chars.
    In other words, restricts the filenames to alphanumeric characters (plus dots, by default).
:param path: (String) Base path for the filenames.
:param ext: (String, optional) If provided, the glob will rename only these files.
:param allowed_chars: (String, optional) If provided, it includes the
chars to be allowed in the re function compile().
:return:
"""
if allowed_chars is None:
allowed_chars = '[^a-zA-Z0-9.]+'
pattern = re.compile(allowed_chars)
for cl in sorted(glob(path + '*' + ext)):
# get only the filename
cl1 = cl[cl.rfind(sep) :]
# strip all white spaces, quatation points, etc.
name = pattern.sub('', cl1)
move(path + cl1, path + name)
| 5,349,769 |
def brute_force(durs, labels, labelset, train_dur, val_dur, test_dur, max_iter=5000):
"""finds indices that split (labels, durations) tuples into training,
test, and validation sets of specified durations, with the set of unique labels
in each dataset equal to the specified labelset.
The durations of the datasets created using the returned indices will be
*greater than* or equal to the durations specified.
Must specify a positive value for one of {train_dur, test_dur}.
The other value can be specified as '-1' which is interpreted as
"use the remainder of the dataset for this split,
after finding indices for the set with a specified duration".
Parameters
----------
durs : list
of durations of vocalizations
labels : list
of labels from vocalizations
labelset : set
of labels
train_dur : int, float
Target duration for training set, in seconds.
val_dur : int, float
Target duration for validation set, in seconds.
test_dur : int, float
Target duration for test set, in seconds.
max_iter : int
maximum number of iterations to attempt to find indices. Default is 5000.
Returns
-------
train_inds, val_inds, test_inds : list
of int, the indices that will split datasets
Notes
-----
A 'brute force' algorithm that just randomly assigns indices to a set,
and iterates until it finds some partition where each set has instances of all classes of label.
Starts by ensuring that each label is represented in each set and then adds files to reach the required
durations.
"""
logger = logging.getLogger(__name__)
logger.setLevel("INFO")
sum_durs = sum(durs)
train_dur, val_dur, test_dur = validate_split_durations(
train_dur, val_dur, test_dur, sum_durs
)
target_split_durs = dict(
zip(("train", "val", "test"), (train_dur, val_dur, test_dur))
)
if not len(durs) == len(labels):
raise ValueError(
"length of list of durations did not equal length of list of labels; "
"should be same length since "
"each duration of a vocalization corresponds to the labels from its annotations.\n"
f"Length of durations: {len(durs)}. Length of labels: {len(labels)}"
)
iter = 1
all_labels_err = (
"Did not successfully divide data into training, "
"validation, and test sets of sufficient duration "
f"after {max_iter} iterations. "
"Try increasing the total size of the data set."
)
# ---- outer loop that repeats until we successfully split our reach max number of iters ---------------------------
while 1:
# list of indices we use to index into both `durs` and `labels`
durs_labels_inds = list(
range(len(labels))
) # we checked len(labels) == len(durs) above
# when making `split_inds`, "initialize" the dict with all split names, by using target_split_durs
# so we don't get an error when indexing into dict in return statement below
split_inds = {split_name: [] for split_name in target_split_durs.keys()}
total_split_durs = {split_name: 0 for split_name in target_split_durs.keys()}
split_labelsets = {split_name: set() for split_name in target_split_durs.keys()}
# list of split 'choices' we use when randomly adding indices to splits
choice = []
for split_name in target_split_durs.keys():
if target_split_durs[split_name] > 0 or target_split_durs[split_name] == -1:
choice.append(split_name)
# ---- make sure each split has at least one instance of each label --------------------------------------------
for label_from_labelset in sorted(labelset):
label_inds = [
ind for ind in durs_labels_inds if label_from_labelset in labels[ind]
]
random.shuffle(label_inds)
for split_name in target_split_durs.keys():
if (
target_split_durs[split_name] > 0
or target_split_durs[split_name] == -1
) and label_from_labelset not in split_labelsets[split_name]:
try:
ind = label_inds.pop()
split_inds[split_name].append(ind)
total_split_durs[split_name] += durs[ind]
split_labelsets[split_name] = split_labelsets[split_name].union(
set(labels[ind])
)
durs_labels_inds.remove(ind)
except IndexError:
if len(label_inds) == 0:
logger.debug(
"Ran out of elements while dividing dataset into subsets of specified durations."
f"Iteration {iter}"
)
iter += 1
break # do next iteration
else:
# something else happened, re-raise error
raise
for split_name in target_split_durs.keys():
if (
target_split_durs[split_name] > 0
and total_split_durs[split_name] >= target_split_durs[split_name]
):
choice.remove(split_name)
if len(choice) == 0:
finished = True
else:
finished = False
# ---- inner loop that actually does split ---------------------------------------------------------------------
random.shuffle(durs_labels_inds)
while finished is False:
# pop durations off list and append to randomly-chosen
# list, either train, val, or test set.
# Do this until the total duration for each data set is equal
# to or greater than the target duration for each set.
try:
ind = durs_labels_inds.pop()
except IndexError:
if len(durs_labels_inds) == 0:
logger.debug(
"Ran out of elements while dividing dataset into subsets of specified durations."
f"Iteration {iter}"
)
iter += 1
break # do next iteration
else:
# something else happened, re-raise error
raise
which_set = random.randint(0, len(choice) - 1)
split_name = choice[which_set]
split_inds[split_name].append(ind)
total_split_durs[split_name] += durs[ind]
if (
target_split_durs[split_name] > 0
and total_split_durs[split_name] >= target_split_durs[split_name]
):
choice.remove(split_name)
elif target_split_durs[split_name] == -1:
# if this split is -1 and other split is already "finished"
if (split_name == "test" and "train" not in choice) or (
split_name == "train" and "test" not in choice
):
# just add all remaining inds to this split
split_inds[split_name].extend(durs_labels_inds)
choice.remove(split_name)
if len(choice) < 1: # list is empty, we popped off all the choices
for split_name in target_split_durs.keys():
if target_split_durs[split_name] > 0:
if total_split_durs[split_name] < target_split_durs[split_name]:
raise ValueError(
"Loop to find splits completed, "
f"but total duration of '{split_name}' split, "
f"{total_split_durs[split_name]} seconds, "
f"is less than target duration specified: {target_split_durs[split_name]} seconds."
)
else:
finished = True
break
if iter > max_iter:
raise ValueError(
"Could not find subsets of sufficient duration in "
f"less than {max_iter} iterations."
)
# make sure that each split contains all unique labels in labelset
if finished is True:
for split_name in target_split_durs.keys():
if (
target_split_durs[split_name] > 0
or target_split_durs[split_name] == -1
):
split_labels = [
label for ind in split_inds[split_name] for label in labels[ind]
]
split_labelset = set(split_labels)
if split_labelset != set(labelset):
iter += 1
if iter > max_iter:
raise ValueError(all_labels_err)
else:
logger.debug(
f"Set of unique labels in '{split_name}' split did not equal specified labelset. "
f"Getting new '{split_name}' split. Iteration: {iter}"
)
continue
# successfully split
break
elif finished is False:
continue
split_inds = {
split_name: (inds if inds else None) for split_name, inds in split_inds.items()
}
return split_inds["train"], split_inds["val"], split_inds["test"]
| 5,349,770 |
def test_operator__remove_all__2(evaluate):
"""remove-all() does nothing on `None`."""
assert None is evaluate(None, 'remove-all', 'a')
| 5,349,771 |
def VOLUME(env: Optional[Dict] = None) -> Dict:
"""Get specification for the volume that is associated with the worker that
is used to execute the main algorithm step.
Parameters
----------
env: dict, default=None
Optional environment variables that override the system-wide
settings, default=None
Returns
-------
dict
"""
return read_config_obj(var=METANOME_VOLUME, env=env if env is not None else os.environ)
| 5,349,772 |
def test_reaction_oneliner():
"""
7
2
xx
2
3
"""
m1 = MyObject4(bar=2)
m2 = MyObject4(bar=lambda: m1.bar)
loop.iter()
print(m2.bar)
loop.iter()
print(m2.bar)
print('xx')
m1.set_bar(3)
loop.iter()
print(m2.bar)
loop.iter()
print(m2.bar)
| 5,349,773 |
def compress(body, compress_level):
"""Compress 'body' at the given compress_level."""
import zlib
yield '\037\213' # magic header
yield '\010' # compression method
yield '\0'
yield struct.pack("<L", long(time.time()))
yield '\002'
yield '\377'
crc = zlib.crc32("")
size = 0
zobj = zlib.compressobj(compress_level,
zlib.DEFLATED, -zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL, 0)
for line in body:
size += len(line)
crc = zlib.crc32(line, crc)
yield zobj.compress(line)
yield zobj.flush()
yield struct.pack("<l", crc)
yield struct.pack("<L", size & 0xFFFFFFFFL)
| 5,349,774 |
def train_step(model_optimizer, game_board_log, predicted_action_log,
action_result_log):
"""Run one training step."""
def loss_fn(model_params):
logits = PolicyGradient().apply({'params': model_params}, game_board_log)
loss = compute_loss(logits, predicted_action_log, action_result_log)
return loss
grad_fn = jax.grad(loss_fn)
grads = grad_fn(model_optimizer.target)
model_optimizer = model_optimizer.apply_gradient(grads)
return model_optimizer
| 5,349,775 |
def make_static_rnn_with_control_flow_v2_tests(options):
"""Make a set of tests to do basic Lstm cell."""
test_parameters = [
{
"dtype": [tf.float32],
"num_batches": [4],
"time_step_size": [4],
"input_vec_size": [3],
"num_cells": [4],
"use_sequence_length": [True, False],
},
]
def build_graph(parameters):
"""Build a simple graph with BasicLSTMCell."""
num_batches = parameters["num_batches"]
time_step_size = parameters["time_step_size"]
input_vec_size = parameters["input_vec_size"]
num_cells = parameters["num_cells"]
inputs_after_split = []
for i in range(time_step_size):
one_timestamp_input = tf.placeholder(
dtype=parameters["dtype"],
name="split_{}".format(i),
shape=[num_batches, input_vec_size])
inputs_after_split.append(one_timestamp_input)
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(
num_cells, activation=tf.nn.relu, state_is_tuple=True)
sequence_length = None
if parameters["use_sequence_length"]:
      # Using a different sequence length in each batch, like [1, 2, 3, 3...].
sequence_length = [
min(i + 1, time_step_size) for i in range(num_batches)
]
cell_outputs, _ = rnn.static_rnn(
lstm_cell,
inputs_after_split,
dtype=tf.float32,
sequence_length=sequence_length)
out = cell_outputs[-1]
return inputs_after_split, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Feed inputs, assign variables, and freeze graph."""
with tf.variable_scope("", reuse=True):
kernel = tf.get_variable("rnn/basic_lstm_cell/kernel")
bias = tf.get_variable("rnn/basic_lstm_cell/bias")
kernel_values = create_tensor_data(parameters["dtype"],
[kernel.shape[0], kernel.shape[1]], -1,
1)
bias_values = create_tensor_data(parameters["dtype"], [bias.shape[0]], 0,
1)
sess.run(tf.group(kernel.assign(kernel_values), bias.assign(bias_values)))
num_batches = parameters["num_batches"]
time_step_size = parameters["time_step_size"]
input_vec_size = parameters["input_vec_size"]
input_values = []
for _ in range(time_step_size):
tensor_data = create_tensor_data(parameters["dtype"],
[num_batches, input_vec_size], 0, 1)
input_values.append(tensor_data)
out = sess.run(outputs, feed_dict=dict(zip(inputs, input_values)))
return input_values, out
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
| 5,349,776 |
def jaccard(structured_phrases, phrases_to_score, partial=False, status_callback=None, status_increment=None, pmd_class=PartialMatchDict):
""" calculate jaccard similarity between phrases_to_score, using
structured_phrases to determine cooccurrences. For phrases `a' and `b', let
A be the set of documents `a' appeared in, and B be the set of documents
    `b' appeared in. Then the Jaccard similarity of `a' and `b' is
    |A intersect B| / |A union B|.
Setting partial to true allows partial phrase matching: two phrases are the
same if they have any common subsequence of words. Very slow.
"""
# indicies will index into our union and intersection arrays
phrases = {}
if partial:
indices = pmd_class()
else:
indices = {}
for i, phrase in enumerate(phrases_to_score):
indices[phrase] = i
phrases[i] = phrase
N = len(phrases_to_score)
phrase_count = np.zeros(N)
if partial:
intersection = np.zeros((N, N), dtype=np.uint32)
else:
intersection = dok_matrix((N, N), dtype=np.uint32)
count = 0
if status_callback and not status_increment:
length = len(structured_phrases)
status_increment = length / 100
# take each document
for doc_phrases in structured_phrases:
if status_callback and status_increment > 0 and count % status_increment == 0:
try:
status_callback(status_format(float(count) / length))
except:
status_callback("%d processed" % count)
count += 1
# take all phrases within this document
for i in range(len(doc_phrases)):
np1 = tuple(doc_phrases[i])
if np1 in indices:
# this phrase is important enough to count
if partial:
matches1 = indices[np1]
else:
matches1 = set()
matches1.add(indices[np1])
for index1 in matches1:
phrase_count[index1] += 1
for k in range(i + 1, len(doc_phrases)):
np2 = tuple(doc_phrases[k])
if np2 in indices:
# this np is important enough to count
if partial:
matches2 = indices[np2]
else:
matches2 = set()
matches2.add(indices[np2])
for index1 in matches1:
for index2 in matches2:
if index2 != index1:
intersection[index1,index2] += 1
intersection[index2,index1] += 1
# use inclusion exclusion
if partial:
tiled_phrase_count = np.lib.stride_tricks.as_strided(phrase_count,
(N, phrase_count.size),
(0, phrase_count.itemsize))
union = tiled_phrase_count + tiled_phrase_count.T - intersection
jaccard = intersection / union
else:
jaccard = dok_matrix((N, N))
        for coords, intersection_count in intersection.items():
jaccard[coords] = intersection_count / (phrase_count[coords[0]] + phrase_count[coords[1]] - intersection_count)
jaccard = np.asarray(jaccard.todense())
return jaccard, phrases
| 5,349,777 |
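# Hedged usage sketch for jaccard() above (the example data is made up): each
# entry of structured_phrases is one document, given as a list of word-tuple
# phrases, and phrases_to_score lists the phrases to compare. numpy/scipy and
# the module's PartialMatchDict are assumed to be importable.
structured_phrases = [
    [("neural", "network"), ("deep", "learning")],
    [("neural", "network"), ("graphical", "model")],
]
phrases_to_score = [("neural", "network"), ("deep", "learning"), ("graphical", "model")]

scores, phrases = jaccard(structured_phrases, phrases_to_score)
# scores[i, j] = |A intersect B| / |A union B| for the phrases at indices i and j,
# e.g. ("neural", "network") vs ("deep", "learning") co-occur in 1 of 2 documents -> 0.5
print(scores)
print(phrases)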
def print_python(node: AST) -> str:
"""Takes an AST and produces a string containing a human-readable
Python expression that builds the AST node."""
return black.format_str(ast.dump(node), mode=black.FileMode())
| 5,349,778 |
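# Hedged usage sketch for print_python() above: dump a small AST and let black
# pretty-print the result. The exact node names in the output depend on the
# Python version in use.
import ast

tree = ast.parse("1 + 2", mode="eval")
print(print_python(tree))
# e.g. Expression(body=BinOp(left=Constant(value=1), op=Add(), right=Constant(value=2)))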
def reg2deg(reg):
"""
Converts phase register values into degrees.
    :param reg: Phase register value
    :type reg: int
:return: Number of degrees
:rtype: float
"""
return reg*360/2**32
| 5,349,779 |
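# Quick sanity check for reg2deg() above: half of the 32-bit register range
# should map to 180 degrees.
print(reg2deg(2**31))  # 180.0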
def weakify_voxelwise_label_one_sub(pos_path_path, masks_path):
""" This function converts the voxelwise mask of a positive patch into a weak mask: it creates a sphere around the aneurysm center
Args:
pos_path_path (str): path to the positive patch to be converted
masks_path (str): path to the folder containing all the positive masks
Returns:
None
Raises:
ValueError: if the voxelwise mask is either non-binary or empty
ValueError: if the newly created weak mask is either non-binary or empty
"""
sub_ses_lesion = re.findall(r"sub-\d+_ses-\d+_Lesion_\d+", pos_path_path)[0]
patch_pair = re.findall(r"patch_pair_\d+", pos_path_path)[0]
last_part_of_path = os.path.basename(os.path.normpath(pos_path_path))
filename_mask = last_part_of_path.replace("pos_patch_angio", "mask_patch")
voxelwise_mask_obj = nib.load(os.path.join(masks_path, sub_ses_lesion, patch_pair, filename_mask))
voxelwise_mask_patch = np.asanyarray(voxelwise_mask_obj.dataobj) # type: np.ndarray
    if not np.array_equal(voxelwise_mask_patch, voxelwise_mask_patch.astype(bool)) or np.sum(voxelwise_mask_patch) == 0:
        raise ValueError("Voxelwise mask of positive patches must be binary and non-empty")
lesion = extract_lesion_info(voxelwise_mask_patch)
# N.B. I INVERT X and Y BECAUSE of OpenCV (see https://stackoverflow.com/a/56849032/9492673)
x_center = lesion["centroid_y_coord"] # extract y coordinate of lesion centroid
y_center = lesion["centroid_x_coord"] # extract x coordinate of lesion centroid
z_central = lesion["idx_slice_with_more_white_pixels"] # extract idx of slice with more non-zero pixels
aneur_center, aneur_radius = np.asarray([x_center, y_center, z_central]), lesion["equivalent_diameter"] / 2
weak_mask_with_sphere_patch = create_bin_sphere(voxelwise_mask_patch.shape, aneur_center, aneur_radius) # type: np.ndarray
weak_mask_with_sphere_patch = weak_mask_with_sphere_patch.astype(np.uint8)
    if not np.array_equal(weak_mask_with_sphere_patch, weak_mask_with_sphere_patch.astype(bool)) or np.sum(weak_mask_with_sphere_patch) == 0:
        raise ValueError("Newly created weak mask must be binary and non-empty")
weak_mask_obj = nib.Nifti1Image(weak_mask_with_sphere_patch, affine=voxelwise_mask_obj.affine)
# overwrite mask patch
nib.save(weak_mask_obj, os.path.join(masks_path, sub_ses_lesion, patch_pair, filename_mask))
| 5,349,780 |
def load_config(config_file=None):
"""Load the configuration file.
Configuration options will be available in dict sjkscan.conf.config.
When configuration options are added, modified or removed in future
releases, `config_template` in this function must be updated.
:param config_file: file to read. Defaults to sjkscan.conf in package bundle.
"""
#: Dictionary of lists containing configuration file sections and entries.
#: Each list contains tuples of configuration entry names and default values.
#: If it's not in this dictionary, it doesn't make it to config.config.
config_template = {
'Paths': [
('data', '/Users/sjk/Code/sjkscan/data'),
('dir_format', '%Y-%m-%d_%H-%M-%S'),
('inbox', '%(data)s/INBOX'),
('archive', '%(data)s/ARCHIVE')
],
'OCR': [
('language', 'swe')
],
'Rotation': [
('rotatate', 180)
],
'Scanimage': [
('resolution', 300),
('brightness', 80),
('contrast', 100)
],
'Logging': [
('level', 'INFO')
]
}
# conf = configparser.ConfigParser(interpolation=configparser.BasicInterpolation())
conf = configparser.ConfigParser()
if config_file:
conf.read(config_file)
else:
conf.read_string(resource_string(__name__, 'sjkscan.conf').decode('utf-8'))
for section in config_template:
config[section] = dict()
for entry, default in config_template[section]:
config[section][entry] = conf.get(section, entry, fallback=default)
| 5,349,781 |
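# Hedged usage sketch for load_config() above: after loading, options are read
# from the module-level `config` dict mentioned in the docstring.
load_config()
print(config["OCR"]["language"])  # 'swe' unless overridden in sjkscan.conf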
def plotit(depth, a, n, res1, res2, res3, title):
"""Call `comp_appres` and plot result."""
# Compute the three different models
rho1, AB2 = comp_appres(depth, res1, a, n)
rho2, _ = comp_appres(depth, res2, a, n)
rho3, _ = comp_appres(depth, res3, a, n)
# Create figure
plt.figure()
# Plot curves
plt.loglog(AB2, rho1, label='Case 1')
plt.plot(AB2, rho2, label='Case 2')
plt.plot(AB2, rho3, label='Case 3')
# Legend, labels
plt.legend(loc='best')
plt.title(title)
plt.xlabel('AB/2 (m)')
plt.ylabel(r'Apparent resistivity $\rho_a (\Omega\,$m)')
plt.show()
| 5,349,782 |
def plot_accuracy(scores: dict, filename: str) -> None:
"""
Plots the distribution of validation accuracy of the neural network
for all hyperparameter combination experiments.
"""
bins = 250
(hist, _) = np.histogram(scores, bins=bins, range=(0, 1))
x = np.linspace(0, 1, bins)
fig = go.Figure()
fig.add_trace(go.Bar(
x = x,
y = hist,
marker_color=train_color,
))
fig.update_layout(
title_text="Distribution of validation accuracy over experiments",
xaxis_title_text="Validation accuracy",
yaxis_title_text="Number of experiments",
)
pio.write_html(fig, filename)
| 5,349,783 |
def update_channel_metadata_cache():
"""
After a channel is imported, or when the devserver is started,
scan through the settings.CONTENT_DATABASE_DIR folder for all channel content databases,
and pull the data from each database's ChannelMetadata object to update the ChannelMetadataCache
object in the default database to ensure they are in sync.
"""
db_names = get_channel_ids_for_content_database_dir(settings.CONTENT_DATABASE_DIR)
# Delete ChannelMetadataCache objects in default db that are not found in CONTENT_DATABASE_DIR
ChannelMetadataCache.objects.exclude(id__in=db_names).delete()
# sync the ChannelMetadataCache objects in default db with ChannelMetadata objects in CONTENT_DATABASE_DIR
for db_name in db_names:
with using_content_database(db_name):
update_values = ChannelMetadata.objects.values()[0]
ch_metadata_obj, _ = ChannelMetadataCache.objects.update_or_create(
id=db_name,
defaults=update_values,
)
# Records a new last_updated only if channel is brand new. Does not
# handle case where channel's version is upgraded, which is not
# yet supported on UI anyway
if ch_metadata_obj.last_updated is None:
ch_metadata_obj.last_updated = local_now()
ch_metadata_obj.save()
| 5,349,784 |
def flag_element(uid: int, reason: Union[key_duplicate, key_optimization, ReviewDeleteReasons], db_user: User,
is_argument: bool, ui_locales: str, extra_uid=None) -> dict:
"""
    Flags a given argument based on the reason which was sent by the author. This argument will be enqueued
for a review process.
:param uid: Uid of the argument/statement, which should be flagged
:param reason: String which describes the reason
:param db_user: User
:param is_argument: Boolean
:param ui_locales: ui_locales
:param extra_uid: Uid of the argument/statement, which should be flagged
:return: success, info, error
"""
tn = Translator(ui_locales)
argument_uid = uid if is_argument else None
statement_uid = uid if not is_argument else None
# was this already flagged?
flag_status = QueueAdapter(db_user=db_user).element_in_queue(argument_uid=argument_uid,
statement_uid=statement_uid,
premisegroup_uid=None)
if flag_status:
LOG.debug("Already flagged by %s", flag_status)
if flag_status == FlaggedBy.user:
info = tn.get(_.alreadyFlaggedByYou)
else:
info = tn.get(_.alreadyFlaggedByOthers)
return {'success': '', 'info': info}
return __add_flag(reason, argument_uid, statement_uid, extra_uid, db_user, tn)
| 5,349,785 |
def export_bioimageio_model(checkpoint, export_folder, input_data=None,
dependencies=None, name=None,
description=None, authors=None,
tags=None, license=None,
documentation=None, covers=None,
git_repo=None, cite=None,
input_optional_parameters=True,
model_postprocessing=None,
for_deepimagej=False, links=[],
maintainers=None, checkpoint_name="best",
config={}):
"""
"""
assert input_data is not None
# load trainer and model
trainer = get_trainer(checkpoint, name=checkpoint_name, device="cpu")
model, model_kwargs = _get_model(trainer, model_postprocessing)
# create the weights
os.makedirs(export_folder, exist_ok=True)
weight_path = _write_weights(model, export_folder)
# create the test input/output file and derive the tensor kwargs from the model and its kwargs
test_in_paths, test_out_paths = _write_data(input_data, model, trainer, export_folder)
tensor_kwargs = _get_tensor_kwargs(model, model_kwargs, test_in_paths, test_out_paths)
# create the model source file
source = _write_source(model, export_folder)
# create dependency file
_write_depedencies(export_folder, dependencies)
# get the additional kwargs
kwargs = _get_kwargs(trainer, name, description,
authors, tags,
license, documentation,
git_repo, cite,
maintainers,
export_folder, input_optional_parameters)
kwargs.update(tensor_kwargs)
preprocessing = _get_preprocessing(trainer)
# the apps to link with this model, by default ilastik
links.append("ilastik/ilastik")
kwargs.update({"links": links, "config": config})
zip_path = os.path.join(export_folder, f"{name}.zip")
# change the working directory to the export_folder to avoid issues with relative paths
cwd = os.getcwd()
os.chdir(export_folder)
try:
build_spec.build_model(
weight_uri=weight_path,
weight_type="pytorch_state_dict",
test_inputs=[f"./{os.path.split(test_in)[1]}" for test_in in test_in_paths],
test_outputs=[f"./{os.path.split(test_out)[1]}" for test_out in test_out_paths],
root=".",
output_path=f"{name}.zip",
dependencies="environment.yaml",
preprocessing=preprocessing,
architecture=source,
model_kwargs=model_kwargs,
add_deepimagej_config=for_deepimagej,
**kwargs
)
except Exception as e:
raise e
finally:
os.chdir(cwd)
# load and validate the model
rdf_path = os.path.join(export_folder, "rdf.yaml")
_extract_from_zip(zip_path, rdf_path, "rdf.yaml")
val_success = _validate_model(rdf_path)
if val_success:
print(f"The model was successfully exported to '{export_folder}'.")
else:
warn(f"Validation of the bioimageio model exported to '{export_folder}' has failed. " +
"You can use this model, but it will probably yield incorrect results.")
return val_success
| 5,349,786 |
def main(args=None):
"""Entry point for CLI."""
if len(sys.argv) > 1:
if sys.argv[1] in {"-h", "--h", "help", "-help", "--help", "-H"}:
help_text()
sys.exit()
BatchMandelbrot(**dict(arg.split("=") for arg in sys.argv[1:]))
| 5,349,787 |
def load_config_at_path(path: Pathy) -> Dynaconf:
"""Load config at exact path
Args:
path: path to config file
Returns:
dict: config dict
"""
path = pathlib.Path(path)
if path.exists() and path.is_file():
options = DYNACONF_OPTIONS.copy()
options.update({
'root_path': str(path.parent),
'settings_file': str(path.name),
})
return Dynaconf(**options)
else:
raise ConfigurationError(
f'Couldn\'t find ballet.yml config file at {path!s}')
| 5,349,788 |
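# Hedged usage sketch for load_config_at_path() above (the path is
# hypothetical): read settings off the returned Dynaconf object, or handle the
# ConfigurationError raised when the file is missing.
try:
    conf = load_config_at_path("./ballet.yml")
    print(conf.get("project"))
except ConfigurationError:
    print("no ballet.yml at that path")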
def _build_target(action, original_target, plugin, context):
"""Augment dictionary of target attributes for policy engine.
This routine adds to the dictionary attributes belonging to the
"parent" resource of the targeted one.
"""
target = original_target.copy()
resource, _w = _get_resource_and_action(action)
hierarchy_info = attributes.RESOURCE_HIERARCHY_MAP.get(resource, None)
if hierarchy_info and plugin:
# use the 'singular' version of the resource name
parent_resource = hierarchy_info['parent'][:-1]
parent_id = hierarchy_info['identified_by']
f = getattr(plugin, 'get_%s' % parent_resource)
# f *must* exist, if not found it is better to let quantum explode
# Note: we do not use admin context
data = f(context, target[parent_id], fields=['tenant_id'])
target['%s_tenant_id' % parent_resource] = data['tenant_id']
return target
| 5,349,789 |
def test_constructor_raises_value_error_for_invalid_url():
"""Test that ocnstructor raises TypeError for invalid url."""
with pytest.raises(ValueError):
ms.MangaSource('test', 'bad test', '-')
| 5,349,790 |
async def async_setup_entry(opp: OpenPeerPower, entry: ConfigEntry):
"""Configure Gammu state machine."""
device = entry.data[CONF_DEVICE]
config = {"Device": device, "Connection": "at"}
gateway = await create_sms_gateway(config, opp)
if not gateway:
return False
opp.data[DOMAIN][SMS_GATEWAY] = gateway
opp.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
| 5,349,791 |
def grep_response_body(regex_name, regex, owtf_transaction):
"""Grep response body
:param regex_name: Regex name
:type regex_name: `str`
:param regex: Regex
:type regex:
:param owtf_transaction: OWTF transaction
:type owtf_transaction:
:return: Output
:rtype: `dict`
"""
return grep(regex_name, regex, owtf_transaction.get_raw_response_body)
| 5,349,792 |
def link(f, search_range, pos_columns=None, t_column='frame', verbose=True, **kwargs):
"""
link(f, search_range, pos_columns=None, t_column='frame', memory=0,
predictor=None, adaptive_stop=None, adaptive_step=0.95,
neighbor_strategy=None, link_strategy=None, dist_func=None,
to_eucl=None)
Link a DataFrame of coordinates into trajectories.
Parameters
----------
f : DataFrame
The DataFrame must include any number of column(s) for position and a
column of frame numbers. By default, 'x' and 'y' are expected for
position, and 'frame' is expected for frame number. See below for
options to use custom column names.
search_range : float or tuple
the maximum distance features can move between frames,
optionally per dimension
pos_columns : list of str, optional
Default is ['y', 'x'], or ['z', 'y', 'x'] when 'z' is present in f
t_column : str, optional
Default is 'frame'
memory : integer, optional
the maximum number of frames during which a feature can vanish,
then reappear nearby, and be considered the same particle. 0 by default.
predictor : function, optional
Improve performance by guessing where a particle will be in
the next frame.
For examples of how this works, see the "predict" module.
adaptive_stop : float, optional
If not None, when encountering an oversize subnet, retry by progressively
reducing search_range until the subnet is solvable. If search_range
becomes <= adaptive_stop, give up and raise a SubnetOversizeException.
adaptive_step : float, optional
Reduce search_range by multiplying it by this factor.
neighbor_strategy : {'KDTree', 'BTree'}
algorithm used to identify nearby features. Default 'KDTree'.
link_strategy : {'recursive', 'nonrecursive', 'numba', 'hybrid', 'drop', 'auto'}
algorithm used to resolve subnetworks of nearby particles
'auto' uses hybrid (numba+recursive) if available
'drop' causes particles in subnetworks to go unlinked
dist_func : function, optional
a custom distance function that takes two 1D arrays of coordinates and
returns a float. Must be used with the 'BTree' neighbor_strategy.
to_eucl : function, optional
function that transforms a N x ndim array of positions into coordinates
in Euclidean space. Useful for instance to link by Euclidean distance
starting from radial coordinates. If search_range is anisotropic, this
parameter cannot be used.
Returns
-------
DataFrame with added column 'particle' containing trajectory labels.
The t_column (by default: 'frame') will be coerced to integer.
See also
--------
link_iter
Notes
-----
This is an implementation of the Crocker-Grier linking algorithm.
[1]_
References
----------
.. [1] Crocker, J.C., Grier, D.G. http://dx.doi.org/10.1006/jcis.1996.0217
"""
if pos_columns is None:
pos_columns = guess_pos_columns(f)
# copy the dataframe
f = f.copy()
# coerce t_column to integer type
if not np.issubdtype(f[t_column].dtype, np.integer):
f[t_column] = f[t_column].astype(np.integer)
# sort on the t_column
pandas_sort(f, t_column, inplace=True)
coords_iter = coords_from_df(f, pos_columns, t_column)
ids = []
for i, _ids in link_iter(coords_iter, search_range, verbose=verbose, **kwargs):
ids.extend(_ids)
f['particle'] = ids
return f
| 5,349,793 |
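# Hedged usage sketch for link() above (the feature coordinates are made up):
# two features detected in two consecutive frames are joined into two
# trajectories because each moves less than search_range pixels between frames.
import pandas as pd

f = pd.DataFrame({
    "x": [10.0, 50.0, 11.0, 52.0],
    "y": [20.0, 80.0, 21.0, 79.0],
    "frame": [0, 0, 1, 1],
})
linked = link(f, search_range=5)
print(linked[["frame", "x", "y", "particle"]])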
def get_classifier(opt, input_dim):
"""
Return a tuple with the ML classifier to be used and its hyperparameter
options (in dict format)."""
if opt == 'RF':
ml_algo = RandomForestClassifier
hyperparams = {
'n_estimators': [100],
'max_depth': [None, 10, 30, 50, 100],
'min_samples_split': [2, 10, 50, 100],
'random_state': [42],
'n_jobs': [-1],
}
elif opt == 'GBDT':
ml_algo = LGBMClassifier
hyperparams = {
'boosting_type': ['gbdt'],
'n_estimators': [100],
'max_depth': [-1, 10, 30, 50, 100],
'num_leaves': [2, 3, 5, 10, 50],
'learning_rate': [0.001, 0.01, 0.1],
'class_weight': [None, 'balanced'],
'random_state': [42],
'n_jobs': [-1],
}
elif opt == 'LR':
ml_algo = LogisticRegression
hyperparams = {
'solver': ['newton-cg', 'lbfgs', 'saga'],
'C': [0.0001, 0.001, 0.01],
'class_weight': [None, 'balanced'],
'random_state': [42],
'n_jobs': [-1],
}
elif opt == 'GNB':
ml_algo = GaussianNB
hyperparams = {
'var_smoothing': [10**-i for i in range(2, 15)],
}
elif opt == 'SVM':
ml_algo = SVC
hyperparams = {
'probability': [True],
'C': [0.01, 0.1, 1, 10],
'gamma': [0.001, 0.01, 0.1, 1],
}
elif opt == 'NN':
ml_algo = KerasClassifier(get_nn_model(input_dim), epochs=30, verbose=0)
hyperparams = {}
else:
raise ValueError(f'{opt} is an invalid classifier name.')
return ml_algo, hyperparams
| 5,349,794 |
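# Hedged usage sketch for get_classifier() above: pair the returned estimator
# class and hyperparameter grid with scikit-learn's GridSearchCV. The toy data
# set is synthetic; note that the 'NN' option returns an instance rather than a
# class, so this pattern applies to the other options.
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV

X, y = make_classification(n_samples=200, n_features=10, random_state=42)
ml_algo, hyperparams = get_classifier("RF", input_dim=X.shape[1])
search = GridSearchCV(ml_algo(), hyperparams, cv=3, scoring="roc_auc")
search.fit(X, y)
print(search.best_params_)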
def list_spiders_endpoint():
"""It returns a list of spiders available in the SPIDER_SETTINGS dict
.. version 0.4.0:
endpoint returns the spidername and endpoint to run the spider from
"""
spiders = {}
for item in app.config['SPIDER_SETTINGS']:
spiders[item['endpoint']] = 'URL: ' + request.url_root + 'run-spider/' + item['endpoint']
return jsonify(endpoints=spiders)
| 5,349,795 |
def plot3dOnFigure(ax, pixels, colors_rgb,axis_labels=list("RGB"), axis_limits=((0, 255), (0, 255), (0, 255))):
"""Plot pixels in 3D."""
# Set axis limits
ax.set_xlim(*axis_limits[0])
ax.set_ylim(*axis_limits[1])
ax.set_zlim(*axis_limits[2])
# Set axis labels and sizes
ax.tick_params(axis='both', which='major', labelsize=14, pad=8)
ax.set_xlabel(axis_labels[0], fontsize=16, labelpad=16)
ax.set_ylabel(axis_labels[1], fontsize=16, labelpad=16)
ax.set_zlabel(axis_labels[2], fontsize=16, labelpad=16)
# Plot pixel values with colors given in colors_rgb
ax.scatter(
pixels[:, :, 0].ravel(),
pixels[:, :, 1].ravel(),
pixels[:, :, 2].ravel(),
c=colors_rgb.reshape((-1, 3)), edgecolors='none')
return ax
| 5,349,796 |
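# Hedged usage sketch for plot3dOnFigure() above: scatter the RGB values of a
# small synthetic image in 3D colour space (matplotlib >= 3.2 is assumed so
# that projection="3d" works without an explicit Axes3D import).
import numpy as np
import matplotlib.pyplot as plt

pixels = np.random.randint(0, 255, (16, 16, 3)).astype(np.uint8)
colors_rgb = pixels / 255.0  # scatter colours must lie in [0, 1]

fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(111, projection="3d")
plot3dOnFigure(ax, pixels, colors_rgb)
plt.show()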
def _get_slice_predictions(
model: ModelBridge,
param_name: str,
metric_name: str,
generator_runs_dict: TNullableGeneratorRunsDict = None,
relative: bool = False,
density: int = 50,
slice_values: Optional[Dict[str, Any]] = None,
fixed_features: Optional[ObservationFeatures] = None,
trial_index: Optional[int] = None,
) -> SlicePredictions:
"""Computes slice prediction configuration values for a single metric name.
Args:
model: ModelBridge that contains model for predictions
param_name: Name of parameter that will be sliced
metric_name: Name of metric to plot
generator_runs_dict: A dictionary {name: generator run} of generator runs
whose arms will be plotted, if they lie in the slice.
relative: Predictions relative to status quo
density: Number of points along slice to evaluate predictions.
slice_values: A dictionary {name: val} for the fixed values of the
other parameters. If not provided, then the status quo values will
be used if there is a status quo, otherwise the mean of numeric
parameters or the mode of choice parameters. Ignored if
fixed_features is specified.
fixed_features: An ObservationFeatures object containing the values of
features (including non-parameter features like context) to be set
in the slice.
    Returns: Configuration values for AxPlotConfig.
"""
if generator_runs_dict is None:
generator_runs_dict = {}
parameter = get_range_parameter(model, param_name)
grid = get_grid_for_parameter(parameter, density)
plot_data, raw_data, cond_name_to_parameters = get_plot_data(
model=model,
generator_runs_dict=generator_runs_dict,
metric_names={metric_name},
fixed_features=fixed_features,
)
if fixed_features is not None:
slice_values = fixed_features.parameters
else:
fixed_features = ObservationFeatures(parameters={})
fixed_values = get_fixed_values(model, slice_values, trial_index)
prediction_features = []
for x in grid:
predf = deepcopy(fixed_features)
predf.parameters = fixed_values.copy()
predf.parameters[param_name] = x
prediction_features.append(predf)
f, cov = model.predict(prediction_features)
f_plt = f[metric_name]
sd_plt = np.sqrt(cov[metric_name][metric_name])
# pyre-fixme[7]: Expected `Tuple[PlotData, List[Dict[str, Union[float, str]]],
# List[float], np.ndarray, np.ndarray, str, str, bool, Dict[str, Union[None, bool,
# float, int, str]], np.ndarray, bool]` but got `Tuple[PlotData, Dict[str,
# Dict[str, Union[None, bool, float, int, str]]], List[float], List[Dict[str,
# Union[float, str]]], np.ndarray, str, str, bool, Dict[str, Union[None, bool,
# float, int, str]], typing.Any, bool]`.
return (
plot_data,
cond_name_to_parameters,
f_plt,
raw_data,
grid,
metric_name,
param_name,
relative,
fixed_values,
sd_plt,
parameter.log_scale,
)
| 5,349,797 |
def ellipse(a, b, center=(0.0, 0.0), num=50):
"""Return the coordinates of an ellipse.
Parameters
----------
a : float
The semi-major axis of the ellipse.
b : float
The semi-minor axis of the ellipse.
center : 2-tuple of floats, optional
The position of the center of the ellipse;
default: (0.0, 0.0)
num : integer, optional
The number of points on the upper side of the ellipse.
The number includes the leading and trailing edges.
Thus, the total number of points will be 2 * (num - 1);
default: 50.
Returns
-------
x : numpy.ndarray
The x-coordinates of the ellipse as a 1D array of floats.
y: numpy.ndarray
The y-coordinates of the ellipse as a 1D array of floats.
"""
xc, yc = center
x_upper = numpy.linspace(xc + a, xc - a, num=num)
    y_upper = yc + b / a * numpy.sqrt(a**2 - (x_upper - xc)**2)
x_lower = numpy.linspace(xc - a, xc + a, num=num)[1:-1]
    y_lower = yc - b / a * numpy.sqrt(a**2 - (x_lower - xc)**2)
x = numpy.concatenate((x_upper, x_lower))
y = numpy.concatenate((y_upper, y_lower))
return x, y
| 5,349,798 |
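# Hedged usage sketch for ellipse() above: generate and plot a 2:1 ellipse
# centred at the origin (matplotlib is assumed to be available).
from matplotlib import pyplot

x, y = ellipse(a=2.0, b=1.0, num=100)
pyplot.plot(x, y)
pyplot.axis("equal")
pyplot.show()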
def _copy_inputs(test_inputs: List[str], project_path: str) -> bool:
"""Copies all the test files into the test project directory."""
# The files are assumed to reside in the repo's 'data' directory.
print(f'# Copying inputs (from "${{PWD}}/{_DATA_DIRECTORY}")...')
expected_prefix: str = f"{_DATA_DIRECTORY}/"
for test_input in test_inputs:
print(f"# + {test_input}")
if not test_input.startswith(expected_prefix):
print("! FAILURE")
print(f'! Input file {test_input} must start with "{expected_prefix}"')
return False
if not os.path.isfile(test_input):
print("! FAILURE")
print(f"! Missing input file {test_input} ({test_input})")
return False
# Looks OK, copy it
shutil.copy(test_input, project_path)
print("# Copied")
return True
| 5,349,799 |
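# Hedged usage sketch for _copy_inputs() above: the file names are hypothetical
# and must live under the repo's data directory (the _DATA_DIRECTORY constant,
# assumed here to be "data").
ok = _copy_inputs(["data/input-a.csv", "data/input-b.csv"], "/tmp/test-project")
print("copied" if ok else "failed")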