content (string, lengths 22–815k) | id (int64, 0–4.91M)
---|---
def formula(formula: str, formula_param: str, cols: List[str]) -> Aggregation:
""" Create a user defined formula aggregation.
Args:
formula (str): the user defined formula to apply to each group
formula_param (str): the parameter name within the formula
cols (List[str]): the columns to aggregate on, can be renaming expressions, i.e. "new_col = col"
Returns:
an aggregation
"""
return Aggregation(j_aggregation=_JAggregation.AggFormula(formula, formula_param, *cols))
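# Hedged usage sketch (table and column names are hypothetical, assuming the
# Deephaven agg_by API): compute a per-group average of column X.
#
#   agg = formula(formula="avg(each)", formula_param="each", cols=["AvgX = X"])
#   result = source_table.agg_by([agg], by=["Group"])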
| 5,347,900 |
def num_false_positives(df):
"""Total number of false positives (false-alarms)."""
return df.noraw.Type.isin(['FP']).sum()
| 5,347,901 |
def repmot(instr, marker, value, repcase, lenout=None):
"""
Replace a marker with the text representation of an ordinal number.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/repmot_c.html
:param instr: Input string.
:type instr: str
:param marker: Marker to be replaced.
:type marker: str
:param value: Replacement value.
:type value: int
:param repcase: Case of replacement text.
:type repcase: str
:param lenout: Optional available space in output string.
:type lenout: int
:return: Output string.
:rtype: str
"""
if lenout is None:
lenout = ctypes.c_int(len(instr) + len(marker) + 15)
instr = stypes.stringToCharP(instr)
marker = stypes.stringToCharP(marker)
value = ctypes.c_int(value)
repcase = ctypes.c_char(repcase.encode(encoding='UTF-8'))
out = stypes.stringToCharP(lenout)
libspice.repmot_c(instr, marker, value, repcase, lenout, out)
return stypes.toPythonString(out)
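# Hedged usage sketch (strings are illustrative): replace the marker '#' with
# the ordinal text for 5; repcase 'L' requests lowercase, 'U' uppercase,
# 'C' capitalized.
#
#   repmot("The # item of the list", "#", 5, "L")   # -> "The fifth item of the list"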
| 5,347,902 |
def check_for_pending_actions(
user: IdPUser, ticket: SSOLoginData, sso_session: SSOSession
) -> Optional[WerkzeugResponse]:
"""
Check whether there are any pending actions for the current user,
and if there are, redirect to the actions app.
:param user: the authenticating user
:param ticket: SSOLoginData instance
:param sso_session: SSOSession
"""
if current_app.actions_db is None:
current_app.logger.info('This IdP is not initialized for special actions')
return None
# Add any actions that may depend on the login data
add_idp_initiated_actions(user, ticket)
actions_eppn = current_app.actions_db.get_actions(user.eppn, session=ticket.key)
# Check for pending actions
pending_actions = [a for a in actions_eppn if a.result is None]
if not pending_actions:
# eduid_webapp.idp.mfa_action.check_authn_result will have added the credential used
# to the ticket.mfa_action_creds hash - transfer it to the session
update = False
for cred_key, ts in ticket.mfa_action_creds.items():
cred = user.credentials.find(cred_key)
authn = AuthnData(cred_id=cred.key, timestamp=ts)
sso_session.add_authn_credential(authn)
update = True
# eduid_webapp.idp.mfa_action.check_authn_result will have added any external mfa used to
# the ticket.mfa_action_external - transfer it to the session
if ticket.mfa_action_external is not None:
sso_session.external_mfa = ticket.mfa_action_external
update = True
if update:
current_app.sso_sessions.save(sso_session)
current_app.logger.debug(f'There are no pending actions for user {user}')
return None
# Pending actions found, redirect to the actions app
current_app.logger.debug(f'There are pending actions for user {user}: {pending_actions}')
actions_uri = current_app.conf.actions_app_uri
current_app.logger.info(f'Redirecting user {user} to actions app {actions_uri}')
# TODO: The IdP should never _write_ to the actions namespace. Actions should _read_
# the ticket.key from the IdP namespace instead.
actions = Actions(ts=utc_now(), session=ticket.key)
session.actions = actions
return redirect(actions_uri)
| 5,347,903 |
def emulatte_RESOLVE(
thicks, resistivity, freqs, nfreq, spans, height,
vca_index=None, add_noise=False, noise_ave=None, noise_std=None
):
"""
return : ndarray
[
Re(HCP1), Re(HCP2), Re(HCP3), (Re(VCX)), Re(HCP4), Re(HCP5),
Im(HCP1), Im(HCP2), Im(HCP3), (Im(VCX)), Im(HCP4), Im(HCP5),
]
"""
# forward modeling
tc = [0, 0, -height]
hankel_filter = 'werthmuller201'
moment = 1
displacement_current = False
res = np.append(2e14, resistivity)
model = fwd.model(thicks)
model.set_properties(res=res)
fields = []
primary_fields = []
# compute HCP and VCA responses
for i in range(nfreq):
f = np.array([freqs[i]])
rc = [-spans[i], 0, -height]
# with VCA
if (nfreq == 6) and (i == vca_index):
hmdx = fwd.transmitter("HMDx", f, moment=moment)
model.locate(hmdx, tc, rc)
resp = model.emulate(hankel_filter=hankel_filter)
resp = resp['h_x'][0]
primary_field = moment / (2 * np.pi * spans[i] ** 3)
# without VCA
else:
vmd = fwd.transmitter("VMD", f, moment=moment)
model.locate(vmd, tc, rc)
resp = model.emulate(hankel_filter=hankel_filter)
resp = resp['h_z'][0]
primary_field = - moment / (4 * np.pi * spans[i] ** 3)
fields.append(resp)
primary_fields.append(primary_field)
fields = np.array(fields)
primary_fields = np.array(primary_fields)
# convert primary and secondary magnetic fields to ppm
inph_total_field = np.real(fields)
quad_secondary_field = np.imag(fields)
inph_secondary_field = inph_total_field - primary_fields
real_ppm = abs(inph_secondary_field / primary_fields) * 1e6
imag_ppm = abs(quad_secondary_field / primary_fields) * 1e6
# noise level at each Bookpurnong frequency, Christensen (2009)
# add noise
add = np.random.choice([True, False], p=[0.7, 0.3])
if (add_noise & add):
noise = [nlv for nlv in zip(noise_ave, noise_std)]
for index, nlv in enumerate(noise):
inphnoise = np.random.normal(nlv[0], nlv[1])
quadnoise = np.random.normal(nlv[0], nlv[1])
real_ppm[index] = real_ppm[index] + inphnoise
imag_ppm[index] = imag_ppm[index] + quadnoise
resp = np.hstack([real_ppm, imag_ppm])
return resp
| 5,347,904 |
def metadata_dict_chex_mimic(metadata_location):
"""Reads whole csv to find image_name, creates dict with nonempty bboxes
Output:
Bboxes dictionary with key the img_name and values the bboxes themselves."""
bboxes = {}
with open(metadata_location) as f_obj:
reader = csv.reader(f_obj, delimiter=',')
next(reader) # skip header
for line in reader:
_, img_name, x, y, w, h = [int(entry) if entry.isnumeric() else entry for entry in line]
if h != 0 and w != 0: # only append nonempty bboxes
img_name = str(Path(img_name)) # compatibility between different OS
bboxes.setdefault(img_name, []) # these two lines allow safe placing of multiple values for key
bboxes[img_name].append([x, y, w, h])
return bboxes
| 5,347,905 |
def test_clear_players():
"""Test clear_players()."""
app = GameShow(__name__)
app.players['foo'] = 'bar'
app.clear_players()
assert isinstance(app.players, dict)
assert not app.players
| 5,347,906 |
def create_feature_df(cnv_dict, feature_type, labels, csv=False):
"""Creates a pandas Dataframe containing cnvs as rows and features as columns"""
# get features for each CNV
cnv_features = []
if csv:
for chrom in cnv_dict:
for cnv in cnv_dict[chrom]:
if cnv.tads:
cnv_features.append(
np.append([cnv.chr, cnv.start, cnv.end], cnv.annotate(feature_type)))
feature_df = pd.DataFrame(data=cnv_features, columns=[
'CHR', 'START', 'END'] + labels)
else:
for chrom in cnv_dict:
for cnv in cnv_dict[chrom]:
if cnv.tads:
cnv_features.append(cnv.annotate(feature_type))
feature_df = pd.DataFrame(data=cnv_features, columns=labels)
return feature_df
| 5,347,907 |
def test_get_ring_and_fgroup_ortho(input_smiles, bond_smarts, expected_pattern):
"""Ensure that FGs and rings attached to ortho groups are correctly
detected.
The expected values were generated using fragmenter=0.0.7
"""
molecule, _, functional_groups, ring_systems = Fragmenter._prepare_molecule(
smiles_to_molecule(input_smiles, True), default_functional_groups(), False
)
bond = tuple(
get_map_index(molecule, i)
for i in molecule.chemical_environment_matches(bond_smarts)[0]
)
# noinspection PyTypeChecker
atoms, bonds = Fragmenter._get_torsion_quartet(molecule, bond)
atoms, bonds = Fragmenter._get_ring_and_fgroups(
molecule, functional_groups, ring_systems, atoms, bonds
)
actual_atoms = {
map_index
for map_index in atoms
if molecule.atoms[get_atom_index(molecule, map_index)].atomic_number != 1
}
expected_atoms = {
get_map_index(molecule, atom_index)
for match in molecule.chemical_environment_matches(expected_pattern)
for atom_index in match
}
assert actual_atoms == expected_atoms
| 5,347,908 |
def recalculate_order(order: Order, **kwargs):
"""Recalculate and assign total price of order.
Total price is a sum of items in order and order shipping price minus
discount amount.
Voucher discount amount is recalculated by default. To avoid this, pass
update_voucher_discount argument set to False.
"""
# avoid using prefetched order lines
lines = [OrderLine.objects.get(pk=line.pk) for line in order]
prices = [line.get_total() for line in lines]
total = sum(prices, order.shipping_price)
# discount amount can't be greater than order total
order.discount_amount = min(order.discount_amount, total.gross.amount)
if order.discount:
total -= order.discount
order.total = total
order.save(
update_fields=[
"discount_amount",
"total_net_amount",
"total_gross_amount",
"currency",
]
)
recalculate_order_weight(order)
| 5,347,909 |
def simulationcell_from_axes(axes, bconds='p p p', rckc=15.):
""" construct the <simulationcell> xml element from axes
Args:
axes (np.array): lattice vectors
bconds (str, optional): boundary conditions in x, y, z directions.
'p' for periodic, 'n' for non-periodic, defaults to 'p p p'
rckc: long-range cutoff parameter rc*kc, defaults to 15
Return:
etree.Element: representing <simulationcell>
"""
def pad_line(line):  # allow content to be selected by double-clicking
return ' ' + line + ' '
# write primitive lattice vectors
lat_node = etree.Element('parameter', attrib={
'name': 'lattice',
'units': 'bohr'
})
lat_node.text = xml.arr2text(axes)
# write boundary conditions
bconds_node = etree.Element('parameter', {'name': 'bconds'})
bconds_node.text = pad_line(bconds)
# write long-range cutoff parameter
lr_node = etree.Element('parameter', {'name': 'LR_dim_cutoff'})
lr_node.text = pad_line(str(rckc))
# build <simulationcell>
sc_node = etree.Element('simulationcell')
sc_node.append(lat_node)
sc_node.append(bconds_node)
sc_node.append(lr_node)
return sc_node
| 5,347,910 |
def diff_seq(seq1, seq0):
"""Returns the difference of two sequences: seq1 - seq0.
Args:
seq1: The left operand.
seq0: The right operand.
Returns:
The difference of the two sequences.
"""
return (seq1 - seq0) % MAX_SEQ
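# Worked example (assuming, for illustration only, MAX_SEQ = 2**16): if the
# sequence counter wrapped around, the modular subtraction still gives the
# forward distance.
#
#   diff_seq(3, 65534)   # -> 5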
| 5,347,911 |
def method_menu():
"""Method menu items
1. Add a new method
2. Duplicate selected method
3. Remove selected method
------------------------------
4. Clear methods
"""
message_method = "You are about to delete all methods. Do you want to continue?"
method_items = [
menu_item(icon_text("fas fa-plus-circle", "Add a new method"), id="add_method"),
menu_item(
icon_text("fas fa-clone", "Duplicate selection"), id="duplicate_method"
),
menu_item(
icon_text("fas fa-minus-circle", "Remove selection"), id="remove_method"
),
menu_item(divider=True),
menu_item("Clear all methods", id="clear-methods"),
menu_item(divider=True),
menu_item("Measurement", header=True),
menu_item(
dcc.Upload(
icon_text("fas fa-paperclip", "Add to selection"),
id="add-measurement-for-method",
)
),
menu_item(
icon_text("fas fa-times-circle", "Remove from selection"),
id="remove-measurement-from-method",
),
dcc.ConfirmDialog(id="confirm-clear-methods", message=message_method),
]
# Callbacks for the add, duplicate, and remove methods
_ = [
app.clientside_callback(
f"""function() {{
document.getElementById("{t}-method-button").click();
throw window.dash_clientside.PreventUpdate;
}}""",
Output(f"{t}-method-button", "n_clicks"),
Input(f"{t}_method", "n_clicks"),
prevent_initial_call=True,
)
for t in TARGET
]
# Callbacks for the clear all methods
app.clientside_callback(
"""function(n) {
if (n == null) throw window.dash_clientside.PreventUpdate;
return true;
}""",
Output("confirm-clear-methods", "displayed"),
Input("clear-methods", "n_clicks"),
prevent_initial_call=True,
)
return create_submenu(label="Method", children=method_items, right=False)
| 5,347,912 |
def shapeanalysis_OuterWire(*args):
"""
* Returns the outer wire on the face <Face>. This is a replacement for the method BRepTools::OuterWire, to be used while that method works badly. Returns the first wire oriented as outer according to FClass2d_Classifier. If none, the last wire is returned.
:param face:
:type face: TopoDS_Face &
:rtype: TopoDS_Wire
"""
return _ShapeAnalysis.shapeanalysis_OuterWire(*args)
| 5,347,913 |
def is_numeric(val: str) -> bool:
"""Check whether an unparsed string is a numeric value"""
if val in MISSING_VALUES:
return True
try:
float(val)
except Exception:
return False
else:
return True
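# Quick illustration: numeric strings and anything listed in the module-level
# MISSING_VALUES constant return True, everything else returns False.
#
#   is_numeric("3.14")   # -> True
#   is_numeric("abc")    # -> False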
| 5,347,914 |
def safeRun( commandArgs ):
"""
Runs the given command and reads the output
"""
errTmp = tempfile.mkstemp()
errStream = os.fdopen( errTmp[0] )
process = Popen( commandArgs, stdin = PIPE,
stdout = PIPE, stderr = errStream )
process.stdin.close()
processStdout = process.stdout.read()
process.stdout.close()
errStream.seek( 0 )
err = errStream.read()
errStream.close()
os.unlink( errTmp[1] )
process.wait()
# 'grep' return codes:
# 0 - OK, lines found
# 1 - OK, no lines found
# 2 - Error occurred
if process.returncode != 0 and commandArgs[0] != "grep":
raise Exception( "Error in '%s' invocation: %s" % \
(commandArgs[0], err) )
return processStdout
| 5,347,915 |
def python_cat(infiles: typing.List[str],
outfile: str,
remove: bool=False,
error_file: typing.TextIO=sys.stderr,
verbose: bool=False):
"""
This is very plain Python, except for the os.remove() call.
"""
start_time: float = 0
if verbose:
print('Using python_cat.', file=error_file, flush=True)
start_time = time.time()
filename: str
with open(outfile, "wt") as ofile:
for filename in infiles:
if verbose:
print('Copying {}.'.format(filename), file=error_file, flush=True)
with open(filename, "rt") as ifile:
line: str
for line in ifile:
ofile.write(line)
if remove:
for filename in infiles:
if verbose:
print('Removing {}.'.format(filename), file=error_file, flush=True)
os.remove(filename)
if verbose:
print('Done with python_cat.', file=error_file, flush=True)
print('Time taken : {}s'.format(time.time() - start_time), file=error_file, flush=True)
| 5,347,916 |
def test_client_exists():
"""Pass."""
assert inspect.isclass(axonapi.connect.Connect)
| 5,347,917 |
def commandline_options():
# TODO: complete docstring
"""
:return:
"""
# Instantiate and add parser options:
parser = OptionParser(usage="%prog [OPTIONS] FILENAME",
version="%prog 1.0")
parser.add_option("-l",
"--largest-per-folder",
action="store_true",
help="Copies largest image for each folder and sub-folder.")
parser.add_option("-s",
"--smallest-per-folder",
action="store_true",
help="Copies smallest image for each folder and sub-folder.")
# TODO: Add/override default help option.
# Parse options:
(options, args) = parser.parse_args()
# Call main function:
main(args[0], args[1], options)
| 5,347,918 |
def add_command_line_options(
parser: Union[argparse.ArgumentParser, optparse.OptionParser],
transport_argument: bool = False,
) -> None:
"""Add command line options for all available transport layer classes."""
if transport_argument:
known_transports = list(get_known_transports())
if isinstance(parser, argparse.ArgumentParser):
parser.add_argument(
"-t",
"--transport",
dest="transport",
metavar="TRN",
default=default_transport,
help="Transport mechanism. Known mechanisms: "
+ ", ".join(known_transports)
+ f" (default: {default_transport})",
choices=known_transports,
)
else:
parser.add_option(
"-t",
"--transport",
dest="transport",
metavar="TRN",
default=default_transport,
help="Transport mechanism. Known mechanisms: "
+ ", ".join(known_transports)
+ " (default: %default)",
type="choice",
choices=known_transports,
)
for transport in get_known_transports().values():
transport().add_command_line_options(parser)
| 5,347,919 |
def test_init_no_params():
"""Test the creator by passing no parameters. Should cause a TypeError exception"""
success = False
try:
testPlane = ComplexPlane()
except TypeError:
"""test passes"""
success = True
message = 'Creator should have generated a TypeError exception, as no required parameters were passed'
assert success, message
| 5,347,920 |
def expand_options(sent, as_strings=True):
"""
['1', '(', '2', '|', '3', ')'] -> [['1', '2'], ['1', '3']]
For example:
Will it (rain|pour) (today|tomorrow|)?
---->
Will it rain today?
Will it rain tomorrow?
Will it rain?
Will it pour today?
Will it pour tomorrow?
Will it pour?
Args:
sent (list<str>): List of tokens in the sentence
Returns:
list<list<str>>: Multiple possible sentences from original
"""
return expand_parentheses(sent, as_strings)
| 5,347,921 |
def gcd_recursive_by_divrem(m, n):
"""
Computes the greatest common divisor of two numbers by recursively getting remainder from
division.
:param int m: First number.
:param int n: Second number.
:returns: GCD as a number.
"""
if n == 0:
return m
return gcd_recursive_by_divrem(n, m % n)
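# Worked example of the recursion: gcd(48, 36) -> gcd(36, 12) -> gcd(12, 0) -> 12.
#
#   gcd_recursive_by_divrem(48, 36)   # -> 12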
| 5,347,922 |
def repeat(atoms, coord):
"""
Repeat atoms (:class:`AtomArray` or :class:`AtomArrayStack`)
multiple times in the same model with different coordinates.
Parameters
----------
atoms : AtomArray, shape=(n,) or AtomArrayStack, shape=(m,n)
The atoms to be repeated.
coord : ndarray, dtype=float, shape=(k,n,3) or shape=(k,m,n,3)
The coordinates to be used for the repeated atoms.
The length of the first dimension determines the number of repeats.
If `atoms` is an :class:`AtomArray`, 3 dimensions are required,
otherwise 4 dimensions.
Returns
-------
repeated: AtomArray, shape=(n*k,) or AtomArrayStack, shape=(m,n*k)
The repeated atoms.
Whether an :class:`AtomArray` or an :class:`AtomArrayStack` is
returned depends on the input `atoms`.
Examples
--------
>>> atoms = array([
... Atom([1,2,3], res_id=1, atom_name="N"),
... Atom([4,5,6], res_id=1, atom_name="CA"),
... Atom([7,8,9], res_id=1, atom_name="C")
... ])
>>> print(atoms)
1 N 1.000 2.000 3.000
1 CA 4.000 5.000 6.000
1 C 7.000 8.000 9.000
>>> repeat_coord = np.array([
... [[0,0,0], [1,1,1], [2,2,2]],
... [[3,3,3], [4,4,4], [5,5,5]]
... ])
>>> print(repeat(atoms, repeat_coord))
1 N 0.000 0.000 0.000
1 CA 1.000 1.000 1.000
1 C 2.000 2.000 2.000
1 N 3.000 3.000 3.000
1 CA 4.000 4.000 4.000
1 C 5.000 5.000 5.000
"""
if isinstance(atoms, AtomArray) and coord.ndim != 3:
raise ValueError(
f"Expected 3 dimensions for the coordinate array, got {coord.ndim}"
)
elif isinstance(atoms, AtomArrayStack) and coord.ndim != 4:
raise ValueError(
f"Expected 4 dimensions for the coordinate array, got {coord.ndim}"
)
repetitions = len(coord)
orig_length = atoms.array_length()
new_length = orig_length * repetitions
if isinstance(atoms, AtomArray):
if coord.ndim != 3:
raise ValueError(
f"Expected 3 dimensions for the coordinate array, "
f"but got {coord.ndim}"
)
repeated = AtomArray(new_length)
repeated.coord = coord.reshape((new_length, 3))
elif isinstance(atoms, AtomArrayStack):
if coord.ndim != 4:
raise ValueError(
f"Expected 4 dimensions for the coordinate array, "
f"but got {coord.ndim}"
)
repeated = AtomArrayStack(atoms.stack_depth(), new_length)
repeated.coord = coord.reshape((atoms.stack_depth(), new_length, 3))
else:
raise TypeError(
f"Expected 'AtomArray' or 'AtomArrayStack', "
f"but got {type(atoms).__name__}"
)
for category in atoms.get_annotation_categories():
annot = np.tile(atoms.get_annotation(category), repetitions)
repeated.set_annotation(category, annot)
if atoms.bonds is not None:
bonds = atoms.bonds
for _ in range(repetitions-1):
bonds += atoms.bonds
repeated.bonds = bonds
if atoms.box is not None:
repeated.box = atoms.box.copy()
return repeated
| 5,347,923 |
def get_mzi_delta_length(m, neff=2.4, wavelength=1.55):
""" m*wavelength = neff * delta_length """
return m * wavelength / neff
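# Worked example with the defaults (neff=2.4, wavelength=1.55, assumed to be
# in micrometres):
#
#   get_mzi_delta_length(m=15)   # -> 15 * 1.55 / 2.4 = 9.6875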
| 5,347,924 |
def yagzag2radec(yag, zag, q):
"""
Given ACA Y-ang, Z-ang and pointing quaternion determine RA, Dec. The
input ``yag`` and ``zag`` values can be 1-d arrays in which case the output
``ra`` and ``dec`` will be corresponding arrays of the same length.
:param yag: ACA Y angle (degrees)
:param zag: ACA Z angle (degrees)
:param q: Quaternion
:rtype: list ra, dec (degrees)
"""
try:
one = np.ones(len(yag))
except TypeError:
one = 1.0
d_aca = np.array([one, tan(radians(yag)), tan(radians(zag))])
d_aca *= 1.0 / np.sum(d_aca**2)
eci = np.dot(q.transform, d_aca)
return eci2radec(eci)
| 5,347,925 |
def load_image(
path: str,
color_mode="rgb",
target_size: Union[None, ImageSize] = None,
normalize=False,
) -> np.ndarray:
"""Load an RGB image from the given path, optionally resizing it.
:param path: Path to the image
:param color_mode: "rgb", "bgr" or "grayscale"
:param target_size: Target size of the image (width, height).
:param normalize: Normalize values to [0.0, 1.0]
"""
from tensorflow.keras.preprocessing.image import load_img
pil_color_mode = color_mode
if pil_color_mode == "bgr":
pil_color_mode = "rgb"
pil = load_img(path, color_mode=pil_color_mode, target_size=target_size)
image = np.array(pil)
if color_mode == "bgr":
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if normalize:
image = image / 255.0
return image
| 5,347,926 |
def test_check_for_runs():
"""
Test check_for_runs
"""
response = {
"_items": [
{
"non_static_inputs": ["INPUT_1"],
"assay_name": "assay 1",
"workflow_location": "https://github.com/CIMAC-CIDC/proto",
"_id": "123",
}
]
}
with patch(
"framework.tasks.snakemake_tasks.EVE.get", return_value=FakeFetcher(response)
):
record_response, assay_dict = check_for_runs("token")
if not "123" in assay_dict:
raise AssertionError("Assay dictionary malformed")
assay = assay_dict["123"]
if (
not "non_static_inputs" in assay
or not "assay_name" in assay
or not "workflow_location" in assay
):
raise AssertionError
| 5,347,927 |
def deit_base_patch16_384():
"""
DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
cfg = ViTConfig(
name="deit_base_patch16_384",
url="",
input_size=(384, 384),
patch_size=16,
embed_dim=768,
nb_blocks=12,
nb_heads=12,
crop_pct=1.0,
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
)
return ViT, cfg
| 5,347,928 |
def update_output(
event_id,
event_dividers,
light_dividers,
filename,
geometry,
do_plot_tracks,
do_plot_opids,
figure,
):
"""Update 3D event display end event id"""
fig = go.Figure(figure)
if event_dividers is None:
return no_update, no_update, no_update, no_update, no_update
try:
fig.data = []
fig.add_traces(
draw_event(
filename,
GEOMETRIES[geometry],
event_dividers,
light_dividers,
event_id,
do_plot_tracks,
do_plot_opids,
)
)
except IndexError as err:
print("IndexError", err)
return fig, {"display": "none"}, True, no_update, no_update
except KeyError as err:
print("KeyError", err)
return fig, {"display": "none"}, True, "Select a geometry first", no_update
url_filename = filename.replace(DOCKER_MOUNTED_FOLDER, "")
return (
fig,
{"height": "85vh"},
False,
no_update,
f"https://larnddisplay.lbl.gov/{url_filename}?geom={geometry}#{event_id}",
)
| 5,347,929 |
def get_containerports(marathon_url, app_id):
"""
Get containerports if we have portmapping.
marathon_url : [string] the URL of the marathon service
app_id : [string] ID of the running marathon app
Method : GET
Return : list of ports
"""
api_endpoint = '/v2/apps/'
headers = {'Content-Type': 'application/json'}
url = marathon_url + api_endpoint + app_id
print(url)
r = requests.get(url, headers=headers)
print(r.status_code)
containerports = []
for portmapping in r.json()['app']['container']['docker']['portMappings']:
containerports.append(portmapping['containerPort'])
return containerports
| 5,347,930 |
def backward_algorithm(O, HMM_model):
"""HMM Backward Algorithm.
Args:
O: (o1, o2, ..., oT), observations
HMM_model: (pi, A, B), (init state prob, transition prob, emitting prob)
Return:
prob: the probability of HMM_model generating O.
"""
pi, A, B = HMM_model
T = len(O)
N = len(pi)
prob = 0.0
# Begin Assignment
# backward probability matrix
betas = np.zeros((N, T))
for i in range(N):
betas[i][0] = 1
for t in range(1, T):
for i in range(N):
for j in range(N):
betas[i][t] += A[i][j]*B[j][O[T-t]]*betas[j][t-1]
for i in range(N):
prob += pi[i]*B[i][O[0]]*betas[i][-1]
# End Assignment
return prob
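# Hedged usage sketch with a toy 2-state HMM (all numbers are illustrative):
#
#   pi = np.array([0.6, 0.4])
#   A = np.array([[0.7, 0.3], [0.4, 0.6]])
#   B = np.array([[0.5, 0.5], [0.1, 0.9]])
#   backward_algorithm((0, 1, 0), (pi, A, B))   # -> P(O | model)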
| 5,347,931 |
def test_risky_user_get_command(requests_mock) -> None:
"""
Scenario: Get Risky User.
Given:
- User has provided valid credentials.
- Headers and JWT token have been set.
When:
- risky_user_get_command is called.
Then:
- Ensure outputs prefix is correct.
- Ensure outputs key fields is correct.
- Ensure user ID is correct.
"""
from AzureRiskyUsers import risky_user_get_command
mock_response = load_mock_response('get_risky_user.json')
requests_mock.post(ACCESS_TOKEN_REQUEST_URL, json={})
requests_mock.get(f'{BASE_URL}identityProtection/riskyUsers/1', json=mock_response)
result = risky_user_get_command(mock_client(), args={'id': '1'})
assert result.outputs_prefix == 'AzureRiskyUsers.RiskyUser'
assert result.outputs_key_field == 'id'
assert result.raw_response.get('id') == '1'
| 5,347,932 |
def scan_codes(code_type, image):
"""Get *code_type* codes from a PIL Image
*code_type* can be any of zbar supported code type [#zbar_symbologies]_:
- **EAN/UPC**: EAN-13 (`ean13`), UPC-A (`upca`), EAN-8 (`ean8`) and UPC-E (`upce`)
- **Linear barcode**: Code 128 (`code128`), Code 93 (`code93`), Code 39 (`code39`), Interleaved 2 of 5 (`i25`),
DataBar (`databar`) and DataBar Expanded (`databar-exp`)
- **2D**: QR Code (`qrcode`)
- **Undocumented**: `ean5`, `ean2`, `composite`, `isbn13`, `isbn10`, `codabar`, `pdf417`
.. [#zbar_symbologies] http://zbar.sourceforge.net/iphone/userguide/symbologies.html
Args:
code_type (str): Code type to search
image (PIL.Image.Image): Image to scan
returns:
A list of *code_type* code values or None
"""
assert Image.isImageType(image)
converted_image = image.convert('L') # Convert image to gray scale (8 bits per pixel).
raw = converted_image.tobytes() # Get image data.
width, height = converted_image.size # Get image size.
return zbar_code_scanner('{0}.enable'.format(code_type).encode(), raw, width, height)
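# Hedged usage sketch (file name is hypothetical): decode QR codes from a PIL image.
#
#   from PIL import Image
#   codes = scan_codes('qrcode', Image.open('ticket.png'))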
| 5,347,933 |
def validate_user_defined_info(user_defined_info):
"""
Validate user defined info, delete the item if its key is in lineage.
Args:
user_defined_info (dict): The user defined info.
Raises:
LineageParamTypeError: If the type of parameters is invalid.
LineageParamValueError: If user defined keys have been defined in lineage.
"""
if not isinstance(user_defined_info, dict):
log.error("Invalid user defined info. It should be a dict.")
raise LineageParamTypeError("Invalid user defined info. It should be dict.")
for key, value in user_defined_info.items():
if not isinstance(key, str):
error_msg = "Dict key type {} is not supported in user defined info." \
"Only str is permitted now.".format(type(key))
log.error(error_msg)
raise LineageParamTypeError(error_msg)
if not isinstance(value, (int, str, float)):
error_msg = "Dict value type {} is not supported in user defined info." \
"Only str, int and float are permitted now.".format(type(value))
log.error(error_msg)
raise LineageParamTypeError(error_msg)
field_map = set(FIELD_MAPPING.keys())
user_defined_keys = set(user_defined_info.keys())
insertion = list(field_map & user_defined_keys)
if insertion:
for key in insertion:
user_defined_info.pop(key)
raise LineageParamValueError("There are some keys have defined in lineage. "
"Duplicated key(s): %s. " % insertion)
| 5,347,934 |
def data_write(fname, keys, values, path='data', exists=0):
"""DEPRECATED; USE :func:`save`.
Parameters
----------
fname : str
File name.
keys : str or list of str
Name(s) of the values to store in file.
values : anything
Values to store with keys in file.
path : str, optional
Absolute or relative path where to store. Default is 'data'.
exists : int, optional
Flag how to act if a shelve with the given name already exists:
- < 0: Delete existing shelve.
- 0 (default): Do nothing (print that it exists).
- > 0: Append to existing shelve.
"""
# Issue warning
mesg = ("\n The use of `data_write` and `data_read` is deprecated.\n"
" These function will be removed before v1.0.\n"
" Use `emg3d.save` and `emg3d.load` instead.")
warnings.warn(mesg, DeprecationWarning)
# Get absolute path, create if it doesn't exist.
path = os.path.abspath(path)
os.makedirs(path, exist_ok=True)
full_path = os.path.join(path, fname)
# Check if shelve exists.
bak_exists = os.path.isfile(full_path+".bak")
dat_exists = os.path.isfile(full_path+".dat")
dir_exists = os.path.isfile(full_path+".dir")
if any([bak_exists, dat_exists, dir_exists]):
print(" > File exists, ", end="")
if exists == 0:
print("NOT SAVING THE DATA.")
return
elif exists > 0:
print("appending to it", end='')
else:
print("overwriting it.")
for extension in ["dat", "bak", "dir"]:
try:
os.remove(full_path+"."+extension)
except FileNotFoundError:
pass
# Cast into list.
if not isinstance(keys, (list, tuple)):
keys = [keys, ]
values = [values, ]
# Shelve it.
with shelve.open(full_path) as db:
# If appending, print the keys which will be overwritten.
if exists > 0:
over = [j for j in keys if any(i == j for i in list(db.keys()))]
if len(over) > 0:
print(" (overwriting existing key(s) "+f"{over}"[1:-1]+").")
else:
print(".")
# Writing it to the shelve.
for i, key in enumerate(keys):
# If the parameter is a TensorMesh instance, we set the volume
# None. This saves space, and it will simply be reconstructed if
# required.
if type(values[i]).__name__ == 'TensorMesh':
if hasattr(values[i], '_vol'):
delattr(values[i], '_vol')
db[key] = values[i]
| 5,347,935 |
def add_immediate_alert(context: dict, severity: str,
message: Union[str, dict], title: Optional[str] = None,
dismissable: bool = True, safe: bool = False) -> None:
"""Add an alert for immediate display."""
if safe and isinstance(message, str):
message = Markup(message)
data = {'message': message, 'title': title, 'dismissable': dismissable}
if 'immediate_alerts' not in context:
context['immediate_alerts'] = []
context['immediate_alerts'].append((severity, data))
| 5,347,936 |
def get_summoner_masteries(summoner_ids):
"""
https://developer.riotgames.com/api/methods#!/1017/3450
Args:
summoner_ids (int | list<int>): the summoner ID(s) to get mastery pages for
Returns:
dict<str, MasteryPages>: the requested summoners' mastery pages
"""
# Can only have 40 summoners max if it's a list
if isinstance(summoner_ids, list) and len(summoner_ids) > 40:
raise ValueError("Can only get masteries for up to 40 summoners at once.")
id_string = ",".join(str(x) for x in summoner_ids) if isinstance(summoner_ids, list) else str(summoner_ids)
# Get JSON response
request = "{version}/summoner/{ids}/masteries".format(version=cassiopeia.dto.requests.api_versions["summoner"], ids=id_string)
response = cassiopeia.dto.requests.get(request)
# Convert response to Dto type
for id_, masteries in response.items():
response[id_] = cassiopeia.type.dto.summoner.MasteryPages(masteries)
return response
| 5,347,937 |
def _patch_tornado():
"""
If tornado is imported before nest_asyncio, make tornado aware of
the pure-Python asyncio Future.
"""
if 'tornado' in sys.modules:
import tornado.concurrent as tc
tc.Future = asyncio.Future
if asyncio.Future not in tc.FUTURES:
tc.FUTURES += (asyncio.Future,)
| 5,347,938 |
def test_query_my_project(db, clients, user_group, allowed):
"""
Test if users may see a project that is in one of their studies
"""
client = clients.get(user_group)
project = ProjectFactory()
user = User.objects.get(groups__name=user_group)
study = StudyFactory()
project.study = study
project.save()
user.save()
Membership(collaborator=user, study=study).save()
variables = {"id": to_global_id("ProjectNode", project.project_id)}
resp = client.post(
"/graphql",
data={"query": PROJECT, "variables": variables},
content_type="application/json",
)
if allowed:
assert resp.json()["data"]["project"]["name"] == project.name
else:
assert resp.json()["errors"][0]["message"] == "Not allowed"
| 5,347,939 |
def make_pod_spec(
name,
image_spec,
image_pull_policy,
image_pull_secret,
port,
cmd,
node_selector,
run_as_uid,
fs_gid,
env,
working_dir,
volumes,
volume_mounts,
labels,
cpu_limit,
cpu_guarantee,
mem_limit,
mem_guarantee,
lifecycle_hooks,
init_containers,
):
"""
Make a k8s pod specification for running a user notebook.
Parameters:
- name:
Name of pod. Must be unique within the namespace the object is
going to be created in. Must be a valid DNS label.
- image_spec:
Image specification - usually a image name and tag in the form
of image_name:tag. Same thing you would use with docker commandline
arguments
- image_pull_policy:
Image pull policy - one of 'Always', 'IfNotPresent' or 'Never'. Decides
when kubernetes will check for a newer version of image and pull it when
running a pod.
- image_pull_secret:
Image pull secret - Default is None -- set to your secret name to pull
from private docker registry.
- port:
Port the notebook server is going to be listening on
- cmd:
The command used to execute the singleuser server.
- node_selector:
Dictionary Selector to match nodes where to launch the Pods
- run_as_uid:
The UID used to run single-user pods. The default is to run as the user
specified in the Dockerfile, if this is set to None.
- fs_gid
The gid that will own any fresh volumes mounted into this pod, if using
volume types that support this (such as GCE). This should be a group that
the uid the process is running as should be a member of, so that it can
read / write to the volumes mounted.
- env:
Dictionary of environment variables.
- volumes:
List of dictionaries containing the volumes of various types this pod
will be using. See k8s documentation about volumes on how to specify
these
- volume_mounts:
List of dictionaries mapping paths in the container to the volumes
(specified in volumes) that should be mounted on them. See the k8s
documentation for more details
- working_dir:
String specifying the working directory for the notebook container
- labels:
Labels to add to the spawned pod.
- cpu_limit:
Float specifying the max number of CPU cores the user's pod is
allowed to use.
- cpu_guarantee:
Float specifying the max number of CPU cores the user's pod is
guaranteed to have access to, by the scheduler.
- mem_limit:
String specifying the max amount of RAM the user's pod is allowed
to use. String instead of float/int since common suffixes are allowed
- mem_guarantee:
String specifying the max amount of RAM the user's pod is guaranteed
to have access to. String instead of float/int since common suffixes
are allowed
- lifecycle_hooks:
Dictionary of lifecycle hooks
- init_containers:
List of initialization containers belonging to the pod.
"""
api_client = ApiClient()
pod = V1Pod()
pod.kind = "Pod"
pod.api_version = "v1"
pod.metadata = V1ObjectMeta()
pod.metadata.name = name
pod.metadata.labels = labels.copy()
pod.spec = V1PodSpec()
security_context = V1PodSecurityContext()
if fs_gid is not None:
security_context.fs_group = int(fs_gid)
if run_as_uid is not None:
security_context.run_as_user = int(run_as_uid)
pod.spec.security_context = security_context
if image_pull_secret is not None:
pod.spec.image_pull_secrets = []
image_secret = V1LocalObjectReference()
image_secret.name = image_pull_secret
pod.spec.image_pull_secrets.append(image_secret)
if node_selector:
pod.spec.node_selector = node_selector
pod.spec.containers = []
notebook_container = V1Container()
notebook_container.name = "notebook"
notebook_container.image = image_spec
notebook_container.working_dir = working_dir
notebook_container.ports = []
port_ = V1ContainerPort()
port_.name = "notebook-port"
port_.container_port = port
notebook_container.ports.append(port_)
notebook_container.env = [V1EnvVar(k, v) for k, v in env.items()]
notebook_container.args = cmd
notebook_container.image_pull_policy = image_pull_policy
notebook_container.lifecycle = lifecycle_hooks
notebook_container.resources = V1ResourceRequirements()
notebook_container.resources.requests = {}
if cpu_guarantee:
notebook_container.resources.requests['cpu'] = cpu_guarantee
if mem_guarantee:
notebook_container.resources.requests['memory'] = mem_guarantee
notebook_container.resources.limits = {}
if cpu_limit:
notebook_container.resources.limits['cpu'] = cpu_limit
if mem_limit:
notebook_container.resources.limits['memory'] = mem_limit
notebook_container.volume_mounts = volume_mounts
pod.spec.containers.append(notebook_container)
pod.spec.init_containers = init_containers
pod.spec.volumes = volumes
return api_client.sanitize_for_serialization(pod)
| 5,347,940 |
def preprocess(text, remove_punct=False, remove_num=True):
"""
preprocess text into clean text for tokenization
"""
# 1. normalize
text = normalize_unicode(text)
# 2. to lower
text = text.lower()
# 3. space
text = spacing_punctuation(text)
text = spacing_number(text)
# (optional)
if remove_punct:
text = remove_punctuation(text)
# 4. de-contract
text = decontracted(text)
# 5. handle number
if remove_num:
text = remove_number(text)
else:
text = clean_number(text)
# 6. remove space
text = remove_space(text)
return text
| 5,347,941 |
def verify_ptp_calibration_states(
device, states, domain, max_time=15, check_interval=5
):
""" Verify ptp parent values in show ptp parent command
Args:
device (`obj`): Device object
states (str): PTP calibration state
domain (str): PTP domain
max_time (int): Maximum wait time for the trigger,
in second. Default: 15
check_interval (int): Wait time between iterations when looping is needed,
in second. Default: 5
Returns:
True
False
"""
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
out = None
try:
out = device.parse("show ptp brief | ex FA")
except SchemaEmptyParserError:
pass
if out:
result = True
else:
result = False
if result:
return True
timeout.sleep()
return False
| 5,347,942 |
def test_padding():
"""
Test for checking the padding of a single
linear constraint set
"""
C_0 = np.eye(3)
C_out = cons.pad_constraints(C_0, 4, 9)
assert C_out.shape == (3, 9)
with pytest.raises(cons.PaddingError):
C_out = cons.pad_constraints(C_0, 4, 6)
| 5,347,943 |
def get_auth_use_case():
"""Get use case instance."""
return auth_use_case
| 5,347,944 |
def remove_border(curset, direct):
"""
Remove the cells on a given border.
"""
border = get_border(curset, direct)
curset.difference_update(border)
| 5,347,945 |
def convert_file_link(file):
"""Reads the content of all files matching the file specification (removing
YAML metadata blocks as required) for insertion into the calling file.
Optionally add a separator between each file and/or add a prefix to each
line of the included files.
Args:
file (Match): A Match object corresponding to the file specification
Returns:
str: the concatentated contents of the file specification
"""
incl_file = str(file.group(1))
file_sep = ''
line_prefix = ''
options = ''
# get file separator, if any
if '|' in incl_file:
incl_file, *options = incl_file.split('|')
if len(options) == 1:
file_sep = options[0]
if len(options) == 2:
file_sep = options[0]
line_prefix = options[1]
# create list of files
incl_list = sorted(glob.glob(os.path.normpath(os.path.join(os.getcwd(), incl_file))))
incl_contents = ''
for i, file in enumerate(incl_list):
with open(file, 'r', encoding='utf8') as input_file:
file_contents = input_file.read()
# TODO check contents for file include regex to do nested includes?
# remove YAML header from file
file_metadata, file_contents = split_doc(file_contents)
# process prefix and suffix in included metadata
if file_metadata:
file_contents = file_metadata.get('prefix', '') + file_contents + file_metadata.get('suffix', '')
if not file_contents:
continue
# replace ?{value}
if config['replace']:
file_contents = MetadataReplace(file_contents).safe_substitute(file_metadata)
# add prefix if required
if line_prefix:
file_contents = line_prefix + re.sub('\n', '\n' + line_prefix, file_contents)
if i < len(incl_list) - 1:
file_contents += '\n\n' + file_sep
incl_contents += file_contents + '\n\n'
# return contents of all matched files
return incl_contents
| 5,347,946 |
def example_extract_reference_primitives():
"""
Extract primitives from VertexGroupReference (.ply) file.
"""
vertex_group_reference = VertexGroupReference(filepath=dir_tests / 'test_data' / 'test_mesh.ply', num_samples=10000)
# save extracted primitives to both a Vertex Group (.vg) file and a binary Vertex group (.bvg) file
vertex_group_reference.save_vg(dir_tests / 'test_output' / 'reference.vg')
vertex_group_reference.save_bvg(dir_tests / 'test_output' / 'reference.bvg')
| 5,347,947 |
def pagerank(G, alpha=0.85, personalization=None,
max_iter=100, tol=1.0e-6, nstart=None, weight='weight',
dangling=None):
"""Returns the PageRank of the nodes in the graph.
PageRank computes a ranking of the nodes in the graph G based on
the structure of the incoming links. It was originally designed as
an algorithm to rank web pages.
Parameters
----------
G : graph
A NetworkX graph. Undirected graphs will be converted to a directed
graph with two directed edges for each undirected edge.
alpha : float, optional
Damping parameter for PageRank, default=0.85.
personalization: dict, optional
The "personalization vector" consisting of a dictionary with a
key some subset of graph nodes and personalization value each of those.
At least one personalization value must be non-zero.
If not specified, a node's personalization value will be zero.
By default, a uniform distribution is used.
max_iter : integer, optional
Maximum number of iterations in power method eigenvalue solver.
tol : float, optional
Error tolerance used to check convergence in power method solver.
nstart : dictionary, optional
Starting value of PageRank iteration for each node.
weight : key, optional
Edge data key to use as weight. If None weights are set to 1.
dangling: dict, optional
The outedges to be assigned to any "dangling" nodes, i.e., nodes without
any outedges. The dict key is the node the outedge points to and the dict
value is the weight of that outedge. By default, dangling nodes are given
outedges according to the personalization vector (uniform if not
specified). This must be selected to result in an irreducible transition
matrix (see notes under google_matrix). It may be common to have the
dangling dict to be the same as the personalization dict.
Returns
-------
pagerank : dictionary
Dictionary of nodes with PageRank as value
Examples
--------
>>> G = nx.DiGraph(nx.path_graph(4))
>>> pr = nx.pagerank(G, alpha=0.9)
Notes
-----
The eigenvector calculation is done by the power iteration method
and has no guarantee of convergence. The iteration will stop after
an error tolerance of ``len(G) * tol`` has been reached. If the
number of iterations exceed `max_iter`, a
:exc:`networkx.exception.PowerIterationFailedConvergence` exception
is raised.
The PageRank algorithm was designed for directed graphs but this
algorithm does not check if the input graph is directed and will
execute on undirected graphs by converting each edge in the
directed graph to two edges.
See Also
--------
pagerank_numpy, pagerank_scipy, google_matrix
Raises
------
PowerIterationFailedConvergence
If the algorithm fails to converge to the specified tolerance
within the specified number of iterations of the power iteration
method.
References
----------
.. [1] A. Langville and C. Meyer,
"A survey of eigenvector methods of web information retrieval."
http://citeseer.ist.psu.edu/713792.html
.. [2] Page, Lawrence; Brin, Sergey; Motwani, Rajeev and Winograd, Terry,
The PageRank citation ranking: Bringing order to the Web. 1999
http://dbpubs.stanford.edu:8090/pub/showDoc.Fulltext?lang=en&doc=1999-66&format=pdf
"""
if len(G) == 0:
return {}
if not G.is_directed():
D = G.to_directed()
else:
D = G
# Create a copy in (right) stochastic form
W = nx.stochastic_graph(D, weight=weight)
N = W.number_of_nodes()
# Choose fixed starting vector if not given
if nstart is None:
x = dict.fromkeys(W, 1.0 / N)
else:
# Normalized nstart vector
s = float(sum(nstart.values()))
x = dict((k, v / s) for k, v in nstart.items())
if personalization is None:
# Assign uniform personalization vector if not given
p = dict.fromkeys(W, 1.0 / N)
else:
s = float(sum(personalization.values()))
p = dict((k, v / s) for k, v in personalization.items())
if dangling is None:
# Use personalization vector if dangling vector not specified
dangling_weights = p
else:
s = float(sum(dangling.values()))
dangling_weights = dict((k, v / s) for k, v in dangling.items())
dangling_nodes = [n for n in W if W.out_degree(n, weight=weight) == 0.0]
# power iteration: make up to max_iter iterations
for _ in range(max_iter):
xlast = x
x = dict.fromkeys(xlast.keys(), 0)
danglesum = alpha * sum(xlast[n] for n in dangling_nodes)
for n in x:
# this matrix multiply looks odd because it is
# doing a left multiply x^T=xlast^T*W
for nbr in W[n]:
x[nbr] += alpha * xlast[n] * W[n][nbr][weight]
x[n] += danglesum * dangling_weights.get(n, 0) + (1.0 - alpha) * p.get(n, 0)
# check convergence, l1 norm
err = sum([abs(x[n] - xlast[n]) for n in x])
if err < N * tol:
return x
raise nx.PowerIterationFailedConvergence(max_iter)
| 5,347,948 |
def fetch_file(name, chunksize=16 * 1024):
"""
Fetch a datafile from a compressed/gzipped URL source.
Parameters
----------
name : :class:`str`
Name of the file to fetch.
chunksize : :class:`int`
Number of bytes to read in a chunk.
"""
fp, url, compressed = [
(Path(k), url, compressed)
for (k, (url, compressed)) in MANIFEST.items()
if name.lower() in Path(k).name.lower()
][0]
if "1drv" in url:
url = get_onedrive_directlink(
url
) # allow direct access to file object for 1drv
# construct relative path from this file
local_target = (Path(__file__).parent / fp).resolve()
if not local_target.exists():
if not local_target.parent.exists():
local_target.parent.mkdir(parents=True)
if compressed:
dec = zlib.decompressobj(
32 + zlib.MAX_WBITS
) # offset 32 to skip the header
decompress = dec.decompress
else:
decompress = lambda x: x
with urlopen(url) as response:
pbar = tqdm.tqdm(
total=int(response.headers["content-length"]),
unit="b",
unit_scale=True,
unit_divisor=1024,
desc=str(fp.name),
)
CHUNKSIZE = 16 * 1024
with open(local_target, "wb") as f:
while True:
chunk = response.read(chunksize)
if chunk:
rv = decompress(chunk)
f.write(rv)
pbar.update(len(chunk))
else:
break
return fp
| 5,347,949 |
def create_include(workflow_stat):
"""
Generates the html script include content.
@param workflow_stat the WorkflowInfo object reference
"""
include_str = """
<script type='text/javascript' src='bc_action.js'>
</script>
<script type='text/javascript' src='bc_""" + workflow_stat.wf_uuid +"""_data.js'>
</script>
"""
return include_str
| 5,347,950 |
def choose_sample_from_group(
group: general.ParameterListType,
) -> general.ParameterValuesType:
"""
Choose single sample from group DataFrame.
"""
# Make continous index from 0
indexes = [idx for idx in range(len(group))]
assert len(indexes) > 0
# Choose from indexes
choice = random.choices(population=indexes, k=1)[0]
# Get the dict at choice index
chosen_dict = group[choice]
assert isinstance(chosen_dict, dict)
return chosen_dict
| 5,347,951 |
def path_available(filepath):
# type: (str) -> bool
"""Return true if filepath is available"""
parent_directory = dirname(filepath)
if not exists(parent_directory):
raise ParentDirectoryDoesNotExist(parent_directory, filepath)
return not exists(filepath)
| 5,347,952 |
def memory_item_to_resource(urn: URN, items: Dict[str, Any] = None, loader: Callable = None) -> CloudWandererResource:
"""Convert a resource and its attributes to a CloudWandererResource.
Arguments:
urn (URN): The URN of the resource.
items (dict): The dictionary of items stored under this URN. (Secondary Attributes, BaseResource)
loader (Callable): The method which can be used to fulfil the :meth:`CloudWandererResource.load`
"""
items = items or {}
attributes = [
attribute
for item_type, attribute in items.items()
if item_type not in ["SubresourceUrns", "BaseResource", "ParentUrn"]
]
base_resource: Dict[str, Any] = next(
iter(resource for item_type, resource in items.items() if item_type == "BaseResource"), {}
)
return CloudWandererResource(
urn=urn,
subresource_urns=items.get("SubresourceUrns"),
resource_data=base_resource,
secondary_attributes=attributes,
loader=loader,
)
| 5,347,953 |
def generate_csv_spreadsheet(spreadsheet_location, mappings_location):
"""Reads the main XSLX mappings file and creates a spreadsheet for the
mappings in CSV"""
sheets = get_sheets(spreadsheet_location)
now = datetime.datetime.utcnow()
strf_time = now.strftime("%y/%m/%d")
relationship_type = "related-to"
with mappings_location.open('w', newline='\n', encoding='utf-8') as csvfile:
fieldnames = ['DATE DELIVERED', 'VERIS PATH', 'RELATIONSHIP TYPE', 'TECHNIQUE ID']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for sheet, name in sheets:
name = name.lower()
veris_path = None
for idx, row in sheet.iterrows():
if row[0] is not numpy.nan:
veris_path = f'{name}.{row[0]}'
if row[1] is not numpy.nan:
# Any rows without a mapping will have value numpy.nan (which are skipped)
writer.writerow({
'DATE DELIVERED': strf_time,
'VERIS PATH': veris_path,
'RELATIONSHIP TYPE': relationship_type,
'TECHNIQUE ID': row[1] # .strip()
})
| 5,347,954 |
def sample_user(email='[email protected]', password='testpass'):
"""Create a sample user"""
return get_user_model().objects.create_user(email, password)
| 5,347,955 |
def ShearX(img: Image, magnitude: float) -> Image:
"""Shear the image on x-axis."""
return img.transform(
img.size,
PIL.Image.AFFINE,
(1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
PIL.Image.BICUBIC,
fillcolor=FILLCOLOR,
)
| 5,347,956 |
def py_test(name=None,
srcs=[],
deps=[],
visibility=None,
main=None,
base=None,
testdata=[],
**kwargs):
"""python test."""
target = PythonTest(
name=name,
srcs=srcs,
deps=deps,
visibility=visibility,
main=main,
base=base,
testdata=testdata,
kwargs=kwargs)
build_manager.instance.register_target(target)
| 5,347,957 |
def run_command_unchecked(command, cwd, env=None):
"""Runs a command in the given dir, returning its exit code and stdio."""
p = subprocess.Popen(
command,
cwd=cwd,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
)
stdout, _ = p.communicate()
exit_code = p.wait()
return exit_code, stdout.decode('utf-8', 'replace')
| 5,347,958 |
def read_onsets(onsets_path: PathLike) -> numpy.array:
"""
Read a text file containing onsets. Return it as a list of floats.
"""
with open(onsets_path, "r") as io:
lines = io.readlines()
onsets = numpy.array([float(line) for line in lines])
return onsets
| 5,347,959 |
def resolve(path):
"""Resolve a command to help understanding where a command comes from"""
cmd, resolver = get_command(path, True)
click.echo(
f"The command {path} is resolved by the resolver {resolver.name}"
)
| 5,347,960 |
def format_specific_efficacy(method, type_1: str, type_2: str = None):
""" Format the efficacy string specifically for defense or attack. """
effective, ineffective, useless = format_damage(method, type_1, type_2)
type_name = format_type(type_1, type_2)
s = "**{}** \N{EN DASH} **{}**\n".format(type_name, "DEFENSE" if method is defense_method else "ATTACK")
if effective:
s += "Super effective: `{}`\n".format(", ".join(effective))
if ineffective:
s += "Not very effective: `{}`\n".format(", ".join(ineffective))
if useless:
s += "No effect: `{}`\n".format(", ".join(useless))
return s
| 5,347,961 |
def postfix(itemString):
"""transform infixExpre into postfixExpre
Algorithm:
step1: if operator, stack in;
step2: if "(", stack in.
step3: if variable, pop out all consecutive unary operators until another operator or "(".
step4: if ")", pop out all operators until "(", then pop all consecutive unary operators.
step5: goto step1.
Arg:
itemString: bitwise expression string presented in infix.
Return:
itemStr: expression string presented in postfix.
"""
itemStr = ""
boperatorList = ["&", "|", "^"]
uoperator = "~"
opeList = []
for (idx, char) in enumerate(itemString):
#open parenthesis, stack it
if char == "(":
opeList.append(char)
#binary operator, stack it
elif char in boperatorList:
opeList.append(char)
#unary operator
elif uoperator in char:
opeList.append(char)
#closed parenthesis, pop out the operator to string
elif char == ")":
while(opeList and opeList[-1] != "("):
itemStr += opeList[-1]
opeList.pop()
if opeList and opeList[-1] != "(":
print("error!")
sys.exit(0)
#open parenthesis found
opeList.pop()
#unary operator found before open parenthesis
while(opeList and opeList[-1] == "~"):
itemStr += opeList[-1]
opeList.pop()
#variable name found
else:
itemStr += char
#top of stack is unary operator
while(opeList and opeList[-1] in uoperator):
itemStr += opeList[-1]
opeList.pop()
if len(opeList) > 1:
print("error in function postfix!")
sys.exit(0)
#have one operator without parenthesis
elif len(opeList):
itemStr += opeList[0]
return itemStr
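# Worked example: the unary NOT binds to c before the OR and AND are emitted.
#
#   postfix("a&(b|~c)")   # -> "abc~|&"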
| 5,347,962 |
def validate_authorization(sender, instance, **kwargs):
"""Authorizations are generated by app code instead of ModelForm, so full_clean() before saving."""
instance.full_clean()
| 5,347,963 |
def main():
"""Main entry point for the default bot command."""
launch.main(config_path=args.config_path)
| 5,347,964 |
def getSuffixes(algorithm, seqType) :
""" Get the suffixes for the right algorithm with the right
sequence type
"""
suffixes = {}
suffixes['LAST'] = {}
suffixes['BLAST'] = {}
suffixes['BLAST']['nucl'] = ['nhr', 'nsq', 'nin']
suffixes['BLAST']['prot'] = ['phr', 'psq', 'pin']
suffixes['LAST']['nucl'] = [ 'des', 'sds', 'suf', 'bck', 'prj', 'ssp', 'tis' ]
suffixes['LAST']['prot'] = [ 'des', 'sds', 'suf', 'bck', 'prj', 'ssp', 'tis' ]
if not algorithm in suffixes:
return None
if not seqType in suffixes[algorithm]:
return None
return suffixes[algorithm][seqType]
| 5,347,965 |
def make_numbered_in_temp(
keep: int = 10,
lock_timeout: float = -1,
tmpdir: Optional[Path] = None,
register=None,
) -> Path:
"""
Helper to create a numbered directory in the temp dir with automatic disposal
of old contents.
"""
import tempfile
from robocorp_code.path_operations import get_user
from robocorp_code.path_operations import make_numbered_dir_with_cleanup
from robocorp_code.path_operations import LOCK_TIMEOUT
user = get_user() or "unknown"
temproot = tmpdir if tmpdir else Path(tempfile.gettempdir())
rootdir = temproot / f"robocorp-code-{user}"
rootdir.mkdir(exist_ok=True)
return make_numbered_dir_with_cleanup(
prefix="rcc-",
root=rootdir,
keep=keep,
lock_timeout=lock_timeout if lock_timeout > 0 else LOCK_TIMEOUT,
register=register,
)
| 5,347,966 |
def handle_server_api(output, kwargs):
""" Special handler for API-call 'set_config' [servers] """
name = kwargs.get('keyword')
if not name:
name = kwargs.get('name')
if name:
server = config.get_config('servers', name)
if server:
server.set_dict(kwargs)
old_name = name
else:
config.ConfigServer(name, kwargs)
old_name = None
Downloader.do.update_server(old_name, name)
return name
| 5,347,967 |
def compare_names(namepartsA, namepartsB):
"""Takes two name-parts lists (as lists of words) and returns a score."""
complement = set(namepartsA) ^ set(namepartsB)
intersection = set(namepartsA) & set(namepartsB)
score = float(len(intersection))/(len(intersection)+len(complement))
return score
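# Worked example: ["john", "smith"] vs ["john", "doe"] share one word out of
# three distinct words, so the score is 1/3.
#
#   compare_names(["john", "smith"], ["john", "doe"])   # -> 0.333...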
| 5,347,968 |
def Normal_VaR(return_matrix, theta, Horizon):  # 500 data points needed
"""
Compute the Value-at-Risk and Conditional Value-at-Risk
Parameters
----------
return_matrix : np.ndarray
theta : np.float64
Horizon : np.int16
Returns
----------
np.ndarray,np.ndarray VaR , CVaR
"""
mean_forecast,var_forecast,conditional_volatility = Arch_data(return_matrix , Horizon )
excess_innovations = Extract_Excess_Innovations(return_matrix , mean_forecast , conditional_volatility )
mu,scale = Dist_parameters(excess_innovations)
VaR,CVaR = Var_CVaR_extractor(mean_forecast,var_forecast,scale,mu,theta)
return VaR,CVaR
| 5,347,969 |
def daemon(queue, asg, interval, namespace, user="guest", password="guest"):
"""Submit EC2 custom metric for jobs waiting to be run."""
logging.info("queue: %s" % queue)
logging.info("interval: %d" % interval)
logging.info("namespace: %s" % namespace)
while True:
try:
job_count = float(get_waiting_job_count(queue, user, password))
logging.info("jobs_waiting for %s queue: %s" % (queue, job_count))
desired_capacity, max_size = map(float, get_desired_capacity_max(asg))
if desired_capacity == 0:
if job_count > 0:
desired_capacity = float(bootstrap_asg(asg, max_size if job_count > max_size else job_count))
logging.info(
"bootstrapped ASG %s to desired=%s" % (asg, desired_capacity)
)
else:
desired_capacity = 1.0
metric = job_count / desired_capacity
submit_metric(queue, asg, metric, namespace)
except Exception as e:
logging.error("Got error: %s" % e)
logging.error(traceback.format_exc())
time.sleep(interval)
| 5,347,970 |
def oscillator_amplitude(state, ders, period, floquet, zero_phase_lc, phase_warmup_periods=5, thr=0.0, dt=0.005):
"""calculates the isostable amplitude of the oscillator from dynamical equations
:param state: state of the system
:param ders: a list of state variable derivatives
:param period: oscillator period
:param floquet: floquet exponent
:param zero_phase_lc: zero phase limit cycle state
:param phase_warmup_periods: how many periods to wait for evaluating the asymptotic phase shift (default 5)
:param thr: threshold determining zero phase (default 0.0)
:param dt: time step (default 0.005)
:return: isostable amplitude of state"""
# get phase
phase = oscillator_phase(state, ders, period, phase_warmup_periods, thr=thr, dt=dt)
# calculate time to evolve to zero isochron
time = (1-phase/(2*pi))*period
# evolve to 0 isochron
state = integrate_period(state, ders, time, dt)
# amplitude sign
    if inside_limit_cycle(state, ders, period):
        sign = -1
    else:
        sign = 1
    return 0.5 * sign * distance(state, zero_phase_lc) * exp(floquet * time)
| 5,347,971 |
def commonprefix(a, b):
"""Find longest common prefix of `a` and `b`."""
pos = 0
length = min(len(a), len(b))
while pos < length and a[pos] == b[pos]:
pos += 1
return pos, b
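# Hedged usage sketch (not part of the original snippet): only the prefix
# length is computed; `b` is handed back unchanged.
if __name__ == "__main__":
    assert commonprefix("interstellar", "internet") == (5, "internet")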
| 5,347,972 |
def parse_datetime(strtime):
    """
    Parse a string date, time & tz offset into a UTC datetime object:
    2003-03-20 05:00:00-07
    """
    offset = int(strtime[-3:])
    date_time = dt.strptime(strtime[:-4], '%Y-%m-%d %H:%M:%S')
    offset = timedelta(hours=offset)
    # Subtract the UTC offset to convert the local timestamp to UTC.
    return (date_time - offset).replace(tzinfo=utc)
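# Hedged usage sketch (not part of the original snippet). The [:-4] slice above
# implies one separator character between the seconds and the offset, so a
# space is assumed here; `dt`, `timedelta` and `utc` are the module imports.
if __name__ == "__main__":
    print(parse_datetime('2003-03-20 05:00:00 -07'))  # 2003-03-20 12:00:00+00:00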
| 5,347,973 |
def traverse_depth_first(base: AnyDependency) -> List[AnyDependency]:
"""Performs a depth first traversal of the dependency tree.
"""
def _traverse_tree_2(base: AnyDependency) -> List[AnyDependency]:
queue: List[AnyDependency] = []
current_idx = 0
queue.append(base)
while len(queue) != current_idx:
node = queue[current_idx]
if not isinstance(node, UnresolvedDependency):
queue.extend(cast(Iterable, node.dependencies))
current_idx += 1
return queue
deps = _traverse_tree_2(base)
return deps
| 5,347,974 |
def _store_span(item, span):
"""Store span at `pytest.Item` instance."""
setattr(item, "_datadog_span", span)
| 5,347,975 |
def test_invalid_a_b_setter(a):
"""Test setting invalid a, b values."""
ellipse = Ellipse(1, 1)
with pytest.raises(ValueError):
ellipse.a = a
with pytest.raises(ValueError):
ellipse.b = a
| 5,347,976 |
def update_config(
client,
key,
*,
value=None,
remove=False,
global_only=False,
commit_message=None
):
"""Add, update, or remove configuration values."""
section, section_key = _split_section_and_key(key)
if remove:
value = client.remove_value(
section, section_key, global_only=global_only
)
if value is None:
raise errors.ParameterError('Key "{}" not found.'.format(key))
else:
client.set_value(section, section_key, value, global_only=global_only)
return value
| 5,347,977 |
def AcCheckTargetTools(context, programs, value_if_not_found=None,
path=None, pathext=None, reject=[]):
"""Corresponds to AC_CHECK_TARGET_TOOLS_ autoconf macro.
.. _AC_CHECK_TARGET_TOOLS: http://www.gnu.org/software/autoconf/manual/autoconf.html#index-AC_005fCHECK_005fTARGET_005fTOOLS-314
"""
# TODO: first I need to know how to determine AC_CANONICAL_TARGET
raise NotImplementedError("not implemented")
| 5,347,978 |
def parse_time(t):
""" parse a date time string, or a negative number as
the number of seconds ago.
returns unix timestamp in MS
"""
try:
tint = int(t)
if tint <= 0:
return int(nowms() + (tint * 1000))
except ValueError:
pass
    # The parsed date may or may not have a tz; if it does not, localize it.
parsed = dtparse(t)
if not parsed.tzinfo:
parsed = parsed.replace(tzinfo=tzlocal())
    # Get the milliseconds by subtracting the epoch in the same tz, then multiplying by 1000.
return int((parsed - datetime.fromtimestamp(0, parsed.tzinfo)).total_seconds() * 1000)
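# Hedged usage sketch (not part of the original snippet); assumes the module
# imports used above (`nowms`, `dtparse` from dateutil, `tzlocal`, `datetime`).
if __name__ == "__main__":
    print(parse_time("-60"))                     # 60 seconds ago, in ms
    print(parse_time("2003-03-20 12:00:00+00"))  # 1048161600000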
| 5,347,979 |
def _get_duration(tmin: np.datetime64, tmax: np.datetime64) -> str:
"""
Determine the duration of the given datetimes.
See also: `ISO 8601 Durations <https://en.wikipedia.org/wiki/ISO_8601#Durations>`_
:param tmin: Time minimum
:param tmax: Time maximum
:return: Temporal resolution formatted as an ISO 8601:2004 duration string
"""
delta = tmax - tmin
day = np.timedelta64(1, 'D')
days = (delta.astype('timedelta64[D]') / day) + 1
return 'P{}D'.format(int(days))
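# Hedged usage sketch (not part of the original snippet): the day count is
# inclusive of both endpoints, so two days apart yields a three-day period.
if __name__ == "__main__":
    assert _get_duration(np.datetime64('2010-01-01'), np.datetime64('2010-01-03')) == 'P3D'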
| 5,347,980 |
def inplace_freeze(model: nn.Module):
"""Freezes the modle by turning off its parameter's
require_grad attributes. In-place operation.
"""
for p in model.parameters():
p.requires_grad_(False)
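# Hedged usage sketch (not part of the original snippet); assumes the usual
# `import torch.nn as nn` that the signature above relies on.
if __name__ == "__main__":
    _net = nn.Linear(4, 2)
    inplace_freeze(_net)
    assert all(not p.requires_grad for p in _net.parameters())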
| 5,347,981 |
def get_polyend_circle_angles(a, b, isLeft):
"""
theta0 = pi/2 + betta, theta1 = 2 * pi + betta;
betta = pi/2 - alpha;
alpha = atan(a)
"""
if a is None and b is None:
return None, None
alpha = math.pi / 2.0 if a is None else math.atan(a)
betta = math.pi / 2.0 - alpha
shift = 0.0 if isLeft else math.pi
theta0 = betta + shift
theta1 = theta0 + math.pi
return theta0, theta1
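# Hedged usage sketch (not part of the original snippet): for a slope of 1 the
# left end-cap spans from pi/4 to pi/4 + pi.
if __name__ == "__main__":
    theta0, theta1 = get_polyend_circle_angles(1.0, 0.0, isLeft=True)
    assert abs(theta0 - math.pi / 4) < 1e-12 and abs(theta1 - 5 * math.pi / 4) < 1e-12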
| 5,347,982 |
def main():
"""Main entrypoint."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("file", nargs="+", type=pathlib.Path)
args = parser.parse_args()
hooks.utils.check_executable("packer")
return hooks.utils.bulk_check(
packer_fix,
args.file,
)
| 5,347,983 |
def descompact_zip(file_path: str, dest_path: str) -> None:
    """Extract the GlassFish .zip archive into dest_path."""
    try:
        # A context manager guarantees the archive handle is closed.
        with zipfile.ZipFile(file_path) as zip_file:
            zip_file.extractall(dest_path)
    except Exception as err:
        print('Error unzipping Glassfish: ', str(err))
| 5,347,984 |
def add_entry(entries):
"""Add a new task"""
new_task = input('\nTo do: ')
protect = 'No'
tasklimit = str(ToDo.timestamp)
taskinfo = (new_task, 'undone', protect, random.randint(0,10000000), tasklimit)
cur.execute("INSERT INTO mytodos VALUES(?,?,?,?,?)" , taskinfo)
entries = cur.execute('SELECT * FROM mytodos')
view_entries(entries)
| 5,347,985 |
def main(args):
"""Main entry point allowing external calls
Args:
args ([str]): command line parameter list
"""
if isinstance(args, list):
args = parse_args(args)
setup_logging(args.loglevel)
_logger.debug("Starting confluence...")
args.func(args)
_logger.info("Confluence complete.")
| 5,347,986 |
def get_search_cache_key(prefix, *args):
""" Generate suitable key to cache twitter tag context
"""
key = '%s_%s' % (prefix, '_'.join([str(arg) for arg in args if arg]))
not_allowed = re.compile('[^%s]' % ''.join([chr(i) for i in range(33, 128)]))
key = not_allowed.sub('', key)
return key
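# Hedged usage sketch (not part of the original snippet): falsy arguments are
# skipped and characters outside printable ASCII (such as spaces) are stripped.
if __name__ == "__main__":
    assert get_search_cache_key('twitter', 'from: user', None, 10) == 'twitter_from:user_10'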
| 5,347,987 |
def main(argv=None):
"""Run pragma-no-mutate filter with specified command line arguments.
"""
return PragmaNoMutateFilter().main(argv)
| 5,347,988 |
def patch_requests():
"""Stub out services that makes requests."""
patch_client = patch("homeassistant.components.meteo_france.MeteoFranceClient")
with patch_client:
yield
| 5,347,989 |
def independence_single_value(values, sigma=0.70):
"""
This calculates the independence of the models for a given metric
where the metric is single valued, e.g. the slope of a gradient.
------Input------
values (list) : The single values for each model.
sigma (float) : The value of sigma_s
-----Returns-----
S (np.array 2D) : The inter model similarity
W (np.array 1D) : The weight per model from the similarity calculation
"""
sigma_s = sigma
# Can first calculate inter model distances S and D
S = np.zeros((len(values), len(values)))
# Weightings W dims=num_models
W = np.zeros((len(values), 1))
for i, model_i in enumerate(values):
i_data = model_i
for j, model_j in enumerate(values):
if i != j:
j_data = model_j
s = math.exp(-((i_data - j_data) ** 2).sum() / (1 * sigma_s ** 2))
S[i, j] = s
for ii in range(len(values)):
w = 1 / (1 + np.nansum(S[ii], 0))
W[ii] = w
W /= np.nansum(W)
return S, W
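# Hedged usage sketch (not part of the original snippet): pass the per-model
# values as a NumPy array so the `.sum()` call above works on the scalar
# differences; the two near-identical models end up sharing their weight.
if __name__ == "__main__":
    S, W = independence_single_value(np.array([1.0, 1.01, 2.0]), sigma=0.7)
    # W sums to 1 and W[2] is the largest weight.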
| 5,347,990 |
def extract_fingerprints(atoms, i_jbond_dict, radius):
"""Extract the r-radius subgraphs (i.e., fingerprints)
from a molecular graph using Weisfeiler-Lehman algorithm."""
if (len(atoms) == 1) or (radius == 0):
fingerprints = [fingerprint_dict[a] for a in atoms]
else:
nodes = atoms
i_jedge_dict = i_jbond_dict
for _ in range(radius):
"""Update each node ID considering its neighboring nodes and edges
(i.e., r-radius subgraphs or fingerprints)."""
fingerprints = []
for i, j_edge in i_jedge_dict.items():
neighbors = [(nodes[j], edge) for j, edge in j_edge]
fingerprint = (nodes[i], tuple(sorted(neighbors)))
fingerprints.append(fingerprint_dict[fingerprint])
nodes = fingerprints
"""Also update each edge ID considering two nodes
on its both sides."""
_i_jedge_dict = defaultdict(lambda: [])
for i, j_edge in i_jedge_dict.items():
for j, edge in j_edge:
both_side = tuple(sorted((nodes[i], nodes[j])))
edge = edge_dict[(both_side, edge)]
_i_jedge_dict[i].append((j, edge))
i_jedge_dict = _i_jedge_dict
return np.array(fingerprints)
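# Note (assumption, not in the original snippet): `fingerprint_dict` and
# `edge_dict` are module-level lookup tables that hand out a fresh integer ID
# for each previously unseen substructure, typically declared as
# `defaultdict(lambda: len(fingerprint_dict))` / `defaultdict(lambda: len(edge_dict))`.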
| 5,347,991 |
def GetUniqueClassMembers(Class, Ignore = [], AllowedOverrides = []):
    """
    Args:
    - Class {object}: reference to the class
    - Ignore {List[str]}: member names to always exclude
    - AllowedOverrides {List[str]}: member names that are always allowed, even if they exist in the parent class
    Returns: List[tuple("Name", Reference)]
    """
    Members = inspect.getmembers(Class)
    ParentClass = GetClassParents(Class)[0]
    UniqueMembers = [x for x in Members if (not hasattr(ParentClass, x[0]) and x[0] not in Ignore) or x[0] in AllowedOverrides]  # and not x[0].startswith("__")
    return UniqueMembers
| 5,347,992 |
def set_categories(ax, labels, categories):
"""Applies gradient coloring for rewards
Params:
* ax: matplotlib.axes._subplots.AxesSubplot
Axis object
* labels: array-like
One or Two sized array with x and y labels.
* categories: dict<tuple(str, str), list<float>>
Vertical and Horizontal categories
"""
if any(categories):
xlabel, ylabel = labels[0], labels[-1]
ax.vlines(categories[xlabel]['0'], 0, 1,
transform=ax.get_xaxis_transform(),
colors='tab:purple', label=f'category {snakefy(xlabel)}')
ax.hlines(categories[ylabel]['1'], 0, 1,
transform=ax.get_yaxis_transform(),
colors='tab:cyan', label=f'category {snakefy(ylabel)}')
| 5,347,993 |
def get_condition_keys_available_to_raw_arn(db_session, raw_arn):
"""
Get a list of condition keys available to a RAW ARN
:param db_session: SQLAlchemy database session object
:param raw_arn: The value in the database, like arn:${Partition}:s3:::${BucketName}/${ObjectName}
"""
rows = db_session.query(ArnTable).filter(ArnTable.raw_arn.like(raw_arn))
result = rows.first()
if result.condition_keys:
condition_keys = result.condition_keys.split(",")
return condition_keys
else:
return False
| 5,347,994 |
def get_reviewer(form):
""" Gets reviewer info, or adds if necessary
"""
reviewer = Reviewer.query.filter_by(email=form.get("reviewer-email")).first()
if reviewer:
reviewer_id = reviewer.reviewer_id
else:
reviewer_id = add_reviewer(form)
return reviewer_id
| 5,347,995 |
def read_xyz(using):
"""Reads coordinates of an xyz file and return a list of |Atom| objects, one for each atom"""
coords = []
with open(using, "r") as f:
for coord in f.readlines()[2:]:
line = coord.split()
for val in PT.ptable.values():
if line[0] == val[0]:
coords.append(
Atom(line[0],
coords=tuple(float(i) for i in line[1:4])))
return coords
| 5,347,996 |
def train_agent_real_env(
problem_name, agent_model_dir, event_dir, world_model_dir, epoch_data_dir,
hparams, epoch=0, is_final_epoch=False):
"""Train the PPO agent in the real environment."""
gym_problem = registry.problem(problem_name)
ppo_hparams = trainer_lib.create_hparams(hparams.ppo_params)
ppo_params_names = ["epochs_num", "epoch_length",
"learning_rate", "num_agents",
"optimization_epochs"]
for param_name in ppo_params_names:
ppo_param_name = "real_ppo_"+ param_name
if ppo_param_name in hparams:
ppo_hparams.set_hparam(param_name, hparams.get(ppo_param_name))
ppo_hparams.epochs_num = _ppo_training_epochs(hparams, epoch,
is_final_epoch, True)
ppo_hparams.save_models_every_epochs = 10
environment_spec = copy.copy(gym_problem.environment_spec)
ppo_hparams.add_hparam("environment_spec", environment_spec)
with temporary_flags({
"problem": problem_name,
"output_dir": world_model_dir,
"data_dir": epoch_data_dir,
}):
        # epoch = 10**20 is a hackish way to avoid skipping training
rl_trainer_lib.train(ppo_hparams, event_dir, agent_model_dir,
name_scope="ppo_real")
| 5,347,997 |
def initialize_classification(model_name: str,
                              num_classes: int,
                              use_pretrained: bool = True
                              ) -> (Module, int):
    """ Initialize a classification network, with all settings being model specific.
        The final fully-connected layer is replaced so that it fits the new number
        of classes; its weights are initialized with the Xavier algorithm and all
        its biases are initialized to 0.
    Args:
        model_name (str): Classification network name in ['vgg', 'alexnet', 'resnet', 'googlenet'].
        num_classes (int): The number of classes in the dataset.
        use_pretrained (bool): If true, load a model pretrained on ImageNet.
    Return:
        model (Module): Modified classification network fitting the given class number.
        input_size (int): Input image size for the classification network.
    """
model = None
input_size = None
# VGG-16
if "vgg" in model_name.lower():
model = models.vgg16(pretrained=use_pretrained)
set_parameter_requires_grad(model, True)
num_ftrs = model.classifier[6].in_features
model.classifier[6] = nn.Linear(num_ftrs, num_classes)
nn.init.xavier_uniform_(model.classifier[6].weight)
nn.init.zeros_(model.classifier[6].bias)
input_size = 224
# Alexnet
elif "alexnet" in model_name.lower():
model = models.alexnet(pretrained=use_pretrained)
set_parameter_requires_grad(model, True)
num_ftrs = model.classifier[6].in_features
model.classifier[6] = nn.Linear(num_ftrs, num_classes)
nn.init.xavier_uniform_(model.classifier[6].weight)
nn.init.zeros_(model.classifier[6].bias)
input_size = 224
    # ResNet-18 / ResNet-50
elif "resnet" in model_name.lower():
if '18' in model_name.lower():
model = models.resnet18(pretrained=use_pretrained)
else:
model = models.resnet50(pretrained=use_pretrained)
set_parameter_requires_grad(model, True)
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, num_classes)
nn.init.xavier_uniform_(model.fc.weight)
nn.init.zeros_(model.fc.bias)
input_size = 224
# GoogLeNet
elif "googlenet" in model_name.lower():
model = models.googlenet(pretrained=use_pretrained, aux_logits=True)
set_parameter_requires_grad(model, True)
        # Handle the auxiliary networks (aux1 and aux2)
num_ftrs = model.aux1.fc2.in_features
model.aux1.fc2 = nn.Linear(num_ftrs, num_classes)
nn.init.xavier_uniform_(model.aux1.fc2.weight)
nn.init.zeros_(model.aux1.fc2.bias)
num_ftrs = model.aux2.fc2.in_features
model.aux2.fc2 = nn.Linear(num_ftrs, num_classes)
nn.init.xavier_uniform_(model.aux2.fc2.weight)
nn.init.zeros_(model.aux2.fc2.bias)
# Handle the primary network
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, num_classes)
nn.init.xavier_uniform_(model.fc.weight)
nn.init.zeros_(model.fc.bias)
input_size = 224
else:
raise ValueError("Invalid classification network name.")
return model, input_size
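# Hedged usage sketch (not part of the original snippet); assumes torchvision
# and the set_parameter_requires_grad helper used above are available, and
# downloads the pretrained ResNet-18 weights on first use.
if __name__ == "__main__":
    model, input_size = initialize_classification("resnet18", num_classes=10)
    print(model.fc, input_size)  # Linear(in_features=512, out_features=10, ...) 224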
| 5,347,998 |
def get_csc():
"""get Configuration Client"""
config_host = enstore_functions2.default_host()
config_port = enstore_functions2.default_port()
return configuration_client.ConfigurationClient((config_host,config_port))
| 5,347,999 |