content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M)
---|---
def lemmatize(
nlp: Optional[Language] = None, name="lemmatize"
) -> ops.base.SpacyBasedOperation:
"""Helper function to return SpacyBasedOperation for lemmatizing.
This operation returns a stream.DataStream where each item is a string after
being lemmatized.
Parameters
----------
nlp : Optional[spacy.language.Language]
spacy's language model or None. If None then by default
`en_core_web_sm` spacy model is loaded
name : Optional[str]
name of this operation
Returns
-------
out : SpacyBasedOperation
"""
return ops.base.SpacyBasedOperation(nlp=nlp, process_doc_fn=_lemmatize, name=name,)
| 5,350,300 |
def mean_absolute_deviation(curve1: np.ndarray, curve2: np.ndarray, *args):
"""Calculate the mean deviation."""
diff = np.abs(curve1 - curve2)
return np.mean(diff)
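# A minimal usage sketch, assuming only that numpy is available; the curves
# below are made-up illustration data:
import numpy as np
curve_a = np.array([1.0, 2.0, 3.0])
curve_b = np.array([1.5, 1.5, 3.0])
assert np.isclose(mean_absolute_deviation(curve_a, curve_b), (0.5 + 0.5 + 0.0) / 3)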
| 5,350,301 |
def get_segtype_hops(seg_type, connector=None): # pragma: no cover
"""
Request list of segments by type used to construct paths.
:param seg_type: The type of PathSegmentType requested.
:param connector: Optional connector to use; defaults to the module-level _connector.
:returns: List of SCIONDSegTypeHopReplyEntry objects.
"""
global _connector
if not connector:
connector = _connector
if not connector:
raise SCIONDLibNotInitializedError
return connector.get_segtype_hops(seg_type)
| 5,350,302 |
def main():
"""Starts the parser on the file given by the filename as the first
argument on the commandline.
"""
filename = optParse()
fsm = Doxypy()
fsm.parseFile(filename)
| 5,350,303 |
def build_moderation_channel_embed(ctx, channel, action):
"""
Builds a moderation embed which displays some information about the mod channel being created/removed
:param ctx: The discord context
:param channel: The channel to be created/removed
:param action: either "Added" or "Removed" to tell the user what happened to the mod channel
:return embed: The moderation embed to be sent to the user
"""
embed = create_default_embed(ctx)
embed.title = "Koala Moderation - Mod Channel " + action
embed.add_field(name="Channel Name", value=channel.mention)
embed.add_field(name="Channel ID", value=channel.id)
return embed
| 5,350,304 |
def print_settings(settings):
"""
This function returns the equation of state (EoS) fitting settings.
Returns
-------
text: str
Pretty-printed settings for the current Quantas run.
"""
text = '\nCalculator: Equation of state (EoS) fitting\n'
text += '\nMeasurement units\n'
text += '-------------------------------------\n'
text += ' - {:12} {}\n'.format('pressure:', settings['pressure_unit'])
text += ' - {:12} {}\n'.format('length:', settings['lenght_unit'])
return text
| 5,350,305 |
def run_analysis(output, stimtype="gabors", comp="surp", ctrl=False,
CI=0.95, alg="sklearn", parallel=False, all_scores_df=None):
"""
run_analysis(output)
Calculates statistics on scores from runs for each specific analysis
criteria and saves them in the summary scores dataframe.
Overwrites any existing dataframe of analysed data.
Required args:
- output (str): general directory in which summary dataframe is saved.
Optional args:
- stimtype (str) : stimulus type
default: "gabors"
- comp (str) : type of comparison
default: "surp"
- ctrl (bool) : if True, control comparisons are analysed
default: False
- CI (num) : CI for shuffled data
default: 0.95
- alg (str) : algorithm used to run logistic regression
("sklearn" or "pytorch")
default: "sklearn"
- parallel (bool) : if True, run information is collected in
parallel
default: False
- all_scores_df (pd df): already collated scores dataframe
default: None
Returns:
- scores_summ (pd DataFrame): dataframe with analysed scores
"""
if all_scores_df is None:
all_scores_df = run_collate(output, stimtype, comp, ctrl, alg, parallel)
stats = "mean" # across runs for shuffle CIs
if all_scores_df is None:
return
scores_summ = pd.DataFrame()
ext_test = sess_str_util.ext_test_str(
("q1v4" in output), ("rvs" in output), comp)
if ext_test == "":
ext_test = None
# common labels
comm_labs = gen_util.remove_if(info_dict(),
["uniqueid", "run_n", "epoch_n"])
# get all unique comb of labels
for acr_shuff in [False, True]:
if not acr_shuff:
df_unique = all_scores_df[comm_labs].drop_duplicates()
else:
df_unique = all_scores_df[gen_util.remove_if(comm_labs,
["mouse_n", "n_rois"])].drop_duplicates()
for _, df_row in df_unique.iterrows():
if acr_shuff and not df_row["shuffle"]:
# second pass, only shuffle
continue
vals = [df_row[x] for x in comm_labs]
curr_lines = gen_util.get_df_vals(all_scores_df, comm_labs, vals)
# assign values to current line in summary df
curr_idx = len(scores_summ)
gen_util.set_df_vals(scores_summ, curr_idx, comm_labs, vals)
# calculate stats
scores_summ = calc_stats(scores_summ, curr_lines, curr_idx, CI,
ext_test, stats=stats, shuffle=acr_shuff)
savename = get_df_name("analyse", stimtype, comp, ctrl, alg)
file_util.saveinfo(scores_summ, savename, output, overwrite=True)
return scores_summ
| 5,350,306 |
def decrypt_ballots_with_all_guardians(
ballots: List[Dict], guardians: List[Dict], context: Dict
) -> Dict:
"""
Decrypt all ballots using the guardians.
Runs the decryption in batches, rather than all at once.
"""
ballots_per_batch = 2
decrypted_ballots: Dict = {}
for batch in batch_list(ballots, ballots_per_batch):
ballot_shares: Dict[str, List[Dict]] = {}
# Each guardian should decrypt their own shares independently...
for guardian in guardians:
response = guardian_api.decrypt_ballot_shares(batch, guardian, context)
shares: List[Dict] = response["shares"]
ballot_shares[guardian["id"]] = shares
# These shares are then gathered by the mediator and used to fully decrypt the ballots!
decrypted_batch = mediator_api.decrypt_ballots(batch, ballot_shares, context)
# The decrypted ballots are keyed by ballot ID. Merge them into the full dictionary.
decrypted_ballots = {**decrypted_ballots, **decrypted_batch}
return decrypted_ballots
| 5,350,307 |
def getRealItemScenePos(item):
"""
Returns the item's real position in scene space, mostly for e.g. StrandItems.
This will change as the root item is moved round the scene,
but should not change when zooming.
"""
view = pathview()
try:
vhitem = item.virtualHelixItem()
linepos = linecenter(item.line()) # StrandItem lines are drawn in the virtual-helix space.
except AttributeError:
# E.g. EndPointItems, caps, etc. have no VhItem; their position is already in scene coordinates:
return item.scenePos()
# Should I map to scene space or maybe use pathrootitem, i.e. vhitem.mapToItem(pathroot(), *linepos) ?
# mapping to pathroot produces constant result independent of zoom and transform.
# mapping to scene produces variable results.
return vhitem.mapToScene(*linepos)
| 5,350,308 |
def get_default_upload_mode():
"""
Returns the string for the default upload mode
:return: Default upload mode string
"""
return api.MODE_DEFAULT
| 5,350,309 |
def remove_multi_whitespace(string_or_list):
""" Cleans redundant whitespace from extracted data """
if type(string_or_list) == str:
return ' '.join(string_or_list.split())
return [' '.join(string.split()) for string in string_or_list]
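# A minimal usage sketch for remove_multi_whitespace; the inputs are made-up
# scraped-text examples:
assert remove_multi_whitespace("  too   many \t spaces ") == "too many spaces"
assert remove_multi_whitespace(["a  b", " c\td "]) == ["a b", "c d"]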
| 5,350,310 |
def validate_board(board: list) -> bool:
"""
Checks if board fits the rules. If fits returns True, else False.
>>> validate_board(["**** ****","***1 ****","** 3****","* 4 1****",\
" 9 5 "," 6 83 *","3 1 **"," 8 2***"," 2 ****"])
False
"""
if check_rows(board) and\
check_columns(board) and\
check_color(board):
return True
return False
| 5,350,311 |
def test_copying_in_on_modified_block(
action_block_factory,
create_temp_files,
module_factory,
):
"""Module should copy properly."""
file1, file2, file3, file4 = create_temp_files(4)
file2.write_text('original')
file4.write_text('some other content')
action_block = action_block_factory(
copy=[
{'content': str(file1), 'target': str(file2)},
{'content': str(file3), 'target': str(file4)},
],
)
module = module_factory(on_modified=action_block, path=Path('/a/b/c'))
module.execute(action='all', block='on_modified', path=Path('/a/b/c'))
# Check if content has been copied
assert file2.read_text() == file1.read_text()
assert file4.read_text() == file3.read_text()
| 5,350,312 |
def get_H(m, n):
"""Calculate the distance of each point of the m, n matrix from the center"""
u = np.array([i if i <= m / 2 else m - i for i in range(m)],
dtype=np.float32)
v = np.array([i if i <= n / 2 else n - i for i in range(n)],
dtype=np.float32)
v.shape = n, 1
return (u - m/2)**2 + (v - n/2)**2
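# A quick sanity check for get_H, assuming numpy is imported as np and the
# corrected v above (built from n rather than m):
H = get_H(6, 4)
assert H.shape == (4, 6)        # (n, 1) broadcast against (m,)
assert H[4 // 2, 6 // 2] == 0   # the centre point has zero squared distance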
| 5,350,313 |
def create_users(xml_filename, test_mode=False, verbose=False):
"""
Import OET cruise record XML file and create django auth users from the list of participants
:param xml_filename: the name of the XML file
:param test_mode: if True, users are created but not saved
:param verbose: if True, print verbose output while creating users
:return: the number of users created
"""
num_created = 0
cruise_record = xml2struct(xml_filename)
participant_list = cruise_record['oet:oetcruise']['r2r:cruise']['r2r:cruiseParticipants']['r2r:cruiseParticipant']
names = [participant['r2r:cruiseParticipantName']['text'] for participant in participant_list]
for name in names:
split_name = name.split()
first_name = split_name[0]
last_name = "".join(split_name[1:])
new_user = create_user(first_name, last_name, save=not test_mode, verbose=verbose)
if new_user:
print('Created user', new_user.username, '(%s)' % name)
num_created += 1
return num_created
| 5,350,314 |
def parse_variants(ref: str, call: List[str], pos: List[str],
results: Dict[str, int]):
""" Parse the variants and add to results """
call_variant = set(call)
pos_variant = set(pos)
# The types of concordant calls are counted separately
if call_variant == pos_variant:
# These variants are homozygous reference
if len(call_variant) == 1 and next(iter(call_variant)) == ref:
results['alleles_hom_ref_concordant'] += 2
# These variants are heterozygous, since they have different calls
elif len(call_variant) > 1:
results['alleles_het_concordant'] += 2
# If they are not homozygous reference, and not heterozygous, they must
# be homozygous alternative, for whichever alt allele they have
else:
results['alleles_hom_alt_concordant'] += 2
# Here we count all alleles independently, also for the concordant calls
for allele in call:
if allele == '.':
results['alleles_no_call'] += 1
elif allele in pos:
results['alleles_concordant'] += 1
# We cannot match an A/A call with A/G, so we need to remove the
# A call from the positive set once we have 'used' it
pos.remove(allele)
else:
results['alleles_discordant'] += 1
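# A minimal worked example for parse_variants; the counts dict is assumed to
# default its keys to zero (e.g. collections.defaultdict(int)):
from collections import defaultdict
counts = defaultdict(int)
parse_variants(ref='A', call=['A', 'G'], pos=['A', 'A'], results=counts)
assert counts['alleles_concordant'] == 1   # the shared 'A' allele
assert counts['alleles_discordant'] == 1   # 'G' has no matching positive call left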
| 5,350,315 |
def learn(infile, outfile, apply_model, minimum_abundance_ratio, maximum_sec_shift, cb_decoys, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, xgb_autotune, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, plot_reports, threads, test):
"""
Learn true/false interaction features in SEC data.
"""
# Define outfile
if outfile is None:
outfile = infile
else:
copyfile(infile, outfile)
# Run PyProphet training
click.echo("Info: Running PyProphet.")
# Drop feature scores if they already exist
con = sqlite3.connect(outfile)
c = con.cursor()
c.execute('DROP TABLE IF EXISTS FEATURE_SCORED;')
con.close()
pyprophet(outfile, apply_model, minimum_abundance_ratio, maximum_sec_shift, cb_decoys, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, xgb_autotune, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, plot_reports, threads, test)
# Combine all replicates
click.echo("Info: Combine evidence across replicate runs.")
combined_data = combine(outfile, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, pfdr)
con = sqlite3.connect(outfile)
combined_data.df.to_sql('FEATURE_SCORED_COMBINED', con, index=False, if_exists='replace')
con.close()
| 5,350,316 |
def writeJSONFile(filename,JSONDocument):
""" Writes a JSON document to a named file
Parameters
----------
filename : str
name of the file
JSONDocument : object
JSON-serializable document to write to the file
Returns
-------
True
"""
filename='data/'+filename
with open(filename, 'w') as outfile:
json.dump(JSONDocument, outfile)
return True
| 5,350,317 |
def read_readme():
"""Read README content.
If the README.rst file does not exist yet
(this is the case when not releasing)
only the short description is returned.
"""
try:
return local_file('README.rst')
except IOError:
return __doc__
| 5,350,318 |
def get_data_loader(dataset, dataset_dir, batch_size, workers=8, is_training=False):
""" Create data loader. """
return data.DataLoader(
get_dataset(dataset, is_training=is_training, dataset_dir=dataset_dir),
batch_size=batch_size,
shuffle=is_training,
num_workers=workers,
pin_memory=True,
)
| 5,350,319 |
def listwhom(detailed=False):
"""Return the list of currently avalailable databases for covid19
data in PyCoA.
The first one is the default one.
If detailed=True, gives location information for each database.
"""
try:
if int(detailed):
df = pd.DataFrame(get_db_list_dict())
df = df.T.reset_index()
df.index = df.index+1
df = df.rename(columns={'index':'Database',0: "WW/iso3",1:'Granularity',2:'WW/Name'})
return df
else:
return _db.get_available_database()
except:
raise CoaKeyError('Waiting for a boolean !')
| 5,350,320 |
def paginate(
gcp_resource: Callable,
method_name: str,
items_name: str,
subitems_name: str = None,
exclude_region_resources: bool = False,
**kwargs,
) -> Iterable:
"""Paginate GCP API list and aggregatedList results.
Args:
gcp_resource: GCP resource on which we do our paging
method_name: list method to call. Usually `list` or `aggregatedList`
items_name: Name of the key in our result that contains the list of items.
Usually `items`
subitems_name: When using aggregatedList this contains the actual items.
Usually the same as the gcp_resource name. E.g. `disks` when requesting
disks, `instances` when fetching instances, etc.
exclude_region_resources: Regional resources have their own API and can be
excluded from aggregatedList calls if so desired
"""
next_method_name = method_name + "_next"
method = getattr(gcp_resource, method_name)
request = method(**kwargs)
while request is not None:
for attempt in Retrying(
reraise=True,
stop=stop_after_attempt(10),
retry=retry_if_exception_type(socket.timeout),
):
with attempt:
result = request.execute()
if items_name in result:
items = result[items_name]
if isinstance(items, dict):
for location, item in items.items():
if (
method_name == "aggregatedList"
and exclude_region_resources
and str(location).startswith("regions/")
):
continue
if subitems_name in item:
yield from item[subitems_name]
else:
yield from items
if hasattr(gcp_resource, next_method_name):
method = getattr(gcp_resource, next_method_name)
request = method(request, result)
else:
request = None
| 5,350,321 |
def project(s):
"""Maps (x,y,z) coordinates to planar-simplex."""
# Is s an appropriate sequence or just a single point?
try:
return unzip(map(project_point, s))
except TypeError:
return project_point(s)
except IndexError: # for numpy arrays
return project_point(s)
| 5,350,322 |
def point_at_angle_on_ellipse(
phi: ArrayLike, coefficients: ArrayLike
) -> NDArray:
"""
Return the coordinates of the point at angle :math:`\\phi` in degrees on
the ellipse with given canonical form coefficients.
Parameters
----------
phi
Point at angle :math:`\\phi` in degrees to retrieve the coordinates
of.
coefficients
General form ellipse coefficients as follows: the center coordinates
:math:`x_c` and :math:`y_c`, semi-major axis length :math:`a_a`,
semi-minor axis length :math:`a_b` and rotation angle :math:`\\theta`
in degrees of its semi-major axis :math:`a_a`.
Returns
-------
:class:`numpy.ndarray`
Coordinates of the point at angle :math:`\\phi`
Examples
--------
>>> coefficients = np.array([0.5, 0.5, 2, 1, 45])
>>> point_at_angle_on_ellipse(45, coefficients) # doctest: +ELLIPSIS
array([ 1., 2.])
"""
phi = np.radians(phi)
x_c, y_c, a_a, a_b, theta = tsplit(coefficients)
theta = np.radians(theta)
cos_phi = np.cos(phi)
sin_phi = np.sin(phi)
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
x = x_c + a_a * cos_theta * cos_phi - a_b * sin_theta * sin_phi
y = y_c + a_a * sin_theta * cos_phi + a_b * cos_theta * sin_phi
return tstack([x, y])
| 5,350,323 |
def append_file(source, dest, seek=None):
"""
This function appends the contents of the source file into the dest file. If a seek value is specified, 0s are inserted from the end of the dest file until the seek value is reached.
"""
with open(source, 'rb') as in_file:
data = in_file.read()
with open(dest, 'ab') as out_file:
if seek:
seek_length = seek - out_file.tell()
if seek_length < 0:
print('[-] Error: The seek offset must be greater than the length of the destination file.')
sys.exit(1)
out_file.write(bytearray((chr(0) * seek_length).encode('utf-8')))
print("[*] " + source + ' start byte is: 0x%0.8x' % out_file.tell())
out_file.write(data)
print("[*] " + source + ' end byte is: 0x%0.8x' % out_file.tell())
| 5,350,324 |
def handle_row(row, data, a_tags):
"""
iterates through each post in the row
:param row: object of the specific row
:param data: dict format of the final data
:param a_tags: array of strings containing href's
:return: None
"""
single_post = defaultdict()
try:
for post in row:
# Checks if the post card is empty or not
if "class" in post.attrib:
single_post["post_link"] = post[0].attrib["href"]
# Extract post info
if single_post["post_link"] not in a_tags:
single_post["alt"] = post[0][0][0][0].attrib["alt"] if "alt" in post[0][0][0][0].attrib else ""
single_post["img_src"] = post[0][0][0][0].attrib["src"] if "src" in post[0][0][0][0].attrib else ""
data["posts"].append(single_post)
a_tags.append(single_post["post_link"])
single_post = defaultdict()
else:
break
except Exception as e:
print("Error while handling individual post")
| 5,350,325 |
def fatal_errors():
"""Context manager meant to wrap an entire program and present schema errors in
an easy-to-read way.
"""
try:
yield
except FatalSchemaError as e:
u.err(f"{e.context}\n{e.message}\n\n")
sys.exit(1)
except s.SchemaError as e:
u.err(f"\n{e.code}\n\n")
sys.exit(1)
| 5,350,326 |
def bmm(tensor1, tensor2):
"""
Performs a batch matrix-matrix product of this tensor
and tensor2. Both tensors must be 3D containing equal number
of matrices.
If this is a (b x n x m) Tensor, batch2 is a (b x m x p) Tensor,
Result will be a (b x n x p) Tensor.
Parameters
----------
tensor1 : TensorBase
The first operand in the bmm operation
tensor2 : TensorBase
The second operand in the bmm operation
Returns
-------
TensorBase:
Output Tensor; with bmm operation
"""
_ensure_tensorbase(tensor1)
_ensure_tensorbase(tensor2)
if tensor2.data.ndim != 3:
print("dimension of tensor2 is not 3")
elif tensor1.data.ndim != 3:
print("dimension of tensor1 is not 3")
elif tensor1.encrypted or tensor2.encrypted:
return NotImplemented
else:
out = np.matmul(tensor1.data, tensor2.data)
return TensorBase(out)
| 5,350,327 |
def test_check_custom_attr():
"""Test if Doc and Token custom attributes exist"""
doc = nlp("Joey doesnt share food")
token = doc[0]
# add new custom attributes
doc.set_attribute(name="doc_tag", value="doc_value")
token.set_attribute(name="token_tag", value="token_value")
# check if the custom attributes exist
assert doc.has_attribute("doc_tag")
assert token.has_attribute("token_tag")
| 5,350,328 |
def parse_variable(config, v):
"""Parse a configuration variable from a string that should look like 'key = value'
and write that value to config[key].
:param config: The configuration dict to which to write the key,value pair
:param v: A string of the form 'key = value'
"""
if '=' not in v:
raise ValueError('Improper variable specification: %s. Use syntax: key = value.'%v)
key, value = v.split('=',1)
key = key.strip()
# Cut off any trailing comment
if '#' in value:
value = value.split('#')[0]
value = value.strip()
if value[0] in ['{','[','(']:
if value[-1] not in ['}',']',')']:
raise ValueError('List symbol %s not properly matched'%value[0])
values = value[1:-1].split(',')
values = [ vv.strip() for vv in values ]
else:
values = value.split() # on whitespace
if len(values) == 1:
config[key] = values[0]
else:
config[key] = values
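# A minimal usage sketch for parse_variable, with made-up keys and values:
config = {}
parse_variable(config, 'image_file = output.fits  # trailing comment')
parse_variable(config, 'bands = [g, r, i]')
assert config['image_file'] == 'output.fits'
assert config['bands'] == ['g', 'r', 'i']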
| 5,350,329 |
def get_monotask_from_macrotask(monotask_type, macrotask):
""" Returns a Monotask of the specified type from the provided Macrotask. """
return next((monotask for monotask in macrotask.monotasks if isinstance(monotask, monotask_type)))
| 5,350,330 |
def vivo_query(query, parms):
"""
A new VIVO query function using SPARQLWrapper. Tested with Stardog, UF VIVO and Dbpedia
:param query: SPARQL query. VIVO PREFIX will be added
:param parms: dictionary with query parms: queryuri, username and password
:return: result object, typically JSON
:rtype: dict
"""
from SPARQLWrapper import SPARQLWrapper, JSON
logger.debug(u"in vivo_query\n{}".format(parms))
sparql = SPARQLWrapper(parms['queryuri'])
new_query = parms['prefix'] + '\n' + query
sparql.setQuery(new_query)
logger.debug(new_query)
sparql.setReturnFormat(JSON)
sparql.addParameter("email", parms['username'])
sparql.addParameter("password", parms['password'])
# sparql.setCredentials(parms['username'], parms['password'])
results = sparql.query()
results = results.convert()
return results
| 5,350,331 |
def get_services_by_type(service_type, db_session):
# type: (Str, Session) -> Iterable[models.Service]
"""
Obtains all services that correspond to requested service-type.
"""
ax.verify_param(service_type, not_none=True, not_empty=True, http_error=HTTPBadRequest,
msg_on_fail="Invalid 'service_type' value '" + str(service_type) + "' specified")
services = db_session.query(models.Service).filter(models.Service.type == service_type)
return sorted(services, key=lambda svc: svc.resource_name)
| 5,350,332 |
def copy_arch(arch, library_dir, libgfortran, libquadmath):
"""Copy libraries specific to a given architecture.
Args:
arch (str): The architecture being copied.
library_dir (str): The directory containing the dynamic libraries.
libgfortran (str): The name (not path) of the ``libgfortran``
dynamic library.
libquadmath (str): The name (not path) of the ``libquadmath``
dynamic library.
Returns:
Tuple[str, str, str, str]: Four-tuple of
* The path to the ``arch``-specific location of the newly
created ``libgfortran``
* The path to the location of the universal ``libgfortran``
(not yet created, but referenced here as the ``install_name``)
* The path to the ``arch``-specific location of the newly
created ``libquadmath``
* The path to the location of the universal ``libquadmath``
(not yet created, but referenced here as the ``install_name``)
"""
sub_dir = os.path.join(FRANKENSTEIN, arch)
os.mkdir(sub_dir)
# Determine the old/new filenames.
old_libgfortran = os.path.join(library_dir, libgfortran)
arch_libgfortran = os.path.join(sub_dir, libgfortran)
universal_libgfortran = os.path.join(FRANKENSTEIN, libgfortran)
old_libquadmath = os.path.join(library_dir, libquadmath)
arch_libquadmath = os.path.join(sub_dir, libquadmath)
universal_libquadmath = os.path.join(FRANKENSTEIN, libquadmath)
# Update ``libgfortran``
copyfile(old_libgfortran, arch_libgfortran)
os.chmod(arch_libgfortran, 0o644)
subprocess.check_call(
("install_name_tool", "-id", universal_libgfortran, arch_libgfortran)
)
subprocess.check_call(
(
"install_name_tool",
"-change",
old_libquadmath,
universal_libquadmath,
arch_libgfortran,
)
)
os.chmod(arch_libgfortran, 0o444)
print("{}:".format(arch_libgfortran))
print("\t``install_name``:")
print("\t\t{}".format(universal_libgfortran))
print("\tDependencies:")
dependencies = get_dependencies(arch_libgfortran, check_exists=False)
for dependency in dependencies:
print("\t\t{}".format(dependency))
# Update ``libquadmath``
copyfile(old_libquadmath, arch_libquadmath)
os.chmod(arch_libquadmath, 0o644)
subprocess.check_call(
("install_name_tool", "-id", universal_libquadmath, arch_libquadmath)
)
os.chmod(arch_libquadmath, 0o444)
print("{}:".format(arch_libquadmath))
print("\t``install_name``:")
print("\t\t{}".format(universal_libquadmath))
print("\tDependencies:")
dependencies = get_dependencies(arch_libquadmath, check_exists=False)
for dependency in dependencies:
print("\t\t{}".format(dependency))
return (
arch_libgfortran,
universal_libgfortran,
arch_libquadmath,
universal_libquadmath,
)
| 5,350,333 |
def getREADMEforDescription(readmePath=os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.md')):
"""Use the Markdown from the file for the package's long_description.
long_description_content_type should be 'text/markdown' in this case.
This is why we need the README to be in the MANIFEST.in file.
"""
try:
with open(readmePath) as readme:
return '\n' + readme.read()
except FileNotFoundError:
return 'Package for fuzzing.'
| 5,350,334 |
def init_db():
"""Initializes the database."""
try:
with _get_conn().cursor() as cursor:
# execute schema sql file
with app.open_resource('db/schema/0001/user.sql', mode='r') as f:
sql = f.read()
print(sql)
result = cursor.execute(sql)
print(result)
finally:
print(_get_conn().close())
| 5,350,335 |
def abstractable(cls):
"""
A class decorator that scoops up AbstractValueRange class properties in order
to create .validate and .abstract methods for the class. Note that properties
added after the class is defined aren't counted. Each AbstractValueRange
found is also replaced with a class instance constructed from it.
"""
cls._ranges = []
for prp in dir(cls):
a = getattr(cls, prp)
if isinstance(a, AbstractValueRange):
cls._ranges.append((prp, a))
setattr(cls, prp, cls(a.val))
cls._ranges = sorted(cls._ranges, key=lambda nr: nr[1].mn)
@classmethod
def validate(cls, val):
ovn = min(r.mn for (n, r) in cls._ranges)
ovx = max(r.mx for (n, r) in cls._ranges)
return (isinstance(val, float) and val >= ovn and val <= ovx)
@classmethod
def abstract(cls, val):
found = None
for (n, r) in cls._ranges[:-1]:
if (
( r.mn == r.mx and val == r.mn )
or (val >= r.mn and val < r.mx)
):
found = r.val
elif val < r.mn:
break
# check final range including top
if found == None:
(n, r) = cls._ranges[-1]
if (r.mn == r.mx and val == r.mn) or (val >= r.mn and val <= r.mx):
found = r.val
if found == None:
raise ValueError(
"Can't abstract value '{}' as a {}: outside acceptable range.".format(
val,
cls.__name__
)
)
return cls(found)
def _pack_(self):
for (n, r) in type(self)._ranges:
if self == r.val:
return n
return self
@classmethod
def _unpack_(cls, obj):
return cls(obj)
cls.validate = validate
cls.abstract = abstract
cls._pack_ = _pack_
cls._unpack_ = _unpack_
return cls
| 5,350,336 |
def summary1c(sequence):
"""
What comes in: A sequence of integers, all >= 2.
What goes out:
-- Returns the sum of INDICES of the items in the sequence
that are prime.
Side effects: None.
Examples:
-- If the given sequence is [20, 23, 29, 30, 33, 29, 100, 2, 4],
then the returned value is 15, since the primes in the sequence
are at INDICES 1, 2, 5 and 7, and 1 + 2 + 5 + 7 = 15.
"""
total = 0
for k in range(len(sequence)):
if is_prime(sequence[k]):
total += k
return total
# -------------------------------------------------------------------------
# DONE: 5. Implement and test this function.
# -------------------------------------------------------------------------
| 5,350,337 |
def choose_page(btn_click_list: List[Tuple[int, str]]) -> str:
"""
Given a list of tuples of (num_clicks, next_page) choose the next_page that
corresponds to exactly 1 num_clicks.
This is to help with deciding which page to go to next when clicking on one
of many buttons on a page.
The expectation is that exactly one button will have been clicked, so we get
a deterministic next page.
:param btn_click_list: List of tuples of (num_clicks, next_page).
:return: The id of the next page.
"""
for tup in btn_click_list:
if tup[0] == 1:
return tup[1]
raise ValueError(
"No clicks were detected, or the click list is misconfigured: {}".format(
btn_click_list
)
)
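# A minimal usage sketch for choose_page, with hypothetical button click states:
clicks = [(0, 'home-page'), (1, 'settings-page'), (0, 'about-page')]
assert choose_page(clicks) == 'settings-page'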
| 5,350,338 |
def get_content(request, path=''):
"""Get content from datastore as requested on the url path
Args:
path - comes without leading slash. / added in code
"""
content = StaticContent.get_by_key_name("/%s" % path)
if not content:
if path == '':
# Nothing generated yet. Inform user to create some content
return render_to_response("blog/themes/%s/listing.html" % config.theme,
{'config': config, 'no_post': True,})
else:
raise NotFound
serve = True
# check modifications and etag
if 'If-Modified-Since' in request.headers:
last_seen = datetime.datetime.strptime(
request.headers['If-Modified-Since'], HTTP_DATE_FMT)
if last_seen >= content.last_modified.replace(microsecond=0):
serve = False
if 'If-None-Match' in request.headers:
etags = [x.strip('" ')
for x in request.headers['If-None-Match'].split(',')]
if content.etag in etags:
serve = False
response = _output(content, serve)
return response
| 5,350,339 |
def reconstruct_entity(input_examples, entitys_iter):
""" the entitys_iter contains the prediction entity of the splited examples.
We need to reconstruct the complete entitys for each example in input_examples.
and return the results as dictionary.
input_examples: each should contains (start, end) indice.
entitys_iter: iterator of entitys
Overlaps follow first-in, first-set order:
--------------------------------------
O O O B-PER I-PER
O O O O B-GPE I-GPE
O B-LOC I-LOC O O
--------------------------------------
O O O B-PER I-PER O B-GPE I-GPE O O
--------------------------------------
return: the complete entities of each input example.
"""
predict_entitys = []
for i, example in enumerate(input_examples):
_entity = []
for span in example.sentence_spans:
_, _, start, end = span
# +1 to skip the first padding
_entity.extend(next(entitys_iter)[start : end])
predict_entitys.append(_entity)
assert len(predict_entitys) == len(input_examples)
return predict_entitys
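# A minimal worked example for reconstruct_entity; the namedtuple below is a
# hypothetical stand-in for the real input example class, which only needs a
# `sentence_spans` attribute of (_, _, start, end) tuples:
from collections import namedtuple
Example = namedtuple('Example', 'sentence_spans')
examples = [Example(sentence_spans=[(None, None, 1, 3), (None, None, 1, 2)])]
entity_chunks = iter([['O', 'B-PER', 'I-PER', 'O'], ['O', 'B-LOC', 'O']])
assert reconstruct_entity(examples, entity_chunks) == [['B-PER', 'I-PER', 'B-LOC']]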
| 5,350,340 |
def sudo_password_handler(spawn, context):
""" Password handler for sudo command
Need a better place for 'sudo' password, using line_password as workaround
"""
credentials = context.get('credentials')
if credentials:
try:
spawn.sendline(
to_plaintext(credentials[SUDO_CRED_NAME]['password']))
except KeyError as exc:
raise UniconAuthenticationError("No password has been defined "
"for credential '{}'.".format(SUDO_CRED_NAME))
else:
spawn.sendline(context['line_password'])
| 5,350,341 |
def ensure_valid_schema(spec: Dict) -> List[str]:
"""
Ensure that the provided spec has no schema errors.
Returns a list with all the errors found.
"""
error_messages = []
validator = cerberus.Validator(yaml.safe_load(SNOWFLAKE_SPEC_SCHEMA))
validator.validate(spec)
for entity_type, err_msg in validator.errors.items():
if isinstance(err_msg[0], str):
error_messages.append(f"Spec error: {entity_type}: {err_msg[0]}")
continue
for error in err_msg[0].values():
error_messages.append(f"Spec error: {entity_type}: {error[0]}")
if error_messages:
return error_messages
schema = {
"databases": yaml.safe_load(SNOWFLAKE_SPEC_DATABASE_SCHEMA),
"roles": yaml.safe_load(SNOWFLAKE_SPEC_ROLE_SCHEMA),
"users": yaml.safe_load(SNOWFLAKE_SPEC_USER_SCHEMA),
"warehouses": yaml.safe_load(SNOWFLAKE_SPEC_WAREHOUSE_SCHEMA),
}
validators = {
"databases": cerberus.Validator(schema["databases"]),
"roles": cerberus.Validator(schema["roles"]),
"users": cerberus.Validator(schema["users"]),
"warehouses": cerberus.Validator(schema["warehouses"]),
}
entities_by_type = []
for entity_type, entities in spec.items():
if entities and entity_type in ["databases", "roles", "users", "warehouses"]:
entities_by_type.append((entity_type, entities))
for entity_type, entities in entities_by_type:
for entity_dict in entities:
for entity_name, config in entity_dict.items():
validators[entity_type].validate(config)
for field, err_msg in validators[entity_type].errors.items():
error_messages.append(
VALIDATION_ERR_MSG.format(
entity_type, entity_name, field, err_msg[0]
)
)
return error_messages
| 5,350,342 |
def test__dialect__base_parse_struct(
dialect,
sqlfile,
code_only,
yamlfile,
yaml_loader,
):
"""For given test examples, check parsed structure against yaml."""
parsed: Optional[BaseSegment] = parse_example_file(dialect, sqlfile)
actual_hash = compute_parse_tree_hash(parsed)
# Load the YAML
expected_hash, res = yaml_loader(make_dialect_path(dialect, yamlfile))
if not parsed:
assert parsed == res
return
# Verify the current parse tree matches the historic parse tree.
parsed_tree = parsed.to_tuple(code_only=code_only, show_raw=True)
# The parsed tree consists of a tuple of "File:", followed by the
# statements. So only compare when there is at least one statement.
if parsed_tree[1] or res[1]:
assert parsed_tree == res
# Verify the current hash matches the historic hash. The main purpose of
# this check is to force contributors to use the generator script to
# create these files. New contributors have sometimes been unaware of
# this tool and have attempted to craft the YAML files manually. This
# can lead to slight differences, confusion, and errors.
assert expected_hash == actual_hash, (
"Parse tree hash does not match. Please run "
"'python test/generate_parse_fixture_yml.py' to create YAML files "
"in test/fixtures/dialects."
)
| 5,350,343 |
def realord(s, pos=0):
"""
Returns the Unicode code point (as a hex string) of the character at `pos` in a string, taking surrogate pairs into account
"""
if s is None:
return None
code = ord(s[pos])
if code >= 0xD800 and code < 0xDC00:
if len(s) <= pos + 1:
print("realord warning: missing surrogate character")
return 0
code2 = ord(s[pos + 1])
if code2 >= 0xDC00 and code2 < 0xE000:
code = 0x10000 + ((code - 0xD800) << 10) + (code2 - 0xDC00)
return hex(code).replace("x", "")
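# A minimal usage sketch for realord; the second string is an explicit UTF-16
# surrogate pair for U+1D11E (assuming Python 3, where lone surrogates are
# still legal in string literals):
assert realord('A') == '041'
assert realord('\ud834\udd1e') == '01d11e'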
| 5,350,344 |
def forceAspect(ax,aspect=1):
"""
Forces the aspect ratio to be equal
Copy of Yann's answer to the SO question:
http://stackoverflow.com/questions/7965743/\
how-can-i-set-the-aspect-ratio-in-matplotlib
:param ax:
:param aspect:
"""
im = ax.get_images()
extent = im[0].get_extent()
ax.set_aspect(abs((extent[1]-extent[0])/(extent[3]-extent[2]))/aspect)
| 5,350,345 |
def evaluateSpectral(left_state,right_state,xy):
"""Use this method to compute the Roe Average.
q(state)
q[0] = rho
q[1] = rho*u
q[2] = rho*v
q[3] = rho*e
"""
spec_state = numpy.zeros(left_state.shape)
rootrhoL = numpy.sqrt(left_state[0])
rootrhoR = numpy.sqrt(right_state[0])
tL = left_state/left_state[0] #Temporary variable to access e, u, v, and w - Left
tR = right_state/right_state[0] #Temporary variable to access e, u, v, and w - Right
#Calculations
denom = 1/(rootrhoL+rootrhoR)
spec_state[0] = rootrhoL*rootrhoR
spec_state[1] = (rootrhoL*tL[1]+rootrhoR*tR[1])*denom
spec_state[2] = (rootrhoL*tL[2]+rootrhoR*tR[2])*denom
spec_state[3] = (rootrhoL*tL[3]+rootrhoR*tR[3])*denom
spvec = (spec_state[0],spec_state[0]*spec_state[1],spec_state[0]*spec_state[2],spec_state[0]*spec_state[3])
P = getPressure(spvec)
dim = 1 if xy else 2 #if true provides u dim else provides v dim
spectralRadius = (numpy.sqrt(gamma*P/spec_state[0])+abs(spec_state[dim]))
spectralRadius = 0 if numpy.isnan(spectralRadius) else spectralRadius #sets spectral radius to zero if it's nan
return spectralRadius*(left_state-right_state)
| 5,350,346 |
def get_multidata_bbg(requests):
"""function for multiple asynchronous refdata requests, returns a
dictionary of the form correlationID:result.
Function Parameters
----------
requests : dictionary of correlationID:request pairs. CorrelationIDs
are unique integers (cannot reuse until previous requests have
returned). Requests can be either dicts of named arguments or
list-likes of ordered arguments. Although technically anything
can be made into a blpapi.CorrelationId, integers simplify usage.
Request Parameters
----------
identifiers : list-like object of bloomberg identifiers of the form
'symbol [exchange] <yellow key>'. Symbol can be ticker/name/
cusip/etc.
fields : list-like object of bloomberg field mnemonics or CALCRT ID.
Although either can be input, only the mnemonic will be output.
overrides : list-like object of tuples or dictionary. Tuples must be of
the form [(fieldId, value), ], while dictionaries are
{fieldId: value, }.
FieldId(s) are mnemonics or CALCRT IDs, values will be converted
to the proper type if possible.
"""
with bs.Session() as session:
try:
if not isinstance(requests, dict):
raise be.InputError('request_mult_refdata requires a '
'dictionary of correlationId:input pairs')
for corr_id, req in requests.items():
if isinstance(req, dict):
inputs = req
elif hasattr(req, '__iter__'):
if len(req) == 3:
pass
elif len(req) == 2:
req = list(req)
req.append(None)
else:
raise be.InputError('Request {0} has {1} items'
', expected 2-3.'.format(corr_id, len(req)))
inputs = dict(zip((IDS, FLDS, OVDS), req))
else:
raise be.InputError('Request {0} is of type: {1}, '
'expected dict or list-like'.format(corr_id,
type(req)))
_ref_req_queue(session, corr_id, inputs)
except be.InputError as err:
print(err)
_refdata_to_bloomberg(session)
session.queue.join()
rtn = session.correlation_ids
return rtn
| 5,350,347 |
def specific_kinetic_energy(particles):
"""
Returns the specific kinetic energy of each particle in the set.
>>> from amuse.datamodel import Particles
>>> particles = Particles(2)
>>> particles.vx = [1.0, 1.0] | units.ms
>>> particles.vy = [0.0, 0.0] | units.ms
>>> particles.vz = [0.0, 0.0] | units.ms
>>> particles.mass = [1.0, 1.0] | units.kg
>>> particles.specific_kinetic_energy()
quantity<[0.5, 0.5] m**2 * s**-2>
"""
return 0.5*(particles.vx**2+particles.vy**2+particles.vz**2)
| 5,350,348 |
def parse_loot_percentage(text):
"""Use to parse loot percentage string, ie: Roubo: 50% becomes 0.5"""
percentage = float(text.split(':')[1].strip("%")) / 100
return percentage
| 5,350,349 |
def major_minor_change(old_version, new_version):
"""Check if a major or minor change occurred."""
major_mismatch = old_version.major != new_version.major
minor_mismatch = old_version.minor != new_version.minor
if major_mismatch or minor_mismatch:
return True
return False
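# A minimal usage sketch; any object exposing .major and .minor works here,
# e.g. packaging.version.Version (assumed to be installed):
from packaging.version import Version
assert major_minor_change(Version('1.2.3'), Version('1.3.0')) is True
assert major_minor_change(Version('1.2.3'), Version('1.2.9')) is False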
| 5,350,350 |
def request_mnu_data() -> Iterator[MnuData]:
"""
Request info from the NHK website
"""
yield from map(parse_json_entry, request_mnu_json())
| 5,350,351 |
def list_all_queues(path: str, vhost: Optional[str] = '/') -> List[Dict]:
"""Send a request to RabbitMQ api to list all the data queues.
Args:
path: Path to the RabbitMQ management api to send the request to.
vhost: Virtual host of the RabbitMQ.
Returns:
List of all the data queues.
"""
quoted_vhost = parse.quote_plus(vhost)
queues_path = path + f'api/queues/{quoted_vhost}'
queues = request_sender.make_request('GET', queues_path)
return queues
| 5,350,352 |
def submit_images_local(c):
"""This command isn't implemented please modify to use.
The call below will work for submitting jobs to execute locally on a GPU.
Here we also map a volume to the docker container executing locally. This is the
location we tell our script to look for our training and validation data. Feel free to
adjust the other arguments as required by your training script.
"""
raise NotImplementedError(
"You need to modify this call before being able to use it"
)
from aml_compute import PyTorchExperimentCLI
exp = PyTorchExperimentCLI("<YOUR-EXPERIMENT-NAME>")
run = exp.submit_local(
os.path.join(_BASE_PATH, "src"),
"<YOUR-TRAINING-SCRIPT>",
{
"--training_data_path": "/data/train",
"--validation_data_path": "/data/validation",
"--epochs": "1",
"--data_type": "images",
"--data-format": "channels_first",
},
dependencies_file=os.path.join(_BASE_PATH, "environment_gpu.yml"),
docker_args=["-v", f"{env_values['data']}:/data"],
wait_for_completion=True,
)
print(run)
| 5,350,353 |
def get_tf_model_variables(config_path, init_checkpoint):
"""Return tf model parameters in a dictionary format.
Args:
config_path: path to TF model configuration file
init_checkpoint: path to saved TF model checkpoint
Returns:
tf_config: dictionary tf model configurations
tf_variables: dictionary of tf variables
tf_model: tensorflow BERT model generated using input config and checkpoint
"""
# Load saved model configuration
config = configs.BertConfig.from_json_file(config_path)
# Generate BERT TF model and initiate variable update from checkpoint
seq_len = 20
_, tf_model = bert_models.squad_model(config, seq_len)
checkpoint = tf.train.Checkpoint(model=tf_model)
checkpoint.restore(init_checkpoint).assert_existing_objects_matched()
tf_config = config.__dict__
tf_variables = {v.name: v.numpy() for v in tf_model.variables}
return tf_config, tf_variables, tf_model
| 5,350,354 |
def org_unit_type_filter(queryset, passed_in_org_types):
"""Get specific Organisational units based on a filter."""
for passed_in_org_type in passed_in_org_types:
queryset = queryset.filter(org_unit_type_id=passed_in_org_type)
return queryset
| 5,350,355 |
def circle_and_rectangle():
"""
-- Constructs an rg.RoseWindow.
-- Constructs and draws a rg.Circle and rg.Rectangle
on the window such that:
-- They fit in the window and are easily visible.
-- The rg.Circle is filled with 'blue'
-- Prints (on the console, on SEPARATE lines) the following data
associated with your rg.Circle:
-- Its outline thickness.
-- Its fill color.
-- Its center.
-- Its center's x coordinate.
-- Its center's y coordinate.
-- Prints (on the console, on SEPARATE lines) the same data
but for your rg.Rectangle.
-- Waits for the user to press the mouse, then closes the window.
Here is an example of the output on the console,
for one particular circle and rectangle:
1
blue
Point(180.0, 115.0)
180
115
1
None
Point(75.0, 150.0)
75.0
150.0
"""
# construct rose window
window = rg.RoseWindow(400, 400, 'hello')
# create a circle filled with blue
center1 = rg.Point(100, 200)
r = 40
circle1 = rg.Circle(center1, r)
circle1.fill_color = 'blue'
# draw the circle
circle1.attach_to(window)
window.render()
#print everything
print(circle1)
# create a rectangle
point1 = rg.Point(300, 50)
point2 = rg.Point(350, 150)
rectangle = rg.Rectangle(point1, point2)
rectangle.attach_to(window)
window.render()
#print everything
print(rectangle)
# close the window when the mouse is clicked
window.close_on_mouse_click()
# ------------------------------------------------------------------
# done: 3. Implement this function, per its doc-string above.
# -- ANY objects that meet the criteria are fine.
# Put a statement in main to test this function
# (by calling this function).
#
# IMPORTANT: Use the DOT TRICK to guess the names of the relevant
# instance variables for outline thickness, etc.
# ------------------------------------------------------------------
| 5,350,356 |
def is_str_digit(n: str) -> bool:
"""Check whether the given string is a digit or not. """
try:
float(n)
return True
except ValueError:
return False
| 5,350,357 |
def draw_all_poly_detection(im_array, detections, class_names, scale, cfg, threshold=0.2):
"""
visualize all detections in one image
:param im_array: [b=1 c h w] in rgb
:param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
:param class_names: list of names in imdb
:param scale: visualize the scaled image
:return:
"""
import cv2
import random
color_white = (255, 255, 255)
im = image.transform_inverse(im_array, cfg.network.PIXEL_MEANS)
# change to bgr
im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
if DEBUG:
class_names = ['__background__', 'fg']
for j, name in enumerate(class_names):
if name == '__background__':
continue
color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256)) # generate a random color
dets = detections[j]
for det in dets:
bbox = det[:8] * scale
score = det[-1]
if score < threshold:
continue
bbox = list(map(int, bbox))
# draw first point
cv2.circle(im, (bbox[0], bbox[1]), 3, (0, 0, 255), -1)
for i in range(3):
cv2.line(im, (bbox[i * 2], bbox[i * 2 + 1]), (bbox[(i+1) * 2], bbox[(i+1) * 2 + 1]), color=color, thickness=2)
cv2.line(im, (bbox[6], bbox[7]), (bbox[0], bbox[1]), color=color, thickness=2)
cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
return im
| 5,350,358 |
def test_requests():
"""WebService_test.test_requests()
Use TestApp to confirm correct response status, status int,
and content-type.
"""
app = webtest.TestApp(WebService(TestFactory()))
# Check invalid request (bad values)
response = app.get("/?id=bad", expect_errors=True)
assert_equal(response.status_int, 400)
assert_equal(response.status, "400 Bad Request")
assert_equal(response.content_type, "text/plain")
# Check invalid request (duplicates)
response = app.get("/?id=BOU&id=BOU", expect_errors=True)
assert_equal(response.status_int, 400)
assert_equal(response.status, "400 Bad Request")
assert_equal(response.content_type, "text/plain")
# Check valid request (upper and lower case)
response = app.get("/?id=BOU")
assert_equal(response.status_int, 200)
assert_equal(response.status, "200 OK")
assert_equal(response.content_type, "text/plain")
# Test internal server error (use fake factory)
app = webtest.TestApp(WebService(ErrorFactory(), error_stream=None))
response = app.get("/?id=BOU", expect_errors=True)
assert_equal(response.status_int, 500)
assert_equal(response.status, "500 Internal Server Error")
assert_equal(response.content_type, "text/plain")
| 5,350,359 |
def generate_dswx_layers(input_list, output_file,
hls_thresholds = None,
dem_file=None,
output_interpreted_band=None,
output_rgb_file=None,
output_infrared_rgb_file=None,
output_binary_water=None,
output_confidence_layer=None,
output_diagnostic_layer=None,
output_non_masked_dswx=None,
output_shadow_masked_dswx=None,
output_landcover=None,
output_shadow_layer=None,
output_cloud_mask=None,
output_dem_layer=None,
landcover_file=None,
built_up_cover_fraction_file=None,
flag_offset_and_scale_inputs=False,
scratch_dir='.',
product_id=None,
flag_debug=False):
"""Apply shadow layer onto interpreted layer
Parameters
----------
input_list: list
Input file list
output_file: str
Output filename
hls_thresholds: HlsThresholds (optional)
HLS reflectance thresholds for generating DSWx-HLS products
dem_file: str (optional)
DEM filename
output_interpreted_band: str (optional)
Output interpreted band filename
output_rgb_file: str (optional)
Output RGB filename
output_infrared_rgb_file: str (optional)
Output infrared RGB filename
output_binary_water: str (optional)
Output binary water filename
output_confidence_layer: str (optional)
Output confidence layer filename
output_diagnostic_layer: str (optional)
Output diagnostic layer filename
output_non_masked_dswx: str (optional)
Output (non-masked) interpreted layer filename
output_shadow_masked_dswx: str (optional)
Output shadow-masked filename
output_landcover: str (optional)
Output landcover classification file
output_shadow_layer: str (optional)
Output shadow layer filename
output_cloud_mask: str (optional)
Output cloud/cloud-shadow mask filename
output_dem_layer: str (optional)
Output elevation layer filename
landcover_file: str (optional)
Input landcover classification filename
built_up_cover_fraction_file: str (optional)
Input built-up cover fraction filename
flag_offset_and_scale_inputs: bool (optional)
Flag indicating if DSWx-HLS should be offsetted and scaled
scratch_dir: str (optional)
Temporary directory
product_id: str (optional)
Product ID that will be saved in the output product's
metadata
flag_debug: bool (optional)
Flag to indicate if execution is for debug purposes. If so,
only a subset of the image will be loaded into memory
Returns
-------
success : bool
Flag success indicating if execution was successful
"""
if hls_thresholds is None:
hls_thresholds = parse_runconfig_file()
if scratch_dir is None:
scratch_dir = '.'
logger.info('input parameters:')
logger.info(' file(s):')
for input_file in input_list:
logger.info(f' {input_file}')
logger.info(f' output_file: {output_file}')
logger.info(f' DEM file: {dem_file}')
logger.info(f' scratch directory: {scratch_dir}')
os.makedirs(scratch_dir, exist_ok=True)
image_dict = {}
offset_dict = {}
scale_dict = {}
output_files_list = []
build_vrt_list = []
dem = None
shadow_layer = None
if product_id is None and output_file:
product_id = os.path.splitext(os.path.basename(output_file))[0]
elif product_id is None:
product_id = 'dswx_hls'
dswx_metadata_dict = _get_dswx_metadata_dict(product_id)
version = None
if not isinstance(input_list, list) or len(input_list) == 1:
success = _load_hls_product_v1(input_list, image_dict, offset_dict,
scale_dict, dswx_metadata_dict,
flag_offset_and_scale_inputs,
flag_debug = flag_debug)
if success:
version = '1.4'
else:
success = None
# If success is None or False:
if success is not True:
success = _load_hls_product_v2(input_list, image_dict, offset_dict,
scale_dict, dswx_metadata_dict,
flag_offset_and_scale_inputs,
flag_debug = flag_debug)
if not success:
logger.info(f'ERROR could not read file(s): {input_list}')
return False
version = '2.0'
hls_dataset_name = image_dict['hls_dataset_name']
_populate_dswx_metadata_datasets(dswx_metadata_dict, hls_dataset_name,
dem_file=None, landcover_file=None, built_up_cover_fraction_file=None)
spacecraft_name = dswx_metadata_dict['SPACECRAFT_NAME']
logger.info(f'processing HLS {spacecraft_name[0]}30 dataset v.{version}')
blue = image_dict['blue']
green = image_dict['green']
red = image_dict['red']
nir = image_dict['nir']
swir1 = image_dict['swir1']
swir2 = image_dict['swir2']
qa = image_dict['qa']
geotransform = image_dict['geotransform']
projection = image_dict['projection']
length = image_dict['length']
width = image_dict['width']
sun_azimuth_angle_meta = dswx_metadata_dict['MEAN_SUN_AZIMUTH_ANGLE'].split(', ')
sun_zenith_angle_meta = dswx_metadata_dict['MEAN_SUN_ZENITH_ANGLE'].split(', ')
if len(sun_azimuth_angle_meta) == 2:
sun_azimuth_angle = (float(sun_azimuth_angle_meta[0]) +
float(sun_azimuth_angle_meta[1])) / 2.0
else:
sun_azimuth_angle = float(sun_azimuth_angle_meta[0])
if len(sun_zenith_angle_meta) == 2:
sun_zenith_angle = (float(sun_zenith_angle_meta[0]) +
float(sun_zenith_angle_meta[1])) / 2.0
else:
sun_zenith_angle = float(sun_zenith_angle_meta[0])
# Sun elevation and zenith angles are complementary
sun_elevation_angle = 90 - float(sun_zenith_angle)
logger.info(f'Mean Sun azimuth angle: {sun_azimuth_angle}')
logger.info(f'Mean Sun elevation angle: {sun_elevation_angle}')
if dem_file is not None:
# DEM
if output_dem_layer is None:
dem_cropped_file = tempfile.NamedTemporaryFile(
dir=scratch_dir, suffix='.tif').name
else:
dem_cropped_file = output_dem_layer
dem = _relocate(dem_file, geotransform, projection,
length, width, scratch_dir,
resample_algorithm='cubic',
relocated_file=dem_cropped_file)
# TODO:
# 1. crop DEM with a margin
# 2. save metadata to DEM layer
hillshade = _compute_hillshade(dem_cropped_file, scratch_dir,
sun_azimuth_angle, sun_elevation_angle)
shadow_layer = _compute_otsu_threshold(hillshade, is_normalized = True)
if output_shadow_layer:
_save_array(shadow_layer, output_shadow_layer,
dswx_metadata_dict, geotransform, projection,
description=band_description_dict['SHAD'],
output_files_list=build_vrt_list)
if landcover_file is not None:
if output_landcover is None:
relocated_landcover_file = tempfile.NamedTemporaryFile(
dir=scratch_dir, suffix='.tif').name
else:
relocated_landcover_file = output_landcover
# Land Cover
# TODO output_landcover will be the output of create_landcover_mask()
landcover = _relocate(landcover_file, geotransform, projection,
length, width, scratch_dir,
relocated_file=relocated_landcover_file)
if built_up_cover_fraction_file is not None:
# Build-up cover fraction
built_up_cover_fraction = _relocate(built_up_cover_fraction_file,
geotransform, projection,
length, width, scratch_dir,
relocated_file =
'temp_built_up_cover_fraction.tif')
# Set invalid pixels to fill value (255)
if not flag_offset_and_scale_inputs:
invalid_ind = np.where(blue < -5000)
else:
invalid_ind = np.where(blue < -0.5)
if output_rgb_file:
_save_output_rgb_file(red, green, blue, output_rgb_file,
offset_dict, scale_dict,
flag_offset_and_scale_inputs,
geotransform, projection,
invalid_ind=invalid_ind,
output_files_list=output_files_list)
if output_infrared_rgb_file:
_save_output_rgb_file(swir1, nir, red, output_infrared_rgb_file,
offset_dict, scale_dict,
flag_offset_and_scale_inputs,
geotransform, projection,
invalid_ind=invalid_ind,
output_files_list=output_files_list,
flag_infrared=True)
diagnostic_layer = _compute_diagnostic_tests(
blue, green, red, nir, swir1, swir2, hls_thresholds)
if output_diagnostic_layer:
_save_array(diagnostic_layer, output_diagnostic_layer,
dswx_metadata_dict, geotransform, projection,
description=band_description_dict['DIAG'],
output_files_list=build_vrt_list)
interpreted_dswx_band = generate_interpreted_layer(diagnostic_layer)
if invalid_ind is not None:
interpreted_dswx_band[invalid_ind] = 255
if output_non_masked_dswx:
save_dswx_product(interpreted_dswx_band,
output_non_masked_dswx,
dswx_metadata_dict,
geotransform,
projection,
description=band_description_dict['WTR-1'],
scratch_dir=scratch_dir,
output_files_list=build_vrt_list)
if shadow_layer is not None:
shadow_masked_dswx = _apply_shadow_layer(
interpreted_dswx_band, shadow_layer)
else:
shadow_masked_dswx = interpreted_dswx_band
if output_shadow_masked_dswx is not None:
save_dswx_product(shadow_masked_dswx, output_shadow_masked_dswx,
dswx_metadata_dict,
geotransform,
projection,
description=band_description_dict['WTR-2'],
scratch_dir=scratch_dir,
output_files_list=build_vrt_list)
cloud, masked_dswx_band = _compute_mask_and_filter_interpreted_layer(
shadow_masked_dswx, qa)
if invalid_ind is not None:
# Set invalid pixels to mask fill value (255)
cloud[invalid_ind] = 255
masked_dswx_band[invalid_ind] = 255
if output_interpreted_band:
save_dswx_product(masked_dswx_band,
output_interpreted_band,
dswx_metadata_dict,
geotransform,
projection,
description=band_description_dict['WTR'],
scratch_dir=scratch_dir,
output_files_list=build_vrt_list)
if output_cloud_mask:
save_mask(cloud, output_cloud_mask, dswx_metadata_dict, geotransform,
projection,
description=band_description_dict['CLOUD'],
output_files_list=build_vrt_list)
binary_water_layer = _get_binary_water_layer(masked_dswx_band)
if output_binary_water:
_save_binary_water(binary_water_layer, output_binary_water,
dswx_metadata_dict,
geotransform, projection,
description=band_description_dict['BWTR'],
output_files_list=build_vrt_list)
# TODO: fix CONF layer!!!
if output_confidence_layer:
_save_binary_water(binary_water_layer, output_confidence_layer,
dswx_metadata_dict,
geotransform, projection,
description=band_description_dict['CONF'],
output_files_list=build_vrt_list)
# save output_file as GeoTIFF
if output_file and not output_file.endswith('.vrt'):
save_dswx_product(masked_dswx_band,
output_file,
dswx_metadata_dict,
geotransform,
projection,
bwtr=binary_water_layer,
diag=diagnostic_layer,
wtr_1=interpreted_dswx_band,
wtr_2=shadow_masked_dswx,
shad=shadow_layer,
cloud=cloud,
dem=dem,
scratch_dir=scratch_dir,
output_files_list=output_files_list)
# save output_file as VRT
elif output_file:
vrt_options = gdal.BuildVRTOptions(resampleAlg='nearest')
gdal.BuildVRT(output_file, build_vrt_list, options=vrt_options)
build_vrt_list.append(output_file)
logger.info(f'file saved: {output_file}')
logger.info('list of output files:')
for filename in build_vrt_list + output_files_list:
logger.info(filename)
return True
| 5,350,360 |
def range_check_function(bottom, top):
"""Returns a function that checks if bottom <= arg < top, allowing bottom and/or top to be None"""
import operator
if top is None:
if bottom is None:
# Can't currently happen (checked before calling this), but let's do something reasonable
return lambda _: True
else:
return partial(operator.le, bottom)
elif bottom is None:
return partial(operator.gt, top)
else:
def range_f(v):
return v >= bottom and v < top
return range_f
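# A minimal usage sketch for range_check_function, assuming functools.partial
# is imported at module level as in the original module:
in_range = range_check_function(0, 10)
assert in_range(0) and in_range(9.5) and not in_range(10)
below_ten = range_check_function(None, 10)   # only an upper bound
assert below_ten(-100) and not below_ten(10)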
| 5,350,361 |
def filter_employee():
""" When the client requests a specific employee.
Valid queries:
?employeeid=<employeeid>
Returns: json representation of the employee.
"""
query_parameters = request.args
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
cursor = conn.cursor()
lookup_code = query_parameters.get('employeeid')
base_query = "SELECT * FROM employee WHERE"
if lookup_code:
query = "{} employeeid = '{}'".format(base_query, lookup_code)
cursor.execute(query)
record_list = cursor.fetchall()
conn.close()
data_list = parse_employee_info(record_list)
return jsonify(data_list)
else:
conn.close()
return "<h1>404</h1><p>The employeeid was not found.</p>"
| 5,350,362 |
def update_ufos(ai_settings,stats,sb,screen,ship,ufos,bullets):
"""update the position of ufos"""
check_fleet_edges(ai_settings, ufos)
ufos.update()
# Check for collisions between the ship and the UFOs
if pygame.sprite.spritecollideany(ship,ufos):
ship_hit(ai_settings,stats,sb,screen,ship,ufos,bullets)
# Check whether any UFO has reached the bottom of the screen
check_ufos_bottom(ai_settings,stats,sb,screen,ship,ufos,bullets)
| 5,350,363 |
def ParallelLSTDQ(D,env,w,damping=0.001,ncpus=None):
"""
D : source of samples (s,a,r,s',a')
env: environment containing k,phi,gamma
w : weights for the linear policy evaluation
damping : keeps the result relatively stable
ncpus : the number of cpus to use
"""
if ncpus:
nprocess = ncpus
else:
nprocess = cpu_count()
pool = Pool(nprocess)
indx = chunk(len(D),nprocess)
results = []
for (i,j) in indx:
r = pool.apply_async(dict_loop,(D[i:j],env,w,0.0)) # note that damping needs to be zero here
results.append(r)
k = len(w)
A = sp.identity(k,format='csr') * damping
b = sp_create(k,1,'csr')
for r in results:
T,t = r.get()
A = A + T
b = b + t
# close out the pool of workers
pool.close()
pool.join()
w,info = solve(A,b,method="spsolve")
return A,b,w,info
| 5,350,364 |
def get_value_from_settings_with_default_string(wf, value, default_value):
"""Returns either a value as set in the settings file or a default as specified by caller"""
try:
ret = wf.settings[value]['value']
return str(ret)
except KeyError:
return default_value
| 5,350,365 |
def jsonify(records):
"""
Parse asyncpg record response into JSON format
"""
return [dict(r.items()) for r in records]
| 5,350,366 |
def acceptExtin(in_f, out_f, addr=None):
    """ Create a command source with the given fds as input and output. """
    # Force new versions to be loaded.
    #
    # deep_reload(Hub)
    nubID = g.nubIDs.gimme()
    d = Hub.ASCIIReplyDecoder(debug=1)
    e = Hub.ASCIICmdEncoder(debug=1, sendCommander=True)
    # NOTE: the original passed an undefined `name`; derive one from the nub ID instead.
    nub = Hub.StdinNub(poller, in_f, out_f, name='extin.%s' % (nubID,),
                       encoder=e, decoder=d, debug=1)
    # NOTE: the original called `c.taster.addToFilter(...)` with `c` undefined;
    # the filter belongs to the newly created nub.
    nub.taster.addToFilter(('tcc', 'dis', 'hub', 'msg'), (), ('hub',))
    hub.addActor(nub)
| 5,350,367 |
def write_commits(commit_content, file_name):
"""
Writes commit content to a provided file.
:param commit_content:
:param file_name:
:return: none
"""
with open(file_name, "w") as json_file:
json.dump(commit_content, json_file)
| 5,350,368 |
def smoothing_cross_entropy(logits,
labels,
vocab_size,
confidence,
gaussian=False,
zero_pad=True):
"""Cross entropy with label smoothing to limit over-confidence.
Args:
logits: Tensor of size [batch_size, ?, vocab_size]
labels: Tensor of size [batch_size, ?]
vocab_size: Tensor representing the size of the vocabulary.
confidence: Used to determine on and off values for label smoothing.
If `gaussian` is true, `confidence` is the variance to the gaussian
distribution.
gaussian: Uses a gaussian distribution for label smoothing
      zero_pad: use 0 as the probability of the padding
in the smoothed labels. By setting this, we replicate the
numeric calculation of tensor2tensor, which doesn't set the
<BOS> token in the vocabulary.
Returns:
the cross entropy loss.
"""
with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
# Low confidence is given to all non-true labels, uniformly.
if zero_pad:
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 2)
else:
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
if gaussian and confidence > 0.0:
labels = tf.cast(labels, tf.float32)
normal_dist = tf.distributions.Normal(loc=labels, scale=confidence)
soft_targets = normal_dist.prob(
tf.cast(tf.range(vocab_size), tf.float32)\
[:, None, None])
# Reordering soft_targets from [vocab_size, batch_size, ?]
# to match logits: [batch_size, ?, vocab_size]
soft_targets = tf.transpose(soft_targets, perm=[1, 2, 0])
else:
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence,
dtype=logits.dtype)
if zero_pad:
soft_targets = tf.concat([tf.expand_dims(\
tf.zeros_like(labels, dtype=tf.float32), 2),\
soft_targets[:, :, 1:]], -1)
if hasattr(tf.nn, 'softmax_cross_entropy_with_logits_v2'):
cross_entropy_fn = tf.nn.softmax_cross_entropy_with_logits_v2
else:
cross_entropy_fn = tf.nn.softmax_cross_entropy_with_logits
return cross_entropy_fn(
logits=logits, labels=soft_targets)
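# Hedged illustration of the label-smoothing scheme above (not part of the
# original source): a plain-NumPy sketch of how the soft targets look when
# `gaussian` is False and `zero_pad` is False.
def _smoothed_targets_sketch(labels, vocab_size, confidence):
    import numpy as np
    # `confidence` goes to the true class; the remaining mass is spread uniformly
    low_confidence = (1.0 - confidence) / float(vocab_size - 1)
    targets = np.full((len(labels), vocab_size), low_confidence, dtype=np.float32)
    targets[np.arange(len(labels)), labels] = confidence
    return targets
# Example: vocab_size=5, confidence=0.9 -> the true class gets 0.9, each other
# class gets 0.025, and every row sums to 1.0.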
| 5,350,369 |
def test_ass_style_list_double_parenthood() -> None:
"""Test that style insertion cannot reclaim parenthood from another list."""
style = AssStyle(name="dummy style")
styles1 = AssStyleList()
styles2 = AssStyleList()
styles1.append(style)
with pytest.raises(TypeError):
styles2.append(style)
assert len(styles1) == 1
assert len(styles2) == 0
assert style.parent == styles1
| 5,350,370 |
def get_db_matching_location_interurban(latitude, longitude) -> dict:
"""
extracts location from db by closest geo point to location found, using road number if provided and limits to
requested resolution
:param latitude: location latitude
:param longitude: location longitude
"""
def get_bounding_box(latitude, longitude, distance_in_km):
latitude = math.radians(latitude)
longitude = math.radians(longitude)
radius = 6371
# Radius of the parallel at given latitude
parallel_radius = radius * math.cos(latitude)
lat_min = latitude - distance_in_km / radius
lat_max = latitude + distance_in_km / radius
lon_min = longitude - distance_in_km / parallel_radius
lon_max = longitude + distance_in_km / parallel_radius
rad2deg = math.degrees
return rad2deg(lat_min), rad2deg(lon_min), rad2deg(lat_max), rad2deg(lon_max)
try:
from anyway.app_and_db import db
except ModuleNotFoundError:
pass
distance_in_km = 5
lat_min, lon_min, lat_max, lon_max = get_bounding_box(latitude, longitude, distance_in_km)
baseX = lon_min
baseY = lat_min
distanceX = lon_max
distanceY = lat_max
polygon_str = "POLYGON(({0} {1},{0} {3},{2} {3},{2} {1},{0} {1}))".format(
baseX, baseY, distanceX, distanceY
)
query_obj = (
db.session.query(AccidentMarkerView)
.filter(AccidentMarkerView.geom.intersects(polygon_str))
.filter(AccidentMarkerView.accident_year >= 2014)
.filter(AccidentMarkerView.provider_code != BE_CONST.RSA_PROVIDER_CODE)
.filter(not_(AccidentMarkerView.road_segment_name == None))
)
markers = pd.read_sql_query(query_obj.statement, query_obj.session.bind)
geod = Geodesic.WGS84
# relevant_fields = resolution_dict[resolution]
# markers = db.get_markers_for_location_extraction()
markers["geohash"] = markers.apply( # pylint: disable=maybe-no-member
lambda x: geohash.encode(x["latitude"], x["longitude"], precision=4), axis=1
) # pylint: disable=maybe-no-member
markers_orig = markers.copy() # pylint: disable=maybe-no-member
markers = markers.loc[(markers["road1"] != None)] # pylint: disable=maybe-no-member
if markers.count()[0] == 0:
markers = markers_orig
# FILTER BY GEOHASH
curr_geohash = geohash.encode(latitude, longitude, precision=4)
if markers.loc[markers["geohash"] == curr_geohash].count()[0] > 0:
markers = markers.loc[markers["geohash"] == curr_geohash].copy()
# CREATE DISTANCE FIELD
markers["dist_point"] = markers.apply(
lambda x: geod.Inverse(latitude, longitude, x["latitude"], x["longitude"])["s12"], axis=1
).replace({np.nan: None})
most_fit_loc = (
markers.loc[markers["dist_point"] == markers["dist_point"].min()].iloc[0].to_dict()
)
final_loc = {}
for field in ["road1", "road_segment_name"]:
loc = most_fit_loc[field]
if loc not in [None, "", "nan"]:
if not (isinstance(loc, np.float64) and np.isnan(loc)):
final_loc[field] = loc
return final_loc
| 5,350,371 |
def get(settings_obj, key, default=None, callback=None):
"""
Return a Sublime Text plugin setting value.
Parameters:
settings_obj - a sublime.Settings object or a dictionary containing
settings
key - the name of the setting
default - the default value to return if the key value is not found.
callback - a callback function that, if provided, will be called with
the found and default values as parameters.
"""
# Parameter validation
if not isinstance(settings_obj, (dict, sublime.Settings)):
raise AttributeError("Invalid settings object")
    if not isinstance(key, str):
        raise AttributeError("Invalid key; a string is expected")
if callback is not None and not hasattr(callback, '__call__'):
raise AttributeError("Invalid callback function")
setting = settings_obj.get(key, default)
final_val = None
if isinstance(setting, dict) and "#multiconf#" in setting:
reject_item = False
for entry in setting["#multiconf#"]:
reject_item = False if isinstance(entry, dict) and len(entry) else True
k, v = entry.popitem()
if reject_item:
continue
for qual in re.compile(QUALIFIERS).finditer(k):
if Qualifications.exists(qual.group(1)):
reject_item = not Qualifications.eval_qual(qual.group(1), qual.group(2))
else:
reject_item = True
if reject_item:
break
if not reject_item:
final_val = v
break
if reject_item:
final_val = default
else:
final_val = setting
return callback(final_val, default) if callback else final_val
| 5,350,372 |
def parse_json_confing(config_file):
"""Parse JSON for config
    JSON can look like this:
{
"request_type": "server",
"comboBase": "www.cdn.com"
"base": "/base/path", //build directory
"js_path": "js", //path relative to base
"css_path": "path/to/css", //path relative to base; note: combo loader will try and search/replace images in CSS files
"filter": DEBUG|MIN
}
Otherwise uses the *.ini file to load the server config
"""
f = open(config_file, 'r')
content = f.read()
f.close()
config = json.loads(content)
#parse through options to load files by file name
missing_keys = []
for key in required_config_keys:
if key not in config:
missing_keys.append(key)
if missing_keys:
raise Exception("Required keys are missing in config :: required are: %s ::: config is missing: %s"
% (required_config_keys, missing_keys))
config['request_type'] = REQUEST_TYPES[config['request_type'].lower()]
return config
| 5,350,373 |
def site():
"""Main front-end web application"""
html = render.html("index")
return html
| 5,350,374 |
def validate_user_options(args):
"""Check the user has provided suitable operational options
:param args: cmd-line args parser
Return nothing
"""
logger = logging.getLogger(__name__)
if 'genbank' in args.source:
logger.info("Extract GenBank protein sequences")
if 'uniprot' in args.source:
logger.info("Extracting UniProt protein sequences")
if args.blastdb is False and args.fasta_dir is False and args.fasta_file is False:
logger.error(
"No output option provided.\n"
"No path to create a BLAST db, output dir or output file provided\n"
"Call at least one of --blastdb, --fasta_dir, --fasta_file\n"
"Terminating program."
)
sys.exit(1)
if args.blastdb:
logger.info(f"Enabled building a BLAST db at:\n{args.blastdb}")
if args.fasta_dir:
logger.info(
f"Enabled writing a unqiue sequence to a separate FASTA file in:\n{args.fasta_dir}"
)
if args.fasta_file:
logger.info(
f"Enabled writing all extracted sequences to a single FASTA file at:{args.fasta_file}"
)
return
| 5,350,375 |
def parse_vad_label(line, frame_size: float = 0.032, frame_shift: float = 0.008):
"""Parse VAD information in each line, and convert it to frame-wise VAD label.
Args:
line (str): e.g. "0.2,3.11 3.48,10.51 10.52,11.02"
        frame_size (float): frame size (in seconds) that is used when
            extracting spectral features
        frame_shift (float): frame shift / hop length (in seconds) that
            is used when extracting spectral features
Returns:
frames (List[int]): frame-wise VAD label
Examples:
>>> label = parse_vad_label("0.3,0.5 0.7,0.9")
[0, ..., 0, 1, ..., 1, 0, ..., 0, 1, ..., 1]
>>> print(len(label))
110
NOTE: The output label length may vary according to the last timestamp in `line`,
which may not correspond to the real duration of that sample.
For example, if an audio sample contains 1-sec silence at the end, the resulting
VAD label will be approximately 1-sec shorter than the sample duration.
Thus, you need to pad zeros manually to the end of each label to match the number
of frames in the feature. E.g.:
>>> feature = extract_feature(audio) # frames: 320
        >>> frames = feature.shape[1] # here assumes the frame dimension is 1
>>> label = parse_vad_label(vad_line) # length: 210
>>> import numpy as np
>>> label_pad = np.pad(label, (0, np.maximum(frames - len(label), 0)))[:frames]
"""
frame2time = lambda n: n * frame_shift + frame_size / 2
frames = []
frame_n = 0
for time_pairs in line.split():
start, end = map(float, time_pairs.split(","))
assert end > start, (start, end)
while frame2time(frame_n) < start:
frames.append(0)
frame_n += 1
while frame2time(frame_n) <= end:
frames.append(1)
frame_n += 1
return frames
| 5,350,376 |
def preprocess_image(img, img_width, img_height):
    """Preprocesses the image before feeding it into the ML model"""
    x = get_square_image(img)
    # Resize the squared image (the original resized `img`, discarding the square crop)
    x = np.asarray(x.resize((img_width, img_height))).astype(np.float32)
    x_transposed = x.transpose((2,0,1))
    x_batchified = np.expand_dims(x_transposed, axis=0)
    return x_batchified
| 5,350,377 |
def Pose_2_KUKA(H):
"""Converts a pose (4x4 matrix) to an XYZABC KUKA target (Euler angles), required by KUKA KRC controllers.
:param H: pose
:type H: :class:`.Mat`
.. seealso:: :class:`.Mat`, :func:`~robodk.TxyzRxyz_2_Pose`, :func:`~robodk.Pose_2_TxyzRxyz`, :func:`~robodk.Pose_2_ABB`, :func:`~robodk.Pose_2_Adept`, :func:`~robodk.Pose_2_Comau`, :func:`~robodk.Pose_2_Fanuc`, :func:`~robodk.Pose_2_KUKA`, :func:`~robodk.Pose_2_Motoman`, :func:`~robodk.Pose_2_Nachi`, :func:`~robodk.Pose_2_Staubli`, :func:`~robodk.Pose_2_UR`, :func:`~robodk.quaternion_2_pose`
"""
x = H[0, 3]
y = H[1, 3]
z = H[2, 3]
if (H[2, 0]) > (1.0 - 1e-10):
p = -pi / 2
r = 0
w = atan2(-H[1, 2], H[1, 1])
elif (H[2, 0]) < (-1.0 + 1e-10):
p = pi / 2
r = 0
w = atan2(H[1, 2], H[1, 1])
else:
p = atan2(-H[2, 0], sqrt(H[0, 0] * H[0, 0] + H[1, 0] * H[1, 0]))
w = atan2(H[1, 0], H[0, 0])
r = atan2(H[2, 1], H[2, 2])
return [x, y, z, w * 180 / pi, p * 180 / pi, r * 180 / pi]
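# Hedged usage sketch for Pose_2_KUKA (illustration only, not part of the original
# source). A plain NumPy array stands in for robodk's Mat here; both support the
# [row, col] indexing that the function relies on.
def _demo_pose_2_kuka():
    import numpy as np
    H = np.eye(4)
    H[0, 3], H[1, 3], H[2, 3] = 100.0, 200.0, 300.0  # pure translation, identity rotation
    return Pose_2_KUKA(H)  # -> [100.0, 200.0, 300.0, 0.0, 0.0, 0.0] (X, Y, Z, A, B, C)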
| 5,350,378 |
def skip_regenerate_image(request: FixtureRequest) -> Optional[str]:
"""Enable parametrization for the same cli option"""
return _request_param_or_config_option_or_default(request, 'skip_regenerate_image', None)
| 5,350,379 |
def executeGC(img_name, filename, gamma):
    """
    :type img_name: str, the file name of the single-channel image
    :type filename: str, the name for saving
    :type gamma: float
    """
    img = cv.imread(img_name, 0)
    img = img.astype(np.float)
    # Apply the gamma curve to every pixel at once (equivalent to the original per-pixel loop)
    img = 255 * (img / 255.0) ** (1 / gamma)
    img = img.astype(np.int16)
    img = Image.fromarray(img)
    # img.show()
    img = img.convert('L')
    img.save(filename)
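# Hedged usage sketch for executeGC (illustration only; the file names below are
# hypothetical). With gamma > 1 the mapping 255*(v/255)**(1/gamma) brightens
# mid-tones, with gamma < 1 it darkens them.
#   executeGC("input_gray.png", "output_gamma.png", 2.2)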
| 5,350,380 |
def parse_config(tool_name, key_col_name, value_col_name):
"""Parses the "execute" field for the given tool from installation config
file.
Parameters:
tool_name: Tool name to search from file.
Raises:
        STAPLERerror if config file does not exist.
STAPLERerror if tool value can not be read from file.
STAPLERerror if tool value was an empty string.
Returns:
String containing the user specified run command, None if no special
command has been defined.
"""
# Return None for the generic_base class, as it should not be in the
# config file in any case
try:
run_command = read_value_from_multi_table(CONFIG_FILE_PATH,
tool_name,
key_col_name,
value_col_name)
except STAPLERerror:
print 'Error when reading installation configuration file for ' \
'tool {0}'.format(tool_name)
logging.error('Error when reading installation configuration file '
'for the tool {0}'.format(tool_name))
raise
if run_command == 'none':
raise NotConfiguredError()
if run_command == '':
raise STAPLERerror('Error! Empty value for tool {0} was found from '
'installation configuration file !):\n{1}'.format(tool_name,
CONFIG_FILE_PATH))
return run_command
| 5,350,381 |
def path_leaf(path):
"""
Extracts file name from given path
:param str path: Path be extracted the file name from
:return str: File name
"""
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
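# Hedged usage sketch for path_leaf (illustration only): a trailing separator is
# handled by falling back to the basename of the head.
def _demo_path_leaf():
    assert path_leaf("/tmp/reports/summary.csv") == "summary.csv"
    assert path_leaf("/tmp/reports/") == "reports"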
| 5,350,382 |
def delete_cache_clusters(
cluster_ids: List[str],
final_snapshot_id: str = None,
configuration: Configuration = None,
secrets: Secrets = None,
) -> List[AWSResponse]:
"""
Deletes one or more cache clusters and creates a final snapshot
Parameters:
cluster_ids: list: a list of one or more cache cluster ids
final_snapshot_id: str: an identifier to give the final snapshot
"""
client = aws_client("elasticache", configuration, secrets)
cache_clusters = describe_cache_clusters(cluster_ids, client)
results = []
for c in cache_clusters:
logger.debug("Deleting Cache Cluster: %s." % c["CacheClusterId"])
params = dict(CacheClusterId=c["CacheClusterId"])
if final_snapshot_id:
params["FinalSnapshotIdentifier"] = final_snapshot_id
results.append(client.delete_cache_cluster(**params)["CacheCluster"])
return results
| 5,350,383 |
def define_styleGenerator(content_nc: int, style_nc: int, n_c: int, n_blocks=4, norm='instance', use_dropout=False, padding_type='zero', cbam=False, gpu_ids=[]):
"""
This ResNet applies the encoded style from the style tensor onto the given content tensor.
Parameters:
----------
- content_nc (int): number of channels in the content tensor
- style_nc (int): number of channels in the style tensor
- n_c (int): number of channels used inside the network
- n_blocks (int): number of Resnet blocks
    - norm (str): the name of the normalization layer
    - use_dropout (boolean): if True, use dropout layers
- padding_type (str): the name of padding layer in conv layers: reflect | replicate | zero
- cbam (boolean): If true, use the Convolution Block Attention Module
- gpu_ids: [int]: GPU ids available to this network. Default = []
"""
use_gpu = len(gpu_ids) > 0
norm_layer = get_norm_layer(norm_type=norm)
if use_gpu:
assert(torch.cuda.is_available())
    styleGenerator = StyleGenerator(content_nc, style_nc, n_c, n_blocks=n_blocks, norm_layer=norm_layer, use_dropout=use_dropout, padding_type=padding_type, cbam=cbam)  # pass the caller's cbam flag instead of hard-coding False
init_weights(styleGenerator, "kaiming", activation='leaky_relu')
if len(gpu_ids):
return nn.DataParallel(styleGenerator, device_ids=gpu_ids)
else:
return styleGenerator
| 5,350,384 |
def answer():
"""
answer
"""
# logger
M_LOG.info("answer")
if "answer" == flask.request.form["type"]:
# save answer
gdct_data["answer"] = {"id": flask.request.form["id"],
"type": flask.request.form["type"],
"sdp": flask.request.form["sdp"]}
# return ok
return flask.Response(status=200)
# return
return flask.Response(status=400)
| 5,350,385 |
def load_requirements():
""" Helps to avoid storing requirements in more than one file"""
reqs = parse_requirements('requirements-to-freeze.txt', session=False)
reqs_list = [str(ir.req) for ir in reqs]
return reqs_list
| 5,350,386 |
def assert_is_dot_format(dot):
""" Checks that the dot is usable by graphviz. """
# We launch a process calling graphviz to render the dot. If the exit code is not 0 we assume that the syntax
# wasn't good
def run_graph(dot):
""" Runs graphviz to see if the syntax is good. """
graph = AGraph()
graph = graph.from_string(dot)
extension = 'png'
graph.draw(path='output.png', prog='dot', format=extension)
sys.exit(0)
p = Process(target=run_graph, args=(dot,))
p.start()
p.join()
assert p.exitcode == 0
| 5,350,387 |
def discover(type=None, regex=None, paths=None):
"""Find and return available plug-ins
This function looks for files within paths registered via
:func:`register_plugin_path` and those added to `PYBLISHPLUGINPATH`.
It determines *type* - :class:`Selector`, :class:`Validator`,
:class:`Extractor` or :class:`Conform` - based on whether it
matches it's corresponding regular expression; e.g.
"$validator_*^" for plug-ins of type Validator.
Arguments:
type (str, optional): !DEPRECATED! Only return plugins of
specified type. E.g. validators, extractors. In None is specified,
return all plugins. Available options are "selectors", validators",
"extractors", "conformers", "collectors" and "integrators".
regex (str, optional): Limit results to those matching `regex`.
Matching is done on classes, as opposed to
filenames, due to a file possibly hosting
multiple plugins.
paths (list, optional): Paths to discover plug-ins from.
If no paths are provided, all paths are searched.
"""
if type is not None:
warnings.warn("type argument has been deprecated and does nothing")
if regex is not None:
warnings.warn("discover(): regex argument "
"has been deprecated and does nothing")
plugins = dict()
# Include plug-ins from registered paths
for path in paths or plugin_paths():
path = os.path.normpath(path)
if not os.path.isdir(path):
continue
for fname in os.listdir(path):
if fname.startswith("_"):
continue
abspath = os.path.join(path, fname)
if not os.path.isfile(abspath):
continue
mod_name, mod_ext = os.path.splitext(fname)
if not mod_ext == ".py":
continue
module = types.ModuleType(mod_name)
module.__file__ = abspath
try:
with open(abspath) as f:
six.exec_(f.read(), module.__dict__)
# Store reference to original module, to avoid
                # garbage collection from collecting its global
# imports, such as `import os`.
sys.modules[abspath] = module
except Exception as err:
log.debug("Skipped: \"%s\" (%s)", mod_name, err)
continue
for plugin in plugins_from_module(module):
if plugin.__name__ in plugins:
log.debug("Duplicate plug-in found: %s", plugin)
continue
plugin.__module__ = module.__file__
plugins[plugin.__name__] = plugin
# Include plug-ins from registration.
# Directly registered plug-ins take precedence.
for plugin in registered_plugins():
if plugin.__name__ in plugins:
log.debug("Duplicate plug-in found: %s", plugin)
continue
plugins[plugin.__name__] = plugin
plugins = list(plugins.values())
sort(plugins) # In-place
return plugins
| 5,350,388 |
def create_index(column_names, unique=False):
"""
Create a new index of the columns in column_names, where column_names is
a list of strings. If unique is True, it will be a
unique index.
"""
connection = _State.connection()
_State.reflect_metadata()
table_name = _State.table.name
table = _State.table
index_name = re.sub(r'[^a-zA-Z0-9]', '', table_name) + '_'
index_name += '_'.join(re.sub(r'[^a-zA-Z0-9]', '', x)
for x in column_names)
if unique:
index_name += '_unique'
columns = []
for column_name in column_names:
columns.append(table.columns[column_name])
current_indices = [x.name for x in table.indexes]
index = sqlalchemy.schema.Index(index_name, *columns, unique=unique)
if index.name not in current_indices:
index.create(bind=_State.engine)
| 5,350,389 |
def next_permutation(a):
"""Generate the lexicographically next permutation inplace.
https://en.wikipedia.org/wiki/Permutation#Generation_in_lexicographic_order
Return false if there is no next permutation.
"""
# Find the largest index i such that a[i] < a[i + 1]. If no such
# index exists, the permutation is the last permutation
for i in reversed(range(len(a) - 1)):
if a[i] < a[i + 1]:
break # found
else: # no break: not found
a.reverse()
return False # no next permutation
# Find the largest index j greater than i such that a[i] < a[j]
j = next(j for j in reversed(range(i + 1, len(a))) if a[i] < a[j])
# Swap the value of a[i] with that of a[j]
a[i], a[j] = a[j], a[i]
# Reverse sequence from a[i + 1] up to and including the final element a[n]
a[i + 1:] = reversed(a[i + 1:])
return True
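# Hedged usage sketch for next_permutation (illustration only): the list is
# permuted in place; False is returned after the last (descending) permutation,
# at which point the list has been reset to sorted order.
def _demo_next_permutation():
    a = [1, 2, 3]
    seen = [tuple(a)]
    while next_permutation(a):
        seen.append(tuple(a))
    # seen == [(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)]
    # and `a` is back to [1, 2, 3]
    return seen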
| 5,350,390 |
def normalize(v):
"""
Calculate normalized vector
:param v: input vector
:return: normalized vector
"""
from numpy.linalg import norm
return v/norm(v)
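# Hedged usage sketch for normalize (illustration only):
#   normalize(np.array([3.0, 4.0]))  -> array([0.6, 0.8])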
| 5,350,391 |
def replace_hyphen_by_romaji(text):
"""
    Replace long-vowel marks such as "ー" (here "-" / "~") with kana.
"""
# error check
if len(text) < 2:
return ""
while "-" in list(text) or "~" in list(text):
text_ = text
if (text[0] == "-" or text[0] == "~") and len(text) >= 2:
text = text[2:]
continue
text = re.sub(r"(?P<vowel>[aeiou])[-~][-~]", r"\g<vowel>x\g<vowel>", text) # "-" を 2文字
text = re.sub(r"A[-~][-~]", r"Axa", text)
text = re.sub(r"E[-~][-~]", r"Exe", text)
text = re.sub(r"O[-~][-~]", r"Oxo", text)
text = re.sub(r"U[-~][-~]", r"Uxu", text)
if text_ == text:
            break # stop once nothing changed
return text
| 5,350,392 |
def data_source_iterator(config: Dict[str, Any]) -> Iterable[DataFrame]:
"""Load all data tables defined by the provided config file."""
for source in config["sources"]:
try:
yield read_data(config["schema"], source["state"], source["url"])
print(f"Data successfully downloaded for {source['state']}: {source['url']}")
        except Exception:
log_msg = f"Failed to process data for {source['state']}: {source['url']}"
logging.error(log_msg, exc_info=True)
| 5,350,393 |
def aucroc_ic50(df,threshold=500):
"""
Compute AUC ROC for predictions and targets in DataFrame, based on a given threshold
Parameters
----------
    df : pandas.DataFrame with predictions in column "preds" and targets in column "targs" in nM
    threshold: float, binding affinity threshold for binders in nM
Returns
--------
numpy.nan or float
"""
df =df[~df["preds"].isnull()]
is_binder = df["targs"] >= threshold
if is_binder.mean()==1.0 or is_binder.mean()==0.0 or np.isnan(is_binder.mean()):
return np.nan
else:
return roc_auc_score(1.0*is_binder,df["preds"])
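# Hedged usage sketch for aucroc_ic50 (illustration only; column names follow the
# docstring above, and pandas is imported locally to keep the sketch self-contained).
def _demo_aucroc_ic50():
    import pandas as pd
    df = pd.DataFrame({
        "preds": [0.9, 0.8, 0.3, 0.1],
        "targs": [900.0, 600.0, 100.0, 50.0],  # nM; values >= threshold count as binders
    })
    return aucroc_ic50(df, threshold=500)  # 1.0 for this perfectly ranked example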
| 5,350,394 |
def infer_getattr(node, context=None):
"""Understand getattr calls
If one of the arguments is an Uninferable object, then the
result will be an Uninferable object. Otherwise, the normal attribute
lookup will be done.
"""
obj, attr = _infer_getattr_args(node, context)
if (
obj is util.Uninferable
or attr is util.Uninferable
or not hasattr(obj, "igetattr")
):
return util.Uninferable
try:
return next(obj.igetattr(attr, context=context))
except (StopIteration, InferenceError, AttributeInferenceError):
if len(node.args) == 3:
# Try to infer the default and return it instead.
try:
return next(node.args[2].infer(context=context))
except InferenceError as exc:
raise UseInferenceDefault from exc
raise UseInferenceDefault
| 5,350,395 |
def get_plugin(molcapsule: 'PyObject *', plug_no: 'int') -> "PyObject *":
"""get_plugin(molcapsule, plug_no) -> PyObject *"""
return _libpymolfile.get_plugin(molcapsule, plug_no)
| 5,350,396 |
def apply_input(dut, f):
"""Apply DuT input stimulus."""
# start the module
dut.active_i <= 1
# get file size
f_size = f.size()
# iterate over all 64 bit input data words
for i in range(f_size*8/BIT_WIDTH_INPUT):
# wait for fifo to become not full
while True:
yield RisingEdge(dut.clk)
if int(dut.fifo_full_o) == 0:
break
# read 64 bit input data from file
data = f.read_reverse_byte_order(i*BIT_WIDTH_INPUT/8,
BIT_WIDTH_INPUT/8)
# apply data to fifo
dut.fifo_din_i <= data
dut.fifo_wr_en_i <= 1
yield RisingEdge(dut.clk)
dut.fifo_wr_en_i <= 0
# wait until there are less than 256 entries in the FIFO
while True:
yield RisingEdge(dut.clk)
if int(dut.fifo_prog_empty):
break
# stop the module
dut.active_i <= 0
# wait for the module to become deactivated
while True:
yield RisingEdge(dut.clk)
if int(dut.active_o) == 0:
break
if int(dut.fifo_rd_data_count) > 0:
# flush the remaining data words from FIFO to ring buffer
dut.flush_i <= 1
yield RisingEdge(dut.clk)
dut.flush_i <= 0
yield RisingEdge(dut.clk)
# wait for module to become deactivated again
while True:
yield RisingEdge(dut.clk)
if int(dut.active_o) == 0:
break
# fifo must now be empty
assert int(dut.fifo_rd_data_count) == 0
| 5,350,397 |
def fake_redis_con():
"""
Purpose:
Create Fake Redis Connection To Test With
Args:
N/A
Return:
fake_redis_con (Pytest Fixture (FakeRedis Connection Obj)): Fake redis connection
that simulates redis functionality for testing
"""
return fakeredis.FakeStrictRedis()
| 5,350,398 |
def verbose(function, *args, **kwargs):
"""Improved verbose decorator to allow functions to override log-level
Do not call this directly to set global verbosrity level, instead use
set_log_level().
Parameters
----------
function - function
Function to be decorated to allow for overriding global verbosity
level
Returns
-------
dec - function
The decorated function
"""
try:
arg_names = [parameter.name for parameter in
inspect.signature(function).parameters.values() if
(parameter.kind == parameter.POSITIONAL_OR_KEYWORD)]
    except Exception:
arg_names = inspect.getargspec(function).args
if len(arg_names) > 0 and arg_names[0] == 'self':
default_level = getattr(args[0], 'verbose', None)
else:
default_level = None
if('verbose' in arg_names):
verbose_level = args[arg_names.index('verbose')]
else:
verbose_level = default_level
if verbose_level is not None:
old_level = set_log_level(verbose_level, True)
# set it back if we get an exception
try:
ret = function(*args, **kwargs)
except:
set_log_level(old_level)
raise
set_log_level(old_level)
return ret
else:
ret = function(*args, **kwargs)
return ret
| 5,350,399 |