content (string, lengths 22-815k) | id (int64, 0-4.91M)
---|---
def lambda_handler(event, context):
"""
Listens to SQS events fired when new data files are added to the data
bucket's data directory. If the updated key matches a set of criteria,
converts the raw data file into an index and writes to an output S3 bucket.
As per the default configuration, the bucket must be
- the hashing data bucket eg.
dipanjanm-hashing-data20210224213427723700000003
- the key name must be threat_exchange_data/pdq.te
Which means adding new versions of the datasets will not have an effect. You
must add the exact pdq.te file.
"""
if not was_pdq_data_updated(event):
logger.info("PDQ Data Not Updated, skipping")
return
logger.info("PDQ Data Updated, updating pdq hash index")
logger.info("Retreiving PDQ Data from S3")
with metrics.timer(metrics.names.pdq_indexer_lambda.download_datafile):
pdq_data_file = s3_client.get_object(
Bucket=THREAT_EXCHANGE_DATA_BUCKET_NAME, Key=THREAT_EXCHANGE_PDQ_DATA_KEY
)
with metrics.timer(metrics.names.pdq_indexer_lambda.parse_datafile):
pdq_data_reader = csv.DictReader(
codecs.getreader("utf-8")(pdq_data_file["Body"]),
fieldnames=PDQ_DATA_FILE_COLUMNS,
)
pdq_data = [
(
row["hash"],
# Also add hash to metadata for easy look up on match
{
"id": int(row["id"]),
"hash": row["hash"],
},
)
for row in pdq_data_reader
]
with metrics.timer(metrics.names.pdq_indexer_lambda.build_index):
logger.info("Creating PDQ Hash Index")
index = PDQIndex.build(pdq_data)
logger.info("Putting index in S3")
index_bytes = pickle.dumps(index)
with metrics.timer(metrics.names.pdq_indexer_lambda.upload_index):
s3_client.put_object(
Bucket=INDEXES_BUCKET_NAME, Key=PDQ_INDEX_KEY, Body=index_bytes
)
logger.info("Index update complete")
metrics.flush()
| 5,349,800 |
def pinghost(host):
"""
Ping target with a 1-second timeout limit
:param str host: Destination to reach. IP address or domain name
:returns: True if reached, otherwise False
"""
host = str(host).split(':')[0] # leave off the port if exists
# print "Pinging"
if os.name == 'posix':
target = "ping -W1 -c 1 " + host + " > /dev/null 2>&1 "
else:
target = "ping " + host + " -w 1000 -n 1 > nul 2>&1"
response = os.system(target)
# Note:original response is 1 for fail; 0 for success; so we flip it
return not response
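# Hedged usage sketch for pinghost() above (not part of the original module):
# the port, if present, is stripped before pinging, and the shell return code
# is inverted so True means the host answered.
print(pinghost("127.0.0.1:8080"))  # pings 127.0.0.1, ignoring the ":8080" port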
| 5,349,801 |
def get_trade_factors(name: str,
mp: float,
allow_zero: bool,
long_open_values: List,
long_close_values: List,
short_open_values: List = None,
short_close_values: List = None) -> dict:
"""获取指定 name 下的交易因子
:param allow_zero: 是否使用基础型
:param name: 因子系统的名称
:param mp: 单个标的最大允许持仓,小于0表示仓位百分比,大于0表示手数
:param long_open_values: 开多因子值
:param long_close_values: 平多因子值
:param short_open_values: 开空因子值
:param short_close_values: 平空因子值
:return: 因子交易系统
example:
===================
>>> factors = get_trade_factors(name="日线笔结束", long_open_values=['BDE'], long_close_values=['BUE'])
"""
if not short_close_values:
short_close_values = []
if not short_open_values:
short_open_values = []
def __is_match(v, x):
if allow_zero:
if v in x.name:
return 1
else:
return 0
else:
if v in x.name and "0" not in x.name:
return 1
else:
return 0
long_open_factors = ["{}@{}".format(name, x.value) for x in Factors.__members__.values()
if sum([__is_match(v, x) for v in long_open_values]) > 0]
long_close_factors = ["{}@{}".format(name, x.value) for x in Factors.__members__.values()
if sum([__is_match(v, x) for v in long_close_values]) > 0]
short_open_factors = ["{}@{}".format(name, x.value) for x in Factors.__members__.values()
if sum([__is_match(v, x) for v in short_open_values]) > 0]
short_close_factors = ["{}@{}".format(name, x.value) for x in Factors.__members__.values()
if sum([__is_match(v, x) for v in short_close_values]) > 0]
factors_ = {
"name": name,
"version": factors_all[name].__name__,
"mp": mp,
"long_open_factors": long_open_factors,
"long_close_factors": long_close_factors,
"short_open_factors": short_open_factors,
"short_close_factors": short_close_factors,
}
return factors_
| 5,349,802 |
def mbstrlen(src):
"""Return the 'src' string (Multibytes ASCII string) length.
:param src: the source string
"""
try:
return len(src.decode("utf8", errors = "replace"))
except Exception as err:
LOG.error("String convert issue %s", err)
return len(src)
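# Worked example for mbstrlen() above (hedged, not part of the original
# module): the length is counted in decoded characters, not raw bytes.
print(mbstrlen("héllo".encode("utf8")))  # -> 5, while len() of the raw bytes is 6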
| 5,349,803 |
def activate(paramOverrides={}, modelOverride=None, filename=None):
"""Activate the veneer when beginning to compile a Scenic module."""
global activity, _globalParameters, lockedParameters, lockedModel, currentScenario
if paramOverrides or modelOverride:
assert activity == 0
_globalParameters.update(paramOverrides)
lockedParameters = set(paramOverrides)
lockedModel = modelOverride
activity += 1
assert not evaluatingRequirement
assert not evaluatingGuard
assert currentSimulation is None
newScenario = DynamicScenario._dummy(filename) # placeholder scenario for top-level code
scenarioStack.append(newScenario)
currentScenario = newScenario
| 5,349,804 |
def test_get_version_with_absolute_path_to_root(mock_open_fix, mock_is_dir_true,
mock_is_path_true, versioner):
"""Tests if version number is returned when absolute path to
projects root is given and default VERSION.txt is used as a file."""
assert versioner.get_version(
root="/home/tomasz_kluczkowski/Dev/versioner", prompt=False) == "1.02"
| 5,349,805 |
def test_dimmer_turn_off(mock_openzwave):
"""Test turning off a dimmable Z-Wave light."""
node = MockNode()
value = MockValue(data=46, node=node)
values = MockLightValues(primary=value)
device = zwave.get_device(node=node, values=values, node_config={})
device.turn_off()
assert node.set_dimmer.called
value_id, brightness = node.set_dimmer.mock_calls[0][1]
assert value_id == value.value_id
assert brightness == 0
| 5,349,806 |
def adapted_border_postprocessing(border_prediction, cell_prediction):
"""
:param border_prediction: per-pixel class probabilities; the last axis holds the border classes (argmax is taken over it)
:param cell_prediction: per-pixel cell probability map (thresholded at 0.5)
:return: instance label image (uint16), its RGB overlay (uint8), and the binary border prediction (uint8)
"""
prediction_border_bin = np.argmax(border_prediction, axis=-1)
cell_prediction = cell_prediction > 0.5
seeds = border_prediction[:, :, 1] * (1 - border_prediction[:, :, 2]) > 0.5 # Subtract borders from cell seeds
seeds = measure.label(seeds, background=0)
prediction_instance = watershed(image=cell_prediction,
markers=seeds,
mask=cell_prediction,
watershed_line=False,
)
prediction_instance = measure.label(prediction_instance, background=0)
colors = get_colors()
prediction_instance_rgb = label2rgb(prediction_instance, colors=colors, kind='overlay', bg_label=0)
prediction_instance = np.expand_dims(prediction_instance, axis=-1)
prediction_border_bin = np.expand_dims(prediction_border_bin, axis=-1)
return prediction_instance.astype(np.uint16), prediction_instance_rgb.astype(np.uint8), prediction_border_bin.astype(np.uint8)
| 5,349,807 |
def calc_bonding_volume(rc_klab, dij_bar, rd_klab=None, reduction_ratio=0.25):
"""
Calculate the association site bonding volume matrix
Dimensions of (ncomp, ncomp, nbeads, nbeads, nsite, nsite)
Parameters
----------
rc_klab : numpy.ndarray
This matrix of cutoff distances for association sites for each site type in each group type
dij_bar : numpy.ndarray
Component averaged hard sphere diameter
rd_klab : numpy.ndarray, Optional, default=None
Position of association site in each group (nbead, nbead, nsite, nsite)
reduction_ratio : float, Optional, default=0.25
Reduced distance of the sites from the center of the sphere of interaction. This value is used when site position, rd_klab is None
Returns
-------
Kijklab : numpy.ndarray
Matrix of binding volumes
"""
ncomp = len(dij_bar)
nbead, _, nsite, _ = np.shape(rc_klab)
Kijklab = np.zeros((ncomp, ncomp, nbead, nbead, nsite, nsite))
for i in range(ncomp):
for j in range(ncomp):
for k in range(nbead):
for l in range(nbead):
for a in range(nsite):
for b in range(nsite):
if rc_klab[k, l, a, b] != 0:
if rd_klab is None:
rd = reduction_ratio * dij_bar[i, j]
else:
rd = rd_klab[k, l, a, b]
tmp0 = np.pi * dij_bar[i, j] ** 2 / (18 * rd ** 2)
tmp11 = np.log(
(rc_klab[k, l, a, b] + 2 * rd) / dij_bar[i, j]
)
tmp12 = (
6 * rc_klab[k, l, a, b] ** 3
+ 18 * rc_klab[k, l, a, b] ** 2 * rd
- 24 * rd ** 3
)
tmp21 = rc_klab[k, l, a, b] + 2 * rd - dij_bar[i, j]
tmp22 = (
22 * rd ** 2
- 5 * rd * rc_klab[k, l, a, b]
- 7 * rd * dij_bar[i, j]
- 8 * rc_klab[k, l, a, b] ** 2
+ rc_klab[k, l, a, b] * dij_bar[i, j]
+ dij_bar[i, j] ** 2
)
Kijklab[i, j, k, l, a, b] = tmp0 * (
tmp11 * tmp12 + tmp21 * tmp22
)
return Kijklab
| 5,349,808 |
def scale_enum(anchor, scales):
"""Enumerate a set of anchors for each scale wrt an anchor.
"""
w_w, h_h, x_ctr, y_ctr = genwhctrs(anchor)
w_s = w_w * scales
h_s = h_h * scales
anchors = makeanchors(w_s, h_s, x_ctr, y_ctr)
return anchors
| 5,349,809 |
def L1():
"""
Graph for computing 'L1'.
"""
graph = beamline(scatter=True)
for node in ['scattered_beam', 'two_theta', 'L2', 'Ltotal']:
del graph[node]
return graph
| 5,349,810 |
def _highlight_scoring(
original_example, subset_adversarial_result, adversarial_span_dict
):
"""
Calculate the highlighting score using classification results of adversarial examples
:param original_example:
:param subset_adversarial_result:
:param adversarial_span_dict:
"""
original_utterance = " ".join(nltk.word_tokenize(original_example[1]))
original_idx = original_example[0]
original_intent = original_example[3]
original_confidence = original_example[4]
original_position = original_example[6]
tokens = original_utterance.split(" ")
highlight = np.zeros(len(tokens), dtype="float32")
for idx in range(len(subset_adversarial_result)):
adversarial_example = subset_adversarial_result.iloc[idx]
if not adversarial_example["top_predicts"]:
continue
predict_dict = dict()
predict_intent_list = list()
for prediction in adversarial_example["top_predicts"]:
predict_dict[prediction["intent"]] = prediction["confidence"]
predict_intent_list.append(prediction["intent"])
if original_intent in predict_dict:
adversarial_position = list(predict_dict.keys()).index(original_intent)
adversarial_confidence = predict_dict[original_intent]
else:
adversarial_position = len(list(predict_dict.keys()))
adversarial_confidence = 0
start, end = adversarial_span_dict[
adversarial_example["utterance"] + "_" + str(original_idx)
]
highlight = _scoring_function(
highlight,
original_position,
adversarial_position,
original_confidence,
adversarial_confidence,
start,
end,
)
return highlight
| 5,349,811 |
def aniquilar(base, eliminaciones):
"""
Remove all the (nested) keys mentioned in ``eliminaciones`` from
``base``.
.. versionadded:: 1.0
"""
for clave, valor in six.iteritems(eliminaciones):
if isinstance(valor, dict):
# NOTE: don't test whether base[clave] exists; if something is
# listed in a deletions structure, it must exist in some source
# somewhere and therefore also in the cache that is being
# cleared.
aniquilar(base[clave], eliminaciones[clave])
else: # implicitly None
del base[clave]
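# Worked example for aniquilar() above (hedged sketch; assumes `six` is
# imported in the original module): nested keys listed in the deletions
# structure are removed from the base dict in place.
base = {"a": 1, "b": {"c": 2, "d": 3}}
aniquilar(base, {"a": None, "b": {"c": None}})
print(base)  # -> {'b': {'d': 3}}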
| 5,349,812 |
def server_hello(cmd, response):
"""Test command
"""
return response
| 5,349,813 |
def init_db():
"""Initialise DB"""
clear_data()
add_admin_user()
add_settings()
add_forecast_areas()
add_regular_railway_data()
add_rapid_railway_data()
add_bullet_railway_data()
db.session.commit()
| 5,349,814 |
def applyRigidAlignment(outDir, refFile, inDataListSeg, inDataListImg=[], icp_iterations=200):
"""
This function takes in file lists (binary and raw) and makes the
size and spacing the same as the reference
"""
isoValue = 1e-20
antialias_iterations = 30
print("\n############# Rigidly Align #############")
# create output dirs
segoutDir = os.path.join(outDir, 'segmentations') if inDataListImg else outDir
if not os.path.exists(segoutDir):
os.makedirs(segoutDir)
if inDataListImg:
rawoutDir = os.path.join(outDir, 'images')
if not os.path.exists(rawoutDir):
os.makedirs(rawoutDir)
# apply rigid alignment
outSegDataList = []
outRawDataList = []
# get reference image
refImg = Image(refFile)
refImg.antialias(antialias_iterations)
for i in range(len(inDataListSeg)):
segoutname = rename(inDataListSeg[i], segoutDir, 'aligned')
outSegDataList.append(segoutname)
if inDataListImg:
rawoutname = rename(inDataListImg[i], rawoutDir, 'aligned')
outRawDataList.append(rawoutname)
# resize images to reference images
img = Image(inDataListSeg[i])
img.antialias(antialias_iterations)
rigidTransform = img.createTransform(refImg, TransformType.IterativeClosestPoint, isoValue, icp_iterations)
img.applyTransform(rigidTransform, refImg.origin(), refImg.dims(), refImg.spacing(), refImg.coordsys(), InterpolationType.Linear).binarize().write(segoutname)
if inDataListImg:
img = Image(inDataListImg[i])
img.applyTransform(rigidTransform, refImg.origin(), refImg.dims(), refImg.spacing(), refImg.coordsys(), InterpolationType.Linear).write(rawoutname)
return [outSegDataList, outRawDataList] if inDataListImg else outSegDataList
| 5,349,815 |
def test_vectorised_likelihood_not_vectorised_error(model, error):
"""
Assert the value is False if the likelihood is not vectorised and raises
an error.
"""
def dummy_likelihood(x):
if hasattr(x, '__len__'):
raise error
else:
return np.log(np.random.rand())
model._vectorised_likelihood = None
model.log_likelihood = MagicMock(side_effect=dummy_likelihood)
model.new_point = MagicMock(return_value=np.random.rand(10))
out = Model.vectorised_likelihood.__get__(model)
assert model._vectorised_likelihood is False
assert out is False
| 5,349,816 |
def display_timestamps_pair(time_m_2):
"""Takes a list of the following form: [(a1, b1), (a2, b2), ...] and
returns a string (a_mean+/-a_error, b_mean+/-b_error).
"""
if len(time_m_2) == 0:
return '(empty)'
time_m_2 = np.array(time_m_2)
return '({}, {})'.format(
display_timestamps(time_m_2[:, 0]),
display_timestamps(time_m_2[:, 1]),
)
| 5,349,817 |
def get_legendre(degree, length):
"""
Produces the Legendre polynomials up to order `degree`.
Parameters
----------
degree : int
Highest order desired.
length : int
Number of samples of the polynomials.
Returns
-------
legendre : np.ndarray
A `length` x (`degree`+1) array with all the polynomials up to order `degree`
"""
def _bonnet(d, x):
if(d == 0):
return np.ones_like(x)
elif(d == 1):
return x
else:
return ((2*d-1)*x*_bonnet(d-1, x)-(d-1)*_bonnet(d-2, x))/d
x = np.linspace(-1, 1, length)
legendre = np.empty([length, degree+1])
for n in range(degree+1):
legendre[:, n] = _bonnet(n, x)
return legendre
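# Small usage sketch for get_legendre() above (hedged; assumes numpy is
# imported as np in the original module): sample the first four Legendre
# polynomials on 101 points spanning [-1, 1].
leg = get_legendre(3, 101)
print(leg.shape)  # -> (101, 4), one column per order 0..3
print(leg[0, :])  # values at x = -1: [ 1. -1.  1. -1.]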
| 5,349,818 |
def forbid_start():
"""Pseudo context manager to forbid BUI to start during the with clause."""
global FORBID_START
FORBID_START = True
try:
yield None
finally:
FORBID_START = False
| 5,349,819 |
def _get_table_names():
"""Gets an alphabetically ordered list of table names from facet_fields.csv.
Table names are fully qualified: <project id>:<dataset id>:<table name>
"""
config_path = os.path.join(app.app.config['DATASET_CONFIG_DIR'],
'bigquery.json')
table_names = _parse_json_file(config_path)['table_names']
table_names.sort()
return table_names
| 5,349,820 |
def openmm_simulate_amber_explicit(
pdb_file,
top_file=None,
check_point=None,
GPU_index=0,
output_traj="output.dcd",
output_log="output.log",
output_cm=None,
report_time=10*u.picoseconds,
sim_time=10*u.nanoseconds,
reeval_time=None,
):
"""
Start and run an OpenMM NPT simulation with Langevin integrator at 2 fs
time step and 300 K. The cutoff distance for nonbonded interactions is
set at 1.0 nm, which is commonly used with the Amber force field. Long-range
nonbonded interactions are handled with PME.
Parameters
----------
top_file : topology file (.top, .prmtop, ...)
Topology file describing all the interactions within the MD
system.
pdb_file : coordinates file (.gro, .pdb, ...)
Molecule configuration file containing all the atom positions
and the PBC (periodic boundary condition) box of the system.
GPU_index : Int or Str
The device # of GPU to use for running the simulation. Use Strings, '0,1'
for example, to use more than 1 GPU
output_traj : the trajectory file (.dcd)
This is the file stores all the coordinates information of the MD
simulation results.
output_log : the log file (.log)
This file stores the MD simulation status, such as steps, time, potential
energy, temperature, speed, etc.
report_time : 10 ps
The program writes its information to the output every 10 ps by default
sim_time : 10 ns
The timespan of the simulation trajectory
"""
# set up save dir for simulation results
work_dir = os.getcwd()
time_label = int(time.time())
omm_path = create_md_path(time_label)
print(f"Running simulation at {omm_path}")
# setting up running path
os.chdir(omm_path)
top = pmd.load_file(top_file, xyz = pdb_file)
system = top.createSystem(nonbondedMethod=app.PME, nonbondedCutoff=1*u.nanometer,
constraints=app.HBonds)
dt = 0.002*u.picoseconds
integrator = omm.LangevinIntegrator(300*u.kelvin, 1/u.picosecond, dt)
system.addForce(omm.MonteCarloBarostat(1*u.bar, 300*u.kelvin))
try:
platform = omm.Platform_getPlatformByName("CUDA")
properties = {'DeviceIndex': str(GPU_index), 'CudaPrecision': 'mixed'}
except Exception:
platform = omm.Platform_getPlatformByName("OpenCL")
properties = {'DeviceIndex': str(GPU_index)}
simulation = app.Simulation(top.topology, system, integrator, platform, properties)
# simulation.context.setPositions(top.positions)
if top.get_coordinates().shape[0] == 1:
simulation.context.setPositions(top.positions)
shutil.copy2(pdb_file, './')
else:
positions = random.choice(top.get_coordinates())
simulation.context.setPositions(positions/10)
#parmed \AA to OpenMM nm
top.write_pdb('start.pdb', coordinates=positions)
simulation.minimizeEnergy()
simulation.context.setVelocitiesToTemperature(300*u.kelvin, random.randint(1, 10000))
simulation.step(int(100*u.picoseconds / (2*u.femtoseconds)))
report_freq = int(report_time/dt)
simulation.reporters.append(app.DCDReporter(output_traj, report_freq))
if output_cm:
simulation.reporters.append(ContactMapReporter(output_cm, report_freq))
simulation.reporters.append(app.StateDataReporter(output_log,
report_freq, step=True, time=True, speed=True,
potentialEnergy=True, temperature=True, totalEnergy=True))
simulation.reporters.append(app.CheckpointReporter('checkpnt.chk', report_freq))
if check_point:
simulation.loadCheckpoint(check_point)
nsteps = int(sim_time/dt)
simulation.step(nsteps)
if reeval_time:
nsteps = int(reeval_time/dt)
niter = int(sim_time/reeval_time)
for i in range(niter):
if os.path.exists('../halt'):
return
elif os.path.exists('new_pdb'):
print("Found new.pdb, starting new sim...")
# cleaning up old runs
del simulation
# starting new simulation with new pdb
with open('new_pdb', 'r') as fp:
new_pdb = fp.read().split()[0]
os.chdir(work_dir)
openmm_simulate_amber_explicit(
new_pdb, top_file=top_file,
check_point=None,
GPU_index=GPU_index,
output_traj=output_traj,
output_log=output_log,
output_cm=output_cm,
report_time=report_time,
sim_time=sim_time,
reeval_time=reeval_time,
)
else:
simulation.step(nsteps)
else:
nsteps = int(sim_time/dt)
simulation.step(nsteps)
os.chdir(work_dir)
if not os.path.exists('../halt'):
openmm_simulate_amber_explicit(
pdb_file, top_file=top_file,
check_point=None,
GPU_index=GPU_index,
output_traj=output_traj,
output_log=output_log,
output_cm=output_cm,
report_time=report_time,
sim_time=sim_time,
reeval_time=reeval_time,
)
else:
return
| 5,349,821 |
def _get_controller_of(pod):
"""Get a pod's controller's reference.
This uses the pod's metadata, so there is no guarantee that
the controller object reference returned actually corresponds to a
controller object in the Kubernetes API.
Args:
- pod: kubernetes pod object
Returns: the reference to a controller object
"""
if pod["metadata"].get("ownerReferences"):
for owner_ref in pod["metadata"]["ownerReferences"]:
if owner_ref.get("controller"):
return owner_ref
return None
| 5,349,822 |
def proxify_device_objects(
obj: Any,
proxied_id_to_proxy: Dict[int, ProxyObject],
found_proxies: List[ProxyObject],
):
""" Wrap device objects in ProxyObject
Search through `obj` and wrap all CUDA device objects in ProxyObject.
It uses `proxied_id_to_proxy` to make sure that identical CUDA device
objects found in `obj` are wrapped by the same ProxyObject.
Parameters
----------
obj: Any
Object to search through or wrap in a ProxyObject.
proxied_id_to_proxy: Dict[int, ProxyObject]
Dict mapping the id() of proxied objects (CUDA device objects) to
their proxy and is updated with all new proxied objects found in `obj`.
found_proxies: List[ProxyObject]
List of found proxies in `obj`. Notice, this includes all proxies found,
including those already in `proxied_id_to_proxy`.
Returns
-------
ret: Any
A copy of `obj` where all CUDA device objects are wrapped in ProxyObject
"""
return dispatch(obj, proxied_id_to_proxy, found_proxies)
| 5,349,823 |
def draw_triangles_1(length):
"""
Draw a red triangle
:param length: Length of 1 side of triangle
:pre: pos:lower left, turtle down , facing east
:post: pos:lower left, turtle down , facing east
:return: None
"""
turtle.pencolor('red')
for _ in range(3):
turtle.forward(length)
turtle.left(120)
| 5,349,824 |
def monotonicity(x, rounding_precision = 3):
"""Calculates monotonicity metric of a value of[0-1] for a given array.\nFor an array of length n, monotonicity is calculated as follows:\nmonotonicity=abs[(num. positive gradients)/(n-1)-(num. negative gradients)/(n-1)]."""
n = x.shape[0]
grad = np.gradient(x)
pos_grad = np.sum(grad>0)
neg_grad = np.sum(grad<0)
monotonicity = np.abs( pos_grad/(n-1) - neg_grad/(n-1) )
return np.round(monotonicity, rounding_precision)
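# Hedged usage sketch for monotonicity() above (assumes numpy is imported as
# np in the original module): the strictly increasing series scores higher
# than the oscillating one, since it has no negative gradients.
print(monotonicity(np.array([1.0, 2.0, 3.0, 4.0, 5.0])))
print(monotonicity(np.array([1.0, 3.0, 2.0, 4.0, 3.0])))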
| 5,349,825 |
def get_cluster_cids():
"""return list of CIDs with pin types"""
output = subprocess.check_output([
'docker-compose', 'exec', '-T', 'cluster', 'ipfs-cluster-ctl', 'pin',
'ls'
])
return [
'-'.join([l.split()[0], l.split()[-1].lower()])
for l in output.decode('utf-8').splitlines()
]
| 5,349,826 |
def download(homework, version="latest", redownload=False):
"""Download data files for the specified datasets. Defaults to downloading latest version on server.
Parameters:
homework (str): The name of the dataset to download data for, or "all" to download data for all datasets
version (str, optional): Which version of the data files to download. Defaults to latest on server.
redownload (bool, optional): Whether to redownload the data files, even if that version of the data is already downloaded. Default False.
Returns:
bool: Indicates whether download was successful.
"""
# Process the optional "all" parameter
if homework == "all":
homeworks = [
"bio462_hw1",
"bio462_hw2",
"bio462_hw3",
"bio462_hw4",
"bio462_hw5",
"bio462_hw6"
]
overall_result = True
for homework in homeworks:
if not download(homework, redownload=redownload):
overall_result = False
return overall_result
# Get our dataset path
homework = homework.lower()
dataset_path = get_dataset_path(homework)
# Update the index
update_index(homework)
# Load the index
index = get_index(homework)
# Validate the version number, including parsing if it's "latest"
version = validate_version(version, homework, use_context="download")
# Construct the path to the directory for this version
version_path = os.path.join(dataset_path, f"{homework}_v{version}")
# See if they've downloaded this version before. Get list of files to download.
version_index = index.get(version)
if os.path.isdir(version_path):
if redownload:
files_to_download = list(version_index.keys())
else:
files_to_download = []
for data_file in version_index.keys():
# Compare the server and local hashes, to make sure there was no data corruption
file_path = os.path.join(version_path, data_file)
if os.path.isfile(file_path):
file_index = version_index.get(data_file)
server_hash = file_index.get("hash")
local_hash = hash_file(file_path)
if local_hash == server_hash:
continue
files_to_download.append(data_file)
if len(files_to_download) == 0:
return True
else:
os.mkdir(version_path)
files_to_download = list(version_index.keys())
# Download the files
password_protected_datasets = [
"gbm",
"lscc"
]
password = None
total_files = len(files_to_download)
for data_file in files_to_download:
if (homework in password_protected_datasets) and (password is None):
password = getpass.getpass(prompt=f'Password for {homework} homework: ') # We manually specify the prompt parameter so it shows up in Jupyter Notebooks
print("\033[F", end='\r') # Use an ANSI escape sequence to move cursor back up to the beginning of the last line, so in the next line we can clear the password prompt
print("\033[K", end='\r') # Use an ANSI escape sequence to print a blank line, to clear the password prompt
file_index = version_index.get(data_file)
server_hash = file_index.get("hash")
file_url = file_index.get("url")
file_path = os.path.join(version_path, data_file)
file_number = files_to_download.index(data_file) + 1
downloaded_path = download_file(file_url, file_path, server_hash, password=password, file_message=f"{homework} v{version} data files", file_number=file_number, total_files=total_files)
while downloaded_path == "wrong_password":
password = getpass.getpass(prompt="Wrong password. Try again: ")
print("\033[F", end='\r') # Use an ANSI escape sequence to move cursor back up to the beginning of the last line, so in the next line we can clear the password prompt
print("\033[K", end='\r') # Use an ANSI escape sequence to print a blank line, to clear the password prompt
downloaded_path = download_file(file_url, file_path, server_hash, password=password, file_message=f"{homework} v{version} data files", file_number=file_number, total_files=total_files)
return True
| 5,349,827 |
def parse_value_namedobject(tt):
"""
<!ELEMENT VALUE.NAMEDOBJECT (CLASS | (INSTANCENAME, INSTANCE))>
"""
check_node(tt, 'VALUE.NAMEDOBJECT')
k = kids(tt)
if len(k) == 1:
object = parse_class(k[0])
elif len(k) == 2:
path = parse_instancename(kids(tt)[0])
object = parse_instance(kids(tt)[1])
object.path = path
else:
raise ParseError('Expecting one or two elements, got %s' %
repr(kids(tt)))
return (name(tt), attrs(tt), object)
| 5,349,828 |
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload Dyson cloud."""
# Nothing needs clean up
return True
| 5,349,829 |
def quat_correct(quat):
""" Converts quaternion to minimize Euclidean distance from previous quaternion (wxyz order) """
for q in range(1, quat.shape[0]):
if np.linalg.norm(quat[q-1] - quat[q], axis=0) > np.linalg.norm(quat[q-1] + quat[q], axis=0):
quat[q] = -quat[q]
return quat
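# Worked example for quat_correct() above (hedged; assumes numpy as np): the
# second quaternion is sign-flipped because q and -q encode the same rotation
# and the flipped form is closer to the previous row.
quats = np.array([[1.0, 0.0, 0.0, 0.0],
                  [-0.99, 0.0, 0.0, 0.1]])
print(quat_correct(quats))  # second row becomes [0.99, 0.0, 0.0, -0.1]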
| 5,349,830 |
def vis_bbox(im, dets):
"""Visual debugging of detections."""
for i in range(dets.shape[0]):
bbox = tuple(int(np.round(x)) for x in dets[i, :4])
# class_name = CLASS_NAME[int(dets[i, 4]) - 1]
class_name = ' '
score = 0.99
cv2.rectangle(im, bbox[0:2], bbox[2:4], (0, 204, 0), 2)
cv2.putText(im, '%s: %.3f' % (class_name, score), (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_PLAIN,
1.0, (0, 0, 255), thickness=1)
return im
| 5,349,831 |
def parse_record1(raw_record):
"""Parse raw record and return it as a set of unique symbols without \n"""
return set(raw_record) - {"\n"}
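# Worked example for parse_record1() above (hedged, not part of the original
# module): duplicate symbols collapse and the trailing newline is dropped.
print(parse_record1("abca\n"))  # -> {'a', 'b', 'c'} (set order is arbitrary)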
| 5,349,832 |
def test_config_start_no_api(test_microvm_with_ssh, vm_config_file):
"""Test microvm start when API server thread is disabled."""
test_microvm = test_microvm_with_ssh
log_fifo_path = os.path.join(test_microvm.path, 'log_fifo')
metrics_fifo_path = os.path.join(test_microvm.path, 'metrics_fifo')
log_fifo = log_tools.Fifo(log_fifo_path)
metrics_fifo = log_tools.Fifo(metrics_fifo_path)
test_microvm.create_jailed_resource(log_fifo.path, create_jail=True)
test_microvm.create_jailed_resource(metrics_fifo.path, create_jail=True)
_configure_vm_from_json(test_microvm, vm_config_file)
test_microvm.jailer.extra_args.update({'no-api': None})
test_microvm.spawn()
# Get Firecracker PID so we can check the names of threads.
firecracker_pid = test_microvm.jailer_clone_pid
# Get names of threads in Firecracker.
cmd = 'ps -T --no-headers -p {} | awk \'{{print $5}}\''.format(
firecracker_pid
)
# Retry running 'ps' in case it failed to list the firecracker process
# The regex matches any expression that contains 'firecracker' and does
# not contain 'fc_api'
retry_call(
utils.search_output_from_cmd,
fkwargs={
"cmd": cmd,
"find_regex": re.compile("^(?!.*fc_api)(?:.*)?firecracker",
re.DOTALL)
},
exceptions=RuntimeError,
tries=10,
delay=1)
# Check that microvm was successfully booted.
lines = log_fifo.sequential_reader(1)
assert lines[0].startswith('Running Firecracker')
| 5,349,833 |
def execute_contract_creation(
laser_evm, contract_initialization_code, contract_name=None
) -> Account:
""" Executes a contract creation transaction from all open states"""
# TODO: Resolve circular import between .transaction and ..svm to import LaserEVM here
open_states = laser_evm.open_states[:]
del laser_evm.open_states[:]
new_account = laser_evm.world_state.create_account(
0, concrete_storage=True, dynamic_loader=None
)
if contract_name:
new_account.contract_name = contract_name
for open_world_state in open_states:
next_transaction_id = get_next_transaction_id()
transaction = ContractCreationTransaction(
open_world_state,
BitVec("creator{}".format(next_transaction_id), 256),
next_transaction_id,
new_account,
Disassembly(contract_initialization_code),
[],
BitVec("gas_price{}".format(next_transaction_id), 256),
BitVec("call_value{}".format(next_transaction_id), 256),
BitVec("origin{}".format(next_transaction_id), 256),
CalldataType.SYMBOLIC,
)
_setup_global_state_for_execution(laser_evm, transaction)
laser_evm.exec(True)
return new_account
| 5,349,834 |
def isValid(text):
"""
Returns True if the input contains the phrase 'your awesome'.
Arguments:
text -- user-input, typically transcribed speech
"""
return bool(re.search(r'\byour awesome\b', text, re.IGNORECASE))
| 5,349,835 |
def list_shared_with(uri, async_req=False):
"""Return array sharing policies"""
(namespace, array_name) = split_uri(uri)
api_instance = client.client.array_api
try:
return api_instance.get_array_sharing_policies(
namespace=namespace, array=array_name, async_req=async_req
)
except GenApiException as exc:
raise tiledb_cloud_error.check_exc(exc) from None
| 5,349,836 |
def format_html_data_table(dataframe, list_of_malformed, addLineBreak=False):
"""
Returns the predicted values as the data table
"""
if list_of_malformed:
list_of_malformed = str(list_of_malformed)
else:
list_of_malformed = "None"
# format numeric data into string format
for column_name in dataframe.select_dtypes(include=[np.float]).columns:
dataframe[column_name] = dataframe[column_name].apply(lambda x: "{0:.2f}%".format(x))
return html.Div([html.P("Total Number of Audio Clips : "+ str(dataframe.shape[0]),
style={"color":"white",
'text-decoration':'underline'}),
html.P("Error while prediction: " + list_of_malformed,
style={"color":"white"})] + \
([html.Br()] if addLineBreak else []) + \
[html.Hr(),
dash_table.DataTable(id='datatable-interactivity-predictions',
columns=[{"name": format_label_name(i),
"id": i,
"deletable": True} for i in dataframe.columns],
data=dataframe.to_dict("rows"),
style_header={'backgroundColor': 'rgb(30, 30, 30)',
"fontWeight": "bold",
'border': '1px solid white'},
style_cell={'backgroundColor': 'rgb(50, 50, 50)',
'color': 'white',
'whiteSpace':'normal',
'maxWidth': '240px'},
style_table={"maxHeight":"350px",
"overflowY":"scroll",
"overflowX":"auto"}),
html.Hr()] + \
([html.Br()] if addLineBreak else []))
| 5,349,837 |
def colorDistance(col1, col2):
"""Returns a number between 0 and root(3) stating how similar
two colours are - distance in r,g,b, space. Only used to find
names for things."""
return math.sqrt(
(col1.red - col2.red)**2 +
(col1.green - col2.green)**2 +
(col1.blue - col2.blue)**2
)
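# Hedged sketch for colorDistance() above (assumes `math` is imported in the
# original module): any objects exposing .red/.green/.blue in [0, 1] work; the
# RGB namedtuple below is a hypothetical stand-in for a colour object.
from collections import namedtuple
RGB = namedtuple("RGB", "red green blue")
print(colorDistance(RGB(1, 0, 0), RGB(0, 1, 0)))  # -> sqrt(2) ~= 1.414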
| 5,349,838 |
def combine_from_streaming(stream: Iterable[runtime_pb2.Tensor]) -> runtime_pb2.Tensor:
""" Restore a result of split_into_chunks into a single serialized tensor """
stream = iter(stream)
first_chunk = next(stream)
serialized_tensor = runtime_pb2.Tensor()
serialized_tensor.CopyFrom(first_chunk)
buffer_chunks = [first_chunk.buffer]
for tensor_part in stream:
buffer_chunks.append(tensor_part.buffer)
serialized_tensor.buffer = b''.join(buffer_chunks)
return serialized_tensor
| 5,349,839 |
def portageq_envvar(options, out, err):
"""
Return configuration-defined variables. Use envvar2 instead; this will be removed.
"""
return env_var.function(options, out, err)
| 5,349,840 |
def create_hash_factory(hashfun, complex_types=False, universe_size=None):
"""Create a function to make hash functions
:param hashfun: hash function to use
:type hashfun: callable
:param complex_types: whether hash function supports hashing of complex types,
either through native support or through repr
:type complex_types: bool
:param universe_size: upper limit to hash value
:type universe_size: int, long
:returns: a hash factory
:rtype: callable
"""
def hash_factory(seed):
if complex_types:
if universe_size is None:
fun = lambda x: hashfun(x, seed)
else:
fun = lambda x: hashfun(x, seed) % universe_size
else:
if universe_size is None:
fun = lambda x: hashfun(hashable(x), seed)
else:
fun = lambda x: hashfun(hashable(x), seed) % universe_size
return fun
return hash_factory
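# Hedged usage sketch for create_hash_factory() above: build a family of
# seeded hash functions bounded by a universe size. `toy_hashfun` is a
# hypothetical stand-in for whatever seeded hash the original project uses.
def toy_hashfun(value, seed):
    return hash((value, seed))

make_hash = create_hash_factory(toy_hashfun, complex_types=True, universe_size=1000)
h0, h1 = make_hash(0), make_hash(1)
print(h0("some key"), h1("some key"))  # two seeded hash values, both in [0, 1000)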
| 5,349,841 |
def testParamXMLFile():
"""
@tests:
ParamXMLFile.__init__
ParamXMLFile.load
ParamXMLFile.store
"""
fh = pyopenms.ParamXMLFile()
p = pyopenms.Param()
fh.store(b"test.ini", p)
fh.load(b"test.ini", p)
| 5,349,842 |
def estimate_key(note_info, method="krumhansl", *args, **kwargs):
"""
Estimate key of a piece by comparing the pitch statistics of the
note array to key profiles [2]_, [3]_.
Parameters
----------
note_info : structured array, `Part` or `PerformedPart`
Note information as a `Part` or `PerformedPart` instances or
as a structured array. If it is a structured array, it has to
contain the fields generated by the `note_array` properties
of `Part` or `PerformedPart` objects. If the array contains
onset and duration information of both score and performance,
(e.g., containing both `onset_beat` and `onset_sec`), the score
information will be preferred.
method : {'krumhansl'}
Method for estimating the key. For now 'krumhansl' is the only
supported method.
args, kwargs
Positional and Keyword arguments for the key estimation method
Returns
-------
str
String representing the key name (i.e., Root(alteration)(m if minor)).
See `partitura.utils.key_name_to_fifths_mode` and
`partitura.utils.fifths_mode_to_key_name`.
References
----------
.. [2] Krumhansl, Carol L. (1990) "Cognitive foundations of musical pitch",
Oxford University Press, New York.
.. [3] Temperley, D. (1999) "What's key for key? The Krumhansl-Schmuckler
key-finding algorithm reconsidered". Music Perception. 17(1),
pp. 65--100.
"""
if method not in ("krumhansl",):
raise ValueError('For now the only valid method is "krumhansl"')
if method == "krumhansl":
kid = ks_kid
if "key_profiles" not in kwargs:
kwargs["key_profiles"] = "krumhansl_kessler"
else:
if kwargs["key_profiles"] not in VALID_KEY_PROFILES:
raise ValueError(
"Invalid key_profiles. " 'Valid options are "ks", "cmbs" or "kp"'
)
note_array = ensure_notearray(note_info)
return kid(note_array, *args, **kwargs)
| 5,349,843 |
def print_parsable_dstip(data, srcip, dstip):
"""Returns a parsable data line for the destination data.
:param data: the data source
:type data: dictionary
:param srcip: the source ip
:type srcip: string
:param dstip: the destination ip
:type dstip: string
:return: a line of urls and their hitcount
"""
line = []
for item in header_order:
if item in data[srcip]['targets'][dstip]:
value = data[srcip]['targets'][dstip][item]
elif item == "src":
value = srcip
elif item == "dst":
value = dstip.replace(":", "|")
else:
value = ""
if value != "":
line.append(str(value))
if 'url' in data[srcip]['targets'][dstip]:
line.append(print_parsable_urls(data[srcip]['targets'][dstip]['url']))
line = "|".join(line)
return line
| 5,349,844 |
def OrListSelector(*selectors) -> pyrosetta.rosetta.core.select.residue_selector.OrResidueSelector:
"""
OrResidueSelector, but for two or more selectors
(not a class, but returns an OrResidueSelector combining them)
:param selectors: residue selectors to combine
:return: the combined OrResidueSelector
"""
sele = pyrosetta.rosetta.core.select.residue_selector.FalseResidueSelector()
for subsele in selectors:
sele = pyrosetta.rosetta.core.select.residue_selector.OrResidueSelector(subsele, sele)
return sele
| 5,349,845 |
def plot_regress_exog(res, exog_idx, exog_name='', fig=None):
"""Plot regression results against one regressor.
This plots four graphs in a 2 by 2 figure: 'endog versus exog',
'residuals versus exog', 'fitted versus exog' and
'fitted plus residual versus exog'
Parameters
----------
res : result instance
result instance with resid, model.endog and model.exog as attributes
exog_idx : int
index of regressor in exog matrix
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : matplotlib figure instance
Notes
-----
This is currently very simple, no options or varnames yet.
"""
fig = utils.create_mpl_fig(fig)
if exog_name == '':
exog_name = 'variable %d' % exog_idx
#maybe add option for wendog, wexog
#y = res.endog
x1 = res.model.exog[:,exog_idx]
ax = fig.add_subplot(2,2,1)
#namestr = ' for %s' % self.name if self.name else ''
ax.plot(x1, res.model.endog, 'o')
ax.set_title('endog versus exog', fontsize='small')# + namestr)
ax = fig.add_subplot(2,2,2)
#namestr = ' for %s' % self.name if self.name else ''
ax.plot(x1, res.resid, 'o')
ax.axhline(y=0)
ax.set_title('residuals versus exog', fontsize='small')# + namestr)
ax = fig.add_subplot(2,2,3)
#namestr = ' for %s' % self.name if self.name else ''
ax.plot(x1, res.fittedvalues, 'o')
ax.set_title('Fitted versus exog', fontsize='small')# + namestr)
ax = fig.add_subplot(2,2,4)
#namestr = ' for %s' % self.name if self.name else ''
ax.plot(x1, res.fittedvalues + res.resid, 'o')
ax.set_title('Fitted plus residuals versus exog', fontsize='small')# + namestr)
fig.suptitle('Regression Plots for %s' % exog_name)
return fig
| 5,349,846 |
def capture_output(output=None):
"""Temporarily redirect stdout into a string buffer."""
if output is None:
output = StringIO()
try:
setup_redirect(output)
yield output
finally:
reset_redirect()
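# Hedged usage sketch for capture_output() above, assuming it is registered as
# a context manager (e.g. via contextlib.contextmanager) in the original
# module -- the decorator is not visible in this snippet -- and that
# setup_redirect() redirects sys.stdout as the docstring suggests:
with capture_output() as buf:
    print("hello")
print(buf.getvalue())  # the captured text, instead of it hitting stdout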
| 5,349,847 |
def get_circles_with_admin_access(account_id: int) -> List[Circle]:
"""
SELECT
management_style,
c_name
FROM (
SELECT
'SELF_ADMIN' AS management_style,
c.management_style AS c_management_style,
c.admin_circle AS c_admin_circle,
c.created_by AS c_created_by,
c.updated_by AS c_updated_by,
c.id AS c_id,
c.created_at AS c_created_at,
c.updated_at AS c_updated_at,
c.name AS c_name,
c.description AS c_description
FROM circle AS c
JOIN circle_member ON c.id = circle_member.circle
WHERE C.management_style = 'SELF_ADMIN' AND circle_member.account = 5
UNION
SELECT
'ADMIN_CIRCLE' AS management_style,
c.management_style AS c_management_style,
c.admin_circle AS c_admin_circle,
c.created_by AS c_created_by,
c.updated_by AS c_updated_by,
c.id AS c_id,
c.created_at AS c_created_at,
c.updated_at AS c_updated_at,
c.name AS c_name,
c.description AS c_description
FROM circle AS ac
JOIN circle AS C ON c.admin_circle = ac.id
JOIN circle_member ON ac.id = circle_member.circle
WHERE c.management_style = 'ADMIN_CIRCLE' AND circle_member.account = 5
) AS anon_1
"""
ac = aliased(Circle, name="ac")
c = aliased(Circle, name="c")
self_admin = db.session.query(c). \
join(c.members). \
filter(CircleMember.account_id == account_id). \
filter(c._management_style == CircleManagementStyle.SELF_ADMIN.name)
admin_circle = db.session.query(c). \
join(ac, c.admin_circle_id == ac.id). \
join(ac.members). \
filter(c._management_style == CircleManagementStyle.ADMIN_CIRCLE.name). \
filter(CircleMember.account_id == account_id)
return self_admin.union(admin_circle).all()
| 5,349,848 |
def combine(arr):
""" makes overlapping sequences 1 sequence """
def first(item):
return item[0]
def second(item):
return item[1]
if len(arr) == 0 or len(arr) == 1:
return arr
sarr = []
for c, val in enumerate(arr):
sarr.append((val[0], val[1], c))
sarr = sorted(sarr, key = second)
sarr = sorted(sarr, key = first)
chains = [[sarr[0][0], sarr[0][1], [sarr[0][2]]]]
for s, e, c in sarr[1:]: #start, end, counter
if s <= chains[-1][1] +1:
chains[-1][1] = max(e, chains[-1][1])
chains[-1][2].append(c)
else:
chains.append([s, e, [c]])
return chains
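# Worked example for combine() above (hedged, not part of the original
# module): the first two ranges overlap and are merged; the third stays
# separate. Each chain also records the indices of the inputs it absorbed.
print(combine([(1, 5), (4, 8), (10, 12)]))
# -> [[1, 8, [0, 1]], [10, 12, [2]]]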
| 5,349,849 |
def sub_fft(f_fft, g_fft):
"""Substraction of two polynomials (FFT representation)."""
return sub(f_fft, g_fft)
| 5,349,850 |
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy_demo.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names is not None:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
| 5,349,851 |
def create_demo():
"""
Create project cli demo
"""
typer.secho(
"Welcome to flybirds cli. Please enter any information to continue.",
fg=typer.colors.MAGENTA,
)
user_dict = {
'device_id': "127.0.0.1:8200",
'package_name': "ctrip.android.view",
'web_driver_agent': "com.fd.test.WebDriverAgentLib.xctrunner",
'headless': True,
'browser_type': ['chromium']
}
project_name = typer.prompt("Please input your project name>>")
user_dict['project_name'] = project_name
platform_start = "Please input your test platform? "
platform_ending = typer.style("(Android/IOS/Web)", fg=typer.colors.CYAN,
bold=True)
p_message = platform_start + platform_ending
test_platform = typer.prompt(p_message)
if test_platform is None or test_platform.strip().lower() not in [
'android', 'ios', 'web']:
test_platform = 'android'
test_platform = test_platform.strip().lower()
user_dict['test_platform'] = test_platform
if test_platform in ['android', 'ios']:
if test_platform == 'ios':
user_dict['package_name'] = "com.ctrip.inner.wireless"
is_bundle = typer.confirm(
"Do you want to configure your webDriverAgent now?"
"(this step can be skipped)")
if is_bundle:
web_driver_agent = typer.prompt(
"Please input your Bundle ID of"
" webDriverAgent?")
user_dict['web_driver_agent'] = web_driver_agent
else:
typer.secho(
"You can configure your Bundle ID of webDriverAgent later"
" in the project's"
" flybirds_config.json file.", fg=typer.colors.YELLOW)
connect_device = typer.confirm(
"Do you want to configure your deviceId now?"
"(this step can be skipped)")
if connect_device:
device_id = typer.prompt("Please input your deviceId?")
user_dict['device_id'] = device_id
else:
typer.secho(
"You can configure your deviceId later in the project's"
" flybirds_config.json file.", fg=typer.colors.YELLOW)
if_package = typer.confirm(
"Do you want to configure your packageName now?"
"(this step can be skipped)")
if if_package:
package_name = typer.prompt(
"Please input your packageName?(You can use"
" the ADB command to view your package name"
", such as: adb shell pm list packages |"
" findstr 'trip')"
)
user_dict['package_name'] = package_name
else:
typer.secho(
"You can configure your packageName later in the project's"
" flybirds_config.json file.", fg=typer.colors.YELLOW)
if test_platform == 'web':
message_start = "Please enter the number represented by the " \
"browserType you want to test? Multiple browsers " \
"are separated by commas(,)."
ending = typer.style("(1:chromium 2:firefox 3:webkit)",
fg=typer.colors.CYAN, bold=True)
message = message_start + ending
out_index = typer.prompt(message)
index_arr = out_index.strip().split(',')
browser_dict = {
'1': "chromium",
'2': "firefox",
'3': "webkit"
}
browser_types = []
[browser_types.append(browser_dict.get(i)) for i in index_arr if
i in browser_dict.keys()]
# add default value
if len(browser_types) < 1:
browser_types.append('chromium')
user_dict['browser_type'] = browser_types
headless = typer.confirm(
"Do you want to launch browser in headless mode?")
user_dict['headless'] = headless
try:
typer.echo(f"Cloning into '{project_name}'...")
total = 900
with typer.progressbar(length=total, label="Processing") as progress:
demo_path = copy_from_template(progress, user_dict)
typer.secho(
f"Done it! Create Project {project_name} has success!\n"
f"You can find it at: {demo_path}",
fg=typer.colors.MAGENTA,
)
except Exception as e:
typer.secho(
f"Error!! create project {project_name} has error, errMsg: {e}",
fg=typer.colors.MAGENTA,
err=True,
)
| 5,349,852 |
def load_parameters(model_type, parameter_file):
"""
Loads in all parameter values given in a parameter file.
Parameters:
model_type (str):
sets the type of model, which determines the exact parameter set that is needed. Possible values for
the parameter model_type are: 'full model', 'density model' and 'constant force model'.
parameter_file (str):
the path to the parameter file.
Returns:
list: the values of the parameters in the order as specified in the <model_type>_parameter_names lists.
"""
if model_type == 'full model':
parameters = [0.]*len(full_model_parameter_names)
parameters_found = [0]*len(full_model_parameter_names)
parameter_names = full_model_parameter_names
elif model_type == 'density model':
parameters = [0.]*len(density_model_parameter_names)
parameters_found = [0]*len(density_model_parameter_names)
parameter_names = density_model_parameter_names
else:
print("ERROR: the parameter <model_type> has to be given one of the three values: "
"'full model' or 'density model'.")
sys.exit(1)
f = open(parameter_file)
for line in f.readlines():
line_split = line.split()
try:
idx = parameter_names.index(line_split[0])
parameters_found[idx] = 1
if line_split[0] == 'N_h' or line_split[0] == 'N_max':
parameters[idx] = int(line_split[2])
else:
parameters[idx] = float(line_split[2])
except ValueError:
print("WARNING: Parameter {} cannot be interpreted for the model type "
"'{}'!".format(line_split[0], model_type))
f.close()
if 0 in parameters_found:
print("ERROR: Not all necessary parameters for the model type '{}' were defined in the given "
"file!".format(model_type))
sys.exit(1)
return parameters
| 5,349,853 |
def handle_dat_edge(data_all):
"""
Split the info field of every dat_edge record and return the result for later computation.
To simplify the computation, the time information is ignored and the contact records of all months are aggregated together.
"""
def cal_multi_3(string):
s = string.split(',')
month_times = len(s)
df = list(map(lambda x: list(map(eval, x.split(':')[1].split('_'))), s))
times_sum, weight_sum = pd.DataFrame(df).sum().values
return month_times, times_sum, weight_sum
def cal_single_3(string):
times_sum, weight_sum = list(map(eval, string.split(':')[1].split('_')))
return 1, times_sum, weight_sum
length = list(map(len, map(lambda x: x.split(','), data_all['info'])))
dat_edge_single = data_all[np.array(length) == 1]
dat_edge_multi = data_all[np.array(length) > 1]
multi_pre_df = map(cal_multi_3, dat_edge_multi['info'])
multi_feature_3 = pd.DataFrame(list(multi_pre_df), columns=['month_times', 'times_sum', 'weight_sum'])
id_part = dat_edge_multi[['from_id', 'to_id']].reset_index(drop=True)
multi_result = pd.concat([id_part, multi_feature_3], axis=1)
single_pre_df = map(cal_single_3, dat_edge_single['info'])
single_feature_3 = pd.DataFrame(list(single_pre_df), columns=['month_times', 'times_sum', 'weight_sum'])
id_part = dat_edge_single[['from_id', 'to_id']].reset_index(drop=True)
single_result = pd.concat([id_part, single_feature_3], axis=1)
both_result = pd.concat([multi_result, single_result], ignore_index=True)
return both_result
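# Hedged sketch of calling handle_dat_edge() above (assumes pandas as pd and
# numpy as np in the original module). The "<label>:<times>_<weight>" info
# format and the month labels below are assumptions inferred from the parsing
# code, not from the original data.
dat_edge = pd.DataFrame({
    "from_id": [1, 2],
    "to_id": [3, 4],
    "info": ["2017-11:2_5.0", "2017-11:1_2.0,2017-12:3_4.0"],
})
print(handle_dat_edge(dat_edge))
# one row per edge, with month_times, times_sum and weight_sum aggregated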
| 5,349,854 |
def quadraric_distortion_scale(distortion_coefficient, r_squared):
"""Calculates a quadratic distortion factor given squared radii.
The distortion factor is 1.0 + `distortion_coefficient` * `r_squared`. When
`distortion_coefficient` is negative (barrel distortion), the distorted radius
is monotonically increasing only when
`r_squared` < r_squared_max = -1 / (3 * distortion_coefficient).
Args:
distortion_coefficient: A tf.Tensor of a floating point type. The rank can
be from zero (scalar) to r_squared's rank. The shape of
distortion_coefficient will be appended by ones until the rank equals that
of r_squared.
r_squared: A tf.Tensor of a floating point type, containing
(x/z)^2 + (y/z)^2. We use r_squared rather than r to avoid an unnecessary
sqrt, which may introduce gradient singularities. The non-negativity of
r_squared only enforced in debug mode.
Returns:
A tf.Tensor of r_squared's shape, the correction factor that should
multiply the projective coordinates (x/z) and (y/z) to apply the
distortion.
"""
return 1 + distortion_coefficient * r_squared
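# Worked numeric sketch for quadraric_distortion_scale() above (hedged; plain
# floats broadcast the same way as tf.Tensors here): with a barrel coefficient
# of -0.2 and r_squared = 0.5 the scale is 1 + (-0.2) * 0.5 = 0.9, and the
# monotonic range ends at r_squared_max = -1 / (3 * -0.2) ~= 1.67.
print(quadraric_distortion_scale(-0.2, 0.5))  # -> 0.9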
| 5,349,855 |
def convert_pdf_to_png(source, target):
"""
Wrapper for the ImageMagick convert utility.
:param source: Source file in PDF format.
:type source: str
:param target: Output file in PNG format.
:type target: str
"""
assert os.path.exists(source), "Source file does not exist: %s" % source
command = "convert -flatten -density 300 -fuzz 1% -trim +repage {} {}".format(
source, target)
command_ok = execute_command(command)
if not command_ok:
print("ImageMagick does not seem to be installed \
or is not in the path - not adding any images.")
| 5,349,856 |
def run(executable: str, *args: str):
"""
Run executable using core.process configuration, replacing bin with configured one, appending and prepending args.
"""
command_list = effective_command(executable, *args)
process = subprocess.run(command_list,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if os.name == "nt":
# On windows, there's ANSI code after output that has to be dropped...
try:
eof_index = process.stdout.index(b"\x1b[0m")
process.stdout = process.stdout[:eof_index]
except ValueError:
pass
return process.stdout
| 5,349,857 |
def open_path(path, **kwargs):
"""
Parameters
----------
path: str
window: tuple
e.g. ('1990-01-01','2030-01-01')
kwargs: all other kwargs the particular file might take, see the module for details
Returns
-------
"""
info = _tools.path2info(path)
module = arm_products[info['product']]['module']
out = module.open_path(path, **kwargs)
return out
| 5,349,858 |
def generate_code() -> str:
"""Generates password reset code
:return: Password reset code
:rtype: str
"""
return str(uuid.uuid4())
| 5,349,859 |
def process_file(name, files, url):
"""
Save file to shared folder on server, and return
the name of the file.
"""
def allowed_file(filename):
if "." not in filename:
return False
ext = filename.rsplit(".", 1)[1].lower()
return ext in config.ALLOWED_EXTENSIONS
if name not in files:
flash("No file part found")
return redirect(url)
file = files[name]
if not file.filename:
flash("No file selected")
return redirect(url)
if not allowed_file(file.filename):
flash("File is not allowed")
return redirect(url)
filename = secure_filename(file.filename)
filepath = os.path.join(config.INPUTS_FOLDER, filename)
file.save(filepath)
return filename
| 5,349,860 |
def decode_auth_token(auth_token):
"""
Decodes the auth token
:param auth_token:
:return: integer|string
"""
try:
payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))
return payload['sub']
except jwt.ExpiredSignatureError:
return 'Signature expired. Please log in again.'
except jwt.InvalidTokenError:
return 'Invalid token. Please log in again.'
| 5,349,861 |
def cached(path: str, validate: bool = False):
"""Similar to ``define``, but cache to a file.
:param path:
the path of the cache file to use
:param validate:
if `True`, always execute the function. The loaded result will be
passed to the function, when the cache exists. In that case the
function should return the value to use. If the returned value is
not identical to the loaded value, the cache is updated with the
new value.
Usage::
@cached('./cache/result')
def dataset():
...
return result
or::
@cached('./cache/result', validate=True)
def model(result=None):
if result is not None:
# running to validate ...
return result
"""
def update_cache(result):
print("save cache", path)
with open(path, "wb") as fobj:
pickle.dump(result, fobj)
def load_cache():
print("load cache", path)
with open(path, "rb") as fobj:
return pickle.load(fobj)
def decorator(func):
if os.path.exists(path):
result = load_cache()
if not validate:
return result
else:
print("validate")
new_result = func(result)
if new_result is not result:
update_cache(new_result)
return new_result
else:
print("compute")
result = func()
update_cache(result)
return result
return decorator
| 5,349,862 |
def json(*arguments):
"""
Transform *arguments parameters into JSON.
"""
return ast.Json(*arguments)
| 5,349,863 |
def get_default_volume_size():
"""
:returns int: the default volume size (in bytes) supported by the
backend the acceptance tests are using.
"""
default_volume_size = environ.get("FLOCKER_ACCEPTANCE_DEFAULT_VOLUME_SIZE")
if default_volume_size is None:
raise SkipTest(
"Set acceptance testing default volume size using the " +
"FLOCKER_ACCEPTANCE_DEFAULT_VOLUME_SIZE environment variable.")
return int(default_volume_size)
| 5,349,864 |
def set_global_user(**decorator_kwargs):
"""
Wrap a Flask blueprint view function to set the global user
``flask.g.user`` to an instance of ``CurrentUser``, according to the
information from the JWT in the request headers. The validation will also
set the current token.
This requires a flask application and request context.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
set_current_user(**decorator_kwargs)
return func(*args, **kwargs)
return wrapper
return decorator
| 5,349,865 |
def teleport(targets: selector, destination: Union[selector, AbsPos, RelPos, LocPos, CurrentPos], facing: Union[selector, AbsPos, RelPos, LocPos, CurrentPos] = None, facing_type: container_type = container_type.entity):
"""
Args:
targets (selector): The targets to teleport
destination (Union[selector, AbsPos, RelPos, LocPos, CurrentPos]): The location to teleport the player.
facing (Union[selector, AbsPos, RelPos, LocPos, CurrentPos], optional): The location to face. Defaults to None.
facing_type (container_type, optional): Automatically assigned based on the parameter for ``facing``. Override when results aren't as expected. Defaults to container_type.entity.
"""
if facing is None:
Handler._cmds.append(f"teleport {Handler._translate(targets)} {Handler._translate(destination)}")
else:
# Get the container type
if Handler._translate(facing_type) == "storage":
Handler._warn("Container type 'storage' is not supported for 'teleport'. Assuming 'block'.")
facing = "block"
elif Handler._translate(facing).startswith("@"):
facing_type = "entity"
elif isinstance(facing, (AbsPos, RelPos, LocPos, CurrentPos)):
facing_type = "block"
# Modify the end of the command depending on whether or not the facing type is an entity
if Handler._translate(facing_type) == "entity":
cmd_suffix = f"facing entity {Handler._translate(facing)}"
else:
cmd_suffix = f"facing {Handler._translate(facing)}"
# Allows for facing a block or entity if the destination is a selector
if Handler._translate(destination).startswith("@"):
Handler._cmds.append(f"execute as {Handler._translate(targets)} at {Handler._translate(destination)} run tp @s ~ ~ ~ {cmd_suffix}")
# Allows for teleporting to another entity while facing a block
elif isinstance(facing, (AbsPos, RelPos, LocPos, CurrentPos)):
Handler._cmds.append(f"execute positioned {Handler._translate(facing)} as {Handler._translate(destination)} facing entity @s feet positioned as @s positioned ^ ^ ^1 facing entity @s feet run tp {Handler._translate(targets)} ^ ^ ^1 ~ ~")
else:
Handler._cmds.append(f"teleport {Handler._translate(targets)} {Handler._translate(destination)} {cmd_suffix}")
| 5,349,866 |
def create_game_directory(gamedir, template, setting_dict=None):
"""
Initialize a new game directory named ``gamedir``
at the current path. This means copying the
template directory from muddery's root.
"""
def copy_tree(source, destination):
"""
copy file tree
"""
if not os.path.exists(destination):
# If does not exist, create one.
os.mkdir(destination)
# traverse files and folders
names = os.listdir(source)
for name in names:
srcname = os.path.join(source, name)
dstname = os.path.join(destination, name)
try:
if os.path.isdir(srcname):
# If it is a folder, copy it recursively.
copy_tree(srcname, dstname)
else:
# Copy file.
shutil.copy2(srcname, dstname)
except Exception as e:
print("Cannot copy file %s to %s: %s." % (srcname, dstname, e))
global GAME_DIR
GAME_DIR = gamedir
if os.path.exists(GAME_DIR):
print("Cannot create new Muddery game dir: '%s' already exists." % gamedir)
sys.exit()
template_dir = ""
if template:
template_dir = os.path.join(configs.MUDDERY_TEMPLATE, template)
if not os.path.exists(template_dir):
print('Sorry, template "%s" does not exist.\nThese are available templates:' % template)
dirs = os.listdir(configs.MUDDERY_TEMPLATE)
for dir in dirs:
full_path = os.path.join(configs.MUDDERY_TEMPLATE, dir)
if os.path.isdir(full_path):
print(" %s" % dir)
print("")
sys.exit()
# copy default template directory
default_template = os.path.join(configs.MUDDERY_LIB, configs.TEMPLATE_DIR)
shutil.copytree(default_template, GAME_DIR)
# write config file
create_config_file(gamedir, template)
if template_dir:
copy_tree(template_dir, GAME_DIR)
# pre-build settings file in the new GAME_DIR
create_settings_file(setting_dict)
| 5,349,867 |
def test_ts_ground_elements_surfaces():
"""Check timeseries ground elements are created correctly"""
# Create timeseries coords
gnd_element_coords = TsLineCoords.from_array(
np.array([[[-1, -1], [0, 0]], [[1, 1], [0, 0]]]))
pt_coords_1 = TsPointCoords.from_array(np.array([[-0.5, -1], [0, 0]]))
pt_coords_2 = TsPointCoords.from_array(np.array([[0.5, 0], [0, 0]]))
# Create gnd element
gnd_element = TsGroundElement(
gnd_element_coords,
list_ordered_cut_pts_coords=[pt_coords_1, pt_coords_2])
# Check that structures contain the correct number of ts surfaces
assert len(gnd_element.surface_list) == 3
assert len(gnd_element.surface_dict[0]['left']) == 1
assert len(gnd_element.surface_dict[1]['left']) == 2
assert len(gnd_element.surface_dict[0]['right']) == 2
assert len(gnd_element.surface_dict[1]['right']) == 1
# Check that the objects are the same
assert (gnd_element.surface_list[0]
== gnd_element.surface_dict[0]['left'][0])
assert (gnd_element.surface_list[0]
== gnd_element.surface_dict[1]['left'][0])
assert (gnd_element.surface_list[1]
== gnd_element.surface_dict[0]['right'][0])
assert (gnd_element.surface_list[1]
== gnd_element.surface_dict[1]['left'][1])
assert (gnd_element.surface_list[2]
== gnd_element.surface_dict[0]['right'][1])
assert (gnd_element.surface_list[2]
== gnd_element.surface_dict[1]['right'][0])
# Now check surfaces lengths
np.testing.assert_allclose(gnd_element.surface_list[0].length, [0.5, 0])
np.testing.assert_allclose(gnd_element.surface_list[1].length, [1, 1])
np.testing.assert_allclose(gnd_element.surface_list[2].length, [0.5, 1])
# Check coords of surfaces
np.testing.assert_allclose(gnd_element.surface_list[0].b1.x, [-1, -1])
np.testing.assert_allclose(gnd_element.surface_list[0].b2.x, [-0.5, -1])
| 5,349,868 |
def async_register_implementation(
hass: HomeAssistant, domain: str, implementation: AbstractOAuth2Implementation
) -> None:
"""Register an OAuth2 flow implementation for an integration."""
if isinstance(implementation, LocalOAuth2Implementation) and not hass.data.get(
DATA_VIEW_REGISTERED, False
):
hass.http.register_view(OAuth2AuthorizeCallbackView()) # type: ignore
hass.data[DATA_VIEW_REGISTERED] = True
implementations = hass.data.setdefault(DATA_IMPLEMENTATIONS, {})
implementations.setdefault(domain, {})[implementation.domain] = implementation
| 5,349,869 |
def showIntroScreen(screenSurf, p1name, p2name):
"""This is the screen that plays if the user selected "view intro" from the starting screen."""
screenSurf.fill(SKY_COLOR)
drawText('P y t h o n G O R I L L A S', screenSurf, SCR_WIDTH / 2, 15, WHITE_COLOR, SKY_COLOR, pos='center')
drawText('STARRING:', screenSurf, SCR_WIDTH / 2, 55, WHITE_COLOR, SKY_COLOR, pos='center')
drawText('%s AND %s' % (p1name, p2name), screenSurf, SCR_WIDTH / 2, 115, WHITE_COLOR, SKY_COLOR, pos='center')
x = 278
y = 175
for i in range(2):
drawGorilla(screenSurf, x-13, y, RIGHT_ARM_UP)
drawGorilla(screenSurf, x+47, y, LEFT_ARM_UP)
pygame.display.update()
time.sleep(2)
drawGorilla(screenSurf, x-13, y, LEFT_ARM_UP)
drawGorilla(screenSurf, x+47, y, RIGHT_ARM_UP)
pygame.display.update()
time.sleep(2)
for i in range(4):
drawGorilla(screenSurf, x-13, y, LEFT_ARM_UP)
drawGorilla(screenSurf, x+47, y, RIGHT_ARM_UP)
pygame.display.update()
time.sleep(0.3)
drawGorilla(screenSurf, x-13, y, RIGHT_ARM_UP)
drawGorilla(screenSurf, x+47, y, LEFT_ARM_UP)
pygame.display.update()
time.sleep(0.3)
| 5,349,870 |
def sample_wfreq(sample):
"""Return the Weekly Washing Frequency as a number."""
# `sample[3:]` strips the `BB_` prefix
results = session.query(Samples_Metadata.WFREQ).\
filter(Samples_Metadata.SAMPLEID == sample[3:]).all()
wfreq = np.ravel(results)
# Return only the first integer value for washing frequency
return jsonify(int(wfreq[0]))
| 5,349,871 |
def generate_static_sku_detail_html(sku_id):
"""
    Generate the static product detail page
    :param sku_id: product SKU id
:return:
"""
categories = get_categories()
    # Get the SKU and its specifications
sku = SKU.objects.get(id=sku_id)
goods = sku.goods
sku_specs = sku.skuspecification_set.all().order_by('spec_id')
sku_key = list()
for spec in sku_specs:
sku_key.append(spec.option.id)
skus = goods.sku_set.filter(is_launched=True)
    # Cache the spec-option-to-SKU map for each goods item to avoid rebuilding it
map = cache.get('goods' + str(goods.id))
if map is None:
sku_specs_option_sku_id_map = dict()
for s in skus:
s_specs = s.skuspecification_set.all().order_by('spec_id')
s_key = list()
for s_spec in s_specs:
s_key.append(s_spec.option.id)
sku_specs_option_sku_id_map[tuple(s_key)] = s.id
cache.set('goods' + str(goods.id), sku_specs_option_sku_id_map, 3600)
else:
sku_specs_option_sku_id_map = map
goods_specs = goods.goodsspecification_set.all().order_by('id')
if len(sku_key) < len(goods_specs):
return
for index, spec in enumerate(goods_specs):
key = sku_key[:]
spec_options = spec.specificationoption_set.all().order_by('id')
for option in spec_options:
key[index] = option.id
option.sku_id = sku_specs_option_sku_id_map.get(tuple(key))
spec.spec_options = spec_options
data = {
'sku': sku,
'goods_specs': goods_specs,
'goods': goods,
}
    # Assemble the template context
context = {
'categories': categories,
'goods': data.get('goods'),
'sku': data.get('sku'),
'specs': data.get('goods_specs')
}
    # Render the template and write the static page
template = loader.get_template('detail.html')
html_text = template.render(context)
file_path = os.path.join(settings.STATICFILES_DIRS[0], 'detail/'+str(sku_id)+'.html')
with open(file_path, 'w', encoding='utf-8') as f:
f.write(html_text)
| 5,349,872 |
def setup_shed_tools_for_test(app, tmpdir, testing_migrated_tools, testing_installed_tools):
"""Modify Galaxy app's toolbox for migrated or installed tool tests."""
if testing_installed_tools:
# TODO: Do this without modifying app - that is a pretty violation
# of Galaxy's abstraction - we shouldn't require app at all let alone
# be modifying it.
tool_configs = app.config.tool_configs
# Eliminate the migrated_tool_panel_config from the app's tool_configs, append the list of installed_tool_panel_configs,
# and reload the app's toolbox.
relative_migrated_tool_panel_config = os.path.join(app.config.root, MIGRATED_TOOL_PANEL_CONFIG)
if relative_migrated_tool_panel_config in tool_configs:
tool_configs.remove(relative_migrated_tool_panel_config)
for installed_tool_panel_config in INSTALLED_TOOL_PANEL_CONFIGS:
tool_configs.append(installed_tool_panel_config)
from galaxy import tools # delay import because this brings in so many modules for small tests # noqa: E402
app.toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
| 5,349,873 |
def OffsiteRestore(
source_dir,
encryption_password=None,
dir_substitution=None,
display_only=False,
ssd=False,
output_stream=sys.stdout,
preserve_ansi_escape_sequences=False,
):
"""\
Restores content created by previous Offsite backups.
"""
dir_substitutions = dir_substitution
del dir_substitution
with StreamDecorator.GenerateAnsiSequenceStream(
output_stream,
preserve_ansi_escape_sequences=preserve_ansi_escape_sequences,
) as output_stream:
with output_stream.DoneManager(
line_prefix="",
prefix="\nResults: ",
suffix="\n",
) as dm:
# Get the dirs
dirs = []
for item in os.listdir(source_dir):
fullpath = os.path.join(source_dir, item)
if not os.path.isdir(fullpath):
continue
dirs.append(fullpath)
if not dirs:
dm.stream.write(
"No subdirectories were found in '{}'.\n".format(source_dir),
)
dm.result = -1
return dm.result
dirs = sorted(dirs)
# Get the file data
file_data = OrderedDict()
hashed_filenames = {}
dm.stream.write(
"Reading file data from {}...".format(inflect.no("directory", len(dirs))),
)
with dm.stream.DoneManager(
suffix="\n",
) as dir_dm:
for index, dir in enumerate(dirs):
dir_dm.stream.write(
"'{}' ({} of {})...".format(dir, index + 1, len(dirs)),
)
with dir_dm.stream.DoneManager() as this_dir_dm:
data_filename = os.path.join(dir, DATA_FILENAME)
if not os.path.isfile(data_filename):
# See if there is compressed data to decompress
for zipped_ext in ["", ".001"]:
potential_filename = os.path.join(
dir,
"{}{}".format(OFFSITE_BACKUP_FILENAME, zipped_ext),
)
if not os.path.isfile(potential_filename):
continue
# Extract the data
temp_dir = dir + ".tmp"
FileSystem.RemoveTree(temp_dir)
FileSystem.MakeDirs(temp_dir)
this_dir_dm.stream.write("Decompressing data...")
with this_dir_dm.stream.DoneManager(
suffix="\n",
) as decompress_dm:
command_line = '7za e -y "-o{dir}"{password} "{input}"'.format(
dir=temp_dir,
input=potential_filename,
password=' "-p{}"'.format(
encryption_password,
) if encryption_password else "",
)
decompress_dm.result = Process.Execute(
command_line,
decompress_dm.stream,
)
if decompress_dm.result != 0:
return decompress_dm.result
this_dir_dm.stream.write("Removing original data...")
with this_dir_dm.stream.DoneManager():
FileSystem.RemoveTree(dir)
this_dir_dm.stream.write("Restoring compressed data...")
with this_dir_dm.stream.DoneManager():
shutil.move(temp_dir, dir)
break
if not os.path.isfile(data_filename):
this_dir_dm.stream.write(
"INFO: The file '{}' was not found in the directory '{}'.\n".format(
DATA_FILENAME,
dir,
),
)
this_dir_dm.result = 1
continue
try:
with open(data_filename) as f:
data = json.load(f)
except:
this_dir_dm.stream.write(
"ERROR: The data in '{}' is corrupt.\n".format(
data_filename,
),
)
this_dir_dm.result = -1
continue
for file_info_index, file_info in enumerate(data):
operation = file_info["operation"]
if operation not in ["add", "modify", "remove"]:
this_dir_dm.stream.write(
"ERROR: The file info operation '{}' is not valid (Index: {}).\n".format(
operation,
file_info_index,
),
)
this_dir_dm.result = -1
continue
filename = file_info["filename"]
# Check if the data is in the expected state
if operation == "add":
if filename in file_data:
this_dir_dm.stream.write(
"ERROR: Information for the file '{}' has already been added and cannot be added again (Index: {}).\n".format(
filename,
file_info_index,
),
)
this_dir_dm.result = -1
continue
elif operation in ["modify", "remove"]:
if filename not in file_data:
this_dir_dm.stream.write(
"ERROR: Information for the file '{}' was not previously provided (Index: {}).\n".format(
filename,
file_info_index,
),
)
this_dir_dm.result = -1
continue
else:
assert False, operation
# Add or remove the data
if operation in ["add", "modify"]:
hash = file_info["hash"]
if hash not in hashed_filenames:
hashed_filename = os.path.join(dir, hash)
if not os.path.isfile(hashed_filename):
this_dir_dm.stream.write(
"ERROR: Contents for the file '{}' were not found at '{}' (Index: {}).\n".format(
filename,
                                                hashed_filename,
file_info_index,
),
)
this_dir_dm.result = -1
continue
hashed_filenames[hash] = hashed_filename
file_data[filename] = hashed_filenames[hash]
elif operation == "remove":
del file_data[filename]
else:
assert False, operation
keys = sorted(six.iterkeys(file_data))
# Perform destination substitutions (if necessary)
if dir_substitutions:
for key in keys:
for k, v in six.iteritems(dir_substitutions):
new_key = key.replace(k, v)
file_data[new_key] = file_data[key]
del file_data[key]
keys = sorted(six.iterkeys(file_data))
if display_only:
dm.stream.write(
"{} to restore...\n\n".format(inflect.no("file", len(keys))),
)
for key in keys:
dm.stream.write(" - {0:<100} <- {1}\n".format(key, file_data[key]))
return dm.result
with dm.stream.SingleLineDoneManager("Copying Files...") as copy_dm:
# ----------------------------------------------------------------------
def Execute(task_index, on_status_update):
dest = keys[task_index]
source = file_data[dest]
on_status_update(FileSystem.GetSizeDisplay(os.path.getsize(source)))
dest_dir = os.path.dirname(dest)
if not os.path.isdir(dest_dir):
try:
os.makedirs(dest_dir)
except:
# This can happen when attempting to create the dir from
# multiple threads simultaneously. If the error is something
# else, the copy statement below will raise an exception.
pass
shutil.copy2(source, dest)
# ----------------------------------------------------------------------
copy_dm.result = TaskPool.Execute(
[
TaskPool.Task("'{}' -> '{}'".format(file_data[key], key), Execute)
for key in keys
],
optional_output_stream=copy_dm.stream,
progress_bar=True,
num_concurrent_tasks=None if ssd else 1,
)
if copy_dm.result != 0:
return copy_dm.result
return dm.result
| 5,349,874 |
def speak_google(text, filename, model):
"""Synthesizes speech from the input string of text."""
from google.cloud import texttospeech
client = texttospeech.TextToSpeechClient()
input_text = texttospeech.types.SynthesisInput(text=text)
# Note: the voice can also be specified by name.
# Names of voices can be retrieved with client.list_voices().
voice = texttospeech.types.VoiceSelectionParams(
language_code='en-US',
ssml_gender=texttospeech.enums.SsmlVoiceGender.FEMALE,
name=model)
audio_config = texttospeech.types.AudioConfig(
audio_encoding=texttospeech.enums.AudioEncoding.MP3)
response = client.synthesize_speech(input_text, voice, audio_config)
# The response's audio_content is binary.
with open(filename, 'wb') as out:
out.write(response.audio_content)
    print('Audio content written to file %s' % filename)
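# Usage sketch (hedged): 'en-US-Wavenet-F' is one of Google's US English voice names;
# any voice name returned by client.list_voices() should work for the `model` argument.
#   speak_google("Hello from the text-to-speech demo.", "hello.mp3", "en-US-Wavenet-F")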
| 5,349,875 |
def mlp_hyperparameter_tuning(no_of_hidden_neurons, epoch, alpha, roh, n_iter_no_change, X_train, X_validation, y_train, y_validation):
"""
INPUT
        no_of_hidden_neurons: 1D int array containing candidate numbers of neurons
                              in the 1st hidden layer (hyperparameter)
        epoch: 1D int array containing candidate numbers of epochs (hyperparameter)
        alpha: 1D float array containing candidate learning rates (hyperparameter)
        roh: 1D float array containing candidate tolerance (roh) values (hyperparameter)
        n_iter_no_change: 1D int array containing candidate values for the
                          number of iterations with no improvement to wait before stopping fitting (hyperparameter).
        X_train: 2D array of shape (no of patterns, no of features)
        X_validation: 2D array of shape (no of patterns, no of features)
        y_train: 1D array of shape (no of patterns,)
        y_validation: 1D array of shape (no of patterns,)
    OUTPUT
        best_hyperparameter: a tuple (no_of_hidden_neurons, epoch, alpha, roh, n_iter_no_change)
                             giving the combination with the best accuracy on the validation set.
"""
val_acc = []
for i in range(0, epoch.shape[0]):
mlp_classifier = MLPClassifier(hidden_layer_sizes = (no_of_hidden_neurons[i],), activation = 'logistic', solver = 'sgd', learning_rate = 'constant',\
learning_rate_init = alpha[i], max_iter = epoch[i], shuffle = True, random_state = 100, tol = roh[i],\
verbose = False, early_stopping = True, n_iter_no_change = n_iter_no_change[i]).fit(X_train, y_train)
        # Evaluate validation accuracy for this hyperparameter combination
predicted = mlp_classifier.predict(X_validation)
val_acc.append(accuracy_score(y_validation, predicted)*100)
# Get the maximum accuracy on validation
max_value = max(val_acc)
max_index = val_acc.index(max_value)
best_hyperparameter = (no_of_hidden_neurons[max_index], epoch[max_index], alpha[max_index], roh[max_index], n_iter_no_change[max_index])
print("Best Hyperparameter:")
print("No of neurons in the 1st hidden layer = ", no_of_hidden_neurons[max_index])
print("Epoch = ", epoch[max_index])
print("Alpha = ", alpha[max_index])
print("Roh = ", roh[max_index])
print("n_iter_no_change (Number of iterations with no improvement) = ", n_iter_no_change[max_index])
return best_hyperparameter
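# Usage sketch (hedged): all five hyperparameter arrays must have the same length,
# since combination i is built by indexing every array at position i.
#   grid_neurons = np.array([16, 32, 64])
#   grid_epochs = np.array([200, 200, 300])
#   grid_alpha = np.array([0.01, 0.005, 0.001])
#   grid_roh = np.array([1e-4, 1e-4, 1e-4])
#   grid_patience = np.array([10, 10, 15])
#   best = mlp_hyperparameter_tuning(grid_neurons, grid_epochs, grid_alpha, grid_roh,
#                                    grid_patience, X_train, X_validation, y_train, y_validation)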
| 5,349,876 |
def setup_work_directory(work_dir, text_file):
"""Set up a temporary working directory."""
me = os.path.realpath(__file__)
my_dir = os.path.dirname(me)
file_src = os.path.join(TEX_FILE, text_file)
file_dst = os.path.join(work_dir, text_file)
shutil.copyfile(file_src, file_dst)
# copy static assets
dir_src = os.path.join(my_dir, ASSETS_DIR_SRC)
dir_dst = os.path.join(work_dir, ASSETS_DIR_DST)
shutil.copytree(dir_src, dir_dst)
| 5,349,877 |
def play(p1:list[int], p2:list[int]) -> list[int]:
"""Gets the final hand of the winning player"""
while p1 and p2:
a = p1.pop(0)
b = p2.pop(0)
if a > b:
p1 += [a, b]
else:
p2 += [b, a]
return p1 + p2
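# Minimal usage sketch (illustrative decks from the classic "Combat" example).
# Note that play() consumes both input lists in place via pop(0).
#   >>> play([9, 2, 6, 3, 1], [5, 8, 4, 7, 10])
#   [3, 2, 10, 6, 8, 5, 9, 4, 7, 1]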
| 5,349,878 |
def oda_update_uhf(dFs, dDs, dE):
"""
ODA update:
        lbd = 0.5 * (1 - dE / E_deriv)
"""
if type(dFs) is not list:
raise Exception("arg1 and arg2 are list of alpha/beta matrices.")
E_deriv = np.sum(dFs[0] * dDs[0] + dFs[1] * dDs[1])
lbd = 0.5 * (1. - dE / E_deriv)
if lbd < 0 or lbd > 1:
lbd = 0.9999 if dE < 0 else 1.e-4
return lbd
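# Usage sketch (hedged, toy 1x1 alpha/beta matrices with illustrative values):
#   dFs = [np.array([[0.10]]), np.array([[0.20]])]
#   dDs = [np.array([[0.05]]), np.array([[0.05]])]
#   lbd = oda_update_uhf(dFs, dDs, dE=-0.01)   # ~0.833, a damping factor in (0, 1)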
| 5,349,879 |
def authorize(*roles):
"""Decorator that authorizes (or not) the current user
Raises an exception if the current user does not have at least
one of the listed roles.
"""
def wrapper(func):
"""wraps the protected function"""
def authorize_and_call(*args, **kwargs):
"""checks authorization and calls function if authorized"""
user = context.request.user
if user.is_active:
if user.is_administrator:
return func(*args, **kwargs)
for role in roles:
if role in user.groups:
return func(*args, **kwargs)
raise zoom.exceptions.UnauthorizedException('Unauthorized')
return authorize_and_call
return wrapper
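# Usage sketch (hedged; 'managers' and 'admins' are hypothetical group names):
#   @authorize('managers', 'admins')
#   def delete_record(record_id):
#       ...
# Active administrators always pass; other active users need at least one of the
# listed groups, otherwise an UnauthorizedException is raised.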
| 5,349,880 |
def trunked_greedy_by_size_offset_calculation(usage_recorders,
show_detail=False):
"""
An offset calculation algorithm designed for variable-length inputs.
@ params:
        usage_recorders : tensor usage recorders (name, start_op, end_op, size)
global trunk_size_list : a list of list (name, offset)
@returns:
assigned_offset : a dict indicates the offset for each tensor
assigned_trunk : a dict indicates the trunk for each tensor
"""
global gTrunkList
    # sort tensors by size (ascending, since reverse=False)
usage_recorders = sorted(usage_recorders,
key=lambda tup: tup[3],
reverse=False)
assigned_offset = {}
assigned_trunk = {}
new_allocate_size = 0
time_start = time.time()
for i in range(len(gTrunkList._trunks)):
gTrunkList._trunks[i]._tensor_list = []
for t in usage_recorders:
t_name = t[0]
t_size = t[3]
is_assigned = False
for trunk_id, trunk in enumerate(gTrunkList._trunks):
trunk, offset = try_fit_trunk(t, trunk)
if offset is not None:
assigned_trunk[t_name] = trunk_id
assigned_offset[t_name] = offset
# update gTrunkList
gTrunkList._trunks[trunk_id] = trunk
is_assigned = True
break
# init new trunk, trunk id should be assigned after delete useless trunk
if is_assigned is False:
trunk_size = max(DEFAULT_TRUNK_SIZE,
math.ceil((t_size * K_SCALE + 31) // 32 * 32))
new_allocate_size += trunk_size
trunk = Trunk(trunk_size)
trunk._tensor_list.append((*t, 0)) #offset @ 0
gTrunkList.appendTrunk(trunk)
# TODO
trunk_id = len(gTrunkList._trunks) - 1
assigned_trunk[t_name] = trunk_id
assigned_offset[t_name] = 0
time_end = time.time()
core_cost = time_end - time_start
used_consumption = 0
total_consumption = 0
delete_trunk_list = []
# find trunk not used -> delete_trunk_list
for trunk_id, trunk in enumerate(gTrunkList._trunks):
max_end_offset = 0
for elem in trunk._tensor_list:
max_end_offset = max(elem[4] + elem[3],
max_end_offset) # offset + size
# print("trunk id", trunk_id, " usage ",
# max_end_offset / gTrunkList._trunks[trunk_id]._size)
used_consumption += max_end_offset
if max_end_offset == 0:
delete_trunk_list.insert(0, trunk_id)
else:
total_consumption += gTrunkList._trunks[trunk_id]._size
# delete
for id in delete_trunk_list:
gTrunkList._trunks.pop(id)
# adjust trunk ids
for trunk_id, trunk in enumerate(gTrunkList._trunks):
for tensor in trunk._tensor_list:
tensor_name = tensor[0]
assigned_trunk[tensor_name] = trunk_id
if show_detail:
print("=====allocation plan====")
print("trunk_id \t size")
for i, t in enumerate(gTrunkList._trunks):
print(i, t._size)
print("tensor name \t offset")
for t in assigned_offset.items():
t_name = t[0]
print("{", t_name, assigned_trunk[t_name], assigned_offset[t_name],
"},")
# print("{\"" + t_name + "\",", assigned_offset[t_name], "},")
print("=====allocation plan====")
used_consumption = used_consumption / 1024 / 1024
total_consumption = total_consumption / 1024 / 1024
new_allocate_size = new_allocate_size / 1024 / 1024
if show_detail:
print(
f"> debug total_consumption {total_consumption} MB used_consumption {used_consumption} MB percent {used_consumption/total_consumption}"
)
return assigned_offset, assigned_trunk, gTrunkList.getInfo(), (
total_consumption, new_allocate_size)
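# Usage sketch (hedged): each recorder is a (name, start_op, end_op, size_in_bytes) tuple,
# and gTrunkList must already be initialized by the surrounding module.
#   recorders = [("tensor_a", 0, 3, 1024), ("tensor_b", 1, 5, 4096), ("tensor_c", 4, 6, 512)]
#   offsets, trunks, trunk_info, (total_mb, newly_allocated_mb) = \
#       trunked_greedy_by_size_offset_calculation(recorders, show_detail=True)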
| 5,349,881 |
def main():
"""
A program to strip a string of punctuation and reverse it
:return: None
"""
in_string = "Enter a string's text here, to test the string!"
converted_string = clean_string_to_list(in_string)
reversed_list = reverse(converted_string)
print(in_string)
print(" ".join(reversed_list))
# reverse_list_test_suite() # Uncomment this line to test the fruitful functions.
| 5,349,882 |
def decrypt(secret, ciphertext):
"""Given the first 16 bytes of splunk.secret, decrypt a Splunk password"""
plaintext = None
if ciphertext.startswith("$1$"):
ciphertext = base64.b64decode(ciphertext[3:])
key = secret[:16]
algorithm = algorithms.ARC4(key)
cipher = Cipher(algorithm, mode=None, backend=default_backend())
decryptor = cipher.decryptor()
plaintext = decryptor.update(ciphertext)
chars = []
for char1, char2 in zip(plaintext[:-1], itertools.cycle("DEFAULTSA")):
chars.append(six.byte2int([char1]) ^ ord(char2))
plaintext = "".join([six.unichr(c) for c in chars])
elif ciphertext.startswith("$7$"):
ciphertext = base64.b64decode(ciphertext[3:])
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=b"disk-encryption",
iterations=1,
backend=default_backend()
)
key = kdf.derive(secret)
iv = ciphertext[:16] # pylint: disable=invalid-name
tag = ciphertext[-16:]
ciphertext = ciphertext[16:-16]
algorithm = algorithms.AES(key)
cipher = Cipher(algorithm, mode=modes.GCM(iv, tag), backend=default_backend())
decryptor = cipher.decryptor()
plaintext = decryptor.update(ciphertext).decode()
return plaintext
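# Usage sketch (hedged; the ciphertext below is a placeholder, not real data):
#   with open('/opt/splunk/etc/auth/splunk.secret', 'rb') as f:
#       secret = f.read().strip()
#   password = decrypt(secret, '$7$bm9uY2U=...')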
| 5,349,883 |
def load_skeleton(path):
"""
Load the skeleton from a pickle
"""
# Delayed import so script can be run with both Python 2 and 3
from unet_core.vessel_analysis import VesselTree
v = VesselTree()
v.load_skeleton(path)
return v.skeleton
| 5,349,884 |
def industries_hierarchy() -> pd.DataFrame:
"""Read the Dow Jones Industry hierarchy CSV file.
Reads the Dow Jones Industry hierarchy CSV file and returns
its content as a Pandas DataFrame. The root node has
the fcode `indroot` and an empty parent.
Returns
-------
DataFrame : A Pandas DataFrame with the columns:
* ind_fcode : string
Industry Factiva Code
* name : string
Name of the Industry
* parent : string
Factiva Code of the parent Industry
"""
ret_ind = pd.read_csv(ind_hrchy_path)
ret_ind = ret_ind.replace(np.nan, '', regex=True)
return ret_ind
| 5,349,885 |
def ready():
""" A readiness endpoint, checks on DB health too.
:return: a 200 OK status if this server is up, and the backing DB is ready too; otherwise, a
503 "Temporarily unavailable."
"""
try:
# TODO: Move to a DAO class
client = get_mongo_client()
info = {}
for key in MONGO_HEALTH_KEYS:
info[key] = client.server_info().get(key, "null")
if info.get("ok") == 1:
info["status"] = "UP"
else:
info["status"] = "WARN"
except pymongo.errors.ServerSelectionTimeoutError as ex:
info = {"status": "DOWN", "error": str(ex)}
response = make_response(jsonify({'status': 'UP', "mongo": info}))
if info['status'] != "UP":
response.status_code = 503
return response
| 5,349,886 |
def er_to_file(db_cfg, fetch_sql, tbl_names, head_row, file_path):
"""
    Save the ER (entity-relationship) information of the specified Oracle tables to a doc file.
    :param db_cfg: dict connection configuration for Oracle
    :param fetch_sql: str SQL statement used to query a table's ER structure
    :param tbl_names: list names of the tables whose ER should be exported
    :param head_row: list contents of the table header row
    :param file_path: str path of the output file the ER is written to
:return:
"""
orcl = OrclPool(db_cfg)
doc = ErTransUtils(file_path)
    # If tbl_names is not given (or empty), fetch all table names under this user
tbl_names = tbl_names if tbl_names else ErTransUtils.fetch_tbl_names(orcl)
count = 1
for tbl_name in tbl_names:
tbl_name = tbl_name.upper()
print('[ NO.{}: | {} | processing ... ]'.format(count, tbl_name))
tbl_info = orcl.fetch_all(fetch_sql.format(tbl_name))
tmp_rows = []
for i in tbl_info:
tmp_rows.append(ErTransUtils.deal_line(i))
        # Append this table to the doc object
doc.to_word(tbl_name, head_row, tmp_rows)
print('[ NO.{}: | {} | done. ]'.format(count, tbl_name))
count += 1
print('[ All work done. ]')
| 5,349,887 |
def setup_callback(callback: typing.Awaitable):
"""
    Initialize the default guild, permission, role, and check attributes on the callback.
"""
callback.is_guild = False
""" The guild of the callback. """
callback.has_permissions = []
""" The permissions of the callback. """
callback.has_roles = []
""" The roles of the callback. """
callback.checks = []
""" The checks of the callback. """
callback.check_any = False
""" The check_any of the callback. """
return callback
| 5,349,888 |
def print_err(txt, include_moment=True):
"""Red collor print. Include time optional."""
print '\033[31m {} {} \n'.format(txt, str(datetime.now())
if include_moment else '')
| 5,349,889 |
def prepare_for_training(ds, ds_name, conf, cache):
"""
Cache -> shuffle -> repeat -> augment -> batch -> prefetch
"""
AUTOTUNE = tf.data.experimental.AUTOTUNE
    # Resample dataset. NB: dataset is cached in the resampler
if conf["resample"] and 'train' in ds_name:
ds = oversample(ds, ds_name, conf)
# Cache to SSD
elif cache:
cache_string = "{}/{}_{}_{}".format(
conf["cache_dir"], conf["img_shape"][0], conf["ds_info"], ds_name
)
ds = ds.cache(cache_string)
# Shuffle
if conf["shuffle_buffer_size"]>1:
ds = ds.shuffle(
buffer_size=conf["shuffle_buffer_size"],
seed=tf.constant(conf["seed"], tf.int64) if conf["seed"] else None
)
# Repeat forever
ds = ds.repeat()
#Augment
if conf["augment"] and "train" in ds_name:
ds = augment_ds(ds, conf, AUTOTUNE)
# Batch
ds = ds.batch(conf["batch_size"], drop_remainder=False)
# Prefetch - lets the dataset fetch batches in the background while the model is training.
ds = ds.prefetch(buffer_size=AUTOTUNE)
return ds
| 5,349,890 |
def residual3d(inp, is_training, relu_after=True, add_bn=True,
name=None, reuse=None):
""" 3d equivalent to 2d residual layer
Args:
inp (tensor[batch_size, d, h, w, channels]):
is_training (tensor[bool]):
relu_after (bool):
add_bn (bool): add bn before every relu
name (string):
reuse (bool):
"""
    if name is None:
        name = "residual3d"
    out_dim = int(inp.shape[-1])
with tf.variable_scope(name, reuse=reuse):
out1 = tf.layers.conv3d(
inp, filters=out_dim, kernel_size=[3, 3, 3],
strides=[1, 1, 1], padding="same", activation=None,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
bias_initializer=tf.zeros_initializer(), name="layer1",
reuse=reuse)
if add_bn:
out1 = batch_norm(
inp=out1,
is_training=is_training,
name="norm1",
reuse=reuse)
out1 = tf.nn.relu(out1)
out2 = tf.layers.conv3d(
out1, filters=out_dim, kernel_size=[3, 3, 3],
strides=[1, 1, 1], padding="same", activation=None,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
bias_initializer=tf.zeros_initializer(), name="layer2",
reuse=reuse)
if relu_after and add_bn:
out2 = batch_norm(
inp=out2,
is_training=is_training,
name="norm2",
reuse=reuse)
if relu_after:
return tf.nn.relu(inp + out2)
else:
return inp + out2
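# Usage sketch (hedged, TensorFlow 1.x graph-mode API as used above):
#   inp = tf.placeholder(tf.float32, [None, 8, 8, 8, 16])
#   is_training = tf.placeholder(tf.bool, [])
#   out = residual3d(inp, is_training, name="res_block1")   # same shape as inp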
| 5,349,891 |
def check_letters_presence(word_generator, available_letters):
"""
check that the allowed characters actually occur; since the generation is random, there is always a chance that
it didn't occur for the words finite amount of words generated; try N times such that the chance for any
false positive is < 0.001 (0.1%)
average letters generated: ``L = N * (16 + 2) / 2.``
number of possible letters m: ``m = len(available_letters)``
probability of a specific letter to not occur p: ``p = ((m - 1.) / m) ** L``
therefore, minimum N such that m * p < 0.001:
((m - 1.) / m) ** L = 0.001 / m
L * ln ((m - 1.) / m) = ln (0.001 / m)
N = (2. / 18) * ln (0.001 / m) / ln ((m - 1.) / m)
which for m 10 up to 100k is m < N < ~2m
"""
for check in available_letters:
for k in range(2 * len(available_letters)):
#for k in range(1):
if check in word_generator():
break
else:
m = len(available_letters)
prob_false_neg = m * (((m - 1.) / m) ** (18 * m))
raise AssertionError('didn\'t find "%s" in %d words (could be random, but p = %.6f)' % (check, 2 * m, prob_false_neg))
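# Usage sketch (hedged): word_generator takes no arguments and returns one random word per call.
#   import random, string
#   gen = lambda: ''.join(random.choice(string.ascii_lowercase)
#                         for _ in range(random.randint(16, 18)))
#   check_letters_presence(gen, string.ascii_lowercase)   # raises AssertionError on a miss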
| 5,349,892 |
def marks(family, glyph):
"""
:param family:
:param glyph:
:return: True when glyph has at least one anchor
"""
has_mark_anchor = False
for anchor in glyph.anchors:
if anchor.name:
if anchor.name.startswith("_"):
has_mark_anchor = True
break
return has_mark_anchor
| 5,349,893 |
def generate_bom(pcb_modules, config, extra_data):
# type: (list, Config, dict) -> dict
"""
Generate BOM from pcb layout.
:param pcb_modules: list of modules on the pcb
:param config: Config object
:param extra_data: Extra fields data
:return: dict of BOM tables (qty, value, footprint, refs) and dnp components
"""
def convert(text):
return int(text) if text.isdigit() else text.lower()
def alphanum_key(key):
return [convert(c)
for c in re.split('([0-9]+)', key)]
def natural_sort(l):
"""
Natural sort for strings containing numbers
"""
return sorted(l, key=lambda r: (alphanum_key(r[0]), r[1]))
# build grouped part list
warning_shown = False
skipped_components = []
part_groups = {}
for i, m in enumerate(pcb_modules):
if skip_component(m, config, extra_data):
skipped_components.append(i)
continue
# group part refs by value and footprint
norm_value = units.componentValue(m.val)
extras = []
if config.extra_fields:
if m.ref in extra_data:
extras = [extra_data[m.ref].get(f, '')
for f in config.extra_fields]
else:
# Some components are on pcb but not in schematic data.
# Show a warning about possibly outdated netlist/xml file.
log.warn(
'Component %s is missing from schematic data.' % m.ref)
warning_shown = True
extras = [''] * len(config.extra_fields)
group_key = (norm_value, tuple(extras), m.footprint, m.attr)
valrefs = part_groups.setdefault(group_key, [m.val, []])
valrefs[1].append((m.ref, i))
if warning_shown:
log.warn('Netlist/xml file is likely out of date.')
# build bom table, sort refs
bom_table = []
for (norm_value, extras, footprint, attr), valrefs in part_groups.items():
bom_row = (
len(valrefs[1]), valrefs[0], footprint,
natural_sort(valrefs[1]), extras)
bom_table.append(bom_row)
# sort table by reference prefix, footprint and quantity
def sort_func(row):
qty, _, fp, rf, e = row
prefix = re.findall('^[A-Z]*', rf[0][0])[0]
if prefix in config.component_sort_order:
ref_ord = config.component_sort_order.index(prefix)
else:
ref_ord = config.component_sort_order.index('~')
return ref_ord, e, fp, -qty, alphanum_key(rf[0][0])
if '~' not in config.component_sort_order:
config.component_sort_order.append('~')
bom_table = sorted(bom_table, key=sort_func)
result = {
'both': bom_table,
'skipped': skipped_components
}
for layer in ['F', 'B']:
filtered_table = []
for row in bom_table:
filtered_refs = [ref for ref in row[3]
if pcb_modules[ref[1]].layer == layer]
if filtered_refs:
filtered_table.append((len(filtered_refs), row[1],
row[2], filtered_refs, row[4]))
result[layer] = sorted(filtered_table, key=sort_func)
return result
| 5,349,894 |
def make_wrapper_script(cmd):
"""Create a wrapper that runs the specified command locally and pauses."""
if os.name == 'nt':
template, ext = '@{exe}\n@pause', '.bat'
else:
# https://stackoverflow.com/questions/24016046/
# Needs testing!
template = '{exe}\necho "Press any key to continue . . ."\nread -rsn1'
ext = ''
try:
pythonroot = os.environ['VIRTUAL_ENV']
except KeyError:
print('WARNING: No active virtualenv; using main Python installation.')
pythonroot, _ = os.path.split(sys.executable)
exe = os.path.join(pythonroot, 'Scripts', cmd)
with open(f'{cmd}{ext}', 'w') as f:
f.write(template.format(exe=exe))
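# Usage sketch (hedged): writes a 'pytest.bat' (Windows) or 'pytest' (POSIX) wrapper in the
# current directory that runs the virtualenv's copy of the command and then pauses.
#   make_wrapper_script('pytest.exe' if os.name == 'nt' else 'pytest')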
| 5,349,895 |
def _GetSharedLibraryInHost(soname, sosize, dirs):
"""Find a shared library by name in a list of directories.
Args:
soname: library name (e.g. libfoo.so)
sosize: library file size to match.
dirs: list of directories to look for the corresponding file.
Returns:
host library path if found, or None
"""
for d in dirs:
host_so_file = os.path.join(d, os.path.basename(soname))
if not os.path.isfile(host_so_file):
continue
if os.path.getsize(host_so_file) != sosize:
continue
        logging.debug("%s matches the one in the APK" % host_so_file)
return host_so_file
| 5,349,896 |
def check_n_jobs(n_jobs: int) -> int:
"""Check `n_jobs` parameter according to the scikit-learn convention.
Parameters
----------
n_jobs : int, positive or -1
The number of jobs for parallelization.
Returns
-------
n_jobs : int
Checked number of jobs.
"""
# scikit-learn convention
# https://scikit-learn.org/stable/glossary.html#term-n-jobs
if n_jobs is None:
return 1
elif not is_int(n_jobs):
raise ValueError(f"`n_jobs` must be None or an integer, but found: {n_jobs}")
    elif n_jobs < 0:
        # scikit-learn convention: n_jobs=-1 uses all CPUs, -2 all but one, etc.
        return os.cpu_count() + 1 + n_jobs
else:
return n_jobs
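# Usage sketch: check_n_jobs(None) -> 1, check_n_jobs(4) -> 4, and with the scikit-learn
# convention check_n_jobs(-1) -> os.cpu_count(), check_n_jobs(-2) -> os.cpu_count() - 1.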
| 5,349,897 |
def test_skip_banner(mock_organizer_init):
"""Test the Organizer.skip_banner() function
Arguments:
mock_organizer_init: patched to not perform constructor
"""
# Create an Organizer: skip_banner and set_metasploit_globals are patched
organizer = Organizer(planner=None, mode="system", minimum_score=0.0)
# Assert the mocked Organizer.__init__ () was called.
mock_organizer_init.assert_called()
# Mock the executor
mock_executor = mock.MagicMock()
organizer._executor = mock_executor
# The read() method always returns static data
mock_executor.read.return_value = {
"prompt": "msf5> ",
"busy": False,
"data": "",
}
# Call skip banner
organizer._skip_banner()
# Ensure that the executor read was called
mock_executor.read.assert_called()
| 5,349,898 |
def write_itk_image(image, path):
"""Write an itk image to a path.
Parameters
----------
image : itk image or np.ndarray
Image to be written.
path : str
Path where the image should be written to.
"""
if isinstance(image, np.ndarray):
image = make_itk_image(image)
writer = itk.ImageFileWriter()
writer.SetFileName(path)
if os.path.splitext(path)[1] == '.nii':
        import warnings
        warnings.warn('You are converting nii, '
                      'be careful with type conversions')
writer.Execute(image)
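# Usage sketch (hedged; assumes make_itk_image converts the numpy array as described above):
#   vol = np.zeros((32, 64, 64), dtype=np.float32)
#   write_itk_image(vol, '/tmp/volume.nii.gz')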
| 5,349,899 |