content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M)
---|---|
def elastic(X, kernel, padding, alpha=34.0):
# type: (Tensor, Tensor, int, float) -> Tensor
"""
X: [(N,) C, H, W]
"""
H, W = X.shape[-2:]
dx = torch.rand(X.shape[-2:], device=kernel.device) * 2 - 1
dy = torch.rand(X.shape[-2:], device=kernel.device) * 2 - 1
xgrid = torch.arange(W, device=dx.device).repeat(H, 1)
ygrid = torch.arange(H, device=dy.device).repeat(W, 1).T
dx = alpha * F.conv2d(unsqueeze_as(dx, X, 0), kernel, bias=None, padding=padding)
dy = alpha * F.conv2d(unsqueeze_as(dy, X, 0), kernel, bias=None, padding=padding)
H /= 2
W /= 2
dx = (dx + xgrid - W) / W
dy = (dy + ygrid - H) / H
grid = torch.stack((dx.squeeze(1), dy.squeeze(1)), dim=-1)
return F.grid_sample(X, grid, padding_mode="reflection", align_corners=False)
| 5,349,600 |
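A minimal usage sketch for the elastic() helper above (not from the original source): it assumes unsqueeze_as and torch.nn.functional are importable in the same module, that the smoothing kernel is a single-channel Gaussian of shape (1, 1, k, k) used with padding=k//2, and that a batch of one keeps the sampling grid's batch dimension compatible with grid_sample. Values are illustrative only.
import torch
k, sigma = 7, 3.0
coords = torch.arange(k, dtype=torch.float32) - k // 2
g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))
kernel = g[:, None] * g[None, :]
kernel = (kernel / kernel.sum()).view(1, 1, k, k)  # (out_ch, in_ch, k, k) layout for F.conv2d
images = torch.rand(1, 3, 64, 64)                  # (N, C, H, W)
warped = elastic(images, kernel, padding=k // 2, alpha=34.0)  # same shape as images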
def fixture_make_bucket(request):
"""
Return a factory function that can be used to make a bucket for testing.
:param request: The Pytest request object that contains configuration data.
:return: The factory function to make a test bucket.
"""
def _make_bucket(s3_stub, wrapper, bucket_name, region_name=None):
"""
Make a bucket that can be used for testing. When stubbing is used, a stubbed
bucket is created. When AWS services are used, the bucket is deleted after
the test completes.
:param s3_stub: The S3Stubber object, configured for stubbing or AWS.
:param wrapper: The bucket wrapper object, used to create the bucket.
:param bucket_name: The unique name for the bucket.
:param region_name: The Region in which to create the bucket.
:return: The test bucket.
"""
if not region_name:
region_name = s3_stub.region_name
s3_stub.stub_create_bucket(bucket_name, region_name)
# Bucket.wait_until_exists calls head_bucket on a timer until it returns 200.
s3_stub.stub_head_bucket(bucket_name)
bucket = wrapper.create_bucket(bucket_name, region_name)
def fin():
if not s3_stub.use_stubs and wrapper.bucket_exists(bucket_name):
bucket.delete()
request.addfinalizer(fin)
return bucket
return _make_bucket
| 5,349,601 |
def prop_nodes(graph,
nodes_generator,
message_func='default',
reduce_func='default',
apply_node_func='default'):
"""Functional method for :func:`dgl.DGLGraph.prop_nodes`.
Parameters
----------
nodes_generator : generator
The generator of node frontiers.
message_func : callable, optional
The message function.
reduce_func : callable, optional
The reduce function.
apply_node_func : callable, optional
The update function.
See Also
--------
dgl.DGLGraph.prop_nodes
"""
graph.prop_nodes(nodes_generator, message_func, reduce_func, apply_node_func)
| 5,349,602 |
def main():
"""Start main loop."""
logger.info("Starting main loop")
starttime = time.time()
i = 1
dispatch_task("trigger_on_startup")
dispatch_task("create_default_settings")
while IS_RUNNING:
# execute_next_task()
interval_trigger.tick()
# Sleep for exactly one second, taking drift and execution time into account
time.sleep(1 - ((time.time() - starttime) % 1))
i += 1
logger.info("Exiting main loop")
| 5,349,603 |
def get_member_struc(*args):
"""
get_member_struc(fullname) -> struc_t
Get containing structure of member by its full name "struct.field".
@param fullname (C++: const char *)
"""
return _ida_struct.get_member_struc(*args)
| 5,349,604 |
def pop():
"""Check the first task in redis(which is the task with the smallest score)
if the score(timestamp) is smaller or equal to current timestamp, the task
should be take out and done.
:return: True if task is take out, and False if it is not the time.
"""
task = connection.zrange(QUEUE_KEY, 0, 0)
if not task:
return False, 'No emails now!'
msg_id = task[0]
timestamp = connection.zscore(QUEUE_KEY, msg_id)
now = datetime.datetime.now().timestamp()
if timestamp < now or abs(timestamp - now) <= 1e-6:
message = connection.get(msg_id)
pipeline = connection.pipeline()
pipeline.zrem(QUEUE_KEY, msg_id)
pipeline.delete(msg_id)
pipeline.execute()
return True, message
return False, "It's too early now!"
| 5,349,605 |
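For context, a hypothetical enqueue counterpart to pop() above, shown only to illustrate the data layout it expects: the message body is stored under msg_id and the send time is the member's score in the sorted set. It reuses the same module-level connection and QUEUE_KEY and assumes a redis-py 3.x style zadd(name, mapping) signature.
import uuid
def push(message, send_at_timestamp):
    # Schedule `message` to be popped once `send_at_timestamp` has passed.
    msg_id = str(uuid.uuid4())
    pipeline = connection.pipeline()
    pipeline.set(msg_id, message)                          # body keyed by msg_id
    pipeline.zadd(QUEUE_KEY, {msg_id: send_at_timestamp})  # score = due timestamp
    pipeline.execute()
    return msg_id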
def xml_to_dictform(node):
""" Converts a minidom node to "dict" form. See parse_xml_to_dictform. """
if node.nodeType != node.ELEMENT_NODE:
raise Exception("Expected element node")
result = (node.nodeName, {}, []) # name, attrs, items
if node.attributes is not None:
attrs = node.attributes # hard to imagine a more contrived way of accessing attributes...
for key, value in ((attrs.item(i).name, attrs.item(i).value) for i in range(attrs.length)):
result[1][key] = value
for child in node.childNodes:
if child.nodeType == child.ELEMENT_NODE:
result[2].append(xml_to_dictform(child))
return result
| 5,349,606 |
def test_22(): # check pad_idseqs
""" OhOh this fails
seqs_token = [[
"Der", "Helmut", "Kohl", "speist", "Schweinshaxe", "mit",
"Kohl", "in", "Berlin", "."]]
"""
targets = [[
"[PAD]", "[UNK]", "PER", "PER", "[UNK]",
"[UNK]", "[UNK]", "[UNK]", "[UNK]", "LOC", "[UNK]"]]
seqs_token = [[
"Der", "Helmut", "Kohl", "speist", "Schweinshaxe", "mit",
"Blumenkohl", "in", "Berlin", "."]]
seqs_ner, SCHEME = nt.ner.factory("stanza-de")(
seqs_token, maxlen=11, padding='pre', truncating='pre')
# convert targets to IDs
target_ids = [[SCHEME.index(ner) for ner in seq] for seq in targets]
assert seqs_ner == target_ids
| 5,349,607 |
def TranslateCoord(data, res, mode):
"""
Translates position of point to unified coordinate system
Max value in each direction is 1.0 and the min is 0.0
:param data: (tuple(float, float)) Position to be translated
:param res: (tuple(float, float)) Target resolution
:param mode: (TranslationMode) Work mode. Available modes are: Encode, Decode.
:returns: (tuple(int, int), tuple(float, float))
"""
x, y = data
resX, resY = res
#encode
if mode == TranslationMode.Encode:
uX = x / resX
uY = y / resY
return (uX, uY)
#decode
elif mode == TranslationMode.Decode:
x = Clamp(x, 0, 1)
y = Clamp(y, 0, 1)
tX = x * resX
tY = y * resY
return (int(tX), int(tY))
| 5,349,608 |
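A brief usage sketch for TranslateCoord() above; TranslationMode and Clamp are helpers assumed to exist elsewhere in the same module, and the resolution values are illustrative.
res = (1920, 1080)
unified = TranslateCoord((960, 540), res, TranslationMode.Encode)  # -> (0.5, 0.5)
pixels = TranslateCoord(unified, res, TranslationMode.Decode)      # -> (960, 540)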
def _get_mutator_plugins_bucket_url():
"""Returns the url of the mutator plugin's cloud storage bucket."""
mutator_plugins_bucket = environment.get_value('MUTATOR_PLUGINS_BUCKET')
if not mutator_plugins_bucket:
logs.log_warn('MUTATOR_PLUGINS_BUCKET is not set in project config, '
'skipping custom mutator strategy.')
return None
return 'gs://%s' % mutator_plugins_bucket
| 5,349,609 |
def objectify_json_lines(path_buf_stream,
from_string=False,
fatal_errors=True,
encoding=_DEFAULT_ENCODING,
ensure_ascii=False,
encode_html_chars=False,
avoid_memory_pressure=True):
"""Generator return an object for each line of JSON in a file, stream or string
in: path_buf_stream:
(str) A string file path containing JSON
(stream) An open readable stream from a file containing JSON
(stream) A string of JSON content (also requires `from_string=True`)
This function intentionally operates as a generator, to avoid using huge
amounts of memory when loading a very large file; after all, this is the
primary benefit of the JSON lines format. It is meant to be called many
times in succession, sometimes up to millions of times, so it is important
that it is relatively quick/simple.
There are three ways to invoke this function
Each of them returns a native Python object
for obj in objectify_json_lines('file.json'):
print(obj.items())
json_fd = open('file.json', 'r', encoding='utf-8')
for obj in objectify_json_lines(json_fd):
print(obj.items())
json_str = '{"A": "B"}\n{"C": "D"}'
for obj in objectify_json_lines(json_str, from_string=True):
print(obj.items())
"""
if from_string is True:
# If caller specifies path_buf_stream is a string, turn it into
# a stream to avoid an extra set of logic below
assert isinstance(path_buf_stream, str)
path_buf_stream = StringIO(path_buf_stream)
# If path_buf_stream has a read method, it is effectively stream
reader = getattr(path_buf_stream, 'read', None)
with (path_buf_stream if reader else open(path_buf_stream, 'r', encoding=encoding)) as infd:
# If the user doesn't care about memory pressure, don't bother with a generator, just
# give them a regular list of objects from the JSON lines file. I guess most of the time
# nobody cares, and having to work with a generator in Python3 can be annoying for the caller
if avoid_memory_pressure is False:
if fatal_errors is True:
try:
return [loads(line) for line in infd.read().splitlines() if line]
except JSONDecodeError:
return None
obj_list = list()
for line in infd.read().splitlines():
try:
obj = loads(line)
obj_list.append(obj)
except JSONDecodeError:
# Silently ignore bad lines ..
continue
return obj_list
for line in infd.readlines():
line = line.strip()
# Exception handlers are expensive to set up and even more expensive
# when they fire. If errors should be fatal, don't bother setting one
# up at all
if fatal_errors is True:
yield loads(line)
else:
# The more expensive path, preparing to catch an exception and
# continue gracefully if fatal_errors is False
try:
yield loads(line)
except Exception as err:
error('bad JSON-line line: {}'.format(repr(err)))
continue
| 5,349,610 |
def gll_int(f, a, b):
"""Integrate f from a to b using its values at gll points."""
n = f.size
x, w = gll(n)
return 0.5*(b-a)*np.sum(f*w)
| 5,349,611 |
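A small usage sketch for gll_int() above, assuming the gll(n) helper it relies on returns the n Gauss-Lobatto-Legendre nodes and weights on the reference interval [-1, 1]: integrating f(x) = x**2 over [0, 2] should give 8/3.
import numpy as np
a, b, n = 0.0, 2.0, 6
x_ref, _ = gll(n)                        # nodes on [-1, 1]
x = 0.5 * (b - a) * (x_ref + 1.0) + a    # map nodes onto [a, b]
f = x ** 2                               # sample the integrand at the mapped nodes
approx = gll_int(f, a, b)                # approximately 2.6667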
def error404(request, exception):
"""View for 404 page."""
with open(os.path.join(BASE_DIR, 'CollaboDev/404_responses.txt')) as f:
responses = f.read().split('\n')
message = random.choice(responses)
context = {
'message': message,
'error': exception
}
return HttpResponseNotFound(render(request, '404.html', context))
| 5,349,612 |
def save_xp(
path: Union[str, Path],
consoles: Iterable[Console],
compress_level: int = 9,
) -> None:
"""Save tcod Consoles to a REXPaint file.
`path` is where to save the file.
`consoles` are the :any:`tcod.console.Console` objects to be saved.
`compress_level` is the zlib compression level to be used.
Color alpha will be lost during saving.
Consoles will be saved as-is as much as possible. You may need to convert
characters from Unicode to CP437 if you want to load the file in REXPaint.
.. versionadded:: 12.4
Example::
import numpy as np
import tcod
console = tcod.Console(80, 24) # Example console.
# Convert from Unicode to REXPaint's encoding.
# Required to load this console correctly in the REXPaint tool.
# Convert tcod's Code Page 437 character mapping into a NumPy array.
CP437_TO_UNICODE = np.asarray(tcod.tileset.CHARMAP_CP437)
# Initialize a Unicode-to-CP437 array.
# 0x20000 is the current full range of Unicode.
# fill_value=ord("?") means that "?" will be the result of any unknown codepoint.
UNICODE_TO_CP437 = np.full(0x20000, fill_value=ord("?"))
# Assign the CP437 mappings.
UNICODE_TO_CP437[CP437_TO_UNICODE] = np.arange(len(CP437_TO_UNICODE))
# Convert from Unicode to CP437 in-place.
console.ch[:] = UNICODE_TO_CP437[console.ch]
# Convert console alpha into REXPaint's alpha key color.
KEY_COLOR = (255, 0, 255)
is_transparent = console.rgba["bg"][:, :, 3] == 0
console.rgb["bg"][is_transparent] = KEY_COLOR
tcod.console.save_xp("example.xp", [console])
"""
consoles_c = ffi.new("TCOD_Console*[]", [c.console_c for c in consoles])
_check(
tcod.lib.TCOD_save_xp(
len(consoles_c),
consoles_c,
str(path).encode("utf-8"),
compress_level,
)
)
| 5,349,613 |
def _get_remote_user():
"""
Get the remote username.
Returns
-------
str: the username.
"""
return input('\nRemote User Name: ')
| 5,349,614 |
def parse_args():
"""Parse the args."""
parser = argparse.ArgumentParser(
description='example code to play with InfluxDB')
parser.add_argument('--host', type=str, required=False,
default='localhost',
help='hostname influxdb http API')
parser.add_argument('--port', type=int, required=False, default=8086,
help='port influxdb http API')
parser.add_argument('--nb_day', type=int, required=False, default=15,
help='number of days to generate time series data')
return parser.parse_args()
| 5,349,615 |
def main(args):
"""
Process the file created by the IDE into a log of builds,
which it will put in the folder 'data'.
By default it will look for the idea.log file in the default places for Android Studio versions 4.2 and 4.1.
Pass an argument to look in a different place instead
"""
if args:
if "--help" in args:
print("This script takes one argument: the log file to parse.")
return
path = args[0]
else:
path = guess_path_to_idea_log()
if not path:
print(
"unable to locate 'idea.log'! You can find it in your JetBrains IDE on the 'help' menu - 'Show log in Finder'. You should give the full path to idea.log as an argument to this script.")
return
data_folder = Path.cwd() / "data"
if not data_folder.exists():
os.mkdir(data_folder)
output = data_folder / output_filename()
print(f"Will parse log file {path} and write builds to {output}")
parse_idea_log(path, output)
| 5,349,616 |
def generate_warm_starts(vehicle,
world: TrafficWorld,
x0: np.array,
other_veh_info,
params: dict,
u_mpc_previous=None,
u_ibr_previous=None):
""" Generate a dictionary of warm starts for the solver.
Returns: Dictionary with warm_start_name: (state, control, desired_state)
"""
other_x0 = [veh_info.x0 for veh_info in other_veh_info]
u_warm_profiles, ux_warm_profiles = generate_warm_u(params["N"], vehicle, x0)
if len(other_x0) > 0:
warm_velocity = np.median([x[4] for x in other_x0])
else:
warm_velocity = x0[4]
_, x_ux_warm_profiles = generate_warm_x(vehicle, world, x0, warm_velocity)
ux_warm_profiles.update(x_ux_warm_profiles)
if (u_mpc_previous is not None): # TODO: Try out the controls that were previous executed
u_warm_profiles["previous_mpc"] = np.concatenate(
(
u_mpc_previous[:, params["number_ctrl_pts_executed"]:],
np.tile(u_mpc_previous[:, -1:], (1, params["number_ctrl_pts_executed"])),
),
axis=1,
)
x_warm, x_des_warm = vehicle.forward_simulate_all(x0.reshape(6, 1), u_warm_profiles["previous_mpc"])
ux_warm_profiles["previous_mpc"] = [
u_warm_profiles["previous_mpc"],
x_warm,
x_des_warm,
]
if (u_ibr_previous is not None): # Try out the controller from the previous round of IBR
u_warm_profiles["previous_ibr"] = u_ibr_previous
x_warm, x_des_warm = vehicle.forward_simulate_all(x0.reshape(6, 1), u_warm_profiles["previous_ibr"])
ux_warm_profiles["previous_ibr"] = [
u_warm_profiles["previous_ibr"],
x_warm,
x_des_warm,
]
return ux_warm_profiles
| 5,349,617 |
def test_setOwner_no_short_name_and_long_name_is_short(caplog):
"""Test setOwner"""
anode = Node('foo', 'bar', noProto=True)
with caplog.at_level(logging.DEBUG):
anode.setOwner(long_name ='Tnt')
assert re.search(r'p.set_owner.long_name:Tnt:', caplog.text, re.MULTILINE)
assert re.search(r'p.set_owner.short_name:Tnt:', caplog.text, re.MULTILINE)
assert re.search(r'p.set_owner.is_licensed:False', caplog.text, re.MULTILINE)
assert re.search(r'p.set_owner.team:0', caplog.text, re.MULTILINE)
| 5,349,618 |
def setup_logging(verbosity: int) -> None:
"""
Process -v/--verbose, --logfile options
"""
# Log level
logging.root.addHandler(bufferHandler)
logging.root.setLevel(logging.INFO if verbosity < 1 else logging.DEBUG)
_set_loggers(verbosity)
logger.info('Verbosity set to %s', verbosity)
| 5,349,619 |
def nf_regnet_b4(pretrained=False, **kwargs):
""" Normalization-Free RegNet-B4
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_regnet_b4', pretrained=pretrained, **kwargs)
| 5,349,620 |
def test_process_mutation_workflow():
"""Integration test to make sure workflow runs"""
validfiles = pd.DataFrame(
{
"fileType": ['vcf', 'maf'],
"path": ["path/to/vcf", "path/to/maf"]
}
)
database_mapping = pd.DataFrame(
{
"Database": ['vcf2maf', 'centerMaf'],
"Id": ['syn123', 'syn234']
}
)
genie_annotation_pkg = "annotation/pkg/path"
syn_get_calls = [call("syn22053204", ifcollision="overwrite.local",
downloadLocation=genie_annotation_pkg),
call("syn22084320", ifcollision="overwrite.local",
downloadLocation=genie_annotation_pkg)]
center = "SAGE"
workdir = "working/dir/path"
maf_path = "path/to/maf"
with patch.object(SYN, "get") as patch_synget,\
patch.object(process_mutation,
"annotate_mutation",
return_value=maf_path) as patch_annotation,\
patch.object(process_mutation,
"split_and_store_maf") as patch_split:
maf = process_mutation.process_mutation_workflow(
SYN, center, validfiles,
genie_annotation_pkg, database_mapping, workdir
)
patch_synget.assert_has_calls(syn_get_calls)
patch_annotation.assert_called_once_with(
center=center,
mutation_files=["path/to/vcf", "path/to/maf"],
genie_annotation_pkg=genie_annotation_pkg,
workdir=workdir
)
patch_split.assert_called_once_with(
syn=SYN,
center=center,
maf_tableid='syn123',
annotated_maf_path=maf_path,
flatfiles_synid='syn234',
workdir=workdir
)
assert maf == maf_path
| 5,349,621 |
async def amireallyalive(alive):
""" For .alive command, check if the bot is running. """
await alive.edit("**Apun Zinda He Sarr. \nJarvis is in your service ^.^** \n`🇮🇳BOT Status : ` **☣Hot**\n\n"
f"`My peru owner`: {DEFAULTUSER}\n\n"
"`Telethon version:` **6.0.9**\n`Python:` **3.7.4**\n"
"`Database Status:` **😀ALL OK**\n\n`Always with you, my master!\n`"
"**Bot Creator:** [♊KNOWLEDGE MASTER♋](t.me/knowledge_masterr)\n"
" [Want Your Own UserBot](t.me/knowledge_masterr)")
| 5,349,622 |
def load_data_binary_labels(path: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Loads data from CSV file and returns features (X) and
only binary labels meaning (any kind of) toxic or not"""
df = pd.read_csv(path)
X = df.comment_text.to_frame()
y = df[config.LIST_CLASSES].max(axis=1).to_frame(name="toxic")
return X, y
| 5,349,623 |
def parse(html_url):
"""Parse."""
html = www.read(html_url)
soup = BeautifulSoup(html, 'html.parser')
data = {'paragraphs': []}
content = soup.find('div', class_=CLASS_NAME_CONTENT)
for child in content.find_all():
text = _clean(child.text)
if child.name == 'h3':
data['title'] = text
elif child.name == 'h4':
data['subtitle'] = text
elif child.name == 'p':
data['paragraphs'].append(text)
return data
| 5,349,624 |
def get_tp_algorithm(name: str) -> GenericTopologyProgramming:
""" returns the requested topology programming instance """
name = name.lower()
if name == "uniform_tp":
return UniformTP()
if name == "joint_tp":
return JointTP()
if name == "ssp_oblivious_tp":
return SSPObliviousTP()
err_msg = f"wan tp name not found: {name}"
logging.error(err_msg)
raise Exception(err_msg)
| 5,349,625 |
def get_first_job_queue_with_capacity():
"""Returns the first job queue that has capacity for more jobs.
If there are no job queues with capacity, returns None.
"""
job_queue_depths = get_job_queue_depths()["all_jobs"]
for job_queue in settings.AWS_BATCH_QUEUE_WORKERS_NAMES:
if job_queue_depths[job_queue] <= settings.MAX_JOBS_PER_NODE:
return job_queue
return None
| 5,349,626 |
def rotations(images, n_rot, ccw_limit, cw_limit):
"""
Rotates every image in the list "images" n_rot times, between 0 and cw_limit
(clockwise limit) n_rot times and between 0 and ccw_limit (counterclockwise
limit) n_rot times more. The limits are there to make sense of the data
augmentation. E.g: Rotating an mnist digit 180 degrees turns a 6 into a 9,
which makes no sense at all.
cw_limit and ccw_limit are in degrees!
Returns a list with all the rotated samples. Size will be 2*n_rot+1, because
we also want the original sample to be included
Example: images=[img],n_rot=3,ccw_limit=90,cw_limit=90
Returns: [img1: original,
img2: 30 degrees rot ccw,
img3: 30 degrees rot cw,
img4: 60 degrees rot ccw,
img5: 60 degrees rot cw,
img6: 90 degrees rot ccw,
img7: 90 degrees rot cw]
"""
# if we only have 1 image, transform into a list to work with same script
if type(images) is not list:
images = [images]
# calculate the initial angle and the step
cw_step_angle = float(cw_limit) / float(n_rot)
ccw_step_angle = float(ccw_limit) / float(n_rot)
# container for rotated images
rotated_images = []
# get every image and apply the number of desired rotations
for img in images:
# get rows and cols to rotate
rows, cols, depth = img.shape
# append the original one too
rotated_images.append(img)
# rotate the amount of times we want them rotated
for i in range(1, n_rot + 1):
# create rotation matrix with center in the center of the image,
# scale 1, and the desired angle (we travel counter clockwise first, and
# then clockwise
M_ccw = cv2.getRotationMatrix2D(
(cols / 2, rows / 2), i * ccw_step_angle, 1)
# rotate using the matrix (using bicubic interpolation)
rot_img = cv2.warpAffine(img, M_ccw, (cols, rows), flags=cv2.INTER_CUBIC)
# append to rotated images container
rotated_images.append(rot_img)
M_cw = cv2.getRotationMatrix2D(
(cols / 2, rows / 2), -i * cw_step_angle, 1)
# rotate using the matrix (using bicubic interpolation)
rot_img = cv2.warpAffine(img, M_cw, (cols, rows), flags=cv2.INTER_CUBIC)
# append to rotated images container
rotated_images.append(rot_img)
return rotated_images
| 5,349,627 |
def num_range(num):
"""
Use in template language to loop through numberic range
"""
return range(num)
| 5,349,628 |
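num_range() above reads like a Django custom template filter; a minimal registration sketch under that assumption (the module placement in an app's templatetags package and the filter name are illustrative, not from the source):
from django import template
register = template.Library()
register.filter("num_range", num_range)
# In a template:  {% for i in 5|num_range %}{{ i }}{% endfor %}  renders 01234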
def datamodel_flights_column_names():
"""
Get FLIGHTS_CSV_SCHEMA column names (keys)
:return: list
"""
return list(FLIGHTS_CSV_SCHEMA.keys())
| 5,349,629 |
def create_tomography_circuits(circuit, qreg, creg, tomoset):
"""
Add tomography measurement circuits to a QuantumProgram.
The quantum program must contain a circuit 'name', which is treated as a
state preparation circuit for state tomography, or as the circuit being
measured for process tomography. This function then appends the circuit
with a set of measurements specified by the input `tomography_set`,
optionally it also prepends the circuit with state preparation circuits if
they are specified in the `tomography_set`.
For n-qubit tomography with a tomographically complete set of preparations
and measurements this results in $4^n 3^n$ circuits being added to the
quantum program.
Args:
circuit (QuantumCircuit): The circuit to be appended with tomography
state preparation and/or measurements.
qreg (QuantumRegister): the quantum register containing qubits to be
measured.
creg (ClassicalRegister): the classical register containing bits to
store measurement outcomes.
tomoset (tomography_set): the dict of tomography configurations.
Returns:
list: A list of quantum tomography circuits for the input circuit.
Raises:
QISKitError: if circuit is not a valid QuantumCircuit
Example:
For a tomography set specifying state tomography of qubit-0 prepared
by a circuit 'circ' this would return:
```
['circ_meas_X(0)', 'circ_meas_Y(0)', 'circ_meas_Z(0)']
```
For process tomography of the same circuit with preparation in the
SIC-POVM basis it would return:
```
[
'circ_prep_S0(0)_meas_X(0)', 'circ_prep_S0(0)_meas_Y(0)',
'circ_prep_S0(0)_meas_Z(0)', 'circ_prep_S1(0)_meas_X(0)',
'circ_prep_S1(0)_meas_Y(0)', 'circ_prep_S1(0)_meas_Z(0)',
'circ_prep_S2(0)_meas_X(0)', 'circ_prep_S2(0)_meas_Y(0)',
'circ_prep_S2(0)_meas_Z(0)', 'circ_prep_S3(0)_meas_X(0)',
'circ_prep_S3(0)_meas_Y(0)', 'circ_prep_S3(0)_meas_Z(0)'
]
```
"""
if not isinstance(circuit, QuantumCircuit):
raise QISKitError('Input circuit must be a QuantumCircuit object')
dics = tomoset['circuits']
labels = tomography_circuit_names(tomoset, circuit.name)
tomography_circuits = []
for label, conf in zip(labels, dics):
tmp = circuit
# Add prep circuits
if 'prep' in conf:
prep = QuantumCircuit(qreg, creg, name='tmp_prep')
for qubit, op in conf['prep'].items():
tomoset['prep_basis'].prep_gate(prep, qreg[qubit], op)
prep.barrier(qreg[qubit])
tmp = prep + tmp
# Add measurement circuits
meas = QuantumCircuit(qreg, creg, name='tmp_meas')
for qubit, op in conf['meas'].items():
meas.barrier(qreg[qubit])
tomoset['meas_basis'].meas_gate(meas, qreg[qubit], op)
meas.measure(qreg[qubit], creg[qubit])
tmp = tmp + meas
# Add label to the circuit
tmp.name = label
tomography_circuits.append(tmp)
logger.info('>> created tomography circuits for "%s"', circuit.name)
return tomography_circuits
| 5,349,630 |
def _insert(filepath, line_start, lines):
"""Insert the lines to the specified position.
"""
flines = []
with open(filepath) as f:
for i, fline in enumerate(f):
if i == line_start:
# detect the indent of the last not empty line
space = ''
flines_len = len(flines)
if flines_len>0: # last line exists
line_i = -1
while flines[line_i].strip() == '' and -line_i <= flines_len:
line_i -= 1
for c in flines[line_i]:
if c not in (' ', '\t'): break
space += c
if flines[line_i].strip().endswith('{'):
space += ' '
for line in lines:
flines.append(''.join([space, line, '\n']))
flines.append(fline)
# write back to file
with open(filepath, 'w') as f: f.write(''.join(flines))
| 5,349,631 |
def filter_background(bbox, bg_data):
"""
Takes bounding box and background geojson file assumed to be the US states, and outputs a geojson-like dictionary
containing only those features with at least one point within the bounding box, or any state that completely
contains the bounding box.
This tests if a feature contains the bounding box by drawing the box that contains the feature and checking if that
box also contains the bounding box. Because features are odd shapes, this may find that more than one feature
completely contains the bounding box. E.g., if you draw a box around Maryland it will also contain a chunk of West
Virginia. To deal with this, we are allowed to find that multiple states contain the bounding box.
:param bbox: The coordinates of the bounding box as [lon, lat, lon, lat]
:param bg_data: a geojson-like dict describing the background
:return: the features from bg_filename whose borders intersect bbox OR the feature which completely contains bbox
"""
box_lon = [bbox[0], bbox[2]]
box_lat = [bbox[1], bbox[3]]
features = bg_data['features']
in_box = []
for f in features:
starting_len = len(in_box)
# Define points for bounding box around the feature.
feature_max_lat = -90
feature_max_lon = -180
feature_min_lat = 90
feature_min_lon = 180
coordinates = f['geometry']['coordinates']
for group in coordinates:
if len(in_box) > starting_len:
# This feature has already been added
break
# actual points for MultiPolygons are nested one layer deeper than those for polygons
if f['geometry']['type'] == 'MultiPolygon':
geom = group[0]
else:
geom = group
for lon, lat in geom:
# check if any point along the state's borders falls within the bounding box.
if min(box_lon) <= lon <= max(box_lon) and min(box_lat) <= lat <= max(box_lat):
in_box.append(f)
break
# If any point of a feature falls within the bounding box, then the feature cannot contain the box,
# so this only needs to be run if the above if statement is not executed
feature_min_lon = min(feature_min_lon, lon)
feature_min_lat = min(feature_min_lat, lat)
feature_max_lon = max(feature_max_lon, lon)
feature_max_lat = max(feature_max_lat, lat)
# If the box containing a feature also contains the bounding box, keep this feature
# Allow adding more than one because otherwise MD contains boxes in WV, and CA would contain most of NV.
if feature_min_lat < min(box_lat) and feature_max_lat > max(box_lat) and \
feature_min_lon < min(box_lon) and feature_max_lon > max(box_lon):
in_box.append(f)
keepers = {
'type': 'FeatureCollection',
'features': in_box
}
return keepers
| 5,349,632 |
def rename_columns(table, mapper):
""" Renames the table headings to conform with the ketos naming convention.
Args:
table: pandas DataFrame
Annotation table.
mapper: dict
Dictionary mapping the headings of the input table to the
standard ketos headings.
Returns:
: pandas DataFrame
Table with new headings
"""
return table.rename(columns=mapper)
| 5,349,633 |
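A short usage sketch for rename_columns() above; the column names and mapper below are made up and not the actual ketos convention.
import pandas as pd
raw = pd.DataFrame({"fname": ["a.wav"], "t_start": [0.0], "t_end": [1.5]})
mapper = {"fname": "filename", "t_start": "start", "t_end": "end"}
annot = rename_columns(raw, mapper)
# annot.columns -> Index(['filename', 'start', 'end'], dtype='object')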
def randrange(start, stop, step: Optional[Any]) -> int:
"""
The first form returns a random integer from the range [0, *stop*).
The second form returns a random integer from the range [*start*, *stop*).
The third form returns a random integer from the range [*start*, *stop*) in
steps of *step*. For instance, calling ``randrange(1, 10, 2)`` will
return odd numbers between 1 and 9 inclusive.
"""
...
| 5,349,634 |
def get_round(year, match):
"""Get event number by year and (partial) event name
A fuzzy match is performed to find the most likely event for the provided name.
Args:
year (int): Year of the event
match (string): Name of the race or gp (e.g. 'Bahrain')
Returns:
The round number. (2019, 'Bahrain') -> 2
"""
def build_string(d):
r = len('https://en.wikipedia.org/wiki/') # TODO what the hell is this
c, l = d['Circuit'], d['Circuit']['Location'] # noqa: E741 (for now...)
return (f"{d['url'][r:]} {d['raceName']} {c['circuitId']} "
+ f"{c['url'][r:]} {c['circuitName']} {l['locality']} "
+ f"{l['country']}")
races = ergast.fetch_season(year)
to_match = [build_string(block) for block in races]
ratios = np.array([fuzz.partial_ratio(match, ref) for ref in to_match])
return int(races[np.argmax(ratios)]['round'])
| 5,349,635 |
def parse_tuple(s: Union[str, tuple]) -> tuple:
"""Helper for load_detections_csv, to parse string column into column of Tuples."""
if isinstance(s, str):
result = s.replace("(", "[").replace(")", "]")
result = result.replace("'", '"').strip()
result = result.replace(",]", "]")
if result:
# print(result)
return tuple(sorted((json.loads(result))))
else:
return tuple()
else:
return s
| 5,349,636 |
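Illustrative behaviour of parse_tuple() above (the values are made up): string cells are normalised to JSON, parsed, sorted and returned as tuples, while non-string cells pass through unchanged.
parse_tuple("('b', 'a')")   # -> ('a', 'b')
parse_tuple("")             # -> ()
parse_tuple(("a", "b"))     # -> ('a', 'b'), returned as-is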
def clean(tweet):
"""
clean tweet text by removing links, special characters
using simple regex statements
Parameters
----------
tweet : String
Single Twitter message
Returns
-------
tokenized_tweet : List
List of cleaned tokens derived from the input Twitter message
"""
# convert to lower
tweet = tweet.lower()
# get the stop-words available from the nltk.corpus lib
# as the corpus would haver also delete a lot of negations from the tweets
# it is considered to use just a subset
stop_words = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours',
'ourselves', 'you', "you're", "you've", "you'll",
"you'd", 'your', 'yours', 'yourself', 'yourselves',
'he', 'him', 'his', 'himself', 'she', "she's", 'her',
'hers', 'herself', 'it', "it's", 'its', 'itself',
'they', 'them', 'their', 'theirs', 'themselves', 'what',
'which', 'who', 'whom', 'this', 'that', "that'll",
'these', 'those', 'am', 'is', 'are', 'was', 'were',
'be', 'been', 'being', 'have', 'has', 'had', 'having',
'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and',
'if', 'or', 'because', 'as', 'until', 'while', 'of',
'at', 'by', 'for', 'with', 'about', 'against', 'between',
'into', 'through', 'during', 'before', 'after', 'above',
'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on',
'off', 'over', 'under', 'again', 'further', 'then', 'once',
'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any',
'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such',
'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't',
'will', 'just', 'should', "should've", 'now', 'd', 'll',
'm', 'o', 're', 've', 'y', 'ain', 'ma', '.', ',', ';', '!', '?',
'@...', '@', '@…']
# convert to string again as re expects a string-like object (and not a list)
# remove all the stopwords as well as the numbers and words shorter than
# two letters also check the spelling
tmp = ""
tmp_c = [tmp +
item.replace(",","").replace(";","").replace("?","").replace("!","").replace("#","")
for item in tweet.split() if item not in stop_words
and not item.isdigit()]
tmp_c = " ".join(item for item in tmp_c)
# remove other special characters including @, URLs, Usernames and other
# special characters
return ' '.join(re.sub("(@[A-Za-z0-9]+)| M^|(\w+:\/\/\S+)",
" ",
tmp_c).split())
| 5,349,637 |
def predict(x, u):
"""
:param x: Particle state (x,y,theta) [size 3 array]
:param u: Robot inputs (u1,u2) [size 2 array]
:return: Particle's updated state sampled from the motion model
"""
x = x + motionModel(x, u) + np.random.multivariate_normal(np.zeros(3), Q)
return x
| 5,349,638 |
def plot_multiple(datasets, method='scatter', pen=True, labels=None, **kwargs):
"""
Plot a series of 1D datasets as a scatter plot
with optional lines between markers.
Parameters
----------
datasets : a list of ndatasets
method : str among [scatter, pen]
pen : bool, optional, default: True
if method is scatter, this flag tells to draw also the lines
between the marks.
labels : a list of str, optional
labels used for the legend.
**kwargs : other parameters that will be passed to the plot1D function
"""
if not is_sequence(datasets):
# we need a sequence. Else it is a single plot.
return datasets.plot(**kwargs)
if not is_sequence(labels) or len(labels) != len(datasets):
# we need a sequence of labels of same length as datasets
raise ValueError('the list of labels must be of same length '
'as the datasets list')
for dataset in datasets:
if dataset._squeeze_ndim > 1:
raise NotImplementedError('plot multiple is designed to work on '
'1D dataset only. you may achieved '
'several plots with '
'the `clear=False` parameter as a work '
'around '
'solution')
# do not save during this plots, nor apply any commands
# we will make this when all plots will be done
output = kwargs.get('output', None)
kwargs['output'] = None
commands = kwargs.get('commands', [])
kwargs['commands'] = []
clear = kwargs.pop('clear', True)
legend = kwargs.pop('legend', None) # remove 'legend' from kwargs before calling plot
# else it will generate a conflict
for s in datasets: # , colors, markers):
ax = s.plot(method=method, pen=pen, marker='AUTO', color='AUTO', ls='AUTO', clear=clear, **kwargs)
clear = False # clear=False is necessary so that the next plot is drawn on the same figure
# scale all plots
if legend is not None:
_ = ax.legend(ax.lines, labels, shadow=True, loc=legend, frameon=True, facecolor='lightyellow')
# now we can output the final figure
kw = {'output': output, 'commands': commands}
datasets[0]._plot_resume(datasets[-1], **kw)
return ax
| 5,349,639 |
def _scale_enum(anchor, scales):
""" 列举关于一个anchor的三种尺度 128*128,256*256,512*512
Enumerate a set of anchors for each scale wrt an anchor.
"""
w, h, x_ctr, y_ctr = _whctrs(anchor) # returns width, height and center: w=16, h=16, x_ctr=7.5, y_ctr=7.5
ws = w * scales #[128 256 512]
hs = h * scales #[128 256 512]
anchors = _mkanchors(ws, hs, x_ctr, y_ctr) #[[-56 -56 71 71] [-120 -120 135 135] [-248 -248 263 263]]
return anchors
| 5,349,640 |
def metamodel_from_file(file_name, **kwargs):
"""
Creates new metamodel from the given file.
Args:
file_name(str): The name of the file with textX language description.
other params: See metamodel_from_str.
"""
with codecs.open(file_name, 'r', 'utf-8') as f:
lang_desc = f.read()
metamodel = metamodel_from_str(lang_desc=lang_desc,
file_name=file_name,
**kwargs)
return metamodel
| 5,349,641 |
def _python(data):
"""Generate python in current directory
Args:
data (dict): simulation
Returns:
py.path.Local: file to append
"""
import sirepo.template
import copy
template = sirepo.template.import_module(data)
res = pkio.py_path('run.py')
res.write(template.python_source_for_model(copy.deepcopy(data), None))
return res
| 5,349,642 |
def load_hosts_conf(path='/etc/hosts'):
"""parse hosts file"""
hosts = {}
try:
with open(path, 'r') as f:
for line in f.readlines():
parts = line.strip().split()
if len(parts) < 2:
continue
addr = ip_address(parts[0])
if addr:
for hostname in parts[1:]:
if hostname:
hosts[hostname] = addr
except IOError as e:
hosts['localhost'] = '127.0.0.1'
return hosts
| 5,349,643 |
def get_first_env(*args):
"""
Return the first env var encountered from list
PLEASE NOTE: Always prefer using get_env, this helper is for app
transitioning to a new config structure.
Example:
get_first_env('DB_NAME', 'DATABASE_NAME')
"""
for name in args:
if name in os.environ:
return os.environ[name]
error_msg = "Missing any of these env vars {}".format(args)
raise ImproperlyConfigured(error_msg)
| 5,349,644 |
def pick(df, isnotnull=None, **kwargs):
"""Function to pick row indices from DataFrame.
Copied from kkpandas
This method provides a nicer interface to choose rows from a DataFrame
that satisfy specified constraints on the columns.
isnotnull : column name, or list of column names, that should not be null.
See pandas.isnull for a definition of null
All additional kwargs are interpreted as {column_name: acceptable_values}.
For each column_name, acceptable_values in kwargs.items():
The returned indices into column_name must contain one of the items
in acceptable_values.
If acceptable_values is None, then that test is skipped.
Note that this means there is currently no way to select rows that
ARE none in some column.
If acceptable_values is a single string or value (instead of a list),
then the returned rows must contain that single string or value.
TODO:
add flags for string behavior, AND/OR behavior, error if item not found,
return unique, ....
"""
msk = np.ones(len(df), dtype=bool)
for key, val in list(kwargs.items()):
if val is None:
continue
elif is_nonstring_iter(val):
msk &= df[key].isin(val)
else:
msk &= (df[key] == val)
if isnotnull is not None:
# Edge case
if not is_nonstring_iter(isnotnull):
isnotnull = [isnotnull]
# Filter by not null
for key in isnotnull:
msk &= ~pandas.isnull(df[key])
return df.index[msk]
| 5,349,645 |
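A short usage sketch for pick() above; it assumes the is_nonstring_iter helper it references is available, and the DataFrame contents are illustrative.
import numpy as np
import pandas
df = pandas.DataFrame({
    "session": ["s1", "s1", "s2"],
    "unit": [1, 2, 1],
    "rate": [5.0, np.nan, 7.5],
})
idxs = pick(df, isnotnull="rate", session="s1")
# df.loc[idxs] -> the single row (session s1, unit 1, rate 5.0)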
def _waveform_distortion(waveform, distortion_methods_conf):
""" Apply distortion on waveform
This distortion will not change the length of the waveform.
Args:
waveform: numpy float tensor, (length,)
distortion_methods_conf: a list of config for ditortion method.
a method will be randomly selected by 'method_rate' and
apply on the waveform.
Returns:
distorted waveform.
"""
r = random.uniform(0, 1)
acc = 0.0
for distortion_method in distortion_methods_conf:
method_rate = distortion_method['method_rate']
acc += method_rate
if r < acc:
distortion_type = distortion_method['name']
distortion_conf = distortion_method['params']
point_rate = distortion_method['point_rate']
return distort_wav_conf(waveform, distortion_type,
distortion_conf , point_rate)
return waveform
| 5,349,646 |
def raw(files = 'english-kjv'):
"""
@param files: One or more treebank files to be processed
@type files: L{string} or L{tuple(string)}
@rtype: iterator over L{tree}
"""
# Just one file to process? If so convert to a tuple so we can iterate
if type(files) is str: files = (files,)
for file in files:
path = os.path.join(get_basedir(), "genesis", file+".txt")
s = open(path).read()
for t in tokenize.whitespace(s):
yield t
| 5,349,647 |
def grouper(n, iterable):
""" Browse an iterator by chunk of n elements """
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
| 5,349,648 |
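Usage example for grouper() above: iterating a range in chunks of three yields tuples, with a shorter final chunk.
for chunk in grouper(3, range(8)):
    print(chunk)
# (0, 1, 2)
# (3, 4, 5)
# (6, 7)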
def plot_with_lambdas(linewidth=2.0,image_extension='svg'):
""" Function to plot F with lambduhs for various snapshots.
Parameters
----------
linewidth : float, optional
The width of the lines in the plot. This is set to 2.0 by default.
image_extension : string, optional
Specifies the extension of the plot file to be saved. The default image
type is '.svg'
Returns
-------
Nothing. The plot is written to file.
Examples
--------
>>> deltas = [0.01,0.025,0.05,1.0]
>>> for d in deltas:
... estrangement.ECA(dataset_dir='../data',delta=d,increpeats=opt.increpeats,minrepeats=opt.minrepeats)
>>> plot_with_lambdas()
"""
with open("Fdetails.log", 'r') as Fdetails_file:
Fdetails_dict = eval(Fdetails_file.read()) # {time: {lambda: {run_number: F}}}
with open("Qdetails.log", 'r') as Qdetails_file:
Qdetails_dict = eval(Qdetails_file.read()) # {time: {lambda: {run_number: Q}}}
with open("Edetails.log", 'r') as Edetails_file:
Edetails_dict = eval(Edetails_file.read()) # {time: {lambda: {run_number: E}}}
with open("lambdaopt.log", 'r') as f:
lambdaopt_dict = eval(f.read()) # {time: lambdaopt}
with open("best_feasible_lambda.log", 'r') as f:
best_feasible_lambda_dict = eval(f.read()) # {time: best_feasible_lambda}
with open("Q.log", 'r') as f:
Q_dict = eval(f.read()) # {time: lambdaopt}
with open("F.log", 'r') as f:
F_dict = eval(f.read()) # {time: lambdaopt}
for t in sorted(Fdetails_dict.keys()):
Flam = Fdetails_dict[t]
Qlam = Qdetails_dict[t]
Elam = Edetails_dict[t]
dictX = collections.defaultdict(list)
dictY = collections.defaultdict(list)
dictErr = collections.defaultdict(list)
for l in sorted(Flam.keys()):
dictX['Q'].append(l)
dictY['Q'].append(max(Qlam[l].values()))
dictErr['Q'].append( confidence_interval(Qlam[l].values()) )
dictX['F'].append(l)
dictY['F'].append(max(Flam[l].values()))
dictErr['F'].append( confidence_interval(Flam[l].values()) )
ax2 = postpro.plot_by_param(dictX, dictY, listLinestyles=['b-', 'g-', 'r-',],
xlabel="$\lambda$", ylabel="Dual function", title="Dual function at t=%s"%(str(t)),
dictErr=dictErr)
ax2.axvline(x=lambdaopt_dict[t], color='m', linewidth=linewidth,
linestyle='--', label="$\lambda_{opt}$")
ax2.axvline(x=best_feasible_lambda_dict[t], color='k', linewidth=linewidth,
linestyle='--', label="best feasible $\lambda$")
ax2.axhline(F_dict[t], color='b', linewidth=linewidth,
linestyle='--', label="best feasible F")
ax2.axhline(Q_dict[t], color='g', linewidth=linewidth,
linestyle='--', label="best feasible Q")
pyplot.legend()
pyplot.savefig('with_lambda_at_t%s.%s'%(str(t), image_extension))
| 5,349,649 |
def _pad_returns(returns):
"""
Pads a returns Series or DataFrame with business days, in case the
existing Date index is sparse (as with PNL csvs). Sparse indexes if not
padded will affect the Sharpe ratio because the 0 return days will not be
included in the mean and std.
"""
bdays = pd.date_range(start=returns.index.min(), end=returns.index.max(),freq="B")
idx = returns.index.union(bdays)
return returns.reindex(index=idx).fillna(0)
| 5,349,650 |
def add_header(r):
"""
Add headers to both force latest IE rendering engine or Chrome Frame,
and also to cache the rendered page for 10 minutes.
"""
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers['Cache-Control'] = 'public, max-age=0'
#if r.headers["Content-Type"] !="application/json" and r.status_code!=304:
# print(str(r.status_code)+" -",end="")
return r
| 5,349,651 |
def man_page():
"""Print manual page-like help"""
print("""
USAGE
image_purge.py [OPTIONS]
OPTIONS
-d, --dry-run
Report but don't delete any images.
-h, --help
Print a brief help.
--man
Print man page-like help.
-v, --verbose
Print information messages to stdout.
--vv,
More verbose. Print debug messages to stdout.
""")
| 5,349,652 |
def rename_photos(old_name, folder_path, taken_date, file_extension):
"""Function reaname photo files names to their dates"""
new_name = folder_path + "/" + taken_date + "." + file_extension
os.rename(old_name, new_name)
| 5,349,653 |
def pair_sorter(aln):
"""Get the alignment name and attributes for sorting."""
return (
aln.name,
not aln.first_in_pair,
aln.unmapped,
aln.supplementary_alignment,
aln.secondary_alignment)
| 5,349,654 |
def _read_input_from(input_from):
""" Reads the labels from the input from. """
inputs = []
for input_from_line in input_from.splitlines():
# Skip if line is empty.
if input_from_line.strip() == '':
continue
# Load file content
print(f"::debug::Loading labels from '{input_from_line}'.")
input_from_content = None
if input_from_line.startswith('http://') or input_from_line.startswith('https://'):
requests_url_response = requests.get(input_from_line)
if requests_url_response.ok:
input_from_content = requests_url_response.text
else:
raise Exception(f'Unable to read file from {input_from_line}: {requests_url_response.reason}')
else:
with open(input_from_line, 'r') as input_from_file:
input_from_content = input_from_file.read()
if input_from_line.endswith('.yaml') or input_from_line.endswith('.yml'):
inputs.extend(yaml.load(input_from_content, Loader=yaml.FullLoader))
if input_from_line.endswith('.json'):
inputs.extend(json.loads(input_from_content))
return inputs
| 5,349,655 |
def project_to2d(
pts: np.ndarray, K: np.ndarray, R: np.ndarray, t: np.ndarray
) -> np.ndarray:
"""Project 3d points to 2d.
Projects a set of 3-D points, pts, into 2-D using the camera intrinsic
matrix (K), and the extrinsic rotation matric (R), and extrinsic
translation vector (t). Note that this uses the matlab
convention, such that
M = [R;t] * K, and pts2d = pts3d * M
"""
M = np.concatenate((R, t), axis=0) @ K
projPts = np.concatenate((pts, np.ones((pts.shape[0], 1))), axis=1) @ M
projPts[:, :2] = projPts[:, :2] / projPts[:, 2:]
return projPts
| 5,349,656 |
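A minimal shape-check sketch for project_to2d() above (the values are illustrative, not from the source). With the row-vector convention M = [R; t] @ K used there, K is a 3x3 intrinsic matrix stored transposed relative to the usual column-vector form, R is 3x3 and t is a 1x3 row.
import numpy as np
K = np.array([[500.0,   0.0, 0.0],
              [  0.0, 500.0, 0.0],
              [320.0, 240.0, 1.0]])    # focal lengths on the diagonal; principal point in the last row
R = np.eye(3)
t = np.array([[0.0, 0.0, 2.0]])        # 1x3 translation row
pts = np.array([[0.1, -0.2, 1.0]])     # one 3-D point
uv = project_to2d(pts, K, R, t)        # shape (1, 3); uv[:, :2] are pixel coordinates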
def abort_cannot_update(_id, _type):
"""Abort the request if the entity cannot be updated."""
abort(400,
message="Cannot update {} {}. Please try again.".format(_type, _id))
| 5,349,657 |
def enditall():
"""Terminate program"""
sys.exit(0)
| 5,349,658 |
def uncontract_general(basis, use_copy=True):
"""
Removes the general contractions from a basis set
The returned basis
may have functions with coefficients of zero and may have duplicate
shells.
If use_copy is True, the input basis set is not modified.
"""
if use_copy:
basis = copy.deepcopy(basis)
for k, el in basis['elements'].items():
if not 'electron_shells' in el:
continue
newshells = []
for sh in el['electron_shells']:
# See if we actually have to uncontract
# Also, don't uncontract sp, spd,.... orbitals
# (leave that to uncontract_spdf)
if len(sh['coefficients']) == 1 or len(sh['angular_momentum']) > 1:
newshells.append(sh)
else:
if len(sh['angular_momentum']) == 1:
for c in sh['coefficients']:
# copy, then replace 'coefficients'
newsh = sh.copy()
newsh['coefficients'] = [c]
newshells.append(newsh)
el['electron_shells'] = newshells
# If use_copy is True, we already made our deep copy
return prune_basis(basis, False)
| 5,349,659 |
def read_sd15ch1_images(root_dir,
image_relative_path_seq,
resize=None,
color=False):
"""
WARNING
-------
- All images must have the same shape (this is the case for the frames, and all models but the
ones of the "01-original" category).
- Loading many images at once can quickly fill up your RAM.
Returns
-------
- np.array((number_of_images, images_height, images_width)) if `color` is `False`
- np.array((number_of_images, images_height, images_width, image_channels)) otherwise.
"""
# Read first image, if any, to get image shape
# Note: all images must have the same shape
if len(image_relative_path_seq) == 0:
return np.array([])
# We have a least 1 element
img0 = read_sd15ch1_image(root_dir, image_relative_path_seq[0], resize, color)
# allocate some contiguous memory to host the decoded images
dim_axis0 = (len(image_relative_path_seq), ) # make it a tuple
dim_axis_others = img0.shape
imgs_shape = dim_axis0 + dim_axis_others
__info("About to allocate %d bytes for an array of shape %s." % (np.prod(imgs_shape) * 4, imgs_shape))
imgs = np.zeros(imgs_shape, dtype=np.float32)
# Handle first image
imgs[0, ...] = img0
# Loop over other images
for ii, rel_path in enumerate(image_relative_path_seq[1:], start=1):
imgi = read_sd15ch1_image(root_dir, rel_path, resize, color)
if imgi.shape != dim_axis_others:
__err("All images must have the same shape. Inconsistent dataset. Aborting loading.", RuntimeError)
imgs[ii, ...] = imgi
return imgs
| 5,349,660 |
def generate_features(plz_ags,
boundary_type,
int_buildings_path,
pri_buildings_path):
"""
Scan all available PLZ/AGS in the region.
Populate PLZ/AGS building objects with data from region OSM dump (Geofabrik)
Args:
plz_ags: list of AGS in the region
boundary_type: PLZ or AGS code
int_buildings_path: output save to 02_intermediate
pri_buildings_path: output save to 03_primary
"""
k = 0
# create saving location folder if not exists
if not os.path.exists(pri_buildings_path):
os.makedirs(pri_buildings_path)
# Check for progress of already done areas
name_list = os.listdir(pri_buildings_path)
id_list = [x.split('.')[0].split('_')[2] for x in name_list if 'buildings' in x]
# Get list of AGS codes
plz_ags = plz_ags[[boundary_type]]
plz_ags = pd.DataFrame(np.setdiff1d(plz_ags, id_list), columns=[boundary_type])
logging.info(f'Total of {len(plz_ags)} {boundary_type}(s) in the country')
while k < len(plz_ags):
boundary_id = plz_ags[boundary_type].iloc[k]
buildings_path = f'{int_buildings_path}/buildings_{boundary_type}_{boundary_id}.csv'
try:
# Read in building objects data in the area
df = pd.read_csv(buildings_path,
dtype={'tags.addr:suburb': 'object',
'tags.building:levels': 'object',
'tags.source': str,
'postcode': str}, # supposed to be AGS
converters={"nodes": lambda x: x.strip("[]").split(", ")}) # read column as list
# Filter out NaN
df = df[df.geometry.isna() == False].reset_index(drop=True)
# Convert geometry to GeoSeries
df['geometry'] = df['geometry'].apply(wkt.loads)
# Convert to GeoPandas type
df_geo = GeoDataFrame(df, geometry='geometry')
# Shape & Size
df_geo[['surface_area','rectangularity']] = df_geo.apply(lambda row: pd.Series(shape_size(row['geometry'])),axis=1)
# Total area
df_geo['total_area'] = df_geo['building_levels'].astype(int) * df_geo['surface_area']
# Save result to 02_intermediate/buildings_plz/buildings_<boundary_type>_<boundary_id>.csv
logging.info(f'Total of {len(df)} buildings in {boundary_type} {boundary_id} at position {k+1}/{len(plz_ags)}. Saving result...')
# Save result
df_geo.to_csv(f'{pri_buildings_path}/buildings_{boundary_type}_{boundary_id}.csv', index=False)
except Exception as e:
logging.warning(f'Cannot enhance data on {boundary_type} {boundary_id} at position {k+1}/{len(plz_ags)}. Error: {e}')
finally:
k = k + 1
| 5,349,661 |
def is_private_bool(script_dict):
""" Returns is_private boolean value from user dictionary object """
return script_dict['entry_data']['ProfilePage'][0]['graphql']['user']['is_private']
| 5,349,662 |
def manage_greylist(request):
"""
View for managing greylist.
"""
message = None
if request.method == 'POST':
form = GreylistForm(request.POST)
if form.is_valid():
# Set details to empty string if blank
new_greylisted_guest = form.save(commit=False)
new_greylisted_guest.addedBy = request.user
new_greylisted_guest.save()
message = 'Successfully added entry to greylist'
else:
message = 'Error adding entry to greylist'
else:
form = GreylistForm()
context = {
'greylist': [
(
greylisting,
user_can_delete_greylisting(request.user, greylisting),
)
for greylisting in GreylistedGuest.objects.all().order_by('name')
],
'message': message,
'form': form,
}
return render(request, 'parties/greylist/manage.html', context)
| 5,349,663 |
def get_regions():
"""Summary
Returns:
TYPE: Description
"""
client = boto3.client('ec2')
region_response = client.describe_regions()
regions = [region['RegionName'] for region in region_response['Regions']]
return regions
| 5,349,664 |
def _is_ignored_read_event(request):
"""Return True if this read event was generated by an automated process, as
indicated by the user configurable LOG_IGNORE* settings.
See settings_site.py for description and rationale for the settings.
"""
if (
django.conf.settings.LOG_IGNORE_TRUSTED_SUBJECT
and d1_gmn.app.auth.is_trusted_subject(request)
):
return True
if (
django.conf.settings.LOG_IGNORE_NODE_SUBJECT
and d1_gmn.app.auth.is_client_side_cert_subject(request)
):
return True
if _has_regex_match(
request.META["REMOTE_ADDR"], django.conf.settings.LOG_IGNORE_IP_ADDRESS
):
return True
if _has_regex_match(
request.META.get("HTTP_USER_AGENT", "<not provided>"),
django.conf.settings.LOG_IGNORE_USER_AGENT,
):
return True
if _has_regex_match(
request.primary_subject_str, django.conf.settings.LOG_IGNORE_SUBJECT
):
return True
return False
| 5,349,665 |
def test_compiler_bootstrap_from_binary_mirror(
install_mockery_mutable_config, mock_packages, mock_fetch,
mock_archive, mutable_config, monkeypatch, tmpdir):
"""
Make sure installing compiler from buildcache registers compiler
"""
# Create a temp mirror directory for buildcache usage
mirror_dir = tmpdir.join('mirror_dir')
mirror_url = 'file://{0}'.format(mirror_dir.strpath)
# Install a compiler, because we want to put it in a buildcache
install('[email protected]')
# Put installed compiler in the buildcache
buildcache(
'create', '-u', '-a', '-f', '-d', mirror_dir.strpath, '[email protected]'
)
# Now uninstall the compiler
uninstall('-y', '[email protected]')
monkeypatch.setattr(spack.concretize.Concretizer,
'check_for_compiler_existence', False)
spack.config.set('config:install_missing_compilers', True)
assert CompilerSpec('[email protected]') not in compilers.all_compiler_specs()
# Configure the mirror where we put that buildcache w/ the compiler
mirror('add', 'test-mirror', mirror_url)
# Now make sure that when the compiler is installed from binary mirror,
# it also gets configured as a compiler. Test succeeds if it does not
# raise an error
install('--no-check-signature', '--cache-only', '--only',
'dependencies', 'b%[email protected]')
install('--no-cache', '--only', 'package', 'b%[email protected]')
| 5,349,666 |
def aten_embedding(mapper, graph, node):
""" 构造embedding的PaddleLayer。
TorchScript示例:
%inputs_embeds.1 : Tensor = aten::embedding(%57, %input_ids.1, %45, %46, %46)
参数含义:
%inputs_embeds.1 (Tensor): 输出,embedding后的结果。
%57 (Tensor): weights。
%input_ids.1 (Tensor): 需要进行embedding的特征层。
%45 (int): padding_idx。
%46 (bool): scale_grad_by_freq。
%46 (bool): sparse。
"""
scope_name = mapper.normalize_scope_name(node)
op_name = name_generator("embedding", mapper.nn_name2id)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [op_name, output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
# Get the list of outputs of the current node
current_outputs = [output_name]
# Process input 0, i.e. %57
weights = mapper.pytorch_params[inputs_name[0]]
mapper.paddle_params[op_name + ".weight"] = weights
layer_attrs["num_embeddings"] = weights.shape[0]
layer_attrs["embedding_dim"] = weights.shape[1]
# Process input 1, i.e. %input_ids.1
mapper._check_input(graph, inputs_node[1], inputs_name[1], current_outputs, scope_name)
layer_inputs["input"] = inputs_name[1]
# Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
# Process input 2, i.e. %45
if mapper.attrs[inputs_name[2]] == -1:
layer_attrs["padding_idx"] = None
else:
layer_attrs["padding_idx"] = mapper.attrs[inputs_name[2]]
# Process input 4, i.e. %46
layer_attrs["sparse"] = mapper.attrs[inputs_name[4]]
graph.add_layer(
"paddle.nn.Embedding",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name,
**layer_attrs)
return current_inputs, current_outputs
| 5,349,667 |
def build_drivers(compilation_commands, linker_commands, kernel_src_dir,
target_arch, clang_path, llvm_link_path, llvm_bit_code_out, is_clang_build):
"""
The main method that performs the building and linking of the driver files.
:param compilation_commands: Parsed compilation commands from the json.
:param linker_commands: Parsed linker commands from the json.
:param kernel_src_dir: Path to the kernel source directory.
:param target_arch: Number representing target architecture.
:param clang_path: Path to clang.
:param llvm_link_path: Path to llvm-link
:param llvm_bit_code_out: Folder where all the linked bitcode files should be stored.
:param is_clang_build: Flag to indicate that this is a clang build.
:return: True
"""
output_llvm_sh_file = os.path.join(llvm_bit_code_out, 'llvm_build.sh')
fp_out = open(output_llvm_sh_file, 'w')
fp_out.write("#!/bin/bash\n")
log_info("Writing all compilation commands to", output_llvm_sh_file)
all_compilation_commands = []
obj_bc_map = {}
for curr_compilation_command in compilation_commands:
if is_clang_build:
wd, obj_file, bc_file, build_str = _get_llvm_build_str_from_llvm(clang_path, curr_compilation_command.curr_args,
kernel_src_dir, target_arch,
curr_compilation_command.work_dir,
curr_compilation_command.src_file,
curr_compilation_command.output_file,
llvm_bit_code_out)
else:
wd, obj_file, bc_file, build_str = _get_llvm_build_str(clang_path, curr_compilation_command.curr_args,
kernel_src_dir, target_arch,
curr_compilation_command.work_dir,
curr_compilation_command.src_file,
curr_compilation_command.output_file, llvm_bit_code_out)
all_compilation_commands.append((wd, build_str))
obj_bc_map[obj_file] = bc_file
fp_out.write("cd " + wd + ";" + build_str + "\n")
fp_out.close()
log_info("Got", len(all_compilation_commands), "compilation commands.")
log_info("Running compilation commands in multiprocessing modea.")
p = Pool(cpu_count())
return_vals = p.map(run_program_with_wd, all_compilation_commands)
log_success("Finished running compilation commands.")
output_llvm_sh_file = os.path.join(llvm_bit_code_out, 'llvm_link_cmds.sh')
fp_out = open(output_llvm_sh_file, 'w')
fp_out.write("#!/bin/bash\n")
log_info("Writing all linker commands to", output_llvm_sh_file)
all_linker_commands = []
recursive_linker_commands = []
for curr_linked_command in linker_commands:
curr_ret_val = _get_llvm_link_str(llvm_link_path, kernel_src_dir,
curr_linked_command.input_files, obj_bc_map,
curr_linked_command.output_file,
curr_linked_command.work_dir, llvm_bit_code_out)
if curr_ret_val is not None:
wd, obj_file, bc_file, build_str = curr_ret_val
all_linker_commands.append((wd, build_str))
obj_bc_map[obj_file] = bc_file
fp_out.write("cd " + wd + ";" + build_str + "\n")
else:
# these are recursive linker commands.
recursive_linker_commands.append(curr_linked_command)
log_info("Got", len(all_linker_commands), "regular linker commands.")
log_info("Running linker commands in multiprocessing mode.")
p = Pool(cpu_count())
return_vals = p.map(run_program_with_wd, all_linker_commands)
log_success("Finished running linker commands.")
if len(recursive_linker_commands) > 0:
log_info("Got", len(recursive_linker_commands), " recursive linker commands.")
_process_recursive_linker_commands(recursive_linker_commands, kernel_src_dir, llvm_link_path,
llvm_bit_code_out, obj_bc_map, fp_out)
fp_out.close()
return True
| 5,349,668 |
def _validate_opts(opts):
"""
Check that all of the types of values passed into the config are
of the right types
"""
def format_multi_opt(valid_type):
try:
num_types = len(valid_type)
except TypeError:
# Bare type name won't have a length, return the name of the type
# passed.
return valid_type.__name__
else:
def get_types(types, type_tuple):
for item in type_tuple:
if isinstance(item, tuple):
get_types(types, item)
else:
try:
types.append(item.__name__)
except AttributeError:
log.warning(
"Unable to interpret type %s while validating "
"configuration",
item,
)
types = []
get_types(types, valid_type)
ret = ", ".join(types[:-1])
ret += " or " + types[-1]
return ret
errors = []
err = (
"Config option '{0}' with value {1} has an invalid type of {2}, a "
"{3} is required for this option"
)
for key, val in opts.items():
if key in VALID_OPTS:
if val is None:
if VALID_OPTS[key] is None:
continue
else:
try:
if None in VALID_OPTS[key]:
continue
except TypeError:
# VALID_OPTS[key] is not iterable and not None
pass
if isinstance(val, VALID_OPTS[key]):
continue
if hasattr(VALID_OPTS[key], "__call__"):
try:
VALID_OPTS[key](val)
if isinstance(val, (list, dict)):
# We'll only get here if VALID_OPTS[key] is str or
# bool, and the passed value is a list/dict. Attempting
# to run int() or float() on a list/dict will raise an
# exception, but running str() or bool() on it will
# pass despite not being the correct type.
errors.append(
err.format(
key, val, type(val).__name__, VALID_OPTS[key].__name__
)
)
except (TypeError, ValueError):
errors.append(
err.format(
key, val, type(val).__name__, VALID_OPTS[key].__name__
)
)
continue
errors.append(
err.format(
key, val, type(val).__name__, format_multi_opt(VALID_OPTS[key])
)
)
# Convert list to comma-delimited string for 'return' config option
if isinstance(opts.get("return"), list):
opts["return"] = ",".join(opts["return"])
for error in errors:
log.warning(error)
if errors:
return False
return True
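
# Hedged usage notes (the option names below are hypothetical): validation is driven
# by the module-level VALID_OPTS mapping, and warnings go through the module's `log`
# object. With VALID_OPTS = {"timeout": int, "log_level": (str, type(None))}:
#   _validate_opts({"timeout": "30"})   -> True   (int("30") coerces cleanly)
#   _validate_opts({"timeout": [30]})   -> False  (int([30]) raises TypeError; a warning is logged)
#   _validate_opts({"log_level": None}) -> True   (NoneType is listed as a valid type)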
| 5,349,669 |
async def from_string(input, output_path=None, options=None):
"""
Convert given string or strings to PDF document
:param input: string with a desired text. Could be a raw text or a html file
:param output_path: (optional) path to output PDF file. If not provided,
PDF will be returned as string
:param options: (optional) dict to configure pyppeteer page.pdf action
Returns: output_path if provided else PDF Binary
"""
sources = Source(input, 'string')
r = PDFMate(sources, options=options)
return await r.to_pdf(output_path)
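
# Hedged usage sketch: from_string is a coroutine, so it must be awaited. The HTML
# snippet, output path, and "format" option below are illustrative values, not taken
# from the original project.
async def _example_render():
    html = "<h1>Hello PDF</h1>"
    # Returns "page.pdf" because an output path is given; omit it to get the PDF bytes.
    return await from_string(html, "page.pdf", options={"format": "A4"})

# import asyncio; asyncio.run(_example_render())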
| 5,349,670 |
def parse_tuple(tuple_string):
"""
    Strip any whitespace, then strip the outer quote and bracket characters.
"""
return tuple_string.strip().strip("\"[]")
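
# Quick illustration: whitespace is stripped first, then any outer quote/bracket
# characters, leaving the comma-separated body untouched.
assert parse_tuple('  "[1, 2, 3]"  ') == '1, 2, 3'
assert parse_tuple('[a, b]') == 'a, b'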
| 5,349,671 |
def create_size():
"""Create a new size."""
in_out_schema = SizeSchema()
try:
new_size = in_out_schema.load(request.json)
except ValidationError as err:
abort(400, {'message': err.messages})
try:
db.session.add(new_size)
db.session.commit()
except IntegrityError as err:
db.session.rollback()
log.exception(err)
abort(400, {'message': 'Data integrity violated.'})
return in_out_schema.jsonify(new_size)
| 5,349,672 |
def inject_snakefmt_config(
ctx: click.Context, param: click.Parameter, config_file: Optional[str] = None
) -> Optional[str]:
"""
If no config file argument provided, parses "pyproject.toml" if one exists.
Injects any parsed configuration into the relevant parameters to the click `ctx`.
"""
if config_file is None:
config_file = find_pyproject_toml(ctx.params.get("src", ()))
config = read_snakefmt_config(config_file)
if ctx.default_map is None:
ctx.default_map = {}
ctx.default_map.update(config) # type: ignore # bad types in .pyi
return config_file
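
# A hedged sketch of how a callback like this is typically wired into a click CLI;
# the option name, flags, and help text below are illustrative, not the project's
# actual command definition.
#
# @click.command()
# @click.option(
#     "--config",
#     type=click.Path(exists=True),
#     callback=inject_snakefmt_config,
#     is_eager=True,  # run early so parsed values land in ctx.default_map
#     help="Path to a pyproject.toml with a [tool.snakefmt] section.",
# )
# @click.argument("src", nargs=-1)
# def main(config, src):
#     ...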
| 5,349,673 |
def test_screen_methods_exist():
""" Test that a couple of methods exist, but don't do anything. """
t = MockTurtle()
t.screen.tracer()
t.screen.update()
| 5,349,674 |
def sort_by_ctime(paths):
"""Sorts list of file paths by ctime in ascending order.
    Args:
        paths: iterable of filepaths.
    Returns:
        list: filepaths sorted by ctime, or an empty list if any ctime is unavailable or all ctimes are identical.
"""
ctimes = list(map(safe_ctime, paths))
if not all(ctimes) or len(set(ctimes)) <= 1:
return []
else:
return sorted(paths, key=lambda fp: safe_ctime(fp))
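
# A minimal sketch of what the safe_ctime helper used above presumably does (it is
# defined elsewhere in the original module); the stand-in name below is hypothetical.
import os

def _safe_ctime_sketch(path):
    # Return the inode change time, or None when the path cannot be stat'ed.
    try:
        return os.path.getctime(path)
    except OSError:
        return None

# sort_by_ctime(["b.log", "a.log", "c.log"])  # -> paths ordered oldest-first, or []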
| 5,349,675 |
def show(root=None, debug=False, parent=None):
"""Display Scene Inventory GUI
Arguments:
debug (bool, optional): Run in debug-mode,
defaults to False
parent (QtCore.QObject, optional): When provided parent the interface
to this QObject.
"""
try:
module.window.close()
del module.window
except (RuntimeError, AttributeError):
pass
if debug:
import traceback
sys.excepthook = lambda typ, val, tb: traceback.print_last()
with tools_lib.application():
window = Window(parent)
window.show()
window.setStyleSheet(style.load_stylesheet())
window.refresh()
module.window = window
# Pull window to the front.
module.window.raise_()
module.window.activateWindow()
| 5,349,676 |
def get_ospf_metric(device,
destination_address):
"""Get OSPF metric
Args:
device (obj): Device object
destination_address (str): Destination address
"""
out = device.parse('show route')
    # Example of the parsed output structure:
    # "route-table": [
    #     {
    #         "active-route-count": "0",
    #         "destination-count": "0",
    #         "hidden-route-count": "0",
    #         "holddown-route-count": "0",
    #         "rt": [
    #             {
    #                 "metric": "101",
    #             }
    #         ]
    #     }
    # ]
rt_list = Dq(out).get_values('rt')
for rt_dict in rt_list:
rt_destination_ = Dq(rt_dict).get_values("rt-destination", 0)
if not isinstance(rt_destination_, list):
if rt_destination_.startswith(str(destination_address)):
metric_ = Dq(rt_dict).get_values('metric', 0)
if not metric_:
continue
return metric_
return None
| 5,349,677 |
def telegram_tcp_blocking_all(ooni_exe, outfile):
""" Test case where all POPs are TCP/IP blocked """
start_test("telegram_tcp_blocking_all")
args = args_for_blocking_all_pop_ips()
tk = execute_jafar_and_return_validated_test_keys(ooni_exe, outfile, args)
assert tk["telegram_tcp_blocking"] == True
assert tk["telegram_http_blocking"] == True
assert tk["telegram_web_failure"] == None
assert tk["telegram_web_status"] == "ok"
for entry in tk["tcp_connect"]:
assert entry["status"]["failure"] == (
"connection_refused" if entry["ip"] in ALL_POP_IPS else None
)
for entry in tk["requests"]:
url = urllib.parse.urlsplit(entry["request"]["url"])
assert entry["failure"] == (
"connection_refused" if url.hostname in ALL_POP_IPS else None
)
| 5,349,678 |
def get_episode_url():
"""エピソードの配信URLを追加
Returns:
[type]: [description]
"""
# フォームの値を取得
episode_num = "#"+request.form['episode_num'][0]
print(episode_num)
# 配信先一覧を取得
podcasts = Podcast.query.all()
broadcasts = Broadcast.query.all()
# 配信先 url
broadcast_urls = {}
for br in broadcasts:
broadcast_urls[br.broadcast_service] = br.broadcast_url
# エピソードのurlを取得
episode_urls = get_episode_url_all(broadcast_urls, episode_num)
return render_template(
'podcasts.html',
podcasts=podcasts,
broadcasts=broadcasts,
episode_num=episode_num,
episode_urls=episode_urls
)
| 5,349,679 |
def autofs():
"""Fixture data from /proc/mounts."""
data = "flux-support -rw,tcp,hard,intr,noacl,nosuid,vers=3,retrans=5 flux-support.locker.arc-ts.umich.edu:/gpfs/locker0/ces/g/nfs/f/flux-support\numms-remills -rw,tcp,hard,intr,noacl,nosuid,vers=3,retrans=5 umms-remills.locker.arc-ts.umich.edu:/gpfs/locker0/ces/g/nfs/u/umms-remills"
return data
| 5,349,680 |
def flag_dims(flags):
"""Return flag names, dims, and initials for flags.
Only flag value that correspond to searchable dimensions are
returned. Scalars and non-function string values are not included
in the result.
"""
dims = {}
initials = {}
for name, val in flags.items():
try:
flag_dim, initial = _flag_dim(val, name)
except ValueError:
pass
else:
dims[name] = flag_dim
initials[name] = initial
names = sorted(dims)
return (names, [dims[name] for name in names], [initials[name] for name in names])
| 5,349,681 |
def where(cmd, path=None):
"""
A function to wrap shutil.which for universal usage
"""
raw_result = shutil.which(cmd, os.X_OK, path)
if raw_result:
return os.path.abspath(raw_result)
else:
raise ValueError("Could not find '{}' in the path".format(cmd))
| 5,349,682 |
def to_stack(df, col, by, transform=None, get_cats=False):
""" Convert columns of a dataframe to a list of lists by 'by'
Args:
df:
col:
by:
transform:
Returns:
"""
g = df.groupby(by)
transform = _notransform if transform is None else transform
x_data = []
for gr in g.groups:
x_data.append(transform(g.get_group(gr)[col].values))
cats = np.array([gg for gg in g.groups])
x_len = np.array([len(x) for x in x_data])
inds = x_len.argsort()
# print(cats)
# print(inds)
if get_cats:
return [x_data[i] for i in inds], cats[inds]
return [x_data[i] for i in inds]
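
# A small usage sketch, assuming pandas/numpy are imported at module level (as the
# function body implies) and that _notransform is the module's identity helper.
# The demo frame and column names are illustrative.
import pandas as pd

df_demo = pd.DataFrame({
    "group": ["a", "a", "b", "b", "b", "c"],
    "value": [1, 2, 3, 4, 5, 6],
})
stacks, cats = to_stack(df_demo, "value", "group", get_cats=True)
# stacks: per-group value arrays ordered by group size (smallest first)
# cats:   the matching group labels, here array(['c', 'a', 'b'])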
| 5,349,683 |
def dump_vfunc(path, vfunc_list, encoding="UTF-8"):
"""Dump vertical functions in Paradigm Echos VFUNC format to a file.
Each passed VFUNC is a tuple with 4 elements: `inline`, `crossline`, `x` and `y`, where `x` and `y` are 1d
    `np.ndarray`s with the same length. For each VFUNC a block with the following structure is created in the resulting
    file:
    - The first row contains 3 values: VFUNC [inline] [crossline],
    - All other rows represent pairs of `x` and corresponding `y` values: [x1] [y1] [x2] [y2] ...
    Each row contains 4 pairs, except for the last one, which may contain fewer. Each value is left-aligned with a
field width of 8.
Block example:
VFUNC 22 33
17 1546 150 1530 294 1672 536 1812
760 1933 960 2000 1202 2148 1374 2251
1574 2409 1732 2517 1942 2675
Parameters
----------
path : str
A path to the created file.
vfunc_list : iterable of tuples with 4 elements
Each tuple corresponds to a vertical function and consists of the following values: `inline`, `crossline`,
`x` and `y`, where `x` and `y` are 1d `np.ndarray`s with the same length.
encoding : str, optional, defaults to "UTF-8"
File encoding.
"""
with open(path, "w", encoding=encoding) as f:
for inline, crossline, x, y in vfunc_list:
f.write(f"{'VFUNC':8}{inline:<8}{crossline:<8}\n")
data = np.column_stack([x, y]).ravel()
rows = np.split(data, np.arange(8, len(data), 8))
for row in rows:
f.write("".join(f"{i:<8.0f}" for i in row) + "\n")
| 5,349,684 |
def detect_entry_signals(context):
"""
Place limit orders on 20 or 55 day breakout.
"""
for market in context.prices.items:
context.price = context.prices[market].close[-1]
if context.price > context.twenty_day_high[market]\
or context.price > context.fifty_five_day_high[market]:
if is_trade_allowed(context, market, context.long_direction):
order_identifier = order(
context.contract,
context.trade_size[market],
style=LimitOrder(context.price)
)
if order_identifier is not None:
context.orders[market].append(order_identifier)
if context.is_info:
log.info(
'Long %s %i@%.2f'
% (
market.root_symbol,
context.trade_size[market],
context.price
)
)
if context.price < context.twenty_day_low[market]\
or context.price < context.fifty_five_day_low[market]:
if is_trade_allowed(context, market, context.short_direction):
order_identifier = order(
context.contract,
-context.trade_size[market],
style=LimitOrder(context.price)
)
if order_identifier is not None:
context.orders[market].append(order_identifier)
if context.is_info:
log.info(
'Short %s %i@%.2f'
% (
market.root_symbol,
context.trade_size[market],
context.price
)
)
| 5,349,685 |
def makeSDEnum(name, val): # real signature unknown; restored from __doc__
"""
makeSDEnum(name, val)
Make a structured object out of an enumeration value
"""
pass
| 5,349,686 |
def entries_repr(entries: List[Metadata]) -> str:
"""
Generates a nicely formatted string repr from a list of Dropbox metadata.
:param entries: List of Dropbox metadata.
:returns: String representation of the list.
"""
str_reps = [
f"<{e.__class__.__name__}(path_display={e.path_display})>" for e in entries
]
return "[" + ",\n ".join(str_reps) + "]"
| 5,349,687 |
def frequency_encode(dftrain, dftest, columnlist, output_type="include"):
"""
Frequency encode columns in columnlist.
Parameters:
dftrain: [DataFrame] train set
dftest: [DataFrame] test set
columnlist: [list] columns to encode.
output_type: [str], default="include" will include the columns in the same dataframes.
If "separate", returns separate dataframes.
Returns:
dftrain_freq: [DataFrame] train
dftest_freq: [DataFrame] test
Author: kmp
"""
    if output_type == "include":
for col in columnlist:
col_freqs = dftrain.fillna({col:'NA'})[col].value_counts(normalize=True)
dftrain[col+'_freq'] = dftrain.fillna({col:'NA'})[col].map(col_freqs)
dftest[col+'_freq'] = dftest.fillna({col:'NA'})[col].map(col_freqs).fillna(0)
dftrain_freq = dftrain
dftest_freq = dftest
else:
dftrain_freq = pd.DataFrame(index=dftrain.index)
dftest_freq = pd.DataFrame(index=dftest.index)
for col in columnlist:
col_freqs = dftrain.fillna({col:'NA'})[col].value_counts(normalize=True)
dftrain_freq[col+'_freq'] = dftrain.fillna({col:'NA'})[col].map(col_freqs)
dftest_freq[col+'_freq'] = dftest.fillna({col:'NA'})[col].map(col_freqs).fillna(0)
return dftrain_freq, dftest_freq
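
# Usage sketch: encode a categorical column by its training-set frequency. The toy
# frames and the "city" column are illustrative.
import pandas as pd

train_demo = pd.DataFrame({"city": ["ny", "ny", "la", None]})
test_demo = pd.DataFrame({"city": ["la", "sf"]})
tr, te = frequency_encode(train_demo, test_demo, ["city"], output_type="separate")
# tr["city_freq"] -> [0.5, 0.5, 0.25, 0.25]  (missing values are mapped to an 'NA' bucket)
# te["city_freq"] -> [0.25, 0.0]             (categories unseen in train fall back to 0)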
| 5,349,688 |
def get_ranked_results(completed_rounds):
"""
For the rounds given in completed_rounds, calculate the total score for each team.
Then all teams are sorted on total score and are given a ranking to allow for ex aequo scores.
"""
results = []
for team in QTeam.objects.all():
teamtotal = 0
for a in team.qanswer_set.all():
# Only add results for complete rounds
if a.rnd in completed_rounds:
teamtotal += a.score
results.append((team.team_name, teamtotal))
# Sort the results
sorted_results = sorted(results, reverse=True, key=lambda tup: tup[1])
rank, count, previous, ranking = 0, 0, None, []
for key, num in sorted_results:
count += 1
if num != previous:
rank += count
previous = num
count = 0
ranking.append((rank, key, num))
return ranking
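
# A standalone sketch of the tie-aware ranking used above, with plain tuples instead
# of the Django models (the helper name is hypothetical). Tied scores share a rank and
# the next distinct score skips ahead, i.e. standard competition ranking.
def _rank_scores(results):
    sorted_results = sorted(results, reverse=True, key=lambda tup: tup[1])
    rank, count, previous, ranking = 0, 0, None, []
    for name, score in sorted_results:
        count += 1
        if score != previous:
            rank += count
            previous = score
            count = 0
        ranking.append((rank, name, score))
    return ranking

# _rank_scores([("A", 10), ("B", 10), ("C", 7)]) -> [(1, 'A', 10), (1, 'B', 10), (3, 'C', 7)]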
| 5,349,689 |
def setup_logging(loglevel):
"""Set up basic logging.
Args:
loglevel (int): minimum loglevel for emitting messages
"""
logformat = "[%(asctime)s] %(levelname)s:%(message)s"
logging.basicConfig(
level=loglevel, stream=sys.stdout, format=logformat, datefmt="%Y-%m-%d %H:%M:%S"
)
| 5,349,690 |
def get_subgraphs():
"""
Returns a list of lists. Each list is a subgraph (represented as a list of dictionaries).
:return: A list of lists of dictionaries.
"""
subgraph_list = [c.get("color") for c in classes if c.get("color") is not None]
subgraphs = []
# Add to subgraphs all the lists of actual subgraphs
for c in subgraph_list:
sub = [cl for cl in classes if cl.get("color") == c and cl]
if sub not in subgraphs:
subgraphs.append(sub)
# Now add to subgraphs all the items (as lists) that don't belong to a subsystem
for c in classes:
if c.get("color") is None:
sub = [c]
subgraphs.append(sub)
return subgraphs
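
# A sketch of the grouping behaviour with a toy module-level `classes` list (in the
# original module `classes` is defined elsewhere; the entries below are hypothetical).
#
# classes = [
#     {"name": "A", "color": "red"},
#     {"name": "B", "color": "red"},
#     {"name": "C", "color": "blue"},
#     {"name": "D"},                  # no color -> becomes its own single-item subgraph
# ]
# get_subgraphs()
# # -> [[{'name': 'A', 'color': 'red'}, {'name': 'B', 'color': 'red'}],
# #     [{'name': 'C', 'color': 'blue'}],
# #     [{'name': 'D'}]]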
| 5,349,691 |
def score(self, features):
""" return score from ML models"""
assert len(self._models) > 0, 'No valid prediction model'
scores = list()
for feature in features:
# when feature list extraction fails
if not feature:
scores.append(-float('inf'))
continue
item = list()
for ins in self._models:
item.append(ins.inference(feature))
pred = [i for i in item if i]
scores.append(float(sum(pred)/len(pred)))
return scores
| 5,349,692 |
def fromRGB(rgb):
"""Convert tuple or list to red, green and blue values that can be accessed as follows:
a = fromRGB((255, 255, 255))
a["red"]
a["green"]
a["blue"]
"""
return {"red":rgb[0], "green":rgb[1], "blue":rgb[2]}
| 5,349,693 |
def computeTelescopeTransmission(pars, offAxis):
"""
Compute tel. transmission (0 < T < 1) for a given set of parameters
as defined by the MC model and for a given off-axis angle.
Parameters
----------
pars: list of float
        Parameters of the telescope transmission. Len(pars) should be at least 5 (indices 0 through 4 are used).
offAxis: float
Off-axis angle in deg.
Returns
-------
float
Telescope transmission.
"""
_degToRad = math.pi / 180.0
if pars[1] == 0:
return pars[0]
else:
t = math.sin(offAxis * _degToRad) / (pars[3] * _degToRad)
return pars[0] / (1.0 + pars[2] * t ** pars[4])
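
# Example call with illustrative parameter values (not a real telescope model): with
# pars[1] == 0 the transmission is constant, otherwise it falls off with off-axis
# angle as pars[0] / (1 + pars[2] * (sin(offAxis) / (pars[3] in rad)) ** pars[4]).
demo_pars = [0.89, 1, 0.0131, 3.0, 2.0]  # at least 5 entries are needed (pars[4] is used)
on_axis = computeTelescopeTransmission(demo_pars, 0.0)   # == pars[0]
off_axis = computeTelescopeTransmission(demo_pars, 2.0)  # slightly below pars[0]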
| 5,349,694 |
def mean_iou(
results,
gt_seg_maps,
num_classes,
ignore_index,
nan_to_num=None,
label_map=dict(),
reduce_zero_label=False,
):
"""Calculate Mean Intersection and Union (mIoU)
Args:
results (list[ndarray]): List of prediction segmentation maps.
gt_seg_maps (list[ndarray]): list of ground truth segmentation maps.
num_classes (int): Number of categories.
ignore_index (int): Index that will be ignored in evaluation.
nan_to_num (int, optional): If specified, NaN values will be replaced
by the numbers defined by the user. Default: None.
label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
Returns:
float: Overall accuracy on all images.
ndarray: Per category accuracy, shape (num_classes, ).
ndarray: Per category IoU, shape (num_classes, ).
"""
all_acc, acc, iou = eval_seg_metrics(
results=results,
gt_seg_maps=gt_seg_maps,
num_classes=num_classes,
ignore_index=ignore_index,
metrics=["mIoU"],
nan_to_num=nan_to_num,
label_map=label_map,
reduce_zero_label=reduce_zero_label,
)
return all_acc, acc, iou
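
# A minimal NumPy-only illustration of the per-class IoU that the wrapped
# eval_seg_metrics call computes internally, written from the standard definition
# rather than the project's exact implementation; the helper name is hypothetical.
import numpy as np

def _iou_per_class_sketch(pred, gt, num_classes, ignore_index=255):
    valid = gt != ignore_index
    pred, gt = pred[valid], gt[valid]
    ious = np.full(num_classes, np.nan)
    for c in range(num_classes):
        inter = np.sum((pred == c) & (gt == c))
        union = np.sum((pred == c) | (gt == c))
        if union > 0:
            ious[c] = inter / union
    return ious  # mIoU is np.nanmean(ious)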
| 5,349,695 |
def as_date_or_none(date_str):
"""
Casts a date string as a datetime.date, or None if it is blank.
>>> as_date_or_none('2020-11-04')
datetime.date(2020, 11, 4)
    >>> as_date_or_none('') is None
    True
    >>> as_date_or_none(None) is None
    True
"""
if not date_str:
return None
return dateutil_parser.parse(date_str).date()
| 5,349,696 |
def guarantee_trailing_slash(directory_name: str) -> str:
"""Adds a trailling slash when missing
Params:
:directory_name: str, required
A directory name to add trailling slash if missing
Returns:
A post processed directory name with trailling slash
"""
if not directory_name.endswith('/'):
return directory_name + '/'
return directory_name
| 5,349,697 |
def bc32encode(data: bytes) -> str:
"""
bc32 encoding
see https://github.com/BlockchainCommons/Research/blob/master/papers/bcr-2020-004-bc32.md
"""
dd = convertbits(data, 8, 5)
polymod = bech32_polymod([0] + dd + [0, 0, 0, 0, 0, 0]) ^ 0x3FFFFFFF
chk = [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]
return "".join([BECH32_ALPHABET[d] for d in dd + chk])
| 5,349,698 |
def read_all_reviews(current_user):
"""Reads all Reviews"""
reviews = Review.query.all()
if reviews:
return jsonify({'Reviews': [
{
'id': review.id,
'title': review.title,
'desc': review.desc,
'reviewer': review.reviewer.username,
'business': review.business.name,
'created_at': review.created_at,
'updated_at': review.updated_at
} for review in reviews
]}), 200
return jsonify({'warning': 'No Review, create one first'}), 200
| 5,349,699 |