| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def gather_audio_video_eavesdropping(x):
"""
@param x : an Analysis instance
@rtype : a list of strings for the concerned category, for example [ 'This application makes phone calls', "This application sends an SMS message 'Premium SMS' to the '12345' phone number" ]
"""
result = []
result.extend(detect_MediaRecorder_Voice_record(x))
result.extend(detect_MediaRecorder_Video_capture(x))
return result
| 5,352,300 |
def vdw_radius_single(element):
"""
Get the Van-der-Waals radius of an atom from the given element. [1]_
Parameters
----------
element : str
The chemical element of the atoms.
Returns
-------
The Van-der-Waals radius of the atom.
If the radius is unknown for the element, `None` is returned.
See also
--------
vdw_radius_protor
References
----------
.. [1] A Bondi,
"Van der Waals volumes and radii."
J Phys Chem, 68, 441-451 (1964).
Examples
--------
>>> print(vdw_radius_single("C"))
1.7
"""
return _SINGLE_RADII.get(element.upper())
| 5,352,301 |
def evaluate_ins_to_proto(ins: typing.EvaluateIns) -> ServerMessage.EvaluateIns:
"""Serialize flower.EvaluateIns to ProtoBuf message."""
parameters_proto = parameters_to_proto(ins.parameters)
config_msg = metrics_to_proto(ins.config)
return ServerMessage.EvaluateIns(parameters=parameters_proto, config=config_msg)
| 5,352,302 |
def avatar_synth_df(dir, batch_size, num_threads):
"""
Get data for training and evaluating the AvatarSynthModel.
:param dir: The data directory.
:param batch_size: The minibatch size.
:param num_threads: The number of threads to read and process data.
:return: A dataflow for parameter to bitmoji data
"""
df = AvatarSynthDataFlow(dir)
df = process_avatar_synth_data(df, batch_size, num_threads)
return df
| 5,352,303 |
def compile(file, cfile=None, dfile=None, doraise=False):
"""Does nothing on IronPython.
IronPython does not currently support compiling to .pyc
or any other format.
"""
return
| 5,352,304 |
def _append_char_coded_text(elem, s, char_code_pat):
"""Append s to C{elem} with text in coded with character style codes converted to span elements.
@param elem: element corresponding to an MDF field. This is modified by the function.
It may already have 'span' subelements corresponding to character styled text earlier in the MDF field.
@type elem: C{ElementTree._ElementInterface}
@param s: field contents possibly including parts coded with MDF character style codes
@type s: C{String}
@param char_code_pat: compiled regular expression describing the character styled text with the style
code. It must have two sets of capturing parentheses. The first set captures the style code and the
second the styled text.
@type char_code_pat: compiled regular expression pattern
"""
mobj = char_code_pat.search(s)
pos = 0
while mobj is not None:
elem_append_string(elem, s[pos:mobj.start()])
attribs = char_code_attribs[mobj.group(1)]
span_elem = SubElement(elem, 'span', attribs)
span_elem.text = mobj.group(2)
pos = mobj.end()
mobj = char_code_pat.search(s, pos)
elem_append_string(elem, s[pos:])
| 5,352,305 |
def assign_material(obj, materialname):
"""This function assigns a material to an objects mesh.
:param obj: The object to assign the material to.
:type obj: bpy.types.Object
:param materialname: The material's name.
:type materialname: str
"""
if materialname not in bpy.data.materials:
if materialname in defs.defaultmaterials:
materials.createPhobosMaterials()
else:
# print("###ERROR: material to be assigned does not exist.")
log("Material to be assigned does not exist.", "ERROR")
return None
# obj.data.materials[0] = bpy.data.materials[materialname]
obj.data.materials.append(bpy.data.materials[materialname])
| 5,352,306 |
def _check_stack_axis(axis, dims, default='unnamed'):
""" check or get new axis name when stacking array or datasets
(just to have that in one place)
"""
if axis is None:
axis = default
if axis in dims:
i = 1
while default+"_{}".format(i) in dims:
i+=1
axis = default+"_{}".format(i)
if type(axis) is int:
raise TypeError("axis must be a str (new axis name)")
if axis in dims:
raise ValueError("please provide an axis name which does not \
already exist, or use `concatenate`")
return axis
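# Minimal usage sketch (not part of the original snippet): when no axis name is
# given and the default placeholder already exists, a numbered name is generated.
print(_check_stack_axis(None, dims=("unnamed", "x")))  # -> 'unnamed_1'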
| 5,352,307 |
def eval_push_time_ratios(problem_size: int = 3000) -> Optional[TimeRatioType]:
"""
Function that calculates the execution time ratios, for the different time complexities.
Here, a process pool is created in order to speed up the process of generating
the lists of time ratios, for each time complexity.
"""
stack: Stack = Stack()
time_ratios: Dict[str, Union[str, List[Number]]] = {
func_name: [] for func_name in TIME_COMPLEXITIES
}
arguments: List[Any] = [
(stack, problem_size, function) for function in TIME_COMPLEXITIES
]
pool: ProcessPoolType = ProcessPool(get_cpu_count(), set_low_priority_to_process)
for response in pool.imap(_push_time_ratio_worker, arguments):
time_ratios.update(response)
time_ratios.update({
'data_struct_name': Stack.__name__.lower(),
'target_name': Stack.push.__name__,
})
return time_ratios
| 5,352,308 |
def validate_pbi_sprint(sprint, snapshot_date):
""" Validate sprint in a pbi, try to create it if possible """
if sprint is None:
raise ValidationError('Sprint cannot be null')
from ..models import Sprint
sprt = Sprint.objects.get(id=sprint.id)
if sprt is not None:
# if the pbi date falls outside the sprint
nbr_day = (sprt.end_date - sprt.start_date).days
if sprt.start_date >= snapshot_date or sprt.end_date <= snapshot_date:
#if the pbi is outside this sprint, check whether a sprint with those dates/team exists
sprint = Sprint.objects.filter(start_date__lte=snapshot_date, end_date__gte=snapshot_date, team__id=sprt.team.id)
if sprint is not None and sprint.count() == 1:
logger.debug(f"Updating PBI : from sprint {sprint} to {sprint[0]}")
return sprint[0]
elif sprint.count() > 1:
raise ValidationError('More than one active sprints at the same time for the same team')
else:
#create new sprint
#import here otherwise we will have an issue
from ..forms import SprintForm
sprint_form = SprintForm(data={'goal' : 'GOAL UNDEFINED', 'team' : sprt.team.id, 'start_date' : snapshot_date , 'end_date' : snapshot_date + timedelta(days=nbr_day)})
if sprint_form.is_valid():
new_sprint = sprint_form.save()
#f"A new sprint has been created for team {sprt.team}, sprint id: {self.sprint.id}, start date: {self.sprint.start_date}, end date: {self.sprint.end_date}",
logger.debug(f"Updating PBI : new sprint created id: {new_sprint.id} {new_sprint}")
send_mail(
'New sprint automatically created',
"A new sprint has been created for team "+ str(sprt.team)+", sprint id: "+str(new_sprint.id)+", start date: "+str(new_sprint.start_date)+", end date: "+str(new_sprint.end_date),
settings.EMAIL_HOST_USER,
settings.EMAIL_AVADOS_TO_EMAIL,
fail_silently=False,
)
return new_sprint
else:
raise ValidationError('Invalid sprint form: ' + str(sprint_form.errors))
else:
return sprt
else:
raise ValidationError('Invalid sprint ' + str(sprint.id))
| 5,352,309 |
def add_sim(Θ, f, Θs, G, D, Θ_s, G_s, D_s, sim_time_s, **state):
"""Add a simulation to the known simulations by performing the simulation.
"""
t0 = time.time()
g_s, d_s = gwed(Θ, f=f, **state)
Θs.append(Θ)
G.append(g_s)
D.append(d_s)
Θ_s.append(Θ)
G_s.append(g_s)
D_s.append(d_s)
t1 = time.time()
sim_time_s.append(t1 - t0)
| 5,352,310 |
def organize_photos(path: str):
"""
Recursively organize photos: files directly under ``path`` are moved via
move_photo(), and subdirectories are processed recursively.
:param path: Root directory to organize.
:return: None
"""
for file in os.listdir(path):
if os.path.isfile(os.path.join(path, file)):
move_photo(os.path.join(path, file))
elif os.path.isdir(os.path.join(path, file)):
organize_photos(os.path.join(path, file))
else:
print("**** " + os.path.join(path, file))
| 5,352,311 |
def _load_data():
"""
Internal function to get the data to plot.
"""
# Load homicides
homicides = gv_data.PoliceHomicides.get()
# Calculate concentrated disadvantage
sub_data = []
for cls in [
"PublicAssistance",
"FemaleHouseholders",
"PercentInPoverty",
"PercentUnder18",
]:
subset = []
for year in YEARS:
df = getattr(gv_data, cls).get(year=year)
df["year"] = year
subset.append(df)
sub_data.append(pd.concat(subset).set_index(["census_tract_id", "year"]))
data = sub_data[0]
for df in sub_data[1:]:
data = data.join(df.drop(labels=["geometry"], axis=1))
# Do min/max normalization on each
for col in [
"percent_public_assistance",
"percent_female_householder",
"percent_in_poverty",
"percent_under_18",
]:
data[col + "_normed"] = (data[col] - data[col].min()) / (
data[col].max() - data[col].min()
)
# Normalize sum to 0 to 1
data["index"] = data.filter(regex="_normed").sum(axis=1) / 5.0
return homicides, data
| 5,352,312 |
def test_auto_add_dataloader_idx(tmpdir, add_dataloader_idx):
"""test that auto_add_dataloader_idx argument works."""
class TestModel(BoringModel):
def val_dataloader(self):
dl = super().val_dataloader()
return [dl, dl]
def validation_step(self, *args, **kwargs):
output = super().validation_step(*args[:-1], **kwargs)
if add_dataloader_idx:
name = "val_loss"
else:
name = f"val_loss_custom_naming_{args[-1]}"
self.log(name, output["x"], add_dataloader_idx=add_dataloader_idx)
return output
model = TestModel()
model.validation_epoch_end = None
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=2)
trainer.fit(model)
logged = trainer.logged_metrics
# Check that the correct keys exist
if add_dataloader_idx:
assert "val_loss/dataloader_idx_0" in logged
assert "val_loss/dataloader_idx_1" in logged
else:
assert "val_loss_custom_naming_0" in logged
assert "val_loss_custom_naming_1" in logged
| 5,352,313 |
def move(x_pos, y_pos):
"""Return the G-CODE describing motion to x_pos, y_pos."""
out = ""
out += "G1X"+str(x_pos)+"Y"+str(y_pos)+"F"+str(FEEDRATE)+";\n"
out += "M400;\n"
return out
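# Minimal usage sketch (not part of the original snippet); FEEDRATE is assumed
# to be a module-level constant in the original code.
FEEDRATE = 1200
print(move(10, 20))  # -> "G1X10Y20F1200;\nM400;\n"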
| 5,352,314 |
def load_scrub_optional_upload(storage_folder: str, filename: str) -> str:
"""Loads a option file that was previously saved in the storage folder.
:param storage_folder: A string representing the path of the storage
folder.
:param filename: A string representing the name of the file that is being
loaded.
:return: The file string that was saved in the folder (empty if there is
no string to load).
"""
try:
return general_functions.load_file_from_disk(
loc_folder=storage_folder, filename=filename)
except FileNotFoundError:
return ""
| 5,352,315 |
def Editor_NewExistingLevels_Works():
"""
Summary: Perform the below operations on Editor
1) Launch & Close editor
2) Create new level
3) Saving and loading levels
4) Level edits persist after saving
5) Export Level
6) Can switch to play mode (ctrl+g) and exit that
7) Run editor python bindings test
8) Create an Entity
9) Delete an Entity
10) Add a component to an Entity
Expected Behavior:
All operations succeed and do not cause a crash
Test Steps:
1) Launch editor and Create a new level
2) Create a new entity
3) Add Mesh component
4) Verify enter/exit game mode
5) Save, Load and Export level
6) Remove Mesh component
7) Delete entity
8) Open an existing level
9) Create a new entity in an existing level
10) Save, Load and Export an existing level and close editor
Note:
- This test file must be called from the O3DE Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
import os
import editor_python_test_tools.hydra_editor_utils as hydra
from editor_python_test_tools.utils import TestHelper as helper
from editor_python_test_tools.utils import Report
import azlmbr.bus as bus
import azlmbr.editor as editor
import azlmbr.legacy.general as general
import azlmbr.math as math
# 1) Launch editor and Create a new level
helper.init_idle()
test_level_name = "temp_level"
general.create_level_no_prompt(test_level_name, 128, 1, 128, False)
helper.wait_for_condition(lambda: general.get_current_level_name() == test_level_name, 2.0)
Report.result(Tests.level_created, general.get_current_level_name() == test_level_name)
# 2) Create a new entity
entity_position = math.Vector3(200.0, 200.0, 38.0)
new_entity = hydra.Entity("Entity1")
new_entity.create_entity(entity_position, [])
test_entity = hydra.find_entity_by_name("Entity1")
Report.result(Tests.entity_found, test_entity.IsValid())
# 3) Add Mesh component
new_entity.add_component("Mesh")
Report.result(Tests.mesh_added, hydra.has_components(new_entity.id, ["Mesh"]))
# 4) Verify enter/exit game mode
helper.enter_game_mode(Tests.enter_game_mode)
helper.exit_game_mode(Tests.exit_game_mode)
# 5) Save, Load and Export level
# Save Level
general.save_level()
# Open Level
general.open_level(test_level_name)
Report.result(Tests.level_opened, general.get_current_level_name() == test_level_name)
# Export Level
general.export_to_engine()
level_pak_file = os.path.join("AutomatedTesting", "Levels", test_level_name, "level.pak")
Report.result(Tests.level_exported, os.path.exists(level_pak_file))
# 6) Remove Mesh component
new_entity.remove_component("Mesh")
Report.result(Tests.mesh_removed, not hydra.has_components(new_entity.id, ["Mesh"]))
# 7) Delete entity
editor.ToolsApplicationRequestBus(bus.Broadcast, "DeleteEntityById", new_entity.id)
test_entity = hydra.find_entity_by_name("Entity1")
Report.result(Tests.entity_deleted, len(test_entity) == 0)
# 8) Open an existing level
general.open_level(test_level_name)
Report.result(Tests.level_opened, general.get_current_level_name() == test_level_name)
# 9) Create a new entity in an existing level
entity_position = math.Vector3(200.0, 200.0, 38.0)
new_entity_2 = hydra.Entity("Entity2")
new_entity_2.create_entity(entity_position, [])
test_entity = hydra.find_entity_by_name("Entity2")
Report.result(Tests.entity_found, test_entity.IsValid())
# 10) Save, Load and Export an existing level
# Save Level
general.save_level()
# Open Level
general.open_level(test_level_name)
Report.result(Tests.level_opened, general.get_current_level_name() == test_level_name)
entity_id = hydra.find_entity_by_name(new_entity_2.name)
Report.result(Tests.level_edits_present, entity_id == new_entity_2.id)
# Export Level
general.export_to_engine()
level_pak_file = os.path.join("AutomatedTesting", "Levels", test_level_name, "level.pak")
Report.result(Tests.level_exported, os.path.exists(level_pak_file))
| 5,352,316 |
def test_kb_wrap_exceptions(
version_id: str, kbpk_len: int, key_len: int, error: str
) -> None:
"""Test wrap exceptions"""
with pytest.raises(tr31.KeyBlockError) as e:
kb = tr31.KeyBlock(b"E" * kbpk_len)
kb.header._version_id = version_id
_ = kb.wrap(b"F" * key_len)
assert e.value.args[0] == error
| 5,352,317 |
def test_backlinks(fixture, chain_id):
"""
NOTE: these links all use `parameters` and not `requestBody` or
`x-apigraph-requestBodyParameters`
"""
doc_uri = fixture_uri(fixture)
apigraph = APIGraph(doc_uri)
assert apigraph.docs.keys() == {doc_uri}
expected_nodes = [
NodeKey(doc_uri, "/2.0/users/{username}", HttpMethod.GET),
NodeKey(doc_uri, "/2.0/repositories/{username}", HttpMethod.GET),
]
expected_edges = [
(
expected_nodes[0],
expected_nodes[1],
(chain_id, "200"),
{
"response_id": "200",
"chain_id": chain_id,
"detail": LinkDetail(
link_type=LinkType.BACKLINK,
name="Get User by Username",
description="",
parameters={"username": "$response.body#/username"},
requestBody=None,
requestBodyParameters={},
),
},
),
]
assert [node for node in apigraph.graph.nodes] == expected_nodes
assert [
edge for edge in apigraph.graph.edges(data=True, keys=True)
] == expected_edges
| 5,352,318 |
def visualize_result(
experiment_name,
X_test, Y_test, Y_hat, parameters,
losses=None, save_dir="results"
):
"""
Visualize the results.
"""
# Create the save directory if it does not exist
now = datetime.now().strftime("%Y%m%d%H%M%S")
save_dir += "_" + experiment_name + os.sep + now
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# Fit against the test data (only the first 2 axes)
# Create the figure
plt.figure()
# To display the estimates and the true values together, set hold="on"
#plt.hold("on")
# Plot x_0 vs y
plt.subplot(211)
plt.plot(X_test[:, 0], Y_test, "+", label="True")
plt.plot(X_test[:, 0], Y_hat, "x", label="Estimate")
plt.xlabel("x_0")
plt.ylabel("y")
plt.legend()
# Plot x_1 vs y
plt.subplot(212)
plt.plot(X_test[:, 1], Y_test, "+")
plt.plot(X_test[:, 1], Y_hat, "x")
plt.xlabel("x_1")
plt.ylabel("y")
# Save the parameters to a file
# NOTE: JSON is a convenient format for describing data such as
# configuration files. It is essentially a structured text file,
# so use a suitable text editor to read it.
# Python's standard library includes a module for handling JSON
# (also named the json module).
# Other data description formats include YAML, XML, etc.
fn_param = "parameters.json"
with open(save_dir + os.sep + fn_param, "w") as fp:
json_str = json.dumps(parameters, indent=4)
fp.write(json_str)
# Save the figure to a file
fn_fit = "fitting.png" # various conditions
plt.savefig(save_dir + os.sep + fn_fit)
# Plot the losses
if losses is not None:
train_losses, test_losses = losses
# NOTE: the loss typically decays exponentially,
# so it is often plotted on a log scale
x_train = range(len(train_losses))
x_test = range(len(test_losses))
plt.figure()
plt.plot(
x_train, np.log(train_losses),
x_test, np.log(test_losses)
)
plt.xlabel("steps")
plt.ylabel("ln(loss)")
plt.legend(["training loss", "test loss"])
fn_loss = "loss.png"
plt.savefig(save_dir + os.sep + fn_loss)
| 5,352,319 |
def compute_error_decrease(fun, VX, EToV) -> Dict[int, float]:
"""
Computes estimate of possible error decrease for each element in mesh.
:param fun: Function float -> float
:param VX: dict from point id to its position on x axis.
:param EToV: dict from element id to a tuple of its boundary points.
"""
L2_loss = dict()
for e, (idx1, idx2) in EToV.items():
x1 = VX[idx1]
x2 = VX[idx2]
y1 = fun(x1) # This line should be updated in 1.7
y2 = fun(x2) # This line should be updated in 1.7
x_half = (x1 + x2) / 2
y_half = fun(x_half)
slope0 = (y2 - y1) / (x2 - x1)
slope1 = (y_half - y1) / (x_half - x1)
slope2 = (y2 - y_half) / (x2 - x_half)
L2_loss1 = compute_L2_error(x_half - x1, slope0, slope1)
L2_loss2 = compute_L2_error(x_half - x1, slope0, slope2)
L2_loss[e] = np.sqrt(L2_loss1 + L2_loss2)
return L2_loss
| 5,352,320 |
def file_name_to_title_name(file_name):
"""
#Arguments
file_name (str): The file name to convert into a documentation title line.
#Examples
file_name_to_title_name('activate_mode')
output = 'Activate Mode: activate_mode.md'
"""
file_name_list = file_name.split('.py')
file_name = file_name_list[0]
title = file_name.replace('_', ' ').title()
filename2 = ': ' + file_name + '.md'
return title + filename2
| 5,352,321 |
def save_binary_mask_triple(
rgb_img: np.ndarray, label_img: np.ndarray, save_fpath: str, save_to_disk: bool = False
) -> np.ndarray:
"""Currently mask img background is light-blue. Instead, could set it to white. np.array([255,255,255])
Args:
rgb_img:
label_img:
save_fpath
save_to_disk
Returns:
Array, representing 3 horizontally concatenated images: from left-to-right, they are
RGB, RGB+Semantic Masks, Semantic Masks
"""
img_h, img_w, _ = rgb_img.shape
rgb_with_mask = highlight_binary_mask(label_img, rgb_img.copy())
blank_img = np.ones((img_h, img_w, 3), dtype=np.uint8) * 255
y, x = np.where(label_img == 0)
blank_img[y, x, :] = LIME_GREEN # LIGHT_BLUE
mask_img = highlight_binary_mask(label_img, blank_img)
return form_hstacked_imgs([rgb_img, rgb_with_mask, mask_img], save_fpath, save_to_disk)
| 5,352,322 |
def disc_train_step(
input_images, avg_input, real_images, input_condns, real_condns, epoch,
):
"""
Discriminator training step. Args:
input_images: tf tensor of training images for template branch.
avg_input: tf tensor of linear average repeated 'batch_size' times.
real_images: tf tensor of training images for the discriminator.
input_condns: tf tensor of input condns for template branch.
real_condns: tf tensor of input condns for discriminator.
epoch: tf tensor of training step.
"""
# Reorient image for more augs. Pick a flip (subset of D_4h group):
real_choice = tf.random.uniform((1,), 0, 4, dtype=tf.int32)
fake_choice = tf.random.uniform((1,), 0, 4, dtype=tf.int32)
with tf.GradientTape() as disc_tape:
# Generator forward pass:
moved_atlases, _, _, _ = generator(
get_inputs([input_images, avg_input], [input_condns]),
training=True,
)
# Discriminator augmentation sequence on both fakes and reals:
moved_atlases = disc_augment(
moved_atlases, fake_choice, intensity_mods=False,
)
real_images = disc_augment(
real_images, real_choice, intensity_mods=False,
)
# Discriminator forward passes:
d_logits_real_local = discriminator(
get_inputs([real_images], [real_condns]),
training=True,
)
d_logits_fake_local = discriminator(
get_inputs([moved_atlases], [input_condns]),
training=True,
)
# Get loss:
disc_loss = discriminator_loss(
d_logits_real_local,
d_logits_fake_local,
)
# Get R1 gradient penalty from Mescheder, et al 2017:
# Gradient penalty inside gradient with tf.function leads to lots of
# if/else blocks for the tf2 graph.
if lambda_gp > 0.0:
# Every "lazy_reg" iterations compute the R1 gradient penalty:
if (epoch % lazy_reg) == 0:
new_real_batch = 1.0 * real_images
new_label = 1.0 * real_condns
with tf.GradientTape(persistent=True) as gp_tape:
gp_tape.watch(new_real_batch)
d_logits_real_local_new = discriminator(
get_inputs([new_real_batch], [new_label]),
training=True,
)
grad = gp_tape.gradient(
d_logits_real_local_new, new_real_batch,
)
grad_sqr = tf.math.square(grad)
grad_sqr_sum = tf.reduce_sum(
grad_sqr,
axis=np.arange(1, len(grad_sqr.shape)),
)
gp = (lambda_gp/2.0) * tf.reduce_mean(grad_sqr_sum)
else:
gp = 0.0
else:
gp = 0.0
# Total loss:
total_disc_loss = disc_loss + gp
discriminator_gradients = disc_tape.gradient(
total_disc_loss,
discriminator.trainable_variables,
)
discriminator_optimizer.apply_gradients(
zip(discriminator_gradients, discriminator.trainable_variables),
)
if (epoch % 10) == 0:
with summary_writer.as_default():
tf.summary.scalar(
'total_losses/total_disc_loss', total_disc_loss, step=epoch,
)
tf.summary.scalar(
'gan_losses/disc_loss', disc_loss, step=epoch,
)
tf.summary.scalar(
'regularizers/gp', gp, step=epoch,
)
| 5,352,323 |
def get_random_fortune(fortune_file):
"""
Get a random fortune from the specified file. Barfs if the corresponding
`.dat` file isn't present.
:Parameters:
fortune_file : str
path to file containing fortune cookies
:rtype: str
:return: the random fortune
"""
fortunes = list(_read_fortunes(fortune_file))
randomRecord = _random_int(0, len(fortunes) - 1)
return fortunes[randomRecord]
| 5,352,324 |
def output_data(
files : List[pathlib.Path],
parser : Callable[[List[str]], List[Dict[str, DataValue]]]
) -> Optional[OutputData]:
"""Parses output datapoints from a list of output files.
Args:
files: A list of data output files to parse
parser: A function that turns a list of data lines into a list of data
points. Each data point is a dict that maps parameter and value
names to their values. For an example parser, see last_value().
Returns:
An OutputData object summarizing the data, or None if an error occurs.
"""
records = []
parameter_names = []
value_names = []
command = None
for file_path in files:
assert file_path.is_file()
data = file_data(file_path)
if not data:
return None
if command is not None and data.command != command:
print("Warning: command mismatch between output files", file=sys.stderr)
command = data.command
values_list = parser(data.data_lines)
if values_list is None:
return None
if not values_list:
print("Warning: no values found for output", file_path, file=sys.stderr)
for param in data.params.keys():
if param not in parameter_names:
parameter_names.append(param)
for values in values_list:
for value_name in values.keys():
if value_name not in value_names:
value_names.append(value_name)
if data.params.keys() & values.keys():
print("Warning: overlap between parameter and value names in",
file_path, file=sys.stderr)
records.append({**data.params, **values})
return OutputData(command, parameter_names, value_names, records)
| 5,352,325 |
def test_side_view(capsys):
"""Supplying two outfile should print out the two outputs side-by-side."""
outfiles = [os.path.join(fixtures_dir, 'one.out'),
os.path.join(fixtures_dir, 'two.in')]
expected_file = os.path.join(fixtures_dir, 'side_view_expected.geomconv')
geomconv.main(outfiles)
out, err = capsys.readouterr()
with open(expected_file, 'r') as f:
expected = f.read()
assert out == expected
| 5,352,326 |
async def delete_item(item_id: int, db: Session = Depends(get_db)):
"""
Delete the Item with the given ID from the database.
"""
db_item = ItemRepo.fetch_by_id(db, item_id)
if db_item is None:
raise HTTPException(status_code=404, detail="Item not found with the given ID")
await ItemRepo.delete(db, item_id)
return "Item deleted successfully!"
| 5,352,327 |
def get_clusters(data,
model = None,
num_clusters = 4,
ignore_features = None,
normalize = True,
transformation = False,
pca = False,
pca_components = 0.99,
ignore_low_variance=False,
combine_rare_levels=False,
rare_level_threshold=0.1,
remove_multicollinearity=False,
multicollinearity_threshold=0.9,
n_jobs = None):
"""
Callable from any external environment without requiring setup initialization.
"""
if model is None:
model = 'kmeans'
if ignore_features is None:
ignore_features_pass = []
else:
ignore_features_pass = ignore_features
global X, data_, seed, n_jobs_param, logging_param, logger
data_ = data.copy()
seed = 99
n_jobs_param = n_jobs
logging_param = False
import logging
logger = logging.getLogger('logs')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler('logs.log')
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
from pycaret import preprocess
X = preprocess.Preprocess_Path_Two(train_data = data,
features_todrop = ignore_features_pass,
display_types = False,
scale_data = normalize,
scaling_method = 'zscore',
Power_transform_data = transformation,
Power_transform_method = 'yj',
apply_pca = pca,
pca_variance_retained_or_number_of_components = pca_components,
apply_zero_nearZero_variance = ignore_low_variance,
club_rare_levels=combine_rare_levels,
rara_level_threshold_percentage=rare_level_threshold,
remove_multicollinearity=remove_multicollinearity,
maximum_correlation_between_features=multicollinearity_threshold,
random_state = seed)
try:
c = create_model(model=model, num_clusters=num_clusters, verbose=False, system=False)
except:
c = create_model(model=model, verbose=False, system=False)
dataset = assign_model(c, verbose=False)
return dataset
| 5,352,328 |
def process_messages(deck, messages, encrypt_or_decrypt):
"""(list of int, list of str, str) -> list of str
Return the messages encrypted or decrypted using the specified deck.
The parameter encrypt_or_decrypt will be ENCRYPT to encrypt the message,
and DECRYPT to decrypt the message
>>>deck = [1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 3, 6, 9, 12, 15, 18, 21, 24,
27, 2, 5, 8, 11, 14, 17, 20, 23, 26]
>>>process_messages(deck, ['Patty', 'Cakes'], ENCRYPT)
['AJQAI', 'BLVLT']
>>>deck = [1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 3, 6, 9, 12, 15, 18, 21, 24,
27, 2, 5, 8, 11, 14, 17, 20, 23, 26]
>>>process_messages(deck, ['AJQAI', 'BLVLT'], DECRYPT)
['PATTY', 'CAKES']
"""
returned_message = []
for message in messages:
new_message = ''
cleaned_message = clean_message(message) # Cleans the message of
# punctuation and makes it all upper case
for letter in cleaned_message:
keystream_value = get_next_keystream_value(deck)
# Generates a keystream value for each letter
if encrypt_or_decrypt == ENCRYPT:
new_message = new_message + encrypt_letter(letter, keystream_value)
else: # Where encrypt_or_decrypt == DECRYPT
new_message = new_message + decrypt_letter(letter, keystream_value)
returned_message.append(new_message)
return returned_message
| 5,352,329 |
def print_output(r_lst):
"""
Print the contents of the command-line output.
:param r_lst:
:return:
"""
for line in r_lst:
print(line)
| 5,352,330 |
def write_bom_seeed(output_file_slug, components):
"""Write the BOM according to the Seeed Studio Fusion PCBA template available at:
https://statics3.seeedstudio.com/assets/file/fusion/bom_template_2016-08-18.csv
```
Part/Designator,Manufacture Part Number/Seeed SKU,Quantity
C1,RHA,1
"D1,D2",CC0603KRX7R9BB102,2
```
The output is a CSV file at the `output_file_slug`.csv location.
"""
parts = {}
for c in components:
if components[c] not in parts:
parts[components[c]] = []
parts[components[c]] += [c]
field_names = ['Part/Designator', 'Manufacture Part Number/Seeed SKU', 'Quantity']
with open("{}.csv".format(output_file_slug), 'w') as csvfile:
bomwriter = csv.DictWriter(csvfile, fieldnames=field_names, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
bomwriter.writeheader()
for p in sorted(parts.keys()):
pieces = sorted(parts[p], key=natural_keys)
designators = ",".join(pieces)
bomwriter.writerow({'Part/Designator': designators,
'Manufacture Part Number/Seeed SKU': p,
'Quantity': len(pieces)})
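# Minimal usage sketch (not part of the original snippet): ``components`` maps
# designators to part numbers, so D1 and D2 share a single row in bom_out.csv.
write_bom_seeed("bom_out", {"C1": "RHA", "D1": "CC0603KRX7R9BB102", "D2": "CC0603KRX7R9BB102"})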
| 5,352,331 |
def alarm(context):
"""
Handle sending the alarm message
"""
job = context.job
context.bot.send_message(job.context, text=LEMBRETE)
| 5,352,332 |
def clip(wavelength, spectra, threshold, substitute=None):
""" Removes or substitutes values above the given threshold.
Args:
wavelength <numpy.ndarray>: Vector of wavelengths.
spectra <numpy.ndarray>: NIRS data matrix.
threshold <float>: threshold value for rejection
substitute <float>: substitute value for rejected values (None removes values from the spectra)
Returns:
wavelength <numpy.ndarray>: Vector of wavelengths.
spectra <numpy.ndarray>: NIR spectra with threshold exceeding values removed.
"""
if substitute is None: # remove threshold violations
mask = np.any(spectra > threshold, axis=1)
spectra = spectra[~mask, :]
wavelength = wavelength[~mask]
else: # substitute threshold violations with a value
spectra[spectra > threshold] = substitute
return wavelength, spectra
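# Minimal usage sketch (not part of the original snippet); assumes numpy is
# imported as np, as the function itself does. The substitute branch replaces
# out-of-range values in place instead of dropping rows.
wl = np.array([400.0, 500.0, 600.0])
spec = np.array([[0.1, 1.5, 0.3], [0.5, 0.2, 2.0]])
wl2, spec2 = clip(wl, spec, threshold=1.0, substitute=np.nan)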
| 5,352,333 |
def PrintBlockAnalysis(e: edid.Edid, desc, mode: Mode, raw_mode, start, prefix=None):
"""Print and interpret a single 18-byte descriptor's information.
Called up to 4 times in a base EDID.
Uses descriptor module to determine descriptor type.
Args:
e: The full EDID being parsed.
desc: The descriptor being parsed.
mode: The level of verbosity for analysis.
raw_mode: The type of raw data print out, if any.
start: The start index of the descriptor.
prefix: Optional string description of which (nth) descriptor this is within
the EDID.
"""
print("%s%s" % (prefix, desc.type))
if mode == Mode.LAYOUT_MODE:
if desc.type == descriptor.TYPE_DISPLAY_RANGE_LIMITS:
print(" Subtype: %s" % desc.subtype)
if raw_mode:
base = 54
PrintRawRange(
e.GetData(), raw_mode, base + (start * 18), base + ((start + 1) * 18)
)
if mode == Mode.LAYOUT_MODE:
return
if desc.type in (
descriptor.TYPE_PRODUCT_SERIAL_NUMBER,
descriptor.TYPE_ALPHANUM_DATA_STRING,
descriptor.TYPE_DISPLAY_PRODUCT_NAME,
):
print(" Data string:\t%s" % desc.string)
elif desc.type == descriptor.TYPE_DISPLAY_RANGE_LIMITS:
print("Subtype:", desc.subtype)
vert_rate = "%2d - %d" % (desc.min_vertical_rate, desc.max_vertical_rate)
hor_rate = "%2d - %d" % (desc.min_horizontal_rate, desc.max_horizontal_rate)
info = [
["Vertical rate (Hz):", vert_rate],
["Horizontal rate (kHz):", hor_rate],
["Pixel clock (MHz):", desc.pixel_clock],
]
PrintList(info, mode, " %-35s %s")
if desc.subtype == descriptor.SUBTYPE_DISPLAY_RANGE_CVT:
ss = []
for ar in tools.ListTrueOnly(desc.supported_aspect_ratios):
ss.append(" %-35s %s" % ("", ar))
asp = "\n".join(ss)
ss = []
for cb in tools.ListTrueOnly(desc.cvt_blanking_support):
ss.append(" %-35s %s" % ("", cb))
cvt_blank = "\n".join(ss)
ss = []
for ds in tools.ListTrueOnly(desc.display_scaling_support):
ss.append(" %-35s %s" % ("", ds))
dis_scal = "\n".join(ss)
cvt_info = [
["CVT Version:", desc.cvt_version],
["Additional Pixel Clock:", "%s MHz" % desc.additional_pixel_clock],
["Maximum active pixels:", desc.max_active_pixels],
["Supported aspect ratios:", asp.strip()],
["Preferred aspect ratio:", desc.preferred_aspect_ratio],
["CVT blanking support:", cvt_blank.strip()],
["Display scaling support:", dis_scal.strip()],
["Preferred vertical refresh (Hz):", desc.preferred_vert_refresh],
]
PrintList(cvt_info, mode, " %-35s %s")
elif desc.subtype == descriptor.SUBTYPE_DISPLAY_RANGE_2ND_GTF:
gtf_info = [
["Start break frequency:", desc.start_break_freq],
["C:", desc.c],
["M:", desc.m],
["K:", desc.k],
["J:", desc.j],
]
PrintList(gtf_info, mode, " %-25s %s")
elif desc.type == descriptor.TYPE_COLOR_POINT_DATA:
cp_1 = desc.first_color_point
cp_2 = desc.second_color_point
PrintCp(cp_1, 1)
PrintCp(cp_2, 2)
elif desc.type == descriptor.TYPE_STANDARD_TIMING:
sts = desc.standard_timings
for st in sts:
PrintSt(st)
elif desc.type == descriptor.TYPE_DISPLAY_COLOR_MANAGEMENT:
dcm_info = [
["Red a3:", desc.red_a3],
["Red a2:", desc.red_a2],
["Green a3:", desc.green_a3],
["Green a2:", desc.green_a2],
["Blue a3:", desc.blue_a3],
["Blue a2:", desc.blue_a2],
]
PrintList(dcm_info, mode, " %-12s %s")
elif desc.type == descriptor.TYPE_CVT_TIMING:
cvts = desc.coordinated_video_timings
for cvt in cvts:
PrintCvt(cvt)
elif desc.type == descriptor.TYPE_ESTABLISHED_TIMINGS_III:
print(tools.ListTrueOnly(desc.established_timings))
elif desc.type == descriptor.TYPE_MANUFACTURER_SPECIFIED:
if raw_mode:
print(desc.GetBlob())
elif desc.type == descriptor.TYPE_DETAILED_TIMING:
PrintDtd(desc)
| 5,352,334 |
def duplicate12(modeladmin, request, queryset):
""" Duplicate 12 action.
Duplicates each item in the queryset to the next week.
Continues duplicating until 12 duplicates are created.
Skips to the next week if a similar object already exists.
"""
for i in queryset:
count = 0
offset = timedelta()
while count < 12:
created = False
while not created:
offset += timedelta(weeks=1)
try:
obj = Session.objects.get(start=i.start + offset)
except Session.DoesNotExist:
obj = Session(
start=i.start + offset,
discipline=i.discipline,
location=i.location,
)
for attr in ['trainer', 'message']:
setattr(obj, attr, i.__getattribute__(attr))
obj.save()
count += 1
created = True
| 5,352,335 |
def findMachines(fqpn):
"""
Recursively yield L{MethodicalMachine}s and their FQPNs in and
under a Python object specified by an FQPN.
The discovery heuristic considers L{MethodicalMachine} instances
that are module-level attributes or class-level attributes
accessible from module scope. Machines inside nested classes will
be discovered, but those returned from functions or methods will not be.
@type fqpn: an FQPN
@param fqpn: Where to start the search.
@return: a generator which yields FQPN, L{MethodicalMachine} pairs.
"""
return findMachinesViaWrapper(wrapFQPN(fqpn))
| 5,352,336 |
def asynchronous_prod_milp_constraint_rule(backend_model, loc_tech, timestep):
"""
BigM limit set on `carrier_prod`, forcing it to either be zero or non-zero,
depending on whether `prod` is zero or one, respectively.
.. container:: scrolling-wrapper
.. math::
\\boldsymbol{carrier_prod}[loc::tech::carrier, timestep] \\leq
\\text{bigM} \\times \\boldsymbol{prod_con_switch}[loc::tech, timestep]
\\forall loc::tech \\in loc::techs_{asynchronous_prod_con},
\\forall timestep \\in timesteps
"""
model_dict = backend_model.__calliope_model_data
loc_tech_carrier = model_dict["data"]["lookup_loc_techs"][loc_tech]
return (
backend_model.carrier_prod[loc_tech_carrier, timestep]
<= backend_model.prod_con_switch[loc_tech, timestep] * backend_model.bigM
)
| 5,352,337 |
def user_cilogon_certificates_directory_path(instance):
"""
Return the full path to the user's cilogon certificates directory, based on the user's UUID.
:param instance: Object with a ``uuid`` attribute.
:return: Directory path under MEDIA_ROOT.
"""
# file will be uploaded to MEDIA_ROOT/cilogon_certificates/user_<uuid>/<filename>
return os.path.join(MEDIA_ROOT, 'cilogon_certificates/user_{0}'.format(instance.uuid))
| 5,352,338 |
def drawCurveArc(self): #---- only for ELLIPSE -------------------------------------------------------------
"""Given a dxf ELLIPSE object return a blender_curve.
"""
center = self.loc
radius = self.radius
start = self.start_angle
end = self.end_angle
if start > end:
start = start - 360.0
startmatrix = Mathutils.RotationMatrix(start, 3, "Z")
startpoint = startmatrix * Mathutils.Vector((radius, 0, 0))
endmatrix = Mathutils.RotationMatrix(end, 3, "Z")
endpoint = endmatrix * Mathutils.Vector((radius, 0, 0))
# Note: handles must be tangent to arc and of correct length...
a = Curve.New('arc') # create new curve data
p1 = (0, -radius, 0)
p2 = (radius, 0, 0)
p3 = (0, radius, 0)
p4 = (-radius, 0, 0)
p1 = BezTriple.New(p1)
p2 = BezTriple.New(p2)
p3 = BezTriple.New(p3)
p4 = BezTriple.New(p4)
curve = a.appendNurb(p1)
curve.append(p2)
curve.append(p3)
curve.append(p4)
for point in curve:
point.handleTypes = [AUTO, AUTO]
point.radius = 1.0
curve.flagU = 1 # Set curve cyclic
a.update()
ob = Object.New('Curve', 'arc') # make curve object
return ob
| 5,352,339 |
def read_pickle(filename, protocol=-1, **kwargs):
"""
read grid saved in PICKLE format into a GridData object
:param filename: full path to the filename
:type filename: str
:rtype: ~uquake.core.data.grid.Grid
"""
import pickle
return pickle.load(open(filename, 'rb'))
| 5,352,340 |
def test_parameter_check(params, error, err_msg):
"""Test parameter validation."""
boss = BOSS(**params)
with pytest.raises(error, match=re.escape(err_msg)):
boss.fit(X, y)
| 5,352,341 |
def linked_ims(im_list, pix_per_um, shape=(2,2),
x_range=None, y_range=None, scale_fig=1, scale_height=1.4,
brightness=1, palette='Turbo256', cmap_range='from zero',
show_fig=True, title_list=[], t_fs=24, ax_fs=16, tk_fs=12, cb_fs=14):
"""
Shows multiple frames with linked panning and zooming.
Uses format_im().
"""
# list of figures
p = []
# creates images
for i, im in enumerate(im_list):
if len(title_list) == len(im_list):
title = title_list[i]
p_new = format_im(im, pix_per_um, x_range=x_range, y_range=y_range,
scale_fig=scale_fig, scale_height=scale_height, title=title,
brightness=brightness, palette=palette, cmap_range=cmap_range,
show_fig=False, t_fs=t_fs, ax_fs=ax_fs, tk_fs=tk_fs, cb_fs=cb_fs)
p += [p_new]
# makes grid plot
p_grid = make_gridplot(p, shape)
# shows figure
if show_fig:
show(p_grid)
return p_grid
| 5,352,342 |
def prepend_zeros_to_lists(ls):
"""
Takes a list of lists and prepends "0"s to the beginning of each sub-list
until they are all the same length. Used for sign-extending binary numbers.
"""
longest = max([len(l) for l in ls])
for i in range(len(ls)):
while len(ls[i]) < longest:
ls[i].insert(0, "0")
| 5,352,343 |
def utility_format_obj_input():
"""bad input object"""
pm1 = magpy.magnet.Cuboid((1, 2, 3), (1, 2, 3))
pm2 = magpy.magnet.Cuboid((1, 2, 3), (1, 2, 3))
format_obj_input([pm1, pm2, 333])
| 5,352,344 |
def read_qmcpack_hamiltonian(filename):
"""Read Hamiltonian from QMCPACK format.
Parameters
----------
filename : string
QMPACK Hamiltonian file.
Returns
-------
hamil : dict
Data read from file.
"""
try:
hc, chol, enuc, nmo, nelec, nmok, qkk2 = (
read_qmcpack_cholesky_kpoint(filename)
)
hamil = {
'hcore': hc,
'chol': chol,
'enuc': enuc,
'nelec': nelec,
'nmo': nmo,
'nmo_pk': nmok,
'qk_k2': qkk2
}
except KeyError:
try:
hc, chol, enuc, nmo, nelec = read_qmcpack_cholesky(filename)
hamil = {
'hcore': hc,
'chol': chol,
'enuc': enuc,
'nmo': nmo,
'nelec': nelec
}
except KeyError:
print("Error reading Hamiltonian file. Hamiltonian not found.")
hamil = None
return hamil
| 5,352,345 |
def run_rnn(file):
# define model params
"""
Run the process to train/test a recurrent neural network using LSTM using a given dataset file.
:param string file: Location of CSV-formatted dataset file
:return: Model with expected (test) targets and associated scores
:rtype: object, dataframe, object
"""
num_epochs = 2
sequence_length = 20
# grab train and test data from CSV
X_train, y_train, X_test, y_test = split_test_training(file, sequence_length)
print(X_train)
# build model
model = build_model()
model.fit(X_train, y_train, epochs=num_epochs, batch_size=64, validation_split=0.2)
# predict
predict = model.predict(X_test)
predict = np.reshape(predict, predict.size)
# evaluate
score = model.evaluate(X_test, y_test, verbose=0)
print("Accuracy: ", score[1]*100, "%")
# save model to h5 file (same folder as data)
model_location_folder = get_latest_dataset_folder()
model.save(model_location_folder + '/RNN_' + current_dt + '.h5')
return model, y_test, predict
| 5,352,346 |
def lab_results(request, format=None):
"""Get lab results data."""
if request.method == 'GET':
limit = request.query_params.get("limit", 1000)
if limit:
limit = int(limit)
order_by = request.query_params.get("order_by", "")
# TODO: Get any filters from dict(request.query_params)
docs = get_collection('tests/leaf/lab_results', order_by=order_by, limit=limit, filters=[])
return Response(docs, content_type="application/json")
if request.method == 'POST':
print('TODO: Create lab results')
raise NotImplementedError
| 5,352,347 |
def test_J4(i):
""" Test a property of J from result 2 of the paper """
d = SD([1 / i] * i)
assert J(d) == pytest.approx((i - 1) * (np.log2(i) - np.log2(i - 1)))
| 5,352,348 |
def load(file, file_format=None, **kwargs):
"""Load data from json, yaml, or pickle files.
This method provides a unified api for loading data from serialized files.
Args:
file (str or file-like object): Filename or a file-like object.
file_format (str, optional): If not specified, the file format will be
inferred from the file extension, otherwise use the specified one.
Currently supported formats include "json", "yaml/yml" and
"pickle/pkl".
Returns:
The content from the file.
"""
if file_format is None and isinstance(file, str):
file_format = file.split('.')[-1]
if file_format not in file_handlers:
raise TypeError('Unsupported format: {}'.format(file_format))
handler = file_handlers[file_format]
if isinstance(file, str):
obj = handler.load_from_path(file, **kwargs)
elif hasattr(file, 'read'):
obj = handler.load_from_fileobj(file, **kwargs)
else:
raise TypeError('"file" must be a filepath str or a file-object')
return obj
| 5,352,349 |
def get_criteo(root):
"""Download the Criteo data if it doesn't exist."""
url = 'https://s3-eu-west-1.amazonaws.com/kaggle-display-advertising-challenge-dataset/dac.tar.gz'
raw_folder = os.path.join(root, 'criteo', 'raw')
processed_folder = os.path.join(root, 'criteo', 'processed')
makedir_exist_ok(raw_folder)
makedir_exist_ok(processed_folder)
# download files and extract
filename = url.rpartition('/')[2]
print('Downloading...')
download_url(url, root=raw_folder, filename=filename, md5=None)
print('Extracting...')
extract_file(os.path.join(raw_folder, filename), processed_folder)
print('Done!')
return Path(processed_folder)
| 5,352,350 |
def StepToGeom_MakeAxis2Placement_Convert(*args):
"""
:param SA:
:type SA: Handle_StepGeom_Axis2Placement3d &
:param CA:
:type CA: Handle_Geom_Axis2Placement &
:rtype: bool
"""
return _StepToGeom.StepToGeom_MakeAxis2Placement_Convert(*args)
| 5,352,351 |
def test_best_site(txt, expected_coords, expected_count):
"""
Test against examples in the brief
"""
asteroids = solution1.read_array(io.StringIO(txt))
predicted_coords, predicted_count = solution1.calculate_best_site(asteroids)
assert predicted_count == expected_count
assert (predicted_coords == np.array(expected_coords)).all()
| 5,352,352 |
def results(year: hug.types.text, firstName: hug.types.text, lastName: hug.types.text):
"""Returns the results for a given candidate for a given year"""
engine = create_engine(
'postgresql://%s:%s@%s/%s' %(user,pwd,ip,user),
client_encoding='utf8',echo=False)
conn = engine.connect()
Base = declarative_base()
query = "SELECT * FROM names WHERE election_year = '%s' AND candidate_first_name = '%s' AND candidate_last_name = '%s'" %(str(year),firstName.upper(),lastName.upper())
df = pd.read_sql(query, conn)
candidateId = df['candidate_id'].tolist()[0]
resultQuery = "SELECT * FROM votes WHERE candidate_id = '%s';" %(str(candidateId))
result = pd.read_sql(resultQuery,conn)
officeId, districtId = result['office_code'].tolist()[0], result['district_code'].tolist()[0]
totalQuery = "Select office_code, district_code, county_code, city_code, ward_number, precinct_number, SUM(precinct_votes) AS total_votes FROM votes WHERE office_code = '%s' AND district_code = '%s' AND election_year = '%s' GROUP BY 1,2,3,4,5,6" %(str(officeId),str(districtId),str(year))
totalTable = pd.read_sql(totalQuery,conn)
output = pd.merge(result,totalTable, on = ['office_code', 'district_code', 'county_code', 'city_code', 'ward_number', 'precinct_number'], how="inner")
output['candidate_percentage'] = 100*output['precinct_votes']/output['total_votes']
conn.close()
engine.dispose()
return output.reset_index().to_json(orient="records")
| 5,352,353 |
async def get_user_from_event(event):
""" Get the user from argument or replied message. """
args = event.pattern_match.group(1).split(':', 1)
extra = None
if event.reply_to_msg_id and not len(args) == 2:
previous_message = await event.get_reply_message()
user_obj = await event.client.get_entity(previous_message.from_id)
extra = event.pattern_match.group(1)
elif len(args[0]) > 0:
user = args[0]
if len(args) == 2:
extra = args[1]
if user.isnumeric():
user = int(user)
if not user:
await event.edit(f"`{JAVES_NNAME}`: ** Pass the user's username, id or reply!**")
return
if event.message.entities is not None:
probable_user_mention_entity = event.message.entities[0]
if isinstance(probable_user_mention_entity,
MessageEntityMentionName):
user_id = probable_user_mention_entity.user_id
user_obj = await event.client.get_entity(user_id)
return user_obj
try:
user_obj = await event.client.get_entity(user)
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return user_obj, extra
| 5,352,354 |
def stat_scores_multiple_classes(
pred: torch.Tensor,
target: torch.Tensor,
num_classes: Optional[int] = None,
argmax_dim: int = 1,
reduction: str = 'none',
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
.. deprecated::
Use :func:`torchmetrics.functional.stat_scores`. Will be removed in v1.4.0.
"""
rank_zero_deprecation(
"This `stat_scores_multiple_classes` was deprecated in v1.2.0 in favor of"
" `from pytorch_lightning.metrics.functional import stat_scores`."
" It will be removed in v1.4.0"
)
if pred.ndim == target.ndim + 1:
pred = to_categorical(pred, argmax_dim=argmax_dim)
num_classes = get_num_classes(pred=pred, target=target, num_classes=num_classes)
if pred.dtype != torch.bool:
pred = pred.clamp_max(max=num_classes)
if target.dtype != torch.bool:
target = target.clamp_max(max=num_classes)
possible_reductions = ('none', 'sum', 'elementwise_mean')
if reduction not in possible_reductions:
raise ValueError("reduction type %s not supported" % reduction)
if reduction == 'none':
pred = pred.view((-1, )).long()
target = target.view((-1, )).long()
tps = torch.zeros((num_classes + 1, ), device=pred.device)
fps = torch.zeros((num_classes + 1, ), device=pred.device)
fns = torch.zeros((num_classes + 1, ), device=pred.device)
sups = torch.zeros((num_classes + 1, ), device=pred.device)
match_true = (pred == target).float()
match_false = 1 - match_true
tps.scatter_add_(0, pred, match_true)
fps.scatter_add_(0, pred, match_false)
fns.scatter_add_(0, target, match_false)
tns = pred.size(0) - (tps + fps + fns)
sups.scatter_add_(0, target, torch.ones_like(match_true))
tps = tps[:num_classes]
fps = fps[:num_classes]
tns = tns[:num_classes]
fns = fns[:num_classes]
sups = sups[:num_classes]
elif reduction == 'sum' or reduction == 'elementwise_mean':
count_match_true = (pred == target).sum().float()
oob_tp, oob_fp, oob_tn, oob_fn, oob_sup = stat_scores(pred, target, num_classes, argmax_dim)
tps = count_match_true - oob_tp
fps = pred.nelement() - count_match_true - oob_fp
fns = pred.nelement() - count_match_true - oob_fn
tns = pred.nelement() * (num_classes + 1) - (tps + fps + fns + oob_tn)
sups = pred.nelement() - oob_sup.float()
if reduction == 'elementwise_mean':
tps /= num_classes
fps /= num_classes
fns /= num_classes
tns /= num_classes
sups /= num_classes
return tps.float(), fps.float(), tns.float(), fns.float(), sups.float()
| 5,352,355 |
def learn_skill(entity: EntityID, skill_name: str):
"""
Add the skill name to the entity's knowledge component.
"""
if not entity_has_component(entity, Knowledge):
add_component(entity, Knowledge([]))
knowledge = get_entitys_component(entity, Knowledge)
if knowledge:
skill_class = action.skill_registry[skill_name]
knowledge.learn_skill(skill_class)
| 5,352,356 |
def build_resnet(
repetitions=(2, 2, 2, 2),
include_top=True,
input_tensor=None,
input_shape=None,
classes=1000,
block_type='usual',
class_detector_top=False):
"""
TODO
"""
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=197,
data_format='channels_last',
require_flatten=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape, name='data')
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
# get parameters for model layers
no_scale_bn_params = get_bn_params(scale=False)
bn_params = get_bn_params()
conv_params = get_conv_params()
init_filters = 64
if block_type == 'basic':
conv_block = basic_conv_block
identity_block = basic_identity_block
else:
conv_block = usual_conv_block
identity_block = usual_identity_block
# resnet bottom
x = BatchNormalization(name='bn_data', **no_scale_bn_params)(img_input)
x = ZeroPadding2D(padding=(3, 3))(x)
x = Conv2D(init_filters, (7, 7), strides=(2, 2), name='conv0', **conv_params)(x)
x = BatchNormalization(name='bn0', **bn_params)(x)
x = Activation('relu', name='relu0')(x)
x = ZeroPadding2D(padding=(1, 1))(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding='valid', name='pooling0')(x)
# resnet body
for stage, rep in enumerate(repetitions):
for block in range(rep):
filters = init_filters * (2**stage)
# first block of first stage without strides because we have maxpooling before
if block == 0 and stage == 0:
x = conv_block(filters, stage, block, strides=(1, 1))(x)
elif block == 0:
x = conv_block(filters, stage, block, strides=(2, 2))(x)
else:
x = identity_block(filters, stage, block)(x)
x = BatchNormalization(name='bn1', **bn_params)(x)
x = Activation('relu', name='relu1')(x)
# resnet top
if include_top:
x = GlobalAveragePooling2D(name='pool1')(x)
x = Dense(classes, name='fc1')(x)
x = Activation('softmax', name='softmax')(x)
if class_detector_top:
x = GlobalMaxPooling2D()(x)
x = Dense(1, name='fc1')(x)
x = Activation('sigmoid')(x)
# Ensure that the model takes into account any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x)
return model
| 5,352,357 |
def find_python_str():
"""find python executable in PATH"""
paths = os.environ["PATH"].split(os.pathsep)
python_name = "python.exe" if os.name == "nt" else "python?"
for path in paths:
yield from (
os.path.normpath(path) for path in glob.glob("%s/%s" % (path, python_name))
)
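# Minimal usage sketch (not part of the original snippet): the generator lazily
# yields the path of every python executable found on PATH.
for exe in find_python_str():
    print(exe)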
| 5,352,358 |
def dispatch_tensorflowjs_to_keras_h5_conversion(config_json_path, h5_path):
"""Converts a Keras Model from tensorflowjs format to H5.
Args:
config_json_path: Path to the JSON file that includes the model's
topology and weights manifest, in tensorflowjs format.
h5_path: Path for the to-be-created Keras HDF5 model file.
Raises:
ValueError, if `config_json_path` is not a path to a valid JSON
file, or if h5_path points to an existing directory.
"""
if os.path.isdir(config_json_path):
raise ValueError(
'For input_type=tensorflowjs & output_format=keras, '
'the input path should be a model.json '
'file, but received a directory.')
if os.path.isdir(h5_path):
raise ValueError(
'For input_type=tensorflowjs & output_format=keras, '
'the output path should be the path to an HDF5 file, '
'but received an existing directory (%s).' % h5_path)
# Verify that config_json_path points to a JSON file.
with open(config_json_path, 'rt') as f:
try:
json.load(f)
except (ValueError, IOError):
raise ValueError(
'For input_type=tensorflowjs & output_format=keras, '
'the input path is expected to contain valid JSON content, '
'but cannot read valid JSON content from %s.' % config_json_path)
with tf.Graph().as_default(), tf.Session():
model = keras_tfjs_loader.load_keras_model(config_json_path)
model.save(h5_path)
print('Saved Keras model to HDF5 file: %s' % h5_path)
| 5,352,359 |
def correct_doi(file_name: str):
"""Attempt extract a DOI from a filename which contains a DOI."""
if file_name.startswith("acs.jced") or file_name.startswith("je"):
doi = f"10.1021/{file_name}"
elif file_name.startswith("j.jct"):
doi = f"10.1016/{file_name}"
elif file_name.startswith("j.fluid"):
doi = f"10.1016/{file_name}"
elif file_name.startswith("j.tca"):
doi = f"10.1016/{file_name}"
elif file_name.startswith("s"):
doi = f"10.1007/{file_name}"
else:
raise NotImplementedError()
doi = doi.replace(".xml", "")
doi_request = requests.get(
f"https://doi.org/{doi}", headers={"Accept": "application/x-bibtex"}
)
doi_request.raise_for_status()
return doi
| 5,352,360 |
def test_one_supplier_one_lot(mock_data_client):
"""Test a single client in a single lot."""
mock_data_client.get_framework.return_value = {
'frameworks': {'lots': [{'slug': 'saas'}]}
}
mock_data_client.find_framework_suppliers.return_value = {
'supplierFrameworks': [
{'supplierId': 123, 'supplierName': 'Bens cool supplier', 'extraneous_field': 'foo', 'declaration': ''}
]
}
mock_data_client.find_draft_services_iter.return_value = iter([{
'lot': 'saas', 'lotSlug': 'saas', 'status': 'submitted', 'extraneous_field': 'foo'
}])
csv_builder = GenerateFrameworkApplicationsCSV(client=mock_data_client, target_framework_slug='test_framework_slug')
f = cStringIO()
csv_builder.populate_output()
csv_builder.write_csv(outfile=f)
with open(os.path.join(FIXTURES_DIR, 'test_one_supplier_one_lot_result.csv')) as expected_file:
assert f.getvalue() == expected_file.read()
| 5,352,361 |
def getTextFromFile(filename):
"""
"""
filepath = os.path.join(CHAPTERDIR, filename)
txt = open(filepath).read()
return txt
| 5,352,362 |
def decode_base64(data):
"""Decode base64, padding being optional.
:param data: Base64 data as an ASCII string (str on Python 3, bytes on Python 2)
:returns: The decoded byte string.
"""
if sys.version_info.major > 2:
data = bytes(data, 'utf-8')
missing_padding = len(data) % 4
if missing_padding != 0:
data += b'='* (4 - missing_padding)
return base64.b64decode(data)
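# Minimal usage sketch (not part of the original snippet): the missing "="
# padding is restored before decoding.
print(decode_base64("aGVsbG8"))  # -> b'hello'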
| 5,352,363 |
def version():
"""Display full version information."""
# Print out the current version of Tower CLI.
click.echo('Tower CLI %s' % __version__)
# Print out the current API version of the current code base.
click.echo('API %s' % CUR_API_VERSION)
# Attempt to connect to the Ansible Tower server.
# If we succeed, print a version; if not, generate a failure.
try:
r = client.get('/config/')
except RequestException as ex:
raise exc.TowerCLIError('Could not connect to Ansible Tower.\n%s' %
six.text_type(ex))
config = r.json()
license = config.get('license_info', {}).get('license_type', 'open')
if license == 'open':
server_type = 'AWX'
else:
server_type = 'Ansible Tower'
click.echo('%s %s' % (server_type, config['version']))
# Print out Ansible version of server
click.echo('Ansible %s' % config['ansible_version'])
| 5,352,364 |
def create_multi_dataset_generic_benchmark(
train_datasets: Sequence[SupportedDataset],
test_datasets: Sequence[SupportedDataset],
*,
other_streams_datasets: Dict[str, Sequence[SupportedDataset]] = None,
complete_test_set_only: bool = False,
train_transform=None, train_target_transform=None,
eval_transform=None, eval_target_transform=None,
other_streams_transforms: Dict[str, Tuple[Any, Any]] = None,
dataset_type: AvalancheDatasetType = None) -> GenericCLScenario:
"""
Creates a benchmark instance given a list of datasets. Each dataset will be
considered as a separate experience.
Contents of the datasets must already be set, including task labels.
Transformations will be applied if defined.
This function allows for the creation of custom streams as well.
While "train" and "test" datasets must always be set, the experience list
for other streams can be defined by using the `other_streams_datasets`
parameter.
If transformations are defined, they will be applied to the datasets
of the related stream.
:param train_datasets: A list of training datasets.
:param test_datasets: A list of test datasets.
:param other_streams_datasets: A dictionary describing the content of custom
streams. Keys must be valid stream names (letters and numbers,
not starting with a number) while the value must be a list of dataset.
If this dictionary contains the definition for "train" or "test"
streams then those definition will override the `train_datasets` and
`test_datasets` parameters.
:param complete_test_set_only: If True, only the complete test set will
be returned by the benchmark. This means that the ``test_dataset_list``
parameter must be list with a single element (the complete test set).
Defaults to False.
:param train_transform: The transformation to apply to the training data,
e.g. a random crop, a normalization or a concatenation of different
transformations (see torchvision.transform documentation for a
comprehensive list of possible transformations). Defaults to None.
:param train_target_transform: The transformation to apply to training
patterns targets. Defaults to None.
:param eval_transform: The transformation to apply to the test data,
e.g. a random crop, a normalization or a concatenation of different
transformations (see torchvision.transform documentation for a
comprehensive list of possible transformations). Defaults to None.
:param eval_target_transform: The transformation to apply to test
patterns targets. Defaults to None.
:param other_streams_transforms: Transformations to apply to custom
streams. If no transformations are defined for a custom stream,
then "train" transformations will be used. This parameter must be a
dictionary mapping stream names to transformations. The transformations
must be a two elements tuple where the first element defines the
X transformation while the second element is the Y transformation.
Those elements can be None. If this dictionary contains the
transformations for "train" or "test" streams then those transformations
will override the `train_transform`, `train_target_transform`,
`eval_transform` and `eval_target_transform` parameters.
:param dataset_type: The type of the dataset. Defaults to None, which
means that the type will be obtained from the input datasets. If input
datasets are not instances of :class:`AvalancheDataset`, the type
UNDEFINED will be used.
:returns: A :class:`GenericCLScenario` instance.
"""
transform_groups = dict(
train=(train_transform, train_target_transform),
eval=(eval_transform, eval_target_transform))
if other_streams_transforms is not None:
for stream_name, stream_transforms in other_streams_transforms.items():
if isinstance(stream_transforms, Sequence):
if len(stream_transforms) == 1:
# Suppose we got only the transformation for X values
stream_transforms = (stream_transforms[0], None)
else:
# Suppose it's the transformation for X values
stream_transforms = (stream_transforms, None)
transform_groups[stream_name] = stream_transforms
input_streams = dict(
train=train_datasets,
test=test_datasets)
if other_streams_datasets is not None:
input_streams = {**input_streams, **other_streams_datasets}
if complete_test_set_only:
if len(input_streams['test']) != 1:
            raise ValueError('Test stream must contain one experience when '
                             'complete_test_set_only is True')
stream_definitions = dict()
for stream_name, dataset_list in input_streams.items():
initial_transform_group = 'train'
if stream_name in transform_groups:
initial_transform_group = stream_name
stream_datasets = []
for dataset_idx in range(len(dataset_list)):
dataset = dataset_list[dataset_idx]
stream_datasets.append(AvalancheDataset(
dataset,
transform_groups=transform_groups,
initial_transform_group=initial_transform_group,
dataset_type=dataset_type))
stream_definitions[stream_name] = (stream_datasets,)
return GenericCLScenario(
stream_definitions=stream_definitions,
complete_test_set_only=complete_test_set_only)
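# Usage sketch (hypothetical dataset variables; assumes each experience is already
# wrapped as a PyTorch-style dataset with task labels set, as the docstring requires):
#     benchmark = create_multi_dataset_generic_benchmark(
#         train_datasets=[exp0_train, exp1_train],
#         test_datasets=[exp0_test, exp1_test],
#         train_transform=my_train_transform,
#         eval_transform=my_eval_transform)
#     for experience in benchmark.train_stream:
#         ...  # train on each experience in order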
| 5,352,365 |
def check_triangle_inequality(method, h1, h2, h3):
""" Classic test for a metric: dist(a,b) < dist(a,b) + dist(a,c)"""
d12 = method(h1, h2)
d23 = method(h2, h3)
d13 = method(h1, h3)
d13_plus_d23 = np.round(d13 + d23, decimals=10)
d12_ = np.round(d12, decimals=10)
assert d12_ <= d13_plus_d23
| 5,352,366 |
def convert_to_xml_string(string):
"""
For input strings with escaped tags and special characters
issue a set of conversion functions to prepare it prior
to adding it to an article object
"""
string = entity_to_unicode(string)
string = decode_brackets(string)
string = eautils.replace_tags(string, "i", "italic")
string = eautils.replace_tags(string, "u", "underline")
string = eautils.replace_tags(string, "b", "bold")
string = eautils.replace_tags(string, "em", "italic")
string = etoolsutils.escape_unmatched_angle_brackets(string, allowed_tags())
return string
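# Usage sketch (illustrative input; the exact output depends on the helper modules used above):
#     convert_to_xml_string("Some <i>emphasised</i> text &amp; more")
#     # -> entities are unescaped, <i>/<em> become <italic>, <b> becomes <bold>,
#     #    and unmatched angle brackets are escaped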
| 5,352,367 |
def read_shear_catalog_type(stage):
"""
Determine the type of shear catalog a stage is using as input.
Returns a string, e.g. metacal, lensfit.
Also sets shear_catalog_type in the stage's configuration
so that it is available later and is saved in output.
"""
with stage.open_input('shear_catalog', wrapper=True) as f:
shear_catalog_type = f.catalog_type
stage.config['shear_catalog_type'] = shear_catalog_type
return shear_catalog_type
| 5,352,368 |
def inverse_fft_iterative(
poly: Sequence, has_imaginary: bool = False, imag_threshold: float = 1e-14
) -> List:
"""Perform inverse iterative discrete fast Fourier transform (DFT) of a polynomial with a degree that is `2^t-1`, t being a positive integer (ie `len(poly)` should be an exact power of 2).
Input is point-value form, output is coefficient form.
"""
# For algo detail, cf. CLRS Ch30.3.
# Time complexity: Theta(N log N), but the const in Theta is smaller than that in
# fft_recursive()
n = len(poly)
if n == 1:
return poly
bit_reversed_poly = _bit_reversal_permutation(poly)
for s in range(1, int(math.log2(n) + 1)):
# s is the level of recursion counting from bottom, lowest being 1, 2nd-highest
# (ie the level just below the root = the orig list) being log2(n).
# Length of the target sublists in level s+1 (eg for s=1, target is len of lv2)
target_len = 2 ** s
# Compute omega_{target_len}
principal_root_of_unity = cmath.exp(-(2 * cmath.pi / target_len) * 1j)
for k in range(0, n, target_len):
omega = 1
for j in range(target_len // 2):
body = bit_reversed_poly[k + j]
twiddle = omega * bit_reversed_poly[k + j + target_len // 2]
# Butterfly operation in-place
bit_reversed_poly[k + j] = 1/2 * (body + twiddle)
bit_reversed_poly[k + j + target_len // 2] = 1/2 * (body - twiddle)
omega *= principal_root_of_unity
if not has_imaginary:
# This will return a cleaner inverse by discarding imag parts whose
# absolute value is less than imag_threshold
        bit_reversed_poly = [
            item.real if abs(item.imag) < imag_threshold else item
            for item in bit_reversed_poly
        ]
return bit_reversed_poly
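# Usage sketch (illustrative; the input length must be an exact power of 2 and the
# values are assumed to come from the matching forward FFT in this module):
#     inverse_fft_iterative([4, 0, 0, 0])   # -> [1.0, 1.0, 1.0, 1.0]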
| 5,352,369 |
def admin_uri():
"""
    Helper function to get the admin url quickly
:returns: admin url, redirect or print friendly
:rtype: string
"""
return '/' + app.global_content['options']['admin-url'].value
| 5,352,370 |
def rlencode(x, check = True, dropna = False):
"""
Run length encoding.
Based on http://stackoverflow.com/a/32681075, which is based on the rle
function from R.
See https://gist.github.com/nvictus/66627b580c13068589957d6ab0919e66
Parameters
----------
    x : 1D array_like
        Input array to encode
    check : bool, optional
        If True, compare adjacent values with ``np.isclose`` (treating NaNs as
        equal); otherwise use exact equality.
    dropna: bool, optional
        Drop all runs of NaNs.
Returns
-------
start positions, run lengths, run values
"""
where = np.flatnonzero
x = np.asarray(x)
n = len(x)
if n == 0:
return (np.array([], dtype=int),
np.array([], dtype=int),
np.array([], dtype=x.dtype))
if check:
starts = np.r_[0, where(~np.isclose(x[1:], x[:-1], equal_nan=True)) + 1]
else:
starts = np.r_[0, where(x[1:] != x[:-1]) + 1]
lengths = np.diff(np.r_[starts, n])
values = x[starts]
if dropna:
mask = ~np.isnan(values)
starts, lengths, values = starts[mask], lengths[mask], values[mask]
return starts, lengths, values
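# Usage sketch (illustrative input):
#     starts, lengths, values = rlencode([1, 1, 2, 2, 2, 3])
#     # starts  -> array([0, 2, 5])
#     # lengths -> array([2, 3, 1])
#     # values  -> array([1, 2, 3])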
| 5,352,371 |
def config_load(config_path):
"""Load a json config from a file."""
return files.json_load(config_path)
| 5,352,372 |
def tz2utc(date, tz):
"""Offset between local time and UTC.
Parameters
----------
date : various
The local time, in any format acceptable to `date2time`.
tz : string
date will be processed via `pytz`.
Returns
-------
offset : datetime.timedelta
The UTC offset.
"""
from pytz import timezone
return timezone(tz).utcoffset(date2time(date).datetime)
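# Usage sketch (illustrative; requires pytz and this module's date2time helper, and
# the exact offset depends on daylight-saving rules for the given date):
#     tz2utc('2021-06-01 12:00', 'Europe/Berlin')   # -> timedelta(seconds=7200)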
| 5,352,373 |
def replace_from_execution_report(replace_id, execution_report):
"""Create OrderCancelReplaceRequest from given execution report
For more info about OrderCancelReplaceRequest look at https://support.xena.exchange/support/solutions/articles/44000222082-ws-trading-api#order_cancel_replace_request
"""
cmd = order_pb2.OrderCancelReplaceRequest()
cmd.MsgType = constants.MsgType_OrderCancelReplaceRequestMsgType
cmd.ClOrdId = replace_id
cmd.OrigClOrdId = execution_report.ClOrdId
cmd.Symbol = execution_report.Symbol
cmd.Side = execution_report.Side
cmd.TransactTime = int(time.time() * 1000000000)
cmd.Account = execution_report.Account
cmd.Price = execution_report.Price
cmd.StopPx = execution_report.StopPx
cmd.CapPrice = execution_report.CapPrice
cmd.OrderQty = execution_report.OrderQty
cmd.PegPriceType = execution_report.PegPriceType
cmd.PegOffsetType = execution_report.PegOffsetType
cmd.PegOffsetValue = execution_report.PegOffsetValue
for element in execution_report.SLTP:
sltp = cmd.SLTP.add()
sltp.OrdType = element.OrdType
sltp.Price = element.Price
sltp.StopPx = element.StopPx
sltp.CapPrice = element.CapPrice
sltp.PegPriceType = element.PegPriceType
sltp.PegOffsetType = element.PegOffsetType
sltp.PegOffsetValue = element.PegOffsetValue
return cmd
| 5,352,374 |
def test_simple_method_ptr(tmp_path, template_path):
"""Write out a very simple top level class with a method.
Args:
tmp_path ([type]): [description]
"""
classes = [
class_info(
"xAOD.Jets",
"xAOD::Jets",
[
method_info(
name="pt",
return_type="float*",
arguments=[],
param_arguments=[],
param_helper=None,
)
],
None,
None,
"jet.hpp",
)
]
write_out_classes(classes, template_path, tmp_path, "package")
all_text = (tmp_path / "xAOD" / "jets.py").read_text()
assert "pt(self) -> float:" in all_text
assert "'return_type': 'float*'" in all_text
| 5,352,375 |
def convert_unit(value, factor, offset):
"""Return converted value depending on the provided factor and offset."""
return num2decimal(value) * num2decimal(factor) + num2decimal(offset)
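# Usage sketch (illustrative: Celsius -> Fahrenheit uses factor 9/5 and offset 32;
# the exact return type depends on this module's num2decimal helper):
#     convert_unit(100, 1.8, 32)   # -> 212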
| 5,352,376 |
def label_panels(axes, labels=None, **kwargs):
"""Label the 1-D array of axes with uppercase letters from the Latin alphabet."""
if labels:
seq = labels
else:
seq = string.ascii_uppercase
for ax, letter in zip(axes, seq):
_label_panel(ax, letter, **kwargs)
| 5,352,377 |
def DefinePanelZoneNodes(MasterNodeID: int, MidPanelZoneWidth, MidPanelZoneHeight):
"""
Function that defines the remaining 10 nodes of a panel zone given the dimensions and the master node (top center one).
ID convention for the panel zone: \n
PZNodeID: 12 nodes: top right 1xy (master), 1xy1 top right, 1xy09,1xy10 1xy 1xy1,1xy01 \n
clockwise 10 nodes xy01-xy10 (with double node at corners) o-----------o-----------o \n
Spring at node 1xy1 | | \n
PZElemeneID: 8 elements: starting at node 1xy, clockwise | | \n
(see function DefinePanelZoneElements for more info) | | \n
| | \n
1xy08 o o 1xy02 \n
| | \n
| | \n
| | \n
| | \n
o-----------o-----------o \n
1xy06,1xy07 1xy05 1xy03,1xy04 \n
Note that the top right node is defined differently because is where the spring is.
@param MasterNodeID (int): ID of the master node (central top node that should be a grid node).
@param MidPanelZoneWidth (float): Mid panel zone width.
@param MidPanelZoneHeight (float): Mid panel zone height.
"""
# Get node coord and define useful variables
m_node = np.array(nodeCoord(MasterNodeID))
AxisCL = m_node[0]
FloorCL = m_node[1] - MidPanelZoneHeight
# Convention: Node of the spring (top right) is xy1
node(IDConvention(MasterNodeID, 1), AxisCL+MidPanelZoneWidth, FloorCL+MidPanelZoneHeight)
    # Convention: Two nodes in the corners (already defined one, xy1) clockwise from xy01 to xy10
node(IDConvention(MasterNodeID, 1, 1), AxisCL+MidPanelZoneWidth, FloorCL+MidPanelZoneHeight)
node(IDConvention(MasterNodeID, 2, 1), AxisCL+MidPanelZoneWidth, FloorCL)
node(IDConvention(MasterNodeID, 3, 1), AxisCL+MidPanelZoneWidth, FloorCL-MidPanelZoneHeight)
node(IDConvention(MasterNodeID, 4, 1), AxisCL+MidPanelZoneWidth, FloorCL-MidPanelZoneHeight)
node(IDConvention(MasterNodeID, 5, 1), AxisCL, FloorCL-MidPanelZoneHeight)
node(IDConvention(MasterNodeID, 6, 1), AxisCL-MidPanelZoneWidth, FloorCL-MidPanelZoneHeight)
node(IDConvention(MasterNodeID, 7, 1), AxisCL-MidPanelZoneWidth, FloorCL-MidPanelZoneHeight)
node(IDConvention(MasterNodeID, 8, 1), AxisCL-MidPanelZoneWidth, FloorCL)
node(IDConvention(MasterNodeID, 9, 1), AxisCL-MidPanelZoneWidth, FloorCL+MidPanelZoneHeight)
node(IDConvention(MasterNodeID, 10), AxisCL-MidPanelZoneWidth, FloorCL+MidPanelZoneHeight)
| 5,352,378 |
def single_chromosome_graph_scatter(
df,
chromosome,
chosen_template,
marker_width,
colors,
font_size,
xaxis_gridlines,
yaxis_gridlines,
font_family,
samples,
):
""" Filter out current chromosome and set x- and y-max"""
curr_chrom_data = df[df["Chromosome"] == chromosome]
y_max = float(curr_chrom_data["Value"].max())
fig = px.scatter(
curr_chrom_data,
x='Window',
y='Value',
category_orders={"Sample": samples},
color='Sample',
color_discrete_sequence=colors,
height=500,
)
fig.update_layout(
font=dict(
size=font_size,
family=font_family,
),
legend=dict(
itemsizing='trace',
orientation="h",
xanchor="left",
x=0,
y=1.02,
yanchor="bottom",
),
showlegend=True,
template=chosen_template,
title_x=0.5,
)
fig.update_xaxes(
title="Position",
rangemode='tozero',
showgrid=xaxis_gridlines,
)
fig.update_yaxes(
title="Value",
range=[0, y_max],
fixedrange=True,
showgrid=yaxis_gridlines,
)
fig.update_traces(
marker=dict(size=float(marker_width)),
)
return fig
| 5,352,379 |
def _findall_rmaps_using_reference(filename, observatory="hst"):
"""Return the basename of all reference mappings which mention `filename`."""
return uses_files([filename], observatory, "rmap")
| 5,352,380 |
def batchGD_bp(X, y, d=3, nH=10, c=3, lr=0.8, T=100, eps=0.0):
"""
BP算法, 每轮迭代使用全部样本
:param X: 训练样本的特征矩阵
:param y: 训练样本的标签向量
:param d: 训练样本的特征维数
:param nH: 隐层的节点数
:param c: 类别数
:param lr: 学习率
:param T: 停机条件1(最大迭代轮数)
:param eps: 停机条件2(相邻两次迭代loss之差的最大允许值), 设为0.0表示不使用这个条件
:return:
"""
    W_H = np.random.normal(size=(nH, d))  # np.random.random(size=(nH, d)) would be uniform on [0.0, 1.0)
b_H = np.array([0.0] * nH).reshape(nH, 1)
W_c = np.random.normal(size=(c, nH))
b_c = np.array([0.0] * c).reshape(c, 1)
Loss = []; loss = 0; false_num = []
for t in range(T):
loss_last = loss
y_ = []
for idx, x in enumerate(X):
            ## Forward pass
x = x.reshape(d, 1)
net_H = np.dot(W_H, x) + b_H
z_H = np.tanh(net_H)
net = np.dot(W_c, z_H) + b_c
z = sigmoid(net)
y_.append(z.argmax())
            y_x = y[idx].reshape(c, 1)  # one-hot target vector for this sample
loss = 0.5 * np.sum(np.square(y_x - z))
            ## Error backpropagation
            # Output layer
delta_c = z * (1 - z) * (z - y_x) # element-wise
grad_Wc = np.dot(delta_c, np.transpose(z_H))
grad_bc = delta_c
W_c -= lr * grad_Wc
b_c -= lr * grad_bc
            # Hidden layer
delta_H = (1 - np.square(z_H)) * (np.dot(np.transpose(W_c), delta_c))
grad_WH = np.dot(delta_H, np.transpose(x))
grad_bH = delta_H
W_H -= lr * grad_WH
b_H -= lr * grad_bH
Loss.append(loss)
        ## Count the misclassified samples after this epoch
        y_ = np.array(y_).reshape((len(X),))
tOf = (np.argmax(y, axis=1) == y_)
false_num.append(np.where(tOf == False)[0].shape[0])
        if false_num[-1] == 0:  # or abs(loss_last - loss) <= eps:  # stopping condition
return t, Loss, false_num
return T, Loss, false_num
| 5,352,381 |
def delete_host_by_id(host_id):
"""
Host deleting
This is intended for use in adcm_delete_host ansible plugin only
"""
host = Host.obj.get(id=host_id)
delete_host(host)
| 5,352,382 |
def run_ase_opt(
atoms: Atoms,
fmax: float = 0.01,
max_steps: int = 100,
optimizer: str = "FIRE",
opt_kwargs: Dict[str, Any] = None,
scratch_dir: str = SETTINGS.SCRATCH_DIR,
gzip: bool = SETTINGS.GZIP_FILES,
copy_files: List[str] = None,
) -> trajectory:
"""
Run an ASE-based optimization in a scratch directory and copy the results
back to the original directory. This can be useful if file I/O is slow in
the working directory, so long as file transfer speeds are reasonable.
This is a wrapper around the optimizers in ASE. Note: This function does
not modify the atoms object in-place.
Parameters
----------
atoms : .Atoms
The Atoms object to run the calculation on.
fmax : float
Tolerance for the force convergence (in eV/A).
max_steps : int
Maximum number of steps to take.
optimizer : str
Name of optimizer class to use.
opt_kwargs : dict
Dictionary of kwargs for the optimizer.
scratch_dir : str
Path where a tmpdir should be made for running the calculation. If None,
the current working directory will be used.
gzip : bool
Whether to gzip the output files.
copy_files : List[str]
Filenames to copy from source to scratch directory.
Returns
-------
traj
The ASE trajectory object.
"""
if atoms.calc is None:
raise ValueError("Atoms object must have attached calculator.")
atoms = copy_atoms(atoms)
cwd = os.getcwd()
scratch_dir = scratch_dir or cwd
symlink = os.path.join(cwd, "tmp_dir")
opt_kwargs = opt_kwargs or {}
opt_kwargs["trajectory"] = "opt.traj"
opt_kwargs["restart"] = "opt.pckl"
# Get optimizer
if optimizer.lower() == "bfgs":
opt_class = BFGS
elif optimizer.lower() == "bfgslinesearch":
opt_class = BFGSLineSearch
elif optimizer.lower() == "lbfgs":
opt_class = LBFGS
elif optimizer.lower() == "lbfgslinesearch":
opt_class = LBFGSLineSearch
elif optimizer.lower() == "gpmin":
opt_class = GPMin
elif optimizer.lower() == "mdmin":
opt_class = MDMin
elif optimizer.lower() == "fire":
opt_class = FIRE
else:
raise ValueError(f"Unknown optimizer: {optimizer}")
tmpdir = mkdtemp(prefix="quacc-tmp", dir=scratch_dir)
if os.name != "nt":
if os.path.islink(symlink):
os.unlink(symlink)
os.symlink(tmpdir, symlink)
# Copy files to scratch and decompress them if needed
if copy_files:
copy_decompress(copy_files, tmpdir)
# Run calculation
os.chdir(tmpdir)
dyn = opt_class(atoms, **opt_kwargs)
dyn.run(fmax=fmax, steps=max_steps)
os.chdir(cwd)
# Check convergence
    if not dyn.converged():
raise ValueError("Optimization did not converge.")
# Read trajectory
traj = read(os.path.join(tmpdir, "opt.traj"), index=":")
# Gzip files in tmpdir
if gzip:
gzip_dir(tmpdir)
# Copy files back to run_dir
copy_r(tmpdir, cwd)
# Remove symlink
if os.path.islink(symlink):
os.remove(symlink)
os.chdir(cwd)
return traj
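# Usage sketch (illustrative; any ASE calculator works, EMT is just a lightweight example):
#     from ase.build import bulk
#     from ase.calculators.emt import EMT
#     atoms = bulk("Cu")
#     atoms.calc = EMT()
#     traj = run_ase_opt(atoms, fmax=0.05, optimizer="BFGS")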
| 5,352,383 |
def __slicer(my_str, sub):
"""
Remove everything in a string before a specified substring is found.
Throw exception if substring is not found in string
https://stackoverflow.com/questions/33141595/how-can-i-remove-everything-in-a-string-until-a-characters-are-seen-in-python
Args:
my_str (string): the string to slice.
sub (string): the substring to stop slicing at.
Returns:
str: substring of my_str, without everything before sub.
Raises:
Exception: Sub string specified is not found in my_str.
"""
index = my_str.find(sub)
if index != -1:
return my_str[index:]
else:
# raise Exception('Sub string not found!')
return my_str
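# Usage sketch (illustrative):
#     __slicer("path/to/file.txt", "file")   # -> "file.txt"
#     __slicer("path/to/file.txt", "zzz")    # -> "path/to/file.txt" (sub not found)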
| 5,352,384 |
def test_delete_no_oid():
"""Test DELETE api without index, it should return response as "None"."""
config = CORTXS3Config()
response = CORTXS3ObjectApi(config).delete(None, "test_layot_id2", "test_pvid_str")
if (response is not None):
assert response[0] is False
assert response[1] is None
| 5,352,385 |
def rank_genes_groups_heatmap(
adata: AnnData,
groups: Union[str, Sequence[str]] = None,
n_genes: int = 10,
groupby: Optional[str] = None,
key: str = None,
show: Optional[bool] = None,
save: Optional[bool] = None,
**kwds,
):
"""\
Plot ranking of genes using heatmap plot (see :func:`~scanpy.pl.heatmap`)
Parameters
----------
adata
Annotated data matrix.
groups
The groups for which to show the gene ranking.
n_genes
Number of genes to show.
groupby
The key of the observation grouping to consider. By default,
the groupby is chosen from the rank genes groups parameter but
other groupby options can be used. It is expected that
groupby is a categorical. If groupby is not a categorical observation,
it would be subdivided into `num_categories` (see :func:`~scanpy.pl.heatmap`).
key
Key used to store the ranking results in `adata.uns`.
**kwds
Are passed to :func:`~scanpy.pl.heatmap`.
{show_save_ax}
"""
_rank_genes_groups_plot(
adata,
plot_type='heatmap',
groups=groups,
n_genes=n_genes,
groupby=groupby,
key=key,
show=show,
save=save,
**kwds,
)
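# Usage sketch (illustrative; assumes `adata` already holds the results of a
# rank-genes-groups computation stored under the given key):
#     rank_genes_groups_heatmap(adata, n_genes=5, key="rank_genes_groups")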
| 5,352,386 |
def load_nii(src_path, as_array=False, as_numpy=False):
"""
Load a brain from a nifti file
    :param str src_path: The path to the nifti file on the filesystem
    :param bool as_array: Whether to convert the brain to a numpy array or
        keep it as a nifti object
    :param bool as_numpy: Whether to convert the image to a numpy array in
        memory (rather than a memmap)
    :return: The loaded brain (format depends on the above flags)
"""
src_path = str(src_path)
nii_img = nib.load(src_path)
if as_array:
image = nii_img.get_data()
if as_numpy:
image = np.array(image)
return image
else:
return nii_img
| 5,352,387 |
def case34_3ph():
"""
Create the IEEE 34 bus from IEEE PES Test Feeders:
"https://site.ieee.org/pes-testfeeders/resources/”.
OUTPUT:
**net** - The pandapower format network.
"""
net = pp.create_empty_network()
# Linedata
# CF-300
line_data = {'c_nf_per_km': 3.8250977, 'r_ohm_per_km': 0.69599766,
'x_ohm_per_km': 0.5177677,
'c0_nf_per_km': 1.86976748, 'r0_ohm_per_km': 1.08727498,
'x0_ohm_per_km': 1.47374703,
'max_i_ka': 0.23, 'type': 'ol'}
pp.create_std_type(net, line_data, name='CF-300', element='line')
# CF-301
line_data = {'c_nf_per_km': 3.66884364, 'r_ohm_per_km': 1.05015841,
'x_ohm_per_km': 0.52265586,
'c0_nf_per_km': 1.82231544, 'r0_ohm_per_km': 1.48350255,
'x0_ohm_per_km': 1.60203942,
'max_i_ka': 0.18, 'type': 'ol'}
pp.create_std_type(net, line_data, name='CF-301', element='line')
# CF-302
line_data = {'c_nf_per_km': 0.8751182, 'r_ohm_per_km': 0.5798427,
'x_ohm_per_km': 0.30768221,
'c0_nf_per_km': 0.8751182, 'r0_ohm_per_km': 0.5798427,
'x0_ohm_per_km': 0.30768221,
'max_i_ka': 0.14, 'type': 'ol'}
pp.create_std_type(net, line_data, name='CF-302', element='line')
# CF-303
line_data = {'c_nf_per_km': 0.8751182, 'r_ohm_per_km': 0.5798427,
'x_ohm_per_km': 0.30768221,
'c0_nf_per_km': 0.8751182, 'r0_ohm_per_km': 0.5798427,
'x0_ohm_per_km': 0.30768221,
'max_i_ka': 0.14, 'type': 'ol'}
pp.create_std_type(net, line_data, name='CF-303', element='line')
# CF-304
line_data = {'c_nf_per_km': 0.90382554, 'r_ohm_per_km': 0.39802955,
'x_ohm_per_km': 0.29436416,
'c0_nf_per_km': 0.90382554, 'r0_ohm_per_km': 0.39802955,
'x0_ohm_per_km': 0.29436416,
'max_i_ka': 0.18, 'type': 'ol'}
pp.create_std_type(net, line_data, name='CF-304', element='line')
# Busses
bus0 = pp.create_bus(net, name='Bus 0', vn_kv=24.9, type='n', zone='34_BUS')
bus_800 = pp.create_bus(net, name='Bus 800', vn_kv=24.9, type='n', zone='34_BUS')
bus_802 = pp.create_bus(net, name='Bus 802', vn_kv=24.9, type='n', zone='34_BUS')
bus_806 = pp.create_bus(net, name='Bus 806', vn_kv=24.9, type='n', zone='34_BUS')
bus_808 = pp.create_bus(net, name='Bus 808', vn_kv=24.9, type='n', zone='34_BUS')
bus_810 = pp.create_bus(net, name='Bus 810', vn_kv=24.9, type='n', zone='34_BUS')
bus_812 = pp.create_bus(net, name='Bus 812', vn_kv=24.9, type='n', zone='34_BUS')
bus_814 = pp.create_bus(net, name='Bus 814', vn_kv=24.9, type='n', zone='34_BUS')
bus_850 = pp.create_bus(net, name='Bus 850', vn_kv=24.9, type='n', zone='34_BUS')
bus_816 = pp.create_bus(net, name='Bus 816', vn_kv=24.9, type='n', zone='34_BUS')
bus_818 = pp.create_bus(net, name='Bus 818', vn_kv=24.9, type='n', zone='34_BUS')
bus_820 = pp.create_bus(net, name='Bus 820', vn_kv=24.9, type='n', zone='34_BUS')
bus_822 = pp.create_bus(net, name='Bus 822', vn_kv=24.9, type='n', zone='34_BUS')
bus_824 = pp.create_bus(net, name='Bus 824', vn_kv=24.9, type='n', zone='34_BUS')
bus_826 = pp.create_bus(net, name='Bus 826', vn_kv=24.9, type='n', zone='34_BUS')
bus_828 = pp.create_bus(net, name='Bus 828', vn_kv=24.9, type='n', zone='34_BUS')
bus_830 = pp.create_bus(net, name='Bus 830', vn_kv=24.9, type='n', zone='34_BUS')
bus_854 = pp.create_bus(net, name='Bus 854', vn_kv=24.9, type='n', zone='34_BUS')
bus_852 = pp.create_bus(net, name='Bus 852', vn_kv=24.9, type='n', zone='34_BUS')
bus_832 = pp.create_bus(net, name='Bus 832', vn_kv=24.9, type='n', zone='34_BUS')
bus_858 = pp.create_bus(net, name='Bus 858', vn_kv=24.9, type='n', zone='34_BUS')
bus_834 = pp.create_bus(net, name='Bus 834', vn_kv=24.9, type='n', zone='34_BUS')
bus_842 = pp.create_bus(net, name='Bus 842', vn_kv=24.9, type='n', zone='34_BUS')
bus_844 = pp.create_bus(net, name='Bus 844', vn_kv=24.9, type='n', zone='34_BUS')
bus_846 = pp.create_bus(net, name='Bus 846', vn_kv=24.9, type='n', zone='34_BUS')
bus_848 = pp.create_bus(net, name='Bus 848', vn_kv=24.9, type='n', zone='34_BUS')
bus_860 = pp.create_bus(net, name='Bus 860', vn_kv=24.9, type='n', zone='34_BUS')
bus_836 = pp.create_bus(net, name='Bus 836', vn_kv=24.9, type='n', zone='34_BUS')
bus_840 = pp.create_bus(net, name='Bus 840', vn_kv=24.9, type='n', zone='34_BUS')
bus_862 = pp.create_bus(net, name='Bus 862', vn_kv=24.9, type='n', zone='34_BUS')
bus_838 = pp.create_bus(net, name='Bus 838', vn_kv=24.9, type='n', zone='34_BUS')
bus_864 = pp.create_bus(net, name='Bus 864', vn_kv=24.9, type='n', zone='34_BUS')
bus_888 = pp.create_bus(net, name='Bus 888', vn_kv=4.16, type='n', zone='34_BUS')
bus_890 = pp.create_bus(net, name='Bus 890', vn_kv=4.16, type='n', zone='34_BUS')
bus_856 = pp.create_bus(net, name='Bus 856', vn_kv=24.9, type='n', zone='34_BUS')
# Lines
pp.create_line(net, bus_800, bus_802, length_km=0.786384, std_type='CF-300',
name='Line 0')
pp.create_line(net, bus_802, bus_806, length_km=0.527304, std_type='CF-300',
name='Line 1')
pp.create_line(net, bus_806, bus_808, length_km=9.823704, std_type='CF-300',
name='Line 2')
pp.create_line(net, bus_808, bus_810, length_km=1.769059, std_type='CF-303',
name='Line 3')
pp.create_line(net, bus_808, bus_812, length_km=11.43000, std_type='CF-300',
name='Line 4')
pp.create_line(net, bus_812, bus_814, length_km=9.061704, std_type='CF-300',
name='Line 5')
# pp.create_line(net, bus_814, bus_850, length_km=0.003048, std_type='CF-301',
# name='Line 6')
pp.create_line(net, bus_816, bus_818, length_km=0.521208, std_type='CF-302',
name='Line 7')
pp.create_line(net, bus_816, bus_824, length_km=3.112008, std_type='CF-301',
name='Line 8')
pp.create_line(net, bus_818, bus_820, length_km=14.67612, std_type='CF-302',
name='Line 9')
pp.create_line(net, bus_820, bus_822, length_km=4.187952, std_type='CF-302',
name='Line 10')
pp.create_line(net, bus_824, bus_826, length_km=0.923544, std_type='CF-303',
name='Line 11')
pp.create_line(net, bus_824, bus_828, length_km=0.256032, std_type='CF-301',
name='Line 12')
pp.create_line(net, bus_828, bus_830, length_km=6.230112, std_type='CF-301',
name='Line 13')
pp.create_line(net, bus_830, bus_854, length_km=0.158496, std_type='CF-301',
name='Line 14')
pp.create_line(net, bus_832, bus_858, length_km=1.493520, std_type='CF-301',
name='Line 15')
pp.create_line(net, bus_834, bus_860, length_km=0.615696, std_type='CF-301',
name='Line 16')
pp.create_line(net, bus_834, bus_842, length_km=0.085344, std_type='CF-301',
name='Line 17')
pp.create_line(net, bus_836, bus_840, length_km=0.262128, std_type='CF-301',
name='Line 18')
pp.create_line(net, bus_836, bus_862, length_km=0.085344, std_type='CF-301',
name='Line 19')
pp.create_line(net, bus_842, bus_844, length_km=0.411480, std_type='CF-301',
name='Line 20')
pp.create_line(net, bus_844, bus_846, length_km=1.109472, std_type='CF-301',
name='Line 21')
pp.create_line(net, bus_846, bus_848, length_km=0.161544, std_type='CF-301',
name='Line 22')
pp.create_line(net, bus_850, bus_816, length_km=0.094488, std_type='CF-301',
name='Line 23')
# pp.create_line(net, bus_852, bus_832, length_km=0.003048, std_type='CF-301',
# name='Line 24')
pp.create_line(net, bus_854, bus_856, length_km=7.110984, std_type='CF-303',
name='Line 25')
pp.create_line(net, bus_854, bus_852, length_km=11.22578, std_type='CF-301',
name='Line 26')
pp.create_line(net, bus_858, bus_864, length_km=0.493776, std_type='CF-302',
name='Line 27')
pp.create_line(net, bus_858, bus_834, length_km=1.776984, std_type='CF-301',
name='Line 28')
pp.create_line(net, bus_860, bus_836, length_km=0.816864, std_type='CF-301',
name='Line 29')
pp.create_line(net, bus_860, bus_838, length_km=1.481328, std_type='CF-304',
name='Line 30')
pp.create_line(net, bus_888, bus_890, length_km=3.218688, std_type='CF-300',
name='Line 31')
# Substation
pp.create_transformer_from_parameters(net, bus0, bus_800, sn_mva=2.5, vn_hv_kv=24.9,
vn_lv_kv=24.9, vkr_percent=1.0, vk_percent=8.062257,
pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0,
vkr0_percent=1.0, vk0_percent=8.062257, vector_group='YNyn',
mag0_percent=100.0, mag0_rx=0, si0_hv_partial=0.9,
tap_side='lv', tap_neutral=0, tap_max=2, tap_min=-2,
tap_step_percent=2.5, tap_pos=-2,
name='Substation')
# Regulator 1
pp.create_transformer_from_parameters(net, bus_814, bus_850, sn_mva=1, vn_hv_kv=24.9,
vn_lv_kv=24.9, vkr_percent=0.320088, vk_percent=0.357539,
pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0,
vkr0_percent=0.452171, vk0_percent=0.665505, vector_group='YNyn',
mag0_percent=100.0, mag0_rx=0, si0_hv_partial=0.9,
tap_side='lv', tap_neutral=0, tap_max=16, tap_min=-16,
tap_step_percent=0.625, tap_pos=0, tap_phase_shifter=False,
name='Regulator 1')
# Regulator 2
pp.create_transformer_from_parameters(net, bus_852, bus_832, sn_mva=1, vn_hv_kv=24.9,
vn_lv_kv=24.9, vkr_percent=0.320088, vk_percent=0.357539,
pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0,
vkr0_percent=0.452171, vk0_percent=0.665505, vector_group='YNyn',
mag0_percent=100.0, mag0_rx=0, si0_hv_partial=0.9,
tap_side='lv', tap_neutral=0, tap_max=16, tap_min=-16,
tap_step_percent=0.625, tap_pos=0, tap_phase_shifter=False,
name='Regulator 2')
    # Transformer
pp.create_transformer_from_parameters(net, bus_832, bus_888, sn_mva=0.5, vn_hv_kv=24.9,
vn_lv_kv=4.16, vkr_percent=1.9, vk_percent=4.5,
pfe_kw=0.0, i0_percent=0.0, shift_degree=0.0,
vkr0_percent=1.9, vk0_percent=4.5, vector_group='YNyn',
mag0_percent=100.0, mag0_rx=0, si0_hv_partial=0.9,
name='Traformer')
# Loads
pp.create_asymmetric_load(net, bus_806, p_a_mw=0, p_b_mw=0.03, p_c_mw=0.025,
q_a_mvar=0, q_b_mvar=0.015, q_c_mvar=0.014, name='Load 806', type='wye')
pp.create_asymmetric_load(net, bus_810, p_a_mw=0, p_b_mw=0.016, p_c_mw=0,
q_a_mvar=0, q_b_mvar=0.008, q_c_mvar=0, name='Load 810', type='wye')
pp.create_asymmetric_load(net, bus_820, p_a_mw=0.034, p_b_mw=0, p_c_mw=0,
q_a_mvar=0.017, q_b_mvar=0, q_c_mvar=0, name='Load 820', type='wye')
pp.create_asymmetric_load(net, bus_822, p_a_mw=0.135, p_b_mw=0, p_c_mw=0,
q_a_mvar=0.07, q_b_mvar=0, q_c_mvar=0, name='Load 822', type='wye')
pp.create_asymmetric_load(net, bus_824, p_a_mw=0, p_b_mw=0.005, p_c_mw=0,
q_a_mvar=0, q_b_mvar=0.002, q_c_mvar=0, name='Load 824', type='delta')
pp.create_asymmetric_load(net, bus_826, p_a_mw=0, p_b_mw=0.04, p_c_mw=0,
q_a_mvar=0, q_b_mvar=0.02, q_c_mvar=0, name='Load 826', type='wye')
pp.create_asymmetric_load(net, bus_828, p_a_mw=0, p_b_mw=0, p_c_mw=0.004,
q_a_mvar=0, q_b_mvar=0, q_c_mvar=0.002, name='Load 828', type='wye')
pp.create_asymmetric_load(net, bus_830, p_a_mw=0.007, p_b_mw=0, p_c_mw=0,
q_a_mvar=0.003, q_b_mvar=0, q_c_mvar=0, name='Load 830', type='wye')
pp.create_asymmetric_load(net, bus_856, p_a_mw=0, p_b_mw=0.004, p_c_mw=0,
q_a_mvar=0, q_b_mvar=0.002, q_c_mvar=0, name='Load 856', type='wye')
pp.create_asymmetric_load(net, bus_858, p_a_mw=0.007, p_b_mw=0.002, p_c_mw=0.006,
q_a_mvar=0.003, q_b_mvar=0.001, q_c_mvar=0.003, name='Load 858', type='delta')
pp.create_asymmetric_load(net, bus_864, p_a_mw=0.002, p_b_mw=0, p_c_mw=0,
q_a_mvar=0.001, q_b_mvar=0, q_c_mvar=0, name='Load 864', type='wye')
pp.create_asymmetric_load(net, bus_834, p_a_mw=0.004, p_b_mw=0.015, p_c_mw=0.013,
q_a_mvar=0.002, q_b_mvar=0.008, q_c_mvar=0.007, name='Load 834', type='delta')
pp.create_asymmetric_load(net, bus_860, p_a_mw=0.016, p_b_mw=0.02, p_c_mw=0.11,
q_a_mvar=0.008, q_b_mvar=0.01, q_c_mvar=0.055, name='Load 860', type='delta')
pp.create_asymmetric_load(net, bus_836, p_a_mw=0.03, p_b_mw=0.01, p_c_mw=0.042,
q_a_mvar=0.015, q_b_mvar=0.006, q_c_mvar=0.022, name='Load 836', type='delta')
pp.create_asymmetric_load(net, bus_840, p_a_mw=0.018, p_b_mw=0.022, p_c_mw=0,
q_a_mvar=0.009, q_b_mvar=0.011, q_c_mvar=0, name='Load 840', type='delta')
pp.create_asymmetric_load(net, bus_838, p_a_mw=0, p_b_mw=0.028, p_c_mw=0,
q_a_mvar=0, q_b_mvar=0.014, q_c_mvar=0, name='Load 838', type='wye')
pp.create_asymmetric_load(net, bus_844, p_a_mw=0.009, p_b_mw=0, p_c_mw=0,
q_a_mvar=0.005, q_b_mvar=0, q_c_mvar=0, name='Load 844', type='wye')
pp.create_asymmetric_load(net, bus_846, p_a_mw=0, p_b_mw=0.025, p_c_mw=0.012,
q_a_mvar=0, q_b_mvar=0.02, q_c_mvar=0.011, name='Load 846', type='wye')
pp.create_asymmetric_load(net, bus_848, p_a_mw=0, p_b_mw=0.023, p_c_mw=0,
q_a_mvar=0, q_b_mvar=0.011, q_c_mvar=0, name='Load 848', type='wye')
pp.create_asymmetric_load(net, bus_860, p_a_mw=0.02, p_b_mw=0.02, p_c_mw=0.02,
q_a_mvar=0.016, q_b_mvar=0.016, q_c_mvar=0.016, name='Load 860 spot', type='wye')
pp.create_asymmetric_load(net, bus_840, p_a_mw=0.009, p_b_mw=0.009, p_c_mw=0.009,
q_a_mvar=0.007, q_b_mvar=0.007, q_c_mvar=0.007, name='Load 840 spot', type='wye')
pp.create_asymmetric_load(net, bus_844, p_a_mw=0.135, p_b_mw=0.135, p_c_mw=0.135,
q_a_mvar=0.105, q_b_mvar=0.105, q_c_mvar=0.105, name='Load 844 spot', type='wye')
pp.create_asymmetric_load(net, bus_848, p_a_mw=0.02, p_b_mw=0.02, p_c_mw=0.02,
q_a_mvar=0.016, q_b_mvar=0.016, q_c_mvar=0.016, name='Load 848 spot', type='delta')
pp.create_asymmetric_load(net, bus_890, p_a_mw=0.15, p_b_mw=0.15, p_c_mw=0.15,
q_a_mvar=0.075, q_b_mvar=0.075, q_c_mvar=0.075, name='Load 890 spot', type='delta')
pp.create_asymmetric_load(net, bus_830, p_a_mw=0.01, p_b_mw=0.01, p_c_mw=0.025,
q_a_mvar=0.005, q_b_mvar=0.005, q_c_mvar=0.01, name='Load 830 spot', type='delta')
# External grid
pp.create_ext_grid(net, bus0, vm_pu=1.0, va_degree=0.0, s_sc_max_mva=10.0,
s_sc_min_mva=10.0, rx_max=1, rx_min=1, r0x0_max=1, x0x_max=1)
# Distributed generators
pp.create_sgen(net, bus_848, p_mw=0.66, q_mvar=0.500, name='DG 1', max_p_mw=0.66, min_p_mw=0, max_q_mvar=0.5, min_q_mvar=0)
pp.create_sgen(net, bus_890, p_mw=0.50, q_mvar=0.375, name='DG 2', max_p_mw=0.50, min_p_mw=0, max_q_mvar=0.375, min_q_mvar=0)
pp.create_sgen(net, bus_822, p_mw=0.2, type='PV', name='PV 1', max_p_mw=0.2, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
pp.create_sgen(net, bus_856, p_mw=0.2, type='PV', name='PV 2', max_p_mw=0.2, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
pp.create_sgen(net, bus_838, p_mw=0.2, type='PV', name='PV 3', max_p_mw=0.2, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
pp.create_sgen(net, bus_822, p_mw=0.3, type='WP', name='WP 1', max_p_mw=0.3, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
pp.create_sgen(net, bus_826, p_mw=0.3, type='WP', name='WP 2', max_p_mw=0.3, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
pp.create_sgen(net, bus_838, p_mw=0.3, type='WP', name='WP 3', max_p_mw=0.3, min_p_mw=0, max_q_mvar=0, min_q_mvar=0)
# Shunt capacity bank
pp.create_shunt(net, bus_840, q_mvar=-0.12, name='SCB 1', step=4, max_step=4)
pp.create_shunt(net, bus_864, q_mvar=-0.12, name='SCB 2', step=4, max_step=4)
# storage
pp.create_storage(net, bus_810, p_mw=0.2, max_e_mwh=1.0, sn_mva=1.0, soc_percent=50, min_e_mwh=0.2, name='Storage')
pp.add_zero_impedance_parameters(net)
return net
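# Usage sketch (assumes pandapower exposes its three-phase power flow as runpp_3ph;
# check the installed version's API before relying on this):
#     net = case34_3ph()
#     pp.runpp_3ph(net)
#     print(net.res_bus_3ph)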
| 5,352,388 |
def create_sales_invoice(order_dict, order, site_id_order,
msgprint_log, changes):
"""
Create a Sales Invoice from the eBay order.
"""
updated_db = False
# Don't create SINV from incomplete order
if (order['OrderStatus'] != 'Completed'
or order['CheckoutStatus']['Status'] != 'Complete'):
return
ebay_order_id = order_dict['ebay_order_id']
ebay_user_id = order_dict['ebay_user_id']
order_fields = db_get_ebay_doc(
"eBay order", ebay_order_id,
fields=["name", "customer", "customer_name",
"address", "ebay_order_id"],
log=changes, none_ok=False)
db_cust_name = order_fields['customer']
# Get from existing linked sales order
sinv_fields = db_get_ebay_doc(
"Sales Invoice", ebay_order_id, fields=["name"],
log=changes, none_ok=True)
if sinv_fields is not None:
# Linked sales invoice exists
debug_msgprint('Sales Invoice already exists: '
+ ebay_user_id + ' : ' + sinv_fields['name'])
changes.append({"ebay_change": "Sales Invoice already exists",
"ebay_user_id": ebay_user_id,
"customer_name": order_fields['customer_name'],
"customer": db_cust_name,
"address": order_fields['address'],
"ebay_order": order_fields['name']})
return
# No linked sales invoice - check for old unlinked sales invoice
test_title = db_cust_name + "-" + ebay_order_id
query = frappe.get_all("Sales Invoice", filters={"title": test_title})
    if len(query) > 1:
raise ErpnextEbaySyncError(
"Multiple Sales Invoices with title {}!".format(test_title))
if len(query) == 1:
# Old sales invoice without link - don't interfere
debug_msgprint('Old Sales Invoice exists: '
+ ebay_user_id + ' : ' + query[0]['name'])
changes.append({"ebay_change": "Old Sales Invoice exists",
"ebay_user_id": ebay_user_id,
"customer_name": order_fields['customer_name'],
"customer": db_cust_name,
"address": order_fields['address'],
"ebay_order": order_fields['name']})
return
# Create a sales invoice
# eBay date format: YYYY-MM-DDTHH:MM:SS.SSSZ
if 'PaidTime' in order:
paid_datetime = order['PaidTime'][:-1] + 'UTC'
else:
paid_datetime = order['CreatedTime'][:-1] + 'UTC'
posting_date = datetime.datetime.strptime(paid_datetime,
'%Y-%m-%dT%H:%M:%S.%f%Z')
order_status = order['OrderStatus']
buyer_checkout_message = order.get('BuyerCheckoutMessage', None)
if buyer_checkout_message:
buyer_checkout_message = html.escape(buyer_checkout_message,
quote=False)
item_list = []
payments = []
taxes = []
amount_paid_dict = order['AmountPaid']
currency = amount_paid_dict['_currencyID']
amount_paid = float(amount_paid_dict['value'])
default_currency = get_default_currency()
if currency != default_currency:
conversion_rate = get_exchange_rate(currency, default_currency,
posting_date.date())
else:
conversion_rate = 1.0
sku_list = []
sum_inc_vat = 0.0
sum_exc_vat = 0.0
sum_vat = 0.0
sum_to_pay = 0.0
shipping_cost = 0.0
ebay_car = 0.0 # eBay Collect and Remit sales taxes
transactions = order['TransactionArray']['Transaction']
cust_email = transactions[0]['Buyer']['Email']
# Find the correct VAT rate
country = frappe.db.get_value('Address', order_dict['address'], 'country')
if country is None:
raise ErpnextEbaySyncError(
'No country for this order for user {}!'.format(ebay_user_id))
(
income_account, ship_income_account, tax_income_account
) = determine_income_accounts(country)
territory = determine_territory(country)
vat_rate = VAT_RATES[income_account]
# TODO
# isGSP = TransactionArray.Transaction.ContainingOrder.IsMultiLegShipping
# Transaction.ContainingOrder.MonetaryDetails.Payments.Payment.PaymentStatus
# Transaction.MonetaryDetails.Payments.Payment.PaymentStatus
for transaction in transactions:
if transaction['Buyer']['Email'] != cust_email:
raise ValueError('Multiple emails for this buyer?')
# Vat Status
#NoVATTax VAT is not applicable
#VATExempt Residence in a country with VAT and user is registered as VAT-exempt
#VATTax Residence in a country with VAT and user is not registered as VAT-exempt
#vat_status = transaction['Buyer']['VATStatus']
shipping_cost_dict = transaction['ActualShippingCost']
handling_cost_dict = transaction['ActualHandlingCost']
final_value_fee_dict = transaction.get(
'FinalValueFee', {'_currencyID': default_currency, 'value': 0.0}
)
if shipping_cost_dict['_currencyID'] == currency:
shipping_cost += float(shipping_cost_dict['value'])
else:
raise ErpnextEbaySyncError('Inconsistent currencies in order!')
if handling_cost_dict['_currencyID'] == currency:
shipping_cost += float(handling_cost_dict['value'])
else:
raise ErpnextEbaySyncError('Inconsistent currencies in order!')
# Final Value Fee currently limited to being in *default* currency or
# sale currency, and does not include any VAT (for UK/EU sellers).
# With the introduction of Managed Payments, should always be
# in home currency, and is only an estimate.
if final_value_fee_dict['_currencyID'] == default_currency:
# final value fee typically in seller currency
base_final_value_fee = float(final_value_fee_dict['value'])
final_value_fee = base_final_value_fee / conversion_rate
elif final_value_fee_dict['_currencyID'] == currency:
final_value_fee = float(final_value_fee_dict['value'])
base_final_value_fee = final_value_fee * conversion_rate
else:
raise ErpnextEbaySyncError('Inconsistent currencies in order!')
if transaction['eBayCollectAndRemitTax'] == 'true':
ebay_car_dict = (
transaction['eBayCollectAndRemitTaxes']['TotalTaxAmount'])
if ebay_car_dict['_currencyID'] == currency:
ebay_car += float(ebay_car_dict['value'])
else:
raise ErpnextEbaySyncError('Inconsistent currencies in order!')
qty = float(transaction['QuantityPurchased'])
try:
sku = transaction['Item']['SKU']
sku_list.append(sku)
# Only allow valid SKU
except KeyError:
debug_msgprint(
'Order {} failed: One of the items did not have an SKU'.format(
ebay_order_id))
sync_error(changes, 'An item did not have an SKU',
ebay_user_id, customer_name=db_cust_name)
raise ErpnextEbaySyncError(
'An item did not have an SKU for user {}'.format(ebay_user_id))
if not frappe.db.exists('Item', sku):
debug_msgprint('Item not found?')
raise ErpnextEbaySyncError(
'Item {} not found for user {}'.format(sku, ebay_user_id))
ebay_price = float(transaction['TransactionPrice']['value'])
if ebay_price <= 0.0:
raise ValueError('TransactionPrice Value <= 0.0')
inc_vat = ebay_price
exc_vat = round(float(inc_vat) / (1.0 + vat_rate), 2)
vat = inc_vat - exc_vat
sum_inc_vat += inc_vat
sum_exc_vat += exc_vat
sum_vat += vat * qty
sum_to_pay += inc_vat * qty
# Get item description in case it is empty, and we need to insert
# filler text to avoid MandatoryError
description = frappe.get_value('Item', sku, 'description')
if not strip_html(cstr(description)).strip():
description = '(no item description)'
item_list.append({
"item_code": sku,
"description": description,
"warehouse": WAREHOUSE,
"qty": qty,
"rate": exc_vat,
"ebay_final_value_fee": final_value_fee,
"base_ebay_final_value_fee": base_final_value_fee,
"valuation_rate": 0.0,
"income_account": income_account,
"expense_account": f"Cost of Goods Sold - {COMPANY_ACRONYM}"
})
# Add a single line item for shipping services
if shipping_cost > 0.0001:
inc_vat = shipping_cost
exc_vat = round(float(inc_vat) / (1.0 + vat_rate), 2)
vat = inc_vat - exc_vat
sum_inc_vat += inc_vat
sum_exc_vat += exc_vat
sum_vat += vat
sum_to_pay += inc_vat
item_list.append({
"item_code": SHIPPING_ITEM,
"description": "Shipping costs (from eBay)",
"warehouse": WAREHOUSE,
"qty": 1.0,
"rate": exc_vat,
"valuation_rate": 0.0,
"income_account": ship_income_account,
"expense_account": f"Cost of Goods Sold - {COMPANY_ACRONYM}"
})
# Add a single line item for eBay Collect and Remit taxes
if ebay_car > 0.0001:
item_list.append({
"item_code": CAR_ITEM,
"description": "eBay Collect and Remit taxes",
"warehouse": WAREHOUSE,
"qty": 1.0,
"rate": ebay_car,
"valuation_rate": 0.0,
"income_account": tax_income_account,
"expense_account": f"Cost of Goods Sold - {COMPANY_ACRONYM}"
})
sum_to_pay += ebay_car
# Taxes are a single line item not each transaction
if VAT_RATES[income_account] > 0.00001:
taxes.append({
"charge_type": "Actual",
"description": "VAT {}%".format(VAT_PERCENT[income_account]),
"account_head": f"VAT - {COMPANY_ACRONYM}",
"rate": VAT_PERCENT[income_account],
"tax_amount": sum_vat})
checkout = order['CheckoutStatus']
submit_on_pay = False
if checkout['PaymentMethod'] in ('PayOnPickup', 'CashOnPickup'):
# Cash on delivery - may not yet be paid (set to zero)
payments.append({"mode_of_payment": "Cash",
"amount": 0.0})
elif checkout['PaymentMethod'] in ('CCAccepted', 'CreditCard'):
# eBay Managed Payments (with/without eBay gift card)
# Add amount as it has been paid
# Always use default currency
ebay_payment_account = f'eBay Managed {default_currency}'
if not frappe.db.exists('Mode of Payment', ebay_payment_account):
raise ErpnextEbaySyncError(
f'Mode of Payment "{ebay_payment_account}" does not exist!')
if amount_paid > 0.0:
payments.append({"mode_of_payment": ebay_payment_account,
"amount": amount_paid})
submit_on_pay = (currency == default_currency)
#submit_on_pay = True
elif checkout['PaymentMethod'] == 'PayPal':
# PayPal - add amount as it has been paid
paypal_acct = f'PayPal {currency}'
if not frappe.db.exists('Mode of Payment', paypal_acct):
raise ErpnextEbaySyncError(
f'Mode of Payment "{paypal_acct}" does not exist!')
if amount_paid > 0.0:
payments.append({"mode_of_payment": paypal_acct,
"amount": amount_paid})
submit_on_pay = True
elif checkout['PaymentMethod'] == 'PersonalCheck':
# Personal cheque - may not yet be paid (set to zero)
payments.append({"mode_of_payment": "Cheque",
"amount": 0.0})
elif checkout['PaymentMethod'] == 'MOCC':
# Postal order/banker's draft - may not yet be paid (set to zero)
payments.append({"mode_of_payment": "eBay",
"amount": 0.0})
title = 'eBay: {} [{}]'.format(
order_fields['customer_name'],
', '.join(sku_list))
sinv_dict = {
"doctype": "Sales Invoice",
"naming_series": "SINV-",
"pos_profile": f"eBay {currency}",
"title": title,
"customer": db_cust_name,
"territory": territory,
"shipping_address_name": order_dict['address'],
"ebay_order_id": ebay_order_id,
"ebay_site_id": site_id_order,
"buyer_message": buyer_checkout_message,
"contact_email": cust_email,
"posting_date": posting_date.date(),
"posting_time": posting_date.time(),
"due_date": posting_date,
"set_posting_time": 1,
"currency": currency,
"conversion_rate": conversion_rate,
"ignore_pricing_rule": 1,
"apply_discount_on": "Net Total",
"status": "Draft",
"update_stock": 1,
"is_pos": 1,
"taxes": taxes,
"payments": payments,
"items": item_list}
sinv = frappe.get_doc(sinv_dict)
sinv.run_method('erpnext_ebay_before_insert')
sinv.insert()
if abs(amount_paid - sum_to_pay) > 0.005:
sinv.add_comment(
'Comment',
text='sync_orders: Unable to match totals - please check this '
+ f'order manually ({amount_paid} != {sum_to_pay})')
elif submit_on_pay:
# This is an order which adds up and has an approved payment method
# Submit immediately
sinv.submit()
updated_db = True
debug_msgprint('Adding Sales Invoice: ' + ebay_user_id + ' : ' + sinv.name)
changes.append({"ebay_change": "Adding Sales Invoice",
"ebay_user_id": ebay_user_id,
"customer_name": order_fields['customer_name'],
"customer": db_cust_name,
"address": order_fields['address'],
"ebay_order": order_fields['name']})
# Commit changes to database
if updated_db:
frappe.db.commit()
return
| 5,352,389 |
def calc_ext_str_features(id2bedrow_dic, chr_len_dic,
out_str, args,
check_seqs_dic=False,
stats_dic=None,
tr_regions=False,
tr_seqs_dic=False):
"""
Calculate structure features (structural element probabilities)
by using extended sequences, and then prune them to match
remaining feature lists.
id2bedrow_dic:
Site ID to BED region (tab separated)
chr_len_dic:
Reference sequence lengths dictionary.
out_str:
Output .str file.
args:
Arguments from rnaprot gt / gp.
check_seqs_dic:
Center sequences to compare to extended and truncated ones.
Should be the same after extension, structure calculation, and
truncation.
stats_dic:
For .html statistics.
tr_regions:
Are we dealing with transcript regions?
tr_seqs_dic:
If tr_regions supplied, transcript sequences need to be supplied
as well.
"""
assert id2bedrow_dic, "id2bedrow_dic empty"
assert chr_len_dic, "chr_len_dic empty"
print("Extend sequences by --plfold-w for structure calculations ... ")
# Get extended parts and infos.
id2newvp_dic = {} # viewpoint region coords on extended sequence (1-based).
id2extrow_dic = {} # Extended sites BED on reference.
extlen_dic = {} # Extended lengths of sites.
refid_dic = {} # Reference IDs.
id2newl_dic = get_ext_site_parts(id2bedrow_dic, chr_len_dic,
str_ext=args.plfold_w,
id2ucr_dic=False,
refid_dic=refid_dic,
extlen_dic=extlen_dic,
id2extrow_dic=id2extrow_dic,
id2newvp_dic=id2newvp_dic)
# Checks.
assert id2extrow_dic, "id2extrow_dic empty"
# tmp files.
random_id = uuid.uuid1()
tmp_fa = str(random_id) + ".tmp.fa"
random_id = uuid.uuid1()
tmp_bed = str(random_id) + ".tmp.bed"
random_id = uuid.uuid1()
tmp_str_out = str(random_id) + ".tmp.str"
# If transcript regions.
if tr_regions:
# Checks.
assert tr_seqs_dic, "tr_seqs_dic empty"
for ref_id in refid_dic:
assert ref_id in tr_seqs_dic, "reference ID %s not in tr_seqs_dic" %(ref_id)
# Get extended sequences.
seqs_dic = extract_transcript_sequences(id2extrow_dic, tr_seqs_dic)
# Write sequences to FASTA.
fasta_output_dic(seqs_dic, tmp_fa)
else:
# Genomic regions.
bed_write_row_dic_into_file(id2extrow_dic, tmp_bed)
# Extract sequences.
bed_extract_sequences_from_2bit(tmp_bed, tmp_fa, args.in_2bit)
# Check extracted sequences, replace N's with random nucleotides.
polish_fasta_seqs(tmp_fa, extlen_dic,
vp_check_seqs_dic=check_seqs_dic,
vp_dic=id2newvp_dic)
calc_str_elem_p(tmp_fa, tmp_str_out,
stats_dic=stats_dic,
plfold_u=args.plfold_u,
plfold_l=args.plfold_l,
plfold_w=args.plfold_w)
print("Post-process structure files ... ")
# Refine elem_p.str.
str_elem_p_dic = read_str_elem_p_into_dic(tmp_str_out,
p_to_str=True)
assert str_elem_p_dic, "str_elem_p_dic empty"
SEPOUT = open(out_str,"w")
for site_id in str_elem_p_dic:
us_ext = id2newl_dic[site_id][0]
ds_ext = id2newl_dic[site_id][4]
# Checks.
len_ll = len(str_elem_p_dic[site_id])
total_ext = us_ext + ds_ext
assert len_ll > total_ext, "len_ll <= total_ext for site ID %s" %(site_id)
if ds_ext:
new_ll = str_elem_p_dic[site_id][us_ext:-ds_ext]
else:
# If ds_ext == 0.
new_ll = str_elem_p_dic[site_id][us_ext:]
assert new_ll, "new_ll empty for site ID %s (us_ext = %i, ds_ext = %i, len_ll = %i)" %(site_id, us_ext, ds_ext, len_ll)
SEPOUT.write(">%s\n" %(site_id))
for l in new_ll:
s = "\t".join(l)
SEPOUT.write("%s\n" %(s))
SEPOUT.close()
# Remove tmp files.
if os.path.exists(tmp_fa):
os.remove(tmp_fa)
if os.path.exists(tmp_bed):
os.remove(tmp_bed)
if os.path.exists(tmp_str_out):
os.remove(tmp_str_out)
| 5,352,390 |
def test_filtrate_is_a_callable():
"""Verify if 'filtrate' is a callable."""
assert callable(filtrate)
| 5,352,391 |
def generate_daily_stats():
"""
Generates dummy daily stats for one year
"""
times = [1577836800 + (i * 86400) for i in range(0,366)]
stats_arr = [[]]
for time in times:
vals = [uniform(0,100) for i in range(843)]
stats_arr[0].append({
'min': np.min(vals),
'max': np.max(vals),
'mean': np.mean(vals),
'cnt': 843,
'std': np.std(vals),
'time': int(time),
'iso_time': datetime.utcfromtimestamp(int(time)).replace(tzinfo=tz('UTC')).strftime('%Y-%m-%dT%H:%M:%S%z')
})
clim_stats = {datetime.utcfromtimestamp(result['time']).month: result for result in stats_arr[0]}
return stats_arr, clim_stats
| 5,352,392 |
def dump_file(filepath,filename, Variable):
"""
@Params:
filename: filename inside the ./dumps folder for dumping
Variable: Variable to dump inside the file
@Returns:
None
"""
with open(filepath +'/'+filename+'.pickle','wb') as handle:
pickle.dump(Variable,handle,protocol=pickle.HIGHEST_PROTOCOL)
| 5,352,393 |
def create_mask(imsize: tuple, bbox: tuple) -> Image:
"""
Args:
imsize: (w, h)
        bbox: (x0, y0, x1, y1)
"""
mask = Image.new("L", imsize)
draw = ImageDraw.Draw(mask)
draw.rectangle(bbox, fill=255)
return mask
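# Usage sketch (illustrative values):
#     mask = create_mask((640, 480), (100, 100, 200, 200))
#     # mask is a single-channel ("L") PIL image: 255 inside the box, 0 elsewhere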
| 5,352,394 |
def send_template_email(
recipient_list: list,
subject: str,
template: str,
template_context: dict,
from_email: str = None,
plain_context: dict = None,
html_context: dict = None,
language: str = 'nl',
) -> None:
"""
Light wrapper for Django's send_mail function. The main addition: this
function handles the template rendering for you. Just specify the template
name (without extensions).
Note: both a HTML and a plain text version of the template should exist!
For example:
app/test.html
app/test.txt
Function call: send_template_email(template='app/test', *args, **kwargs)
html_context and plain_context can be used to override something in the
regular template_context for either the html version of the plain text
version. (Note: you can also add a key that does not exist in
template_context)
:param recipient_list: A list of recipients
:param subject: Email subject
:param template: Template name, without extension
:param template_context: Any context variables for the templates
    :param from_email: FROM header. If absent, settings.EMAIL_FROM will be used
:param html_context: Optional dict with context specifically for HTML
:param plain_context: Optional dict with context specifically for plaintext
:param language: Which language Django should use when creating the mail
"""
if plain_context is None:
plain_context = {}
if html_context is None:
html_context = {}
# Override so that all emails will be parsed with the desired language
old_lang = translation.get_language()
translation.activate(language)
    # Create the context for the plain text email
plain_text_context = template_context.copy()
plain_text_context.update(plain_context)
# And now the same for the HTML version
html_text_context = template_context.copy()
html_text_context.update(html_context)
plain_body = render_to_string(
'{}.txt'.format(template),
plain_text_context
)
html_body = render_to_string(
'{}.html'.format(template),
html_text_context
)
# revert to the original language
translation.activate(old_lang)
from_email = from_email or settings.EMAIL_FROM
send_mail(
subject,
plain_body,
from_email,
recipient_list,
html_message=html_body
)
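# Usage sketch (hypothetical template name and recipient; assumes both
# app/test.html and app/test.txt exist, as described in the docstring):
#     send_template_email(
#         recipient_list=["user@example.com"],
#         subject="Welcome",
#         template="app/test",
#         template_context={"name": "Alice"},
#     )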
| 5,352,395 |
def phone_number_validator(value, region=settings.KOMPASSI_PHONENUMBERS_DEFAULT_REGION):
"""
Validate the phone number using Google's phonenumbers library.
"""
exc = _('Invalid phone number.')
try:
phone_number = phonenumbers.parse(value, region)
except phonenumbers.NumberParseException as e:
raise ValidationError(exc)
else:
if not phonenumbers.is_valid_number(phone_number):
raise ValidationError(exc)
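# Usage sketch (illustrative numbers; the validator returns None on success and
# raises ValidationError otherwise):
#     phone_number_validator("+44 20 7946 0958")   # valid format -> no exception
#     phone_number_validator("not a number")       # -> ValidationError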
| 5,352,396 |
def request_change_template_picture(update, context):
"""
Args:
update (telegram.Update)
context (telegram.ext.CallbackContext)
"""
reg_user = get_reg_user(update.effective_user, update.effective_chat)
markup = ReplyKeyboardMarkup(
[
[CANCEL_MARKUP]
], resize_keyboard=True
)
update.message.reply_text(
"Envíe la nueva foto de plantilla. 📸", reply_markup=markup)
reg_user.status = "requested_template_picture"
| 5,352,397 |
def build_resolved_spec(api, spec_lookup, cache, force_build, spec, version,
ecosystem_hash):
"""Builds a resolved spec at a specific version, then uploads it.
Args:
* api - The ThirdPartyPackagesNGApi's `self.m` module collection.
* spec_lookup ((package_name, platform) -> ResolvedSpec) - A function to
lookup (possibly cached) ResolvedSpec's for things like dependencies and
tools.
* cache (dict) - A map of (package_name, version, platform) -> CIPDSpec.
The `build_resolved_spec` function fully manages the content of this
dictionary.
* force_build (bool) - If True, don't consult CIPD server to see if the
package is already built. This also disables uploading the source and
built results, to avoid attempting to upload a duplicately-tagged package.
* spec (ResolvedSpec) - The resolved spec to build.
* version (str) - The symver (or 'latest') version of the package to build.
    * ecosystem_hash (str) - If specified, the 3pp ecosystem hash used for this build.
Returns the CIPDSpec of the built package; If the package already existed on
the remote server, it will return the CIPDSpec immediately (without attempting
to build anything).
"""
keys = [(spec.cipd_pkg_name, version, spec.platform)]
if keys[0] in cache:
return cache[keys[0]]
def set_cache(spec):
for k in keys:
cache[k] = spec
return spec
with api.step.nest('building %s' % (spec.cipd_pkg_name,)):
env = {
'_3PP_PLATFORM': spec.platform,
'_3PP_TOOL_PLATFORM': spec.tool_platform,
'_3PP_CIPD_PACKAGE_NAME': spec.cipd_pkg_name,
# CIPD uses 'mac' instead of 'darwin' for historical reasons.
'GOOS': spec.platform.split('-')[0].replace('mac', 'darwin'),
# CIPD encodes the GOARCH/GOARM pair of ('arm', '6') as 'armv6l'.
# Since GOARCH=6 is the default, we don't need to specify it.
'GOARCH': spec.platform.split('-')[1].replace('armv6l', 'arm'),
}
if spec.platform.startswith('mac-'):
if spec.platform == 'mac-arm64':
# ARM64 support is added in macOS 11.
env['MACOSX_DEPLOYMENT_TARGET'] = '11.0'
# Mac builds don't use Docker/Dockcross, so we handle cross-build
# setup here. Setting CCC_OVERRIDE_OPTIONS passes the target to
# Clang globally, so we don't need to plumb it through each individual
# install script. We use '^' to indicate this option is inserted at
# the beginning of the compiler options list -- this gives the ability
# to override it later if needed.
if resolved_spec.platform_for_host(api) != spec.platform:
env['CROSS_TRIPLE'] = 'aarch64-apple-darwin'
env['CCC_OVERRIDE_OPTIONS'] = '^--target=arm64-apple-macos'
else:
# Make sure to clear these options if not cross-compiling, since
# we may be switching back and forth between building host tools
# and target-platform binaries.
env.pop('CROSS_TRIPLE', None)
env.pop('CCC_OVERRIDE_OPTIONS', None)
else:
env['MACOSX_DEPLOYMENT_TARGET'] = '10.10'
if spec.create_pb.source.patch_version:
env['_3PP_PATCH_VERSION'] = spec.create_pb.source.patch_version
with api.context(env=env):
# Resolve 'latest' versions. Done inside the env because 'script' based
# sources need the $_3PP* envvars.
is_latest = version == 'latest'
git_hash = ''
if is_latest:
version, git_hash = source.resolve_latest(api, spec)
keys.append((spec.cipd_pkg_name, version, spec.platform))
if keys[-1] in cache:
return set_cache(cache[keys[-1]])
cipd_spec = spec.cipd_spec(version)
# See if the specific version is uploaded
if force_build or not cipd_spec.exists_in_cipd():
# Otherwise, build it
_build_impl(
api, cipd_spec, is_latest, spec_lookup, force_build,
(lambda spec, version: build_resolved_spec(
api, spec_lookup, cache, force_build, spec, version,
ecosystem_hash)),
spec, version, git_hash, ecosystem_hash)
return set_cache(cipd_spec)
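

# ---------------------------------------------------------------------------
# A minimal, self-contained sketch (added here for illustration; not part of
# the original recipe) of the caching pattern build_resolved_spec uses: the
# cache is keyed by (package_name, version, platform), and once 'latest' has
# been resolved to a concrete version, both keys alias the same built result
# so later callers short-circuit before doing any remote work. All names are
# hypothetical stand-ins.
def _memoized_build(cache, pkg, version, platform, resolve_latest, build_fn):
  keys = [(pkg, version, platform)]
  if keys[0] in cache:
    return cache[keys[0]]

  def set_cache(result):
    for k in keys:
      cache[k] = result
    return result

  if version == 'latest':
    version = resolve_latest(pkg)            # e.g. 'latest' -> '1.2.3'
    keys.append((pkg, version, platform))
    if keys[-1] in cache:
      return set_cache(cache[keys[-1]])

  return set_cache(build_fn(pkg, version, platform))


# Example usage of the sketch: after one build of 'latest', both the 'latest'
# key and the resolved concrete version key point at the same object.
#
#   cache = {}
#   built = _memoized_build(cache, 'tools/example', 'latest', 'linux-amd64',
#                           lambda pkg: '1.2.3',
#                           lambda pkg, ver, plat: object())
#   assert cache[('tools/example', 'latest', 'linux-amd64')] is built
#   assert cache[('tools/example', '1.2.3', 'linux-amd64')] is built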
| 5,352,398 |
def test_resource_delta(loop):
"""Test if the controller correctly calculates the delta between
``last_applied_manifest`` and ``last_observed_manifest``
State (0):
The application possesses a last_applied_manifest which specifies a Deployment,
a Service and a ConfigMap. The application has a custom observer_schema which
observes only part of the resources:
- It observes the deployment's image, initialized by the given manifest file.
- It observes the deployment's replicas count, initialized by k8s to 1.
    - The Service's first port's protocol, initialized in the manifest file, is
      *not* observed.
    - The Service's ports list accepts between 0 and 2 ports.
    - The ConfigMap is not observed.
The application does not possess a last_observed_manifest.
    This state tests the addition of observed and non-observed resources to
last_applied_manifest
State (1):
The application possesses a last_observed_manifest which matches the
last_applied_manifest.
State (2):
Update a field in the last_applied_manifest, which is observed and present in
last_observed_manifest
State (3):
Update a field in the last_applied_manifest, which is observed and not present
in last_observed_manifest
State (4):
Update a field in the last_applied_manifest, which is not observed and present
in last_observed_manifest
State (5):
Update a field in the last_applied_manifest, which is not observed and not
present in last_observed_manifest
State (6):
Update a field in the last_observed_manifest, which is observed and present in
last_applied_manifest
State (7):
Update a field in the last_observed_manifest, which is observed and not
present in last_applied_manifest
State (8):
Update a field in the last_observed_manifest, which is not observed and
present in last_applied_manifest
State (9):
Update a field in the last_observed_manifest, which is not observed and not
present in last_applied_manifest
State (10):
Add additional elements to a list in last_observed_manifest
State (11):
Remove elements from a list in last_observed_manifest
State (12):
Remove ConfigMap
"""
    # State (0): Observed and non-observed resources are added to last_applied_manifest
app = ApplicationFactory(
spec__manifest=deepcopy(nginx_manifest),
spec__observer_schema=deepcopy(custom_observer_schema),
)
generate_default_observer_schema(app)
initial_mangled_observer_schema = deepcopy(app.status.mangled_observer_schema)
update_last_applied_manifest_from_spec(app)
deployment_object = serialize_k8s_object(deployment_response, "V1Deployment")
service_object = serialize_k8s_object(service_response, "V1Service")
configmap_object = serialize_k8s_object(configmap_response, "V1ConfigMap")
    # The Deployment, Service and ConfigMap have to be created.
new, deleted, modified = ResourceDelta.calculate(app)
assert len(new) == 3
assert app.status.last_applied_manifest[0] in new # Deployment
assert app.status.last_applied_manifest[1] in new # Service
assert app.status.last_applied_manifest[2] in new # ConfigMap
assert len(deleted) == 0
assert len(modified) == 0
# State (1): The application possesses a last_observed_manifest which matches the
# last_applied_manifest.
update_last_applied_manifest_from_resp(app, None, None, deployment_object)
update_last_applied_manifest_from_resp(app, None, None, service_object)
update_last_applied_manifest_from_resp(app, None, None, configmap_object)
initial_last_applied_manifest = deepcopy(app.status.last_applied_manifest)
app.status.last_observed_manifest = deepcopy(initial_last_observed_manifest)
# No changes should be detected
new, deleted, modified = ResourceDelta.calculate(app)
assert len(new) == 0
assert len(deleted) == 0
assert len(modified) == 0
# State (2): Update a field in the last_applied_manifest, which is observed and
# present in last_observed_manifest
app.status.last_applied_manifest[1]["spec"]["type"] = "LoadBalancer"
# The modification of an observed field should be detected (here in the Service
# resource)
new, deleted, modified = ResourceDelta.calculate(app)
assert len(new) == 0
assert len(deleted) == 0
assert len(modified) == 1
assert app.status.last_applied_manifest[1] in modified # Service
# State (3): Update a field in the last_applied_manifest, which is observed and not
# present in last_observed_manifest
app.status.last_observed_manifest[1]["spec"].pop("type")
# The modification of an observed field should be detected (here in the Service
# resource)
new, deleted, modified = ResourceDelta.calculate(app)
assert len(new) == 0
assert len(deleted) == 0
assert len(modified) == 1
assert app.status.last_applied_manifest[1] in modified # Service
# State (4): Update a field in the last_applied_manifest, which is not observed and
# present in last_observed_manifest
app.status.last_applied_manifest = deepcopy(initial_last_applied_manifest)
app.status.last_observed_manifest = deepcopy(initial_last_observed_manifest)
app.status.mangled_observer_schema[0]["spec"].pop("replicas")
app.status.last_applied_manifest[0]["spec"]["replicas"] = 2
    # The modification of a non-observed field should not trigger an update of the
# Kubernetes resource.
new, deleted, modified = ResourceDelta.calculate(app)
assert len(new) == 0
assert len(deleted) == 0
assert len(modified) == 0
# State (5): Update a field in the last_applied_manifest, which is not observed and
# not present in last_observed_manifest
app.status.last_observed_manifest[0]["spec"].pop("replicas")
    # The modification of a non-observed field should not trigger an update of the
# Kubernetes resource.
new, deleted, modified = ResourceDelta.calculate(app)
assert len(new) == 0
assert len(deleted) == 0
assert len(modified) == 0
# State (6): Update a field in the last_observed_manifest, which is observed and
# present in last_applied_manifest
app.status.mangled_observer_schema = deepcopy(initial_mangled_observer_schema)
app.status.last_applied_manifest = deepcopy(initial_last_applied_manifest)
app.status.last_observed_manifest = deepcopy(initial_last_observed_manifest)
app.status.last_observed_manifest[1]["spec"]["type"] = "LoadBalancer"
# The modification of an observed field should be detected (here in the Service
# resource)
new, deleted, modified = ResourceDelta.calculate(app)
assert len(new) == 0
assert len(deleted) == 0
assert len(modified) == 1
assert app.status.last_applied_manifest[1] in modified # Service
# State (7): Update a field in the last_observed_manifest, which is observed and not
# present in last_applied_manifest
app.status.last_applied_manifest[1]["spec"].pop("type")
# The modification of an observed field should be detected (here in the Service
# resource)
new, deleted, modified = ResourceDelta.calculate(app)
assert len(new) == 0
assert len(deleted) == 0
assert len(modified) == 1
assert app.status.last_applied_manifest[1] in modified # Service
# State (8): Update a field in the last_observed_manifest, which is not observed and
# present in last_applied_manifest
app.status.last_applied_manifest = deepcopy(initial_last_applied_manifest)
app.status.last_observed_manifest = deepcopy(initial_last_observed_manifest)
app.status.last_observed_manifest[1]["spec"]["ports"][0]["protocol"] = "UDP"
    # The modification of a non-observed field should not trigger an update of the
# Kubernetes resource.
new, deleted, modified = ResourceDelta.calculate(app)
assert len(new) == 0
assert len(deleted) == 0
assert len(modified) == 0
# State (9): Update a field in the last_observed_manifest, which is not observed and
# not present in last_applied_manifest
app.status.last_applied_manifest[1]["spec"]["ports"][0].pop("protocol")
    # The modification of a non-observed field should not trigger an update of the
# Kubernetes resource.
new, deleted, modified = ResourceDelta.calculate(app)
assert len(new) == 0
assert len(deleted) == 0
assert len(modified) == 0
# State (10): Add additional elements to a list in last_observed_manifest
app.status.last_applied_manifest = deepcopy(initial_last_applied_manifest)
app.status.last_observed_manifest = deepcopy(initial_last_observed_manifest)
app.status.last_observed_manifest[1]["spec"]["ports"].insert(
-1, {"nodePort": 32567, "port": 81, "protocol": "TCP", "targetPort": 81}
)
app.status.last_observed_manifest[1]["spec"]["ports"][-1][
"observer_schema_list_current_length"
] += 1
# Number of elements is within the authorized list length. No update should be
# triggered
new, deleted, modified = ResourceDelta.calculate(app)
assert len(new) == 0
assert len(deleted) == 0
assert len(modified) == 0
app.status.last_observed_manifest[1]["spec"]["ports"].insert(
-1, {"nodePort": 32568, "port": 82, "protocol": "TCP", "targetPort": 82}
)
app.status.last_observed_manifest[1]["spec"]["ports"][-1][
"observer_schema_list_current_length"
] += 1
    # Number of elements is above the authorized list length. The Service should
    # be rolled back.
new, deleted, modified = ResourceDelta.calculate(app)
assert len(new) == 0
assert len(deleted) == 0
assert len(modified) == 1
assert app.status.last_applied_manifest[1] in modified # Service
# State (11): Remove elements from a list in last_observed_manifest
app.status.mangled_observer_schema[1]["spec"]["ports"][-1][
"observer_schema_list_min_length"
] = 1
app.status.last_observed_manifest[1]["spec"][
"ports"
] = app.status.last_observed_manifest[1]["spec"]["ports"][-1:]
app.status.last_observed_manifest[1]["spec"]["ports"][-1][
"observer_schema_list_current_length"
] = 0
    # Number of elements is below the authorized list length. The Service should
    # be rolled back.
new, deleted, modified = ResourceDelta.calculate(app)
assert len(new) == 0
assert len(deleted) == 0
assert len(modified) == 1
assert app.status.last_applied_manifest[1] in modified # Service
# State (12): Remove ConfigMap
app.status.last_applied_manifest = deepcopy(initial_last_applied_manifest)
app.status.last_observed_manifest = deepcopy(initial_last_observed_manifest)
app.status.mangled_observer_schema.pop(2)
app.spec.manifest.pop(2)
app.status.last_applied_manifest.pop(2)
# ConfigMap should be deleted
new, deleted, modified = ResourceDelta.calculate(app)
assert len(new) == 0
assert len(deleted) == 1
assert len(modified) == 0
assert app.status.last_observed_manifest[2] in deleted
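

# ---------------------------------------------------------------------------
# For reference, a minimal sketch (added for illustration; not copied from the
# test fixtures) of how the Service entry of a custom observer schema with
# list-length controls could look. Observed fields are set to None, and the
# last element of an observed list is a control dictionary. The test above
# relies on `observer_schema_list_min_length` (in the schema) and
# `observer_schema_list_current_length` (in the observed manifest); the
# max-length key below is an assumption matching the "between 0 and 2 ports"
# behaviour exercised in states (10) and (11).
example_service_observer_schema = {
    "apiVersion": None,
    "kind": None,
    "metadata": {"name": None},
    "spec": {
        "type": None,  # observed: a change here shows up as "modified"
        "ports": [
            {"port": None},  # schema applied to the observed list items
            {
                "observer_schema_list_min_length": 0,
                "observer_schema_list_max_length": 2,
            },
        ],
    },
}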
| 5,352,399 |