def register_error(self, name, error):
"""
During development we track errors in the cache.
This could be moved inside a class later.
"""
if 'errors' not in self.cache:
self.cache['errors'] = {}
self.cache['errors'][name] = error
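# Usage sketch: register_error is written as a method, so any object exposing a plain
# `cache` dict can stand in for `self`; SimpleNamespace here is purely illustrative.
from types import SimpleNamespace
holder = SimpleNamespace(cache={})
register_error(holder, "db", ValueError("connection lost"))
print(holder.cache)  # {'errors': {'db': ValueError('connection lost')}}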
| 5,348,100 |
def test_frozen():
"""Test `frozen`."""
f = _frozen.frozen
# --------------
# check has all constants
assert f.__all_constants__ == data.__all_constants__
# --------------
# check equality
C = data.read_constants()
name: str
for name in data.__all_constants__:
assert getattr(f, name) == C[name]["value"] * u.Unit(C[name]["unit"])
# --------------
# check __getitem__
assert f[name] == C[name]["value"] * u.Unit(C[name]["unit"])
return
| 5,348,101 |
def get_spot_market_price(facility: Optional[str] = None,
plan: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSpotMarketPriceResult:
"""
Use this data source to get Packet Spot Market Price.
## Example Usage
```python
import pulumi
import pulumi_packet as packet
example = packet.get_spot_market_price(facility="ewr1",
plan="c1.small.x86")
```
:param str facility: Name of the facility.
:param str plan: Name of the plan.
"""
__args__ = dict()
__args__['facility'] = facility
__args__['plan'] = plan
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('packet:index/getSpotMarketPrice:getSpotMarketPrice', __args__, opts=opts, typ=GetSpotMarketPriceResult).value
return AwaitableGetSpotMarketPriceResult(
facility=__ret__.facility,
id=__ret__.id,
plan=__ret__.plan,
price=__ret__.price)
| 5,348,102 |
def _get_cluster_id(emr, clusterName: str) -> str:
"""
Returns the id of a running cluster with the given cluster name.
`emr` is a boto3 EMR client.
"""
clusters = emr.list_clusters()["Clusters"]
# choose the correct cluster
clusters = [c for c in clusters if c["Name"] == clusterName and c["Status"]["State"] in ["WAITING", "RUNNING"]]
if not clusters:
logger.info("No valid clusters")
raise Exception("cannot find running cluster: " + clusterName)
# take the first relevant cluster
return clusters[0]["Id"]
| 5,348,103 |
def seed_student(request, i):
"""Returns the properties for a new student entity.
"""
gsoc2009 = Program.get_by_key_name('google/gsoc2009')
user = User.get_by_key_name('user_%d' % i)
if not gsoc2009:
raise Error('Run seed_db first')
if not user:
raise Error('Run seed_many for at least %d users first.' % i)
properties = {
'key_name':'google/gsoc2009/student_%d' % i,
'link_id': 'student_%d' % i,
'scope_path': 'google/gsoc2009',
'scope': gsoc2009,
'user' : user,
'given_name': 'Student %d' % i,
'surname': 'Last Name',
'name_on_documents': 'Test Example',
'email': '[email protected]',
'res_street': 'Some Street',
'res_city': 'Some City',
'res_state': 'Some State',
'res_country': 'United States',
'res_postalcode': '12345',
'phone': '1-555-BANANA',
'birth_date': db.DateProperty.now(),
'agreed_to_tos': True,
'school_name': 'School %d' % i,
'school_country': 'United States',
'major': 'Computer Science',
'degree': 'Undergraduate',
'expected_graduation': 2012,
'program_knowledge': 'Knowledge %d' % i,
'school': None,
'can_we_contact_you': True,
}
return properties
| 5,348,104 |
def add_parm_value_multiplier(kwargs, add_exponent=False):
"""Adds a value/multipler parameter pair to the specified parameter.
(Called from PARMmenu.xml)
"""
p = kwargs['parms'][0]
try:
n = p.node()
v = p.eval()
t = p.parmTemplate()
g = n.parmTemplateGroup()
pn = t.name()
pl = t.label()
pvn = '%s_value' % pn
pmn = '%s_mult' % pn
pxn = '%s_exp' % pn
t = hou.FloatParmTemplate(name=p.name(), label="...", num_components=1)
expr = "ch('%s') * ch('%s')" % (pvn, pmn, )
if not n.parm(pvn) and not n.parm(pmn):
# value
t.setName(pvn)
t.setLabel('%s (v)' % pl)
t.setDefaultValue( (v, ) )
g.insertAfter(pn, t)
# mult
t.setName(pmn)
t.setLabel('%s (%%)' % pl)
t.setMinValue(0.0)
t.setMaxValue(2.0)
t.setDefaultValue( (1.0, ) )
g.insertAfter(pvn, t)
if add_exponent and not n.parm(pxn):
# exp
t.setName(pxn)
t.setLabel('%s (exp)' % pl)
t.setMinValue(0.001)
t.setMaxValue(4.0)
t.setDefaultValue( (2.0, ) )
g.insertAfter(pmn, t)
expr = "ch('%s') * pow(ch('%s'), ch('%s'))" % (pvn, pmn, pxn, )
# add parms
n.setParmTemplateGroup(g)
p.setExpression(expr)
else:
hou.ui.setStatusMessage("Value/multiplier params already exist for %s" % p.path(),
severity=hou.severityType.Warning)
except:
hou.ui.setStatusMessage("couldn't set up value/multiplier parameters on %s" % p.path(),
severity=hou.severityType.Error)
| 5,348,105 |
def get_route(routes):
"""
Request route data from the user.
"""
destination = input("Destination? ")
number = input("Train number? ")
time = input("Departure time? (format hh:mm) ")
route = {
'destination': destination,
'number': number,
'time': time
}
routes.append(route)
if len(routes) > 1:
routes.sort(key=lambda item: item.get('destination', ''))
| 5,348,106 |
def apply(effect: List[float], signal: List[float]):
"""Given effect interpolated to length of given signal.
Args:
effect: effect to interpolate to signal length.
signal: length of which effect is interpolated to.
"""
max_len = max(len(effect), len(signal))
# Signal indices to effect indices.
i = interp1d(
np.linspace(0, len(signal) - 1, max_len),
np.linspace(0, len(effect) - 1, max_len),
)(np.arange(len(signal)))
# print(
# f"i[0:10] = {i[0:10]}, np.arange(len(effect))[0:10] = {np.arange(len(effect))[0:10]}, effect[0:10] = {effect[0:10]}"
# )
# Effect indices to effect.
return interp1d(np.arange(len(effect)), effect)(i)
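# Usage sketch (assumes numpy and scipy.interpolate.interp1d are imported in this
# module, as `apply` relies on them): stretch a 3-point effect over a 6-sample signal.
import numpy as np
effect = [0.0, 1.0, 0.0]
signal = np.zeros(6)
print(apply(effect, signal))  # approximately [0.  0.4 0.8 0.8 0.4 0. ]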
| 5,348,107 |
def cli_cosmosdb_sql_trigger_update(client,
resource_group_name,
account_name,
database_name,
container_name,
trigger_name,
trigger_body=None,
trigger_type=None,
trigger_operation=None):
"""Updates an Azure Cosmos DB SQL trigger """
logger.debug('reading SQL trigger')
sql_trigger = client.get_sql_trigger(resource_group_name, account_name, database_name, container_name, trigger_name)
sql_trigger_resource = SqlTriggerResource(id=trigger_name)
sql_trigger_resource.body = sql_trigger.resource.body
sql_trigger_resource.trigger_operation = sql_trigger.resource.trigger_operation
sql_trigger_resource.trigger_type = sql_trigger.resource.trigger_type
if _populate_sql_trigger_definition(sql_trigger_resource,
trigger_body,
trigger_operation,
trigger_type):
logger.debug('replacing SQL trigger')
sql_trigger_create_update_resource = SqlTriggerCreateUpdateParameters(
resource=sql_trigger_resource,
options={})
return client.create_update_sql_trigger(resource_group_name,
account_name,
database_name,
container_name,
trigger_name,
sql_trigger_create_update_resource)
| 5,348,108 |
def evaluate_features(features: np.ndarray, labels: np.ndarray, train_frac: float = 0.8) -> List[int]:
"""
Evaluates the marginal impact of each feature in the given array (by retraining).
Args:
features: A [N, T, D] array of input features for each sequence element
labels: A [N] array of labels per instance
train_frac: Fraction of instances used for training (the remainder is held out for evaluation)
Returns:
An (ordered) list of feature indices
"""
# For feasibility purposes, we start with the first feature
result: List[int] = [0]
remaining_idx = list(range(1, features.shape[1]))
split_point = int(features.shape[0] * train_frac)
train_features = features[0:split_point, :, :]
test_features = features[split_point:, :, :]
train_labels = labels[0:split_point]
test_labels = labels[split_point:]
train_samples = train_features.shape[0]
test_samples = test_features.shape[0]
while len(remaining_idx) > 0:
best_accuracy = 0.0
best_idx = None
for feature_idx in remaining_idx:
feature_indices = result + [feature_idx]
X_train = train_features[:, feature_indices, :].reshape(train_samples, -1)
X_test = test_features[:, feature_indices, :].reshape(test_samples, -1)
clf = LogisticRegression(max_iter=500)
clf.fit(X_train, train_labels)
accuracy = clf.score(X_test, test_labels)
if accuracy > best_accuracy:
best_accuracy = accuracy
best_idx = feature_idx
result.append(best_idx)
remaining_idx.pop(remaining_idx.index(best_idx))
print(best_accuracy)
print(result)
return result
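# Hypothetical usage sketch (assumes numpy and scikit-learn's LogisticRegression are
# available through this module's imports): rank 5 synthetic features by marginal impact.
import numpy as np
rng = np.random.default_rng(0)
features = rng.normal(size=(200, 5, 3))                   # N=200 sequences, T=5 features, D=3
labels = (features[:, 2, :].sum(axis=1) > 0).astype(int)  # label driven by feature 2
ranking = evaluate_features(features, labels, train_frac=0.8)
# `ranking` always starts with feature 0 (the seed) and should place feature 2 early.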
| 5,348,109 |
def pad_for_tpu(shapes_dict, hparams, max_length):
"""Pads unknown features' dimensions for TPU."""
padded_shapes = {}
def get_filler(specified_max_length):
if not specified_max_length:
return max_length
return min(specified_max_length, max_length)
inputs_none_filler = get_filler(hparams.max_input_seq_length)
targets_none_filler = get_filler(hparams.max_target_seq_length)
def pad_one_shape(shape, none_filler):
return [
(dim if dim is not None else none_filler) for dim in shape.as_list()
]
for key, shape in six.iteritems(shapes_dict):
if key == "inputs":
padded_shapes[key] = pad_one_shape(shape, inputs_none_filler)
elif key == "targets":
padded_shapes[key] = pad_one_shape(shape, targets_none_filler)
else:
padded_shapes[key] = pad_one_shape(shape, max_length)
return padded_shapes
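# Minimal usage sketch (assumes TensorFlow and six are importable here, and that hparams
# exposes max_input_seq_length / max_target_seq_length as the function expects).
import collections
import tensorflow as tf
HParams = collections.namedtuple("HParams", ["max_input_seq_length", "max_target_seq_length"])
shapes = {"inputs": tf.TensorShape([None]), "targets": tf.TensorShape([None])}
print(pad_for_tpu(shapes, HParams(0, 50), max_length=100))
# -> {'inputs': [100], 'targets': [50]}: unspecified lengths fall back to max_length.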
| 5,348,110 |
def sample(x_axes, y_axes) -> None:
"""
Show the live camera feed alongside its Canny edges, using x_axes and y_axes
as the lower and upper Canny thresholds.
"""
if not isinstance(x_axes, int):
print("Please enter a number for the X axis")
sys.exit()
if not isinstance(y_axes, int):
print("Please enter a number for the Y axis")
sys.exit()
try:
cam = cv2.VideoCapture(0)
model = cv2.COLOR_BGR2GRAY
exit_string = 'q'
while True:
_, image = cam.read()
gray = cv2.cvtColor(image, model)
edges = cv2.Canny(gray, x_axes, y_axes)
cv2.imshow("Live Video", image)
cv2.imshow("Neuronal Network", edges)
if cv2.waitKey(1) == ord(exit_string):
break
cam.release()
cv2.destroyAllWindows()
except cv2.error as exception_string:
print(exception_string)
print("No Camera")
| 5,348,111 |
def test_logrotate_binary_file(host):
"""
Tests if logrotate binary is a file type.
"""
assert host.file(PACKAGE_BINARY).is_file
| 5,348,112 |
def _get_child_query_node_and_out_name(
ast: Union[FieldNode, InlineFragmentNode],
child_type_name: str,
child_field_name: str,
name_assigner: IntermediateOutNameAssigner,
) -> Tuple[SubQueryNode, str]:
"""Create a query node out of ast, return node and unique out_name on field with input name.
Create a new document out of the input AST, that has the same structure as the input. For
instance, if the input AST can be represented by
out_Human {
name
}
where out_Human is a vertex field going to type Human, the resulting document will be
{
Human {
name
}
}
If the input AST starts with a type coercion, the resulting document will start with the
coerced type, rather than the original union or interface type.
The output child_node will be wrapped around this new DocumentNode. In addition, if no field
of child_field_name currently exists, such a field will be added. If there is no @output
directive on this field, a new @output directive will be added.
Args:
ast: Representing the AST that we're using to build a child node.
It is not modified by this function.
child_type_name: Name of the type to which this cross schema field leads.
child_field_name: str. If no field of this name currently exists as a part of the root
selections of the input AST, a new field will be created in the AST
contained in the output child query node
name_assigner: Object used to generate and keep track of names of newly created
@output directives.
Returns:
Tuple containing:
- The child sub query node wrapping around the input AST.
- The out_name of the @output directive uniquely identifying the field used for
stitching in this sub query node.
"""
# Get type and selections of child AST, taking into account type coercions
child_selection_set = ast.selection_set
if child_selection_set is None:
raise AssertionError("Invalid AST. child_selection_set cannot be None.")
type_coercion = try_get_inline_fragment(child_selection_set.selections)
if type_coercion is not None:
child_type_name = type_coercion.type_condition.name.value
child_selection_set = type_coercion.selection_set
child_selections: List[SelectionNode] = []
for child_selection in child_selection_set.selections:
if not isinstance(child_selection, FieldNode):
raise AssertionError(
"Expected child_selection to be of type FieldNode, but was of "
f"type {type(child_selection)}."
)
child_selections.append(child_selection)
# Get existing field with name in child
existing_child_property_field = try_get_ast_by_name_and_type(
child_selections, child_field_name, FieldNode
)
# Validate that existing_child_property_field is None or FieldNode.
# It should be impossible for this to *not* be the case, but check so that mypy is happy.
if not (
existing_child_property_field is None
or isinstance(existing_child_property_field, FieldNode)
):
raise AssertionError(
"Unreachable code reached! existing_child_property_field should be None or of type "
f"FieldNode, but was type {type(existing_child_property_field)}."
)
child_property_field = _get_property_field(
existing_child_property_field, child_field_name, None
)
# Add @output if needed, record out_name
child_property_field, child_output_name = _get_out_name_optionally_add_output(
child_property_field, name_assigner
)
# Get new child_selections by replacing or adding in new property field
child_property_fields_map, child_vertex_fields = _split_selections_property_and_vertex(
child_selections
)
child_property_fields_map[child_field_name] = child_property_field
child_selections = _get_selections_from_property_and_vertex_fields(
child_property_fields_map, child_vertex_fields
)
# Wrap around
# NOTE: if child_type_name does not actually exist as a root field (not all types are
# required to have a corresponding root vertex field), then this query will be invalid.
child_query_ast = _get_query_document(child_type_name, child_selections)
child_query_node = SubQueryNode(child_query_ast)
return child_query_node, child_output_name
| 5,348,113 |
def test_call(paired_inputs_v0, group_v0, kernel_ak_v0):
"""Test call."""
inputs_0 = paired_inputs_v0[0]
inputs_1 = paired_inputs_v0[1]
kernel = kernel_ak_v0
outputs = kernel([inputs_0, inputs_1, group_v0])
desired_outputs = np.array([
0.4206200260541147,
0.4206200260541147,
0.4206200260541147,
0.4206200260541147,
0.4206200260541147
])
np.testing.assert_array_almost_equal(
desired_outputs, outputs.numpy(), decimal=4
)
| 5,348,114 |
def has_balanced_parens(exp: str) -> bool:
"""
Checks if the parentheses in the given expression `exp` are balanced,
that is, if each opening parenthesis is matched by a corresponding
closing parenthesis.
**Example:**
::
>>> has_balanced_parens("(((a * b) + c)")
False
:param exp: The expression to check.
:return: `True` if the parentheses are balanced, `False` otherwise.
"""
# Use a stack to determine if the expression is balanced.
# Ref: https://youtu.be/HJOnJU77EUs?t=75 [1:15 - 2:47]
paren_stack = []
for e in exp:
if e == '(':
paren_stack.append(e)
elif e == ')':
try:
paren_stack.pop()
except IndexError:
return False
return len(paren_stack) == 0
| 5,348,115 |
def _is_binary(c):
"""Ensures character is a binary digit."""
return c in '01'
| 5,348,116 |
def get_interface_ib_name(hosts, interface, verbose=True):
"""Get the InfiniBand name of this network interface on each host.
Args:
hosts (NodeSet): hosts on which to detect the InfiniBand name
interface (str): interface for which to obtain the InfiniBand name
verbose (bool, optional): display command details. Defaults to True.
Returns:
dict: a dictionary of InfiniBand name keys and NodeSet values on which they were detected
"""
net_path = os.path.join(os.path.sep, "sys", "class", "net")
command = f"ls -1 {os.path.join(net_path, interface, 'device', 'infiniband')}"
task = run_task(hosts, command, verbose=verbose)
if verbose:
display_task(task)
# Populate a dictionary of IB names with a NodeSet of hosts on which it was detected
ib_names = {}
results = dict(task.iter_retcodes())
if 0 in results:
for output, nodelist in task.iter_buffers(results[0]):
ib_name_list = []
for line in output:
match = re.findall(r"([A-Za-z0-9;_+]+)", line.decode("utf-8"))
if len(match) == 1:
ib_name_list.append(match[0])
if ib_name_list:
ib_names[",".join(ib_name_list)] = NodeSet.fromlist(nodelist)
return ib_names
| 5,348,117 |
def merge(files_in, file_out, delete_files_in=False):
"""
Merges several alignment files (cesAlign format) into a single alignment file
:param files_in: the input files
:param file_out: the output file, with all alignments merged into one file
:param delete_files_in: whether or not to delete the input files
"""
root = etree.Element('cesAlign', attrib={'version': '1.0'})
for file_in in sorted(files_in):
# Skip empty files
if os.path.getsize(file_in) == 0:
continue
xml_parser = etree.XMLParser(remove_blank_text=True)
in_tree = etree.parse(file_in, xml_parser)
for linkGrp in in_tree.xpath('//linkGrp'):
root.set('fromDoc', linkGrp.get('fromDoc').split('/')[0])
root.set('toDoc', linkGrp.get('toDoc').split('/')[0])
root.append(linkGrp)
tree = etree.ElementTree(root)
tree.docinfo.public_id = '-//CES//DTD XML cesAlign//EN'
tree.docinfo.system_url = 'dtd/xcesAlign.dtd'
tree.write(file_out, pretty_print=True, xml_declaration=True, encoding='utf-8')
if delete_files_in:
for file_in in files_in:
os.remove(file_in)
| 5,348,118 |
def _get_script(args_file):
"""compiled contents of script or error out"""
DEFAULT_SCRIPT = 'build.jfdi'
script_path = None
if args_file is not None:
script_path = args_file
elif os.path.exists(DEFAULT_SCRIPT):
script_path = DEFAULT_SCRIPT
if script_path is None or not os.path.exists(script_path):
fatal_msg = "Build file not found\n"
fatal_msg += "\nIf this is your first run, use %s init\n" \
% sys.argv[0]
fatal_msg += "%s --help for detailed help.\n\n" \
% sys.argv[0]
_fatal_error(fatal_msg)
with open(script_path) as f:
script = f.read()
try:
pycode = compile(script, script_path, mode='exec')
except SyntaxError as ex:
msg = "SyntaxError in (%s, line %d):\n\t%s\n" \
% (ex.filename, ex.lineno, ex.text)
_fatal_error(msg)
return pycode
| 5,348,119 |
def render(img,
result,
classes=None,
score_thr=None,
show=True,
wait_time=0,
path=None):
"""Visualize the detection on the image and optionally save to a file.
Args:
img(BGR): CV2 BGR.
result(Tensor[K, 6] or List[(tid, Tensor[6])+]): detection result in xyxysc
classes(list[str] or tuple[str]): A list of trained class names
score_thr(float): The threshold to visualize the bboxes and masks.
show(bool): Whether to display the rendered image
wait_time (int): Value of waitKey param for display
path(str, optional): path to save the rendered image
"""
from ml import cv
labels = colors = None
img = np.ascontiguousarray(img)
if th.is_tensor(result):
# Detection only
if score_thr:
result = result[result[:, 4] >= score_thr]
labels = [classes[c.int()] for c in result[:, 5]] if classes else [f"[{int(c)}]" for c in result[:, 5]]
colors = [COLORS91[c.int()] for c in result[:, 5]]
logging.debug(f"Drawing detection: {result} with labels={labels}")
cv.drawBoxes(img, result[:, :4], labels=labels, scores=result[:, 4], colors=colors)
elif result:
# Detection with tracking [(tid, xyxysc)*]
tids, boxes = list(zip(*result))
result = th.stack(boxes)
if score_thr:
result = result[result[:, 4] >= score_thr]
if classes:
labels = [f"{classes[c.int()]}[{tid}]" for tid, c in zip(tids, result[:, 5])]
else:
labels = [f"[{int(c)}][{tid}]" for tid, c in zip(tids, result[:, 5])]
colors = [cv.rgb(tid, integral=True) for tid in tids]
cv.drawBoxes(img, result[:, :4], labels=labels, scores=result[:, 4], colors=colors)
# logging.info(f"Tracks labels={labels}")
# logging.info(f"Colors colors={colors}")
else:
logging.warning(f"No RoIs to render")
path = path and Path(path) or None
if sys.x_available() and show:
cv.imshow(img, title=str(path) or '')
if path:
cv.save(img, path)
return img
| 5,348,120 |
def check_python_import(package_or_module):
"""
Checks if a python package or module is importable.
Arguments:
package_or_module -- the package or module name to check
Returns:
True or False
"""
logger = logging.getLogger(__name__)
logger.debug("Checking python import '%s'...", package_or_module)
loader = pkgutil.get_loader(package_or_module)
found = loader is not None
if found:
logger.debug("Python %s '%s' found: %r",
"package" if loader.is_package(package_or_module)
else "module", package_or_module, loader.get_filename())
else:
logger.debug("Python import '%s' not found", package_or_module)
return found
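# Usage sketch (relies on this module's logging and pkgutil imports):
print(check_python_import("json"))            # True - part of the standard library
print(check_python_import("no_such_module"))  # False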
| 5,348,121 |
def read_frame_positions(lmp_trj):
""" Read stream positions in trajectory file corresponding to
time-step and atom-data.
"""
ts_pos, data_pos = [], []
with open(lmp_trj, 'r') as fid:
while True:
line = fid.readline()
if not line:
break
if line.startswith('ITEM: TIMESTEP'):
ts_pos.append(fid.tell())
elif line.startswith('ITEM: ATOMS id'):
data_pos.append(fid.tell())
return ts_pos, data_pos
| 5,348,122 |
async def async_setup_entry(hass, config_entry):
"""Set up Enedis as config entry."""
hass.data.setdefault(DOMAIN, {})
pdl = config_entry.data.get(CONF_PDL)
token = config_entry.data.get(CONF_TOKEN)
session = async_create_clientsession(hass)
enedis = EnedisGateway(pdl=pdl, token=token, session=session)
coordinator = EnedisDataUpdateCoordinator(hass, config_entry, enedis)
await coordinator.async_config_entry_first_refresh()
if coordinator.data is None:
return False
undo_listener = config_entry.add_update_listener(_async_update_listener)
hass.data[DOMAIN][config_entry.entry_id] = {
COORDINATOR: coordinator,
CONF_PDL: pdl,
UNDO_LISTENER: undo_listener,
}
hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
async def async_reload_history(call) -> None:
await coordinator.async_load_datas_history(call)
hass.services.async_register(
DOMAIN, "reload_history", async_reload_history, schema=vol.Schema({})
)
return True
| 5,348,123 |
def get_file_list(prefix):
""" Get file list from http prefix """
print("Fetching file list from", prefix)
k = requests.get(prefix)
if not k.ok:
raise Exception("Unable to get http directory listing")
parser = HRefParser()
parser.feed(k.content.decode())
k.close()
return parser.href_list
| 5,348,124 |
def train_model(model: nn.Module, trainDataLoader: DataLoader, testDataLoader: DataLoader, epochs: int, optimizer, lossFuction, metric, device) -> dict:
"""
Training model function: it will train the model for a number of epochs, with the corresponding optimizer.
It will return the corresponding losses and metrics in a dictionary.
"""
# Send model to the corresponding device
model.to(device)
# Creating loss dictionary
losses = {
'training_batchs': [],
'training_average': [],
'testing_average': [],
'metric_average': []
}
# Iterating over number of epochs
for epoch in range(epochs):
print(f'Starting epoch {epoch + 1}')
# Training
epoch_loss = training_epoch(
model, trainDataLoader, testDataLoader, lossFuction, optimizer, metric, device)
# Updating loss dictionary
for key, loss in epoch_loss.items():
try:
losses[key].extend(loss)
except TypeError:
# scalar losses (e.g. epoch averages) cannot be extended
losses[key].append(loss)
# print training stats after epoch
print(f'Results for epoch {epoch + 1}')
print('------------------------------')
print(f'Training loss average: {epoch_loss["training_average"]}')
print(f'Test loss average: {epoch_loss["testing_average"]}')
print(f'Metric average: {epoch_loss["metric_average"]}')
return losses
| 5,348,125 |
def _has_profile():
"""Check whether we have kernprof & kernprof has given us global 'profile'
object."""
return kernprof is not None and hasattr(builtins, 'profile')
| 5,348,126 |
def get_video_info(url):
"""
adapted from https://www.thepythoncode.com/article/get-youtube-data-python
Function takes a YouTube URL and extracts the different parts of the video:
title, view count, description, date published, likes, dislikes, channel name,
channel url, and channel subscriber count. Returned as a Python dictionary.
"""
# TODO: This works for most videos however there are videos that come up
# that have video info but are reported missing
# adapted from https://www.thepythoncode.com/article/get-youtube-data-python
# Starts the process of scraping the video information
try:
# requests URL
content = requests.get(url)
# create beautiful soup object to parse HTML
soup = bs(content.content, "html.parser")
# initialize the result
result = {}
# video title
try:
result['title'] = soup.find("span", attrs={"class": "watch-title"}).text.strip()
except:
result['title'] = "Not Found (Perhaps Hidden)"
# try-catch for finding video views using the HTML 'watch-view-count'
try:
# video views (converted to integer)
result['views'] = int(
soup.find("div", attrs={"class": "watch-view-count"}).text[:-6].replace(",", ""))
except:
try:
# Tries to find the views using the 'stat view-count'
result['views'] = int(
soup.find("span", attrs={"class": "stat view-count"}).text[:-6].replace(",", "").replace("views",
""))
except:
# If views can't be found
result['views'] = "Not Found (Perhaps Hidden)"
# video description
try:
result['description'] = soup.find("p", attrs={"id": "eow-description"}).text
except:
result['description'] = "Not Found (Perhaps Hidden)"
# date published
try:
result['date_published'] = soup.find("strong", attrs={"class": "watch-time-text"}).text.replace(
"Published on ", "").replace("Premiered ", "")
except:
result['date_published'] = "Not Found (Perhaps Hidden)"
# try-catch for finding the likes and dislikes
try:
# number of likes as integer
result['likes'] = int(soup.find("button", attrs={"title": "I like this"}).text.replace(",", ""))
# number of dislikes as integer
result['dislikes'] = int(
soup.find("button", attrs={"title": "I dislike this"}).text.replace(",", ""))
except:
try:
# This took me so long to figure out. If you can find a better way PLEASE let me know
# Saves FULL html file into a variable
video_html = soup.prettify()
# pattern to extract html code that has the like count
pattern_like = re.compile(r'\\"likeCount\\":[0-9]+[0-9]')
# pattern to extract numbers our of like count
pattern_like2 = re.compile(r'[0-9]+[0-9]')
# Finds the html code with likecount
matches_in_html_like = pattern_like.findall(video_html)
# Extracts the numbers from the html code
cleaned_html_number_like = int((pattern_like2.findall(''.join(matches_in_html_like)))[0])
result['likes'] = cleaned_html_number_like
pattern_dislike = re.compile(r'\\"dislikeCount\\":[0-9]+[0-9]')
# pattern to extract numbers our of like count
pattern_dislike2 = re.compile(r'[0-9]+[0-9]')
# Finds the html code with dislikeCount
matches_in_html_dislike = pattern_dislike.findall(video_html)
# Extracts the numbers from the html code
cleaned_html_number_dislike = int((pattern_dislike2.findall(''.join(matches_in_html_dislike)))[0])
result['dislikes'] = cleaned_html_number_dislike
except:
result['likes'] = "Not Found (Perhaps Hidden)"
result['dislikes'] = "Not Found (Perhaps Hidden)"
# channel details
try:
channel_tag = soup.find("div", attrs={"class": "yt-user-info"}).find("a")
except:
channel_tag = "Not Found (Perhaps Hidden)"
# channel name
try:
channel_name = channel_tag.text
except:
channel_name = "Not Found (Perhaps Hidden)"
# channel URL
try:
channel_url = f"https://www.youtube.com{channel_tag['href']}"
except:
channel_url = "Not Found (Perhaps Hidden)"
# try-catch for subscription count (youtube user can hide these)
try:
channel_subscribers = soup.find("span", attrs={"class": "yt-subscriber-count"}).text.strip()
except:
channel_subscribers = "Not Found (Perhaps Hidden)"
result['channel'] = {'name': channel_name, 'url': channel_url, 'subscribers': channel_subscribers}
# return the result
print("Video Information Found.")
return result
# If none of the information can be found will result in this a blank video info
except:
# Returns an no video information found dictionary
print("No Video Information Found.")
result = {'title': "No Video Information Found",
'views': "No Video Information Found",
'description': "No Video Information Found",
'date_published': "No Video Information Found",
'likes': "No Video Information Found",
'dislikes': "No Video Information Found"}
channel_tag = 'No Video Information Found'
channel_name = 'No Video Information Found'
channel_url = 'No Video Information Found'
channel_subscribers = 'No Video Information Found'
result['channel'] = {'name': channel_name, 'url': channel_url, 'subscribers': channel_subscribers}
return result
| 5,348,127 |
def routing_tree_to_tables(routes, net_keys):
"""Convert a set of
:py:class:`~rig.place_and_route.routing_tree.RoutingTree` s into a per-chip
set of routing tables.
.. warning::
A :py:exc:`rig.routing_table.MultisourceRouteError` will
be raised if entries with identical keys and masks but with differing
routes are generated. This is not a perfect test, entries which would
otherwise collide are not spotted.
.. warning::
The routing trees provided are assumed to be correct and continuous
(not missing any hops). If this is not the case, the output is
undefined.
.. note::
If a routing tree has a terminating vertex whose route is set to None,
that vertex is ignored.
Parameters
----------
routes : {net: :py:class:`~rig.place_and_route.routing_tree.RoutingTree`, \
...}
The complete set of RoutingTrees representing all routes in the system.
(Note: this is the same data structure produced by routers in the
:py:mod:`~rig.place_and_route` module.)
net_keys : {net: (key, mask), ...}
The key and mask associated with each net.
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...}
"""
# Pairs of inbound and outbound routes.
InOutPair = namedtuple("InOutPair", "ins, outs")
# {(x, y): {(key, mask): _InOutPair}}
route_sets = defaultdict(OrderedDict)
for net, routing_tree in iteritems(routes):
key, mask = net_keys[net]
# The direction is the Links entry which describes the direction in
# which we last moved to reach the node (or None for the root).
for direction, (x, y), out_directions in routing_tree.traverse():
# Determine the in_direction
in_direction = direction
if in_direction is not None:
in_direction = direction.opposite
# Add a routing entry
if (key, mask) in route_sets[(x, y)]:
# If there is an existing route set raise an error if the out
# directions are not equivalent.
if route_sets[(x, y)][(key, mask)].outs != out_directions:
raise MultisourceRouteError(key, mask, (x, y))
# Otherwise, add the input directions as this represents a
# merge of the routes.
route_sets[(x, y)][(key, mask)].ins.add(in_direction)
else:
# Otherwise create a new route set
route_sets[(x, y)][(key, mask)] = \
InOutPair({in_direction}, set(out_directions))
# Construct the routing tables from the route sets
routing_tables = defaultdict(list)
for (x, y), routes in iteritems(route_sets):
for (key, mask), route in iteritems(routes):
# Add the route
routing_tables[(x, y)].append(
RoutingTableEntry(route.outs, key, mask, route.ins)
)
return routing_tables
| 5,348,128 |
def extract_response_objects(image_file, mask_file, stim_file, input_dict):
"""inputs are file names for aligned images, binary mask, and unprocessed stimulus file
outputs a list of response objects"""
# read files
I = read_tifs(image_file)
mask = read_tifs(mask_file)
labels = segment_ROIs(mask)
print('number of ROIs = ' + str(np.max(labels)))
# process stimulus file
stim_data, stim_data_OG, header = count_frames(stim_file)
if (len(I)) != int(stim_data[-1][-1]):
print("number of images does not match stimulus file")
print('stimulus frames = ' + str(int(stim_data[-1][-1])))
print('image frames = ' + str(len(I)))
# stim_data = fix_dropped_frames(len(I),float(input_dict['time_interval']),stim_data,stim_data_OG,int(input_dict['gt_index']))
# get frames, relative time, stimulus type, and stimulus state from stim data
fr, rt, st = parse_stim_file(stim_data,
rt_index=int(input_dict['rt_index']),
st_index=input_dict['st_index'])
ss = define_stim_state(rt, float(input_dict['on_time']),
float(input_dict['off_time']))
# measure fluorescence intensities in each ROI
responses, num, labels = measure_multiple_ROIs(I, mask)
# load response objects
response_objects = []
for r, n in zip(responses, num):
ro = ResponseClassSimple.Response(F=r, stim_time=rt, stim_state=ss,
ROI_num=n, stim_type=st)
ro.sample_name = input_dict['sample_name']
ro.reporter_name = input_dict['reporter_name']
ro.driver_name = input_dict['driver_name']
ro.stimulus_name = input_dict['stimulus_name']
ro.time_interval = float(input_dict['time_interval'])
response_objects.append(ro)
return response_objects, stim_data, header, labels
| 5,348,129 |
def get_top_diff_loc(imgs, ref_imgs, crop_size, grid_size, device, topk=10):
"""Randomly get a crop bounding box."""
assert imgs.shape == ref_imgs.shape
batches = imgs.size(0)
img_size = imgs.shape[2:]
crop_size = _pair(crop_size)
grid_size = _pair(grid_size)
stride_h = (img_size[0] - crop_size[0]) // (grid_size[0] - 1)
stride_w = (img_size[1] - crop_size[1]) // (grid_size[1] - 1)
diff_imgs = imgs - ref_imgs
diff_list = []
for i in range(grid_size[0]):
for j in range(grid_size[1]):
crop_diff = diff_imgs[:, :,
i * stride_h:i * stride_h + crop_size[0],
j * stride_w:j * stride_w + crop_size[1]]
diff_list.append(crop_diff.abs().sum(dim=(1, 2, 3)))
# [batches, grid_size**2]
diff_sum = torch.stack(diff_list, dim=1)
diff_topk_idx = torch.argsort(diff_sum, dim=1, descending=True)[:, :topk]
select_idx = diff_topk_idx
idx_i = select_idx // grid_size[1]
idx_j = select_idx % grid_size[1]
crop_y1, crop_y2 = idx_i * stride_h, idx_i * stride_h + crop_size[0]
crop_x1, crop_x2 = idx_j * stride_w, idx_j * stride_w + crop_size[1]
center = torch.stack([(crop_x1 + crop_x2) * 0.5,
(crop_y1 + crop_y2) * 0.5],
dim=-1).float()
return center
| 5,348,130 |
def decode(file):
"""
This function creates a dictionary out of a given file thanks to pre-existing json functions.
:param file: The file to decode.
:return: The corresponding Python dictionary or None if something went wrong (i.e. the given file \
is invalid).
"""
# Json to dictionary
tmp_res = None
try:
with open(file, "r") as f:
tmp_res = json.load(f)
except Exception as e:
print(e)
return None
# Gets the type of problem handled here
problem_type = ProblemType.identify_problem(tmp_res)
res = {}
# Gets the field's limits + the bottom left and top right points of the field
res["field_limits"] = tmp_res["field_limits"]
res["bottom_left"] = Point(res["field_limits"][0][0], res["field_limits"][1][0])
res["top_right"] = Point(res["field_limits"][0][1], res["field_limits"][1][1])
# Gets the list of goals
res["goals"] = []
for goal in tmp_res["goals"]:
posts = goal["posts"]
direction = goal["direction"]
post1 = Point(posts[0][0], posts[0][1])
post2 = Point(posts[1][0], posts[1][1])
direction = Vector(direction[0], -direction[1])
goal = Goal(post1, post2, direction)
res["goals"].append(goal)
# Gets the list of opponents
res["opponents"] = []
for opponent in tmp_res["opponents"]:
res["opponents"].append(Opponent(Point(opponent[0], opponent[1])))
# Gets the radius of the robots
res["radius"] = tmp_res["robot_radius"]
# Gets theta and pos steps for opponents' shots and defenders's position respectively
res["theta_step"] = tmp_res["theta_step"]
res["pos_step"] = tmp_res["pos_step"]
# Gets the list of defenders if the problem is initial positions
if problem_type == ProblemType.INITIAL_POS:
res["defenders"] = []
for defender in tmp_res["defenders"]:
res["defenders"].append(Defender(Point(defender[0], defender[1]), res["radius"]))
# Gets the min dist if the problem is min dist
if problem_type == ProblemType.MIN_DIST:
res["min_dist"] = tmp_res["min_dist"]
# Gets the goalkeeper area if the problem is goal keeper
if problem_type == ProblemType.GOAL_KEEPER:
res["goalkeeper_area"] = tmp_res["goalkeeper_area"]
res["gk_bottom_left"] = Point(res["goalkeeper_area"][0][0], res["goalkeeper_area"][1][0])
res["gk_top_right"] = Point(res["goalkeeper_area"][0][1], res["goalkeeper_area"][1][1])
if problem_type == ProblemType.MAX_SPEED:
res["ball_max_speed"] = tmp_res["ball_max_speed"]
res["robot_max_speed"] = tmp_res["robot_max_speed"]
return (res, problem_type)
| 5,348,131 |
def kron_compact(x):
"""Calculate the unique terms of the Kronecker product x ⊗ x.
Parameters
----------
x : (n,) or (n,k) ndarray
If two-dimensional, the product is computed column-wise (Khatri-Rao).
Returns
-------
x ⊗ x : (n(n+1)/2,) or (n(n+1)/2,k) ndarray
The "compact" Kronecker product of x with itself.
"""
if x.ndim not in (1,2):
raise ValueError("x must be one- or two-dimensional")
return _np.concatenate([x[i]*x[:i+1] for i in range(x.shape[0])], axis=0)
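# Quick sanity check (assumes numpy is importable; the module itself aliases it as _np):
import numpy as np
x = np.array([1.0, 2.0, 3.0])
print(kron_compact(x))       # [1. 2. 4. 3. 6. 9.] - the n(n+1)/2 unique products
print(kron_compact(x).size)  # 6 == 3 * (3 + 1) // 2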
| 5,348,132 |
def record_speech_sequentially(min_sound_lvl=0.01, speech_timeout_secs=1.):
"""Records audio in sequential audio files.
Args:
min_sound_lvl: The minimum sound level as measured by root mean square
speech_timeout_secs: Timeout of audio after that duration of silence as measured by min_sound_lvl
Returns:
The recorded audio samples.
"""
samples = []
i = 0
while True:
cmd = input("> ").encode()
if cmd == KeyInput.QUIT.value:
return samples
elif cmd == KeyInput.REDO.value:
print("Index now at {}.".format(i))
i = max(i - 1, 0)
try:
samples.pop()
except IndexError:
pass
continue
with AudioSnippetGenerator() as generator:
timeout_len = int(speech_timeout_secs * generator.sr / generator.chunk_size)
active_count = timeout_len
curr_snippet = None
for audio in generator:
if curr_snippet:
curr_snippet.append(audio)
else:
curr_snippet = audio
if audio.amplitude_rms() < min_sound_lvl:
active_count -= 1
else:
active_count = timeout_len
print("Time left: {:<10}".format(active_count), end="\r")
if active_count == 0:
i += 1
samples.append(curr_snippet)
print("Recorded #{:<10}".format(i))
break
| 5,348,133 |
def test_ga_sync_sample(ga_config: Optional[dict]):
"""Test class creation."""
tap = SampleTapGoogleAnalytics(config=ga_config, parse_env_config=True)
tap.sync_all()
| 5,348,134 |
def test_charsets_attribute_warning(attributes, warning):
"""Validates the warning message displayed for charset attribute in
anchor tag.
"""
with warnings.catch_warnings(record=True) as expected_warning:
A(**attributes)
assert len(expected_warning) == 1
assert issubclass(expected_warning[-1].category, UserWarning)
assert warning in str(expected_warning[-1].message)
| 5,348,135 |
def home():
"""
Display Hello World in a local-host website
"""
return 'Hello World'
| 5,348,136 |
def build_cinder(args):
"""Build the cinder client object."""
(os_username, os_password,
os_user_domain_name,
os_auth_url,
os_auth_type,
os_region_name,
os_project_name,
os_project_id,
os_project_domain_name,
os_project_domain_id,
os_region_name,
os_user_domain_id,
os_user_domain_name,
os_user_id,
) = (
args.os_username, args.os_password,
args.os_user_domain_name,
args.os_auth_url,
args.os_auth_type,
args.os_region_name,
args.os_project_name,
args.os_project_id,
args.os_project_domain_name,
args.os_project_domain_id,
args.os_region_name,
args.os_user_domain_id,
args.os_user_domain_name,
args.os_user_id,
)
args = {
"os_auth_url": os_auth_url,
"os_username": os_username,
"os_password": os_password,
"os_user_domain_name": os_user_domain_name,
"os_user_domain_id": os_user_domain_id,
"os_user_id": os_user_id,
"os_project_id": os_project_id,
"os_project_name": os_project_name,
"os_project_domain_name": os_project_domain_name,
"os_project_domain_id": os_project_domain_id
}
session = get_keystone_session(**args)
LOG.info(f"{session}")
client_args = dict(
region_name=os_region_name,
service_type='volumev3',
service_name='',
os_endpoint='',
endpoint_type='publicURL',
insecure=False,
cacert=None,
auth_plugin=None,
http_log_debug=True,
session=session
)
# force this to version 3.70 of the Cinder API
api_version = 3.70
#LOG.info(f"{args}")
#LOG.info("Logging in with")
#LOG.info(f"{api_version} {os_username} {os_password} {os_project_name} {os_auth_url}")
#LOG.info(f"{client_args}")
c = cinder.Client(api_version,
os_username,
os_password,
os_project_name,
os_auth_url,
**client_args,
)
return c
| 5,348,137 |
def harvest_channel(channel, start_date, end_date, exclude_nicks=None,
exclude_posts=None):
"""Pull all matching irc posts
:param channel: the irc channel to search
:param start_date: the starting date of irc entries
:param end_date: the ending date of irc entries
:param exclude_nicks: the irc nicknames whose posts are to be ignored
:param exclude_posts: the substrings to cause posts to be ignored
"""
start_fields = parse_date(start_date)
end_fields = parse_date(end_date)
if not start_fields or not end_fields:
return
start_year = start_fields[0]
start_month = start_fields[1]
start_day = start_fields[2]
end_year = end_fields[0]
end_month = end_fields[1]
end_day = end_fields[2]
days_in_month = {
'1': 31, '2': 28, '3': 31, '4': 30,
'5': 31, '6': 30, '7': 31, '8': 31,
'9': 30, '10': 31, '11': 30, '12': 31,
}
pulling_data = True
current_year = start_year
current_month = start_month
current_day = start_day
while pulling_data:
current_date = '%d-%02d-%02d' % (current_year,
current_month,
current_day)
log_entries = irc_wheat.get_channel_entries(channel,
current_date,
exclude_nicks,
exclude_posts)
if log_entries:
irc_wheat.print_irc_entries(log_entries)
if current_year == end_year and \
current_month == end_month and \
current_day == end_day:
pulling_data = False
else:
if current_day < days_in_month[str(current_month)]:
current_day += 1
else:
current_month += 1
current_day = 1
if current_month > 12:
current_month = 1
current_year += 1
| 5,348,138 |
def selecaoEscalar(Mcorr, criterios, N=0, a1=0.5, a2=0.5):
""" Performs a scalar feature selection which orders all features individually,
from the best to the worst to separate the classes.
INPUTS
- Mcorr: Correlation matrix of all features.
- criterios: Class-separability criterion value of each individual feature.
- N: Number of best features to be returned.
- a1: Weight for criterios.
- a2: Weight for Mcorr.
OUTPUTS
- ordem: Tuple with the order of features.
- M: Tuple with criteria for each feature.
"""
L = Mcorr.shape[0]
if len(criterios.shape) != 1:
criterios = criterios[0]
if N==0 or N > len(criterios):
N = len(criterios)
print('You either did not specify N or you gave a number greater than the number of characteristics.')
print('Function will return all {} characteristics.'.format(N))
Mcorr = abs(Mcorr)
ordem = []
M = []
ordem.append(int(np.where(criterios == max(criterios))[0]))
M.append(criterios[int(ordem[0])])
Mcorr[:, int(ordem[0])] = 1
fator = np.zeros(N)
for n in range(1, N):
index = np.linspace(0, L-1, L)
fator = np.sum(Mcorr[tuple(ordem), :], axis=0)
MK = a1*criterios - a2*fator/n
MK = np.delete(MK, ordem)
index = np.delete(index, ordem)
M.append(max(MK))
ordem.append(int(index[int(np.where(MK == max(MK))[0])]))
ordem = tuple(ordem)
M = tuple(M)
return ordem, M
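# Usage sketch (assumes numpy is imported as np in this module): rank 4 features from
# their absolute correlation matrix and individual separability criteria.
import numpy as np
criterios = np.array([0.9, 0.8, 0.5, 0.4])
Mcorr = np.array([[1.00, 0.95, 0.10, 0.20],
                  [0.95, 1.00, 0.15, 0.10],
                  [0.10, 0.15, 1.00, 0.30],
                  [0.20, 0.10, 0.30, 1.00]])
ordem, M = selecaoEscalar(Mcorr, criterios, N=4)
print(ordem)  # (0, 2, 1, 3): feature 1 is pushed back for being highly correlated with 0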
| 5,348,139 |
def sum_by_letter(list_of_dicts, letter):
"""
:param list_of_dicts: A list of dictionaries.
:param letter: A value of the letter keyed by 'letter'.
"""
total = 0
for d in list_of_dicts:
if d['letter'] == letter:
total += d['number']
return total
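# Usage sketch: sum the 'number' values of the entries whose 'letter' matches.
records = [{'letter': 'a', 'number': 1},
           {'letter': 'b', 'number': 10},
           {'letter': 'a', 'number': 4}]
print(sum_by_letter(records, 'a'))  # 5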
| 5,348,140 |
def gate_settle(gate):
""" Return gate settle times """
return 0
| 5,348,141 |
def parse_csr_domains(csr_pem=None, csr_pem_filepath=None, submitted_domain_names=None):
"""
checks found names against `submitted_domain_names`
This routine will use crypto/certbot if available.
If not, openssl is used via subprocesses
`submitted_domain_names` should be all lowercase
"""
log.info("parse_csr_domains >")
if openssl_crypto and certbot_crypto_util:
load_func = openssl_crypto.load_certificate_request
found_domains = certbot_crypto_util._get_names_from_cert_or_req(
csr_pem, load_func, typ=openssl_crypto.FILETYPE_PEM
)
else:
log.debug(".parse_csr_domains > openssl fallback")
# fallback onto OpenSSL
# openssl req -in MYCSR -noout -text
with psutil.Popen(
[openssl_path, "req", "-in", csr_pem_filepath, "-noout", "-text"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
) as proc:
out, err = proc.communicate()
if proc.returncode != 0:
raise IOError("Error loading {0}: {1}".format(csr_pem_filepath, err))
if PY3:
out = out.decode("utf8")
# parse the sans first, then add the commonname
found_domains = san_domains_from_text(out)
# note the conditional whitespace before/after CN
common_name = RE_openssl_x509_subject.search(out)
if common_name is not None:
found_domains.insert(0, common_name.group(1))
# ensure our CERT matches our submitted_domain_names
if submitted_domain_names is not None:
for domain in found_domains:
if domain not in submitted_domain_names:
raise ValueError("domain %s not in submitted_domain_names" % domain)
for domain in submitted_domain_names:
if domain not in found_domains:
raise ValueError("domain %s not in found_domains" % domain)
return sorted(found_domains)
| 5,348,142 |
def test(name=None):
"""
Args:
name (str): The name of the test (the string after 'test_'). When a name isn't specified all tests will be done.
"""
if name:
assert(run('python tests/test_%s.py' % name) == 0)
else:
devLinks.clear() # clear all the dev links to avoid module mixing
install()
assert(run('python setup.py test') == 0)
devLinks.create()
| 5,348,143 |
def fov_gc(lons, lats):
"""Field of view great circle.
Parameters
----------
lons: [float]
Field of view longitudes (degE).
lats: [float]
Field of view latitudes (degN).
Returns
-------
geojson.Feature
GeoJSON field of view polygon.
"""
return geo_polygon(lons, lats, 'Limb', 'Limb field of view', 'blue')
| 5,348,144 |
def return_covid_data() -> tuple[dict, dict]:
"""A function that acts as a getter method, allowing for functions in main
to get the national and local COVID data and then display the values on
the dashboard.
Returns:
tuple: (england_data, local_data). A tuple of two values (England and
local COVID data), this allows two values to be returned at once,
and removes the need for excessive API calls, as the current
national and local COVID data can be returned without needing to
make another API call.
"""
logging.debug("Entering and exiting the return_covid_data function.")
logging.info(f"{(england_data, local_data)} returned")
return (england_data, local_data)
| 5,348,145 |
def request_publication(request, name):
"""Request publication by RFC Editor for a document which hasn't
been through the IESG ballot process."""
class PublicationForm(forms.Form):
subject = forms.CharField(max_length=200, required=True)
body = forms.CharField(widget=forms.Textarea, required=True, strip=False)
doc = get_object_or_404(Document, type="draft", name=name, stream__in=("iab", "ise", "irtf"))
if not is_authorized_in_doc_stream(request.user, doc):
permission_denied(request, "You do not have the necessary permissions to view this page.")
consensus_event = doc.latest_event(ConsensusDocEvent, type="changed_consensus")
m = Message()
m.frm = request.user.person.formatted_email()
(m.to, m.cc) = gather_address_lists('pubreq_rfced',doc=doc).as_strings()
m.by = request.user.person
next_state = State.objects.get(used=True, type="draft-stream-%s" % doc.stream.slug, slug="rfc-edit")
if request.method == 'POST' and not request.POST.get("reset"):
form = PublicationForm(request.POST)
if form.is_valid():
events = []
# start by notifying the RFC Editor
import ietf.sync.rfceditor
response, error = ietf.sync.rfceditor.post_approved_draft(settings.RFC_EDITOR_SYNC_NOTIFICATION_URL, doc.name)
if error:
return render(request, 'doc/draft/rfceditor_post_approved_draft_failed.html',
dict(name=doc.name,
response=response,
error=error))
m.subject = form.cleaned_data["subject"]
m.body = form.cleaned_data["body"]
m.save()
if doc.group.acronym != "none":
m.related_groups.set([doc.group])
m.related_docs.set([doc])
send_mail_message(request, m)
# IANA copy
(m.to, m.cc) = gather_address_lists('pubreq_rfced_iana',doc=doc).as_strings()
send_mail_message(request, m, extra=extra_automation_headers(doc))
e = DocEvent(doc=doc, type="requested_publication", rev=doc.rev, by=request.user.person)
e.desc = "Sent request for publication to the RFC Editor"
e.save()
events.append(e)
# change state
prev_state = doc.get_state(next_state.type_id)
if next_state != prev_state:
doc.set_state(next_state)
e = add_state_change_event(doc, request.user.person, prev_state, next_state)
if e:
events.append(e)
doc.save_with_history(events)
return redirect('ietf.doc.views_doc.document_main', name=doc.name)
else:
if doc.intended_std_level_id in ("std", "ds", "ps", "bcp"):
action = "Protocol Action"
else:
action = "Document Action"
from ietf.doc.templatetags.mail_filters import std_level_prompt
subject = "%s: '%s' to %s (%s-%s.txt)" % (action, doc.title, std_level_prompt(doc), doc.name, doc.rev)
body = generate_publication_request(request, doc)
form = PublicationForm(initial=dict(subject=subject,
body=body))
return render(request, 'doc/draft/request_publication.html',
dict(form=form,
doc=doc,
message=m,
next_state=next_state,
consensus_filled_in=(
True if (doc.stream_id and doc.stream_id=='ietf')
else (consensus_event != None and consensus_event.consensus != None)),
),
)
| 5,348,146 |
def start(connection) -> None:
"""Start the local websever for auth callbacks."""
# Allow Ctrl-C break
signal.signal(signal.SIGINT, signal.SIG_DFL)
global SERVER, CON
CON = connection
app = bottle.app()
try:
SERVER = MyWSGIRefServer(host="localhost", port=5000)
app.run(server=SERVER)
except Exception as exc: # pylint: disable=broad-except
_LOGGER.error(exc)
| 5,348,147 |
def def_op_rdf_header():
"""eReefs legacy WQ def
Test for redirection based on header content type
"""
source = "http://environment.data.gov.au/def/op"
target = "http://sissvoc.ereefs.info/repo/vocab/op.rdf"
r = requests.get(
source, headers={"Accept": "application/rdf+xml"}, allow_redirects=False
)
assert r.headers.get("Location") == target, (
"Failed redirect from " + source + " to " + target
)
| 5,348,148 |
def strip_trailing_characters(unstripped_string, tail):
"""
Strip the tail from a string.
:param unstripped_string: The string to strip. Ex: "leading"
:param tail: The tail to remove. Ex: "ing"
:return: The stripped string. Ex: "lead"
"""
if unstripped_string.endswith(str(tail)):
return unstripped_string[:-len(str(tail))]
else:
return unstripped_string
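# Usage sketch for the corrected slice:
print(strip_trailing_characters("leading", "ing"))  # 'lead'
print(strip_trailing_characters("leading", "xyz"))  # 'leading' (tail not present)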
| 5,348,149 |
def is_prime(x):
""" Prove if number is prime """
if x == 0 or x == 1:
return 0
for i in range(2, x//2 +1):
if x % i == 0:
return 0
return 1
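# Usage sketch: the function returns 1 for primes and 0 otherwise.
print([x for x in range(10) if is_prime(x)])  # [2, 3, 5, 7]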
| 5,348,150 |
def test_boundary_inversion(reparameterisation, is_invertible,
boundary_inversion):
"""Test the different options for rescale to bounds"""
reparam = reparameterisation({'boundary_inversion': boundary_inversion})
assert is_invertible(reparam)
| 5,348,151 |
def obj_to_str(obj, encoding='utf8') -> str:
"""
Examples:
>>> d = dict(a=1, b=2)
>>> assert isinstance(obj_to_str(d), str)
"""
b = pickle.dumps(obj)
return bytes_to_str(b, encoding=encoding)
| 5,348,152 |
def deploy_sqlfiles(engine: Engine, directory: str, message: str, display_output: bool = False, scripting_variables: dict = None) -> bool:
"""Run every SQL script file found in given directory and print the executed file names.
If any file in the directory cannot be deployed after multiple tries, raise an exception and
list the failed files to the user.
Parameters
----------
engine
SQL Alchemy engine.
directory
Path of directory holding the SQL script files.
message
Message passed to OperationManager.
display_output
Indicator to print script output.
scripting_variables
Variables passed to the SQL scripts.
Raises
------
ValueError
If engine is not instance of sqlalchemy.engine.Engine.
RuntimeError
If any of the files in given directory fail to deploy after multiple tries.
"""
with OperationManager(message):
if isinstance(engine, dict):
raise ValueError(
"First parameter of function 'deploy_sqlfiles' should be instance of sqlalchemy engine. Check your custom actions!")
if not Path(directory).is_dir():
logger.warning("Directory not found: " + directory)
return False
files = [path.join(directory, f)
for f in listdir(directory) if f.endswith('.sql')]
failed = sql_file_loop(deploy_sql_from_file, engine,
display_output, scripting_variables, file_list=files, max_loop=len(files))
if len(failed) > 0:
error_msg = "Failed to deploy the following files:\n{}".format(
'\n'.join(failed.keys()))
error_msg = error_msg + '\nSee log for error details.'
for fail_object, fail_messages in failed.items():
logger.debug(f'----- Error for object {fail_object} -----')
logger.debug(''.join(fail_messages))
raise RuntimeError(error_msg)
return True
| 5,348,153 |
def compare_methode_corpus(corpus):
"""la fonction pour la comparaison entre Bleu score et la distance d'édition au niveu des directions entieres
"""
list_DA_corpus=[]
list_bleu_corpus=[]
list_DIST_corpus=[]
list_bleu_corpus_z=[]
list_DIST_corpus_z=[]
list_bleu_corpus_scale=[]
list_DIST_corpus_scale=[]
list_DA_corpus_z=[]
list_DA_corpus_scale=[]
scores_z=[]
scores_scale=[]
for k,v in distance_edition_directions(corpus).items():
list_DIST_corpus.append([k,v])
for i in get_directions(corpus):
list_bleu_corpus.append([(i[0]["src_lang"],i[0]["orig_lang"],i[0]["tgt_lang"]),compute_bleu_corpus(i)])
list_DA_corpus.append([(i[0]["src_lang"],i[0]["orig_lang"],i[0]["tgt_lang"]),sum([j['score'] for j in i])/len(i)]) # compute the mean DA score for each direction
list_bleu_corpus=sorted(list_bleu_corpus,key=lambda x: x[0]) # sort the lists so that they stay aligned
list_DIST_corpus=sorted(list_DIST_corpus,key=lambda x: x[0])
list_DA_corpus=sorted(list_DA_corpus,key=lambda x: x[0])
for i in range(len(list_bleu_corpus)): # compute the adjusted scores
list_bleu_corpus_z=to_z_score([i[1] for i in list_bleu_corpus])
list_DIST_corpus_z=to_z_score([i[1] for i in list_DIST_corpus])
list_DA_corpus_z=to_z_score([i[1] for i in list_DA_corpus])
list_bleu_corpus_scale=convert_scale([i[1] for i in list_bleu_corpus],1)
list_DIST_corpus_scale=convert_scale([i[1] for i in list_DIST_corpus],1)
list_DA_corpus_scale=convert_scale([i[1] for i in list_DA_corpus],1)
scores_z=list(zip(sorted(compute_directions (corpus).keys()),list_DA_corpus_z,list_bleu_corpus_z,list_DIST_corpus_z)) # combine the direction names with the corresponding scores
scores_scale=list(zip(sorted(compute_directions (corpus).keys()),list_DA_corpus_scale,list_bleu_corpus_scale,list_DIST_corpus_scale))
columns=sorted(compute_directions (corpus).keys())
#df1 = pd.DataFrame({'DA':list_DA_corpus_z,'Bleu':list_bleu_corpus_z,'Distance_edit':list_DIST_corpus_z}, index=columns)
#ax=df1.plot.bar(rot=0);
#plt.show()
df2 = pd.DataFrame({'DA':list_DA_corpus_scale,'Bleu':list_bleu_corpus_scale,'Distance_edit':list_DIST_corpus_scale}, index=columns)
ax=df2.plot.bar(rot=0);
plt.show()
| 5,348,154 |
def sample_ellipsoid(p0, covmat, size=1):
"""
Produce an ellipsoid of walkers around an initial parameter value,
according to a covariance matrix.
:param p0: The initial parameter value.
:param covmat:
The covariance matrix. Must be symmetric-positive definite or
it will raise the exception numpy.linalg.LinAlgError
:param size: The number of samples to produce.
"""
return np.random.multivariate_normal(
np.atleast_1d(p0), np.atleast_2d(covmat), size=size
)
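# Usage sketch (assumes numpy is imported as np in this module): draw 4 walkers
# around p0 = [0, 0] with a small diagonal covariance.
import numpy as np
walkers = sample_ellipsoid([0.0, 0.0], np.diag([1e-2, 1e-4]), size=4)
print(walkers.shape)  # (4, 2)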
| 5,348,155 |
def test_index_name_none(df):
"""Test expand_grid output for a pandas Index without a name."""
A = pd.Index(df["a"].array, name=None)
B = df["cities"]
others = {"A": A, "B": B}
result = expand_grid(others=others)
A = df.loc[:, ["a"]]
B = df.loc[:, ["cities"]]
expected = A.assign(key=1).merge(B.assign(key=1), on="key")
expected = expected.drop(columns="key")
expected.columns = pd.MultiIndex.from_arrays([["A", "B"], [0, "cities"]])
assert_frame_equal(result, expected)
| 5,348,156 |
def test_gaussian_blur():
"""
Feature: Test image gaussian blur.
Description: Add gaussian blur to image.
Expectation: success.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
image = np.random.random((32, 32, 3))
trans = GaussianBlur(ksize=5)
dst = trans(image)
print(dst)
| 5,348,157 |
def avg_pool_2d(x, size=(2, 2), stride=(2, 2), name='avg_pooling', padding='VALID'):
"""
Average pooling 2D Wrapper
:param x: (tf.tensor) The input to the layer (N,H,W,C).
:param size: (tuple) The pooling window size.
:param stride: (tuple) The pooling stride.
:param padding: (string) The padding scheme ('VALID' or 'SAME').
:param name: (string) Scope name.
:return: With the default 2x2 size and stride, the output is the input halved in both width and height (N,H/2,W/2,C).
"""
size_x, size_y = size
stride_x, stride_y = stride
return tf.nn.avg_pool(x, ksize=[1, size_x, size_y, 1], strides=[1, stride_x, stride_y, 1], padding=padding, name=name)
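# Usage sketch (assumes TensorFlow is imported as tf in this module): a 1x4x4x1 input
# halves to 1x2x2x1 with the default 2x2 window and stride.
import numpy as np
import tensorflow as tf
x = tf.constant(np.arange(16, dtype=np.float32).reshape(1, 4, 4, 1))
print(avg_pool_2d(x).shape)  # (1, 2, 2, 1)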
| 5,348,158 |
def train_eval(arg_params):
"""
A simple train and eval for a SAC agent
:param arg_params:
parsed command-line arguments
:return:
"""
"""
initialize distribution strategy
use_gpu=False means use tf.distribute.get_strategy() which uses CPU
use_gpu=True means use tf.distribute.MirroredStrategy() which uses all GPUs that are visible
"""
strategy = strategy_utils.get_strategy(tpu=False, use_gpu=True)
train_dir = os.path.join(
arg_params.root_dir,
learner.TRAIN_DIR
)
eval_dir = os.path.join(
arg_params.root_dir,
'eval'
)
policy_dir = os.path.join(
arg_params.root_dir,
'policy'
)
tf.profiler.experimental.start(logdir=arg_params.root_dir)
with strategy.scope():
# create or get global step tensor
global_step = tf.compat.v1.train.get_or_create_global_step()
# create sac agent
logging.info('Creating SAC Agent')
sac_agent = SACAgent(
root_dir=arg_params.root_dir,
env_load_fn=lambda model_id, mode, use_tf_function, device_idx: suite_gibson.load(
config_file=arg_params.config_file,
model_id=model_id,
env_mode=mode,
use_tf_function=use_tf_function,
is_localize_env=arg_params.is_localize_env,
action_timestep=arg_params.action_timestep,
physics_timestep=arg_params.physics_timestep,
device_idx=device_idx,
),
train_step_counter=global_step,
strategy=strategy,
gpu=arg_params.gpu_num,
use_tf_function=arg_params.use_tf_function
)
tf_agent = sac_agent.tf_agent
collect_env = sac_agent.train_py_env
eval_env = sac_agent.eval_py_env
random_policy = sac_agent.random_policy
collect_policy = sac_agent.collect_policy
eval_policy = sac_agent.eval_policy
# instantiate reverb replay buffer
rb = ReverbReplayBuffer(
table_name='uniform_table',
replay_buffer_capacity=arg_params.replay_buffer_capacity
)
# generate tf dataset from replay buffer
dataset = rb.get_dataset(
collect_data_spec=tf_agent.collect_data_spec,
sequence_length=arg_params.sequence_length,
batch_size=arg_params.batch_size,
)
experience_dataset_fn = lambda: dataset
# instantiate replay buffer traj observer
rb_traj_observer = rb.get_rb_traj_observer(
sequence_length=arg_params.sequence_length,
stride_length=arg_params.stride_length,
)
# Metrics
train_metrics = actor.collect_metrics(
buffer_size=10,
)
eval_metrics = actor.eval_metrics(
buffer_size=arg_params.num_eval_episodes,
)
# use random policy to collect initial experiences to seed the replay buffer
initial_collect_actor = actor.Actor(
env=collect_env,
policy=random_policy,
train_step=global_step,
steps_per_run=arg_params.initial_collect_steps,
observers=[rb_traj_observer],
metrics=train_metrics,
)
logging.info('Initializing replay buffer by collecting experience for %d steps '
'with a random policy.', arg_params.initial_collect_steps)
initial_collect_actor.run()
# use collect policy to gather more experiences during training
collect_actor = actor.Actor(
env=collect_env,
policy=collect_policy,
train_step=global_step,
steps_per_run=1,
observers=[rb_traj_observer],
metrics=train_metrics,
summary_dir=train_dir,
name='train',
)
# use eval policy to evaluate during training
eval_actor = actor.Actor(
env=eval_env,
policy=eval_policy,
train_step=global_step,
episodes_per_run=arg_params.num_eval_episodes,
observers=None,
metrics=eval_metrics,
summary_dir=eval_dir,
summary_interval=arg_params.eval_interval,
name='eval',
)
# policy checkpoint trigger
policy_checkpointer = common.Checkpointer(
ckpt_dir=policy_dir,
policy=tf_agent.policy,
global_step=global_step,
)
# HACK: there is problem with triggers.PolicySavedModelTrigger
# instantiate agent learner with triggers
learning_triggers = [
triggers.StepPerSecondLogTrigger(
train_step=global_step,
interval=1000
),
]
agent_learner = learner.Learner(
root_dir=arg_params.root_dir,
train_step=global_step,
agent=tf_agent,
experience_dataset_fn=experience_dataset_fn,
# triggers=learning_triggers,
strategy=strategy,
)
logging.info('====> Starting training')
# reset the train step
tf_agent.train_step_counter.assign(0)
returns = []
for _ in range(arg_params.num_iterations):
# training
# Creates a trace event for each training step with the step number.
step = agent_learner.train_step_numpy
with tf.profiler.experimental.Trace("Train", step_num=step):
collect_actor.run()
loss_info = agent_learner.run(
iterations=1
)
step = agent_learner.train_step_numpy
# evaluation
if step % arg_params.eval_interval == 0:
metrics = get_eval_metrics(eval_actor)
returns.append(metrics["AverageReturn"])
# eval_actor.log_metrics()
eval_results = ', '.join('{} = {:.6f}'.format(name, result) for name, result in metrics.items())
logging.info('step = %d: %s', step, eval_results)
# logging
if step % arg_params.log_interval == 0:
# collect_actor.log_metrics()
logging.info('step = %d: loss = %f', step, loss_info.loss.numpy())
# save policy
if step % arg_params.policy_save_interval == 0:
policy_checkpointer.save(global_step=step)
# close replay buffer
rb.close()
tf.profiler.experimental.stop()
| 5,348,159 |
def patch_base_handler(BaseHandler, log=None):
"""Patch HubAuthenticated into a base handler class
so anything inheriting from BaseHandler uses Hub authentication.
This works *even after* subclasses have imported and inherited from BaseHandler.
.. versionadded: 1.5
Made available as an importable utility
"""
if log is None:
log = logging.getLogger()
if HubAuthenticatedHandler not in BaseHandler.__bases__:
new_bases = (HubAuthenticatedHandler,) + BaseHandler.__bases__
log.info(
"Patching auth into {mod}.{name}({old_bases}) -> {name}({new_bases})".format(
mod=BaseHandler.__module__,
name=BaseHandler.__name__,
old_bases=', '.join(
_nice_cls_repr(cls) for cls in BaseHandler.__bases__
),
new_bases=', '.join(_nice_cls_repr(cls) for cls in new_bases),
)
)
BaseHandler.__bases__ = new_bases
# We've now inserted our class as a parent of BaseHandler,
# but we also need to ensure BaseHandler *itself* doesn't
# override the public tornado API methods we have inserted.
# If they are defined in BaseHandler, explicitly replace them with our methods.
for name in ("get_current_user", "get_login_url"):
if name in BaseHandler.__dict__:
log.debug(
f"Overriding {BaseHandler}.{name} with HubAuthenticatedHandler.{name}"
)
method = getattr(HubAuthenticatedHandler, name)
setattr(BaseHandler, name, method)
return BaseHandler
| 5,348,160 |
def get_username_field() -> str:
"""Get custom username field.
Returns:
str: username field.
"""
from django.contrib.auth import get_user_model
user_model = get_user_model()
return getattr(user_model, "USERNAME_FIELD", "username")
| 5,348,161 |
def get_edited_file_name():
"""
Gets the current open file in xcode
"""
script = '''
tell application "Xcode"
set last_word_in_main_window to (word -1 of (get name of window 1))
set current_document to document 1 whose name ends with last_word_in_main_window
set current_document_path to path of current_document
return current_document_path
end tell
'''
val = run_script(script, [])
if len(val) > 0:
debug_log("Currently editing " + val + " in Xcode, we'll try to use that.")
else:
error_log("Failed to get current edited document in Xcode! Is Xcode running, and is a source file open?")
return val
| 5,348,162 |
def p_metadata(p):
"""metadata : '(' metadata_seq ')'
|"""
if len(p) > 2:
p[0] = p[2]
else:
p[0] = []
| 5,348,163 |
def get_header(yaml_dict):
"""
    Build the request headers: a bearer access token plus JSON content type.
    :return: dict of request headers
"""
headers = {"Authorization": "Bearer {}".format(get_access_token(yaml_dict)),
"Content-Type": "application/json"}
return headers
| 5,348,164 |
def change_filename_extension(filename: str, old_ext: str, new_ext: str) -> str:
"""
Change extension of a filename (e.g. "data.csv" to "data.json").
:param filename: the old filename (including extension)
:param old_ext: the extension of the old filename
:param new_ext: the extension to replace the old extension
:return: a filename with the new extension
"""
dbg.dassert(
filename.endswith(old_ext),
"Extension '%s' doesn't match file '%s'",
old_ext,
filename,
)
    # Remove the old extension (note: str.rstrip strips characters, not a suffix).
    new_filename = filename[: -len(old_ext)]
# Add the new extension.
new_filename = new_filename + new_ext
return new_filename
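# A minimal usage sketch, assuming the surrounding module's `dbg` helper is available
# as in the function above.
new_name = change_filename_extension("data.csv", ".csv", ".json")
print(new_name)  # data.json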
| 5,348,165 |
def parseArgPairToBoundaryArray(pair, mesh):
"""
Parse boundary related pair argument to create a list of
[ :gimliapi:`GIMLI::Boundary`, value|callable ].
Parameters
----------
pair: tuple
- [marker, arg]
- [marker, [callable, *kwargs]]
- [marker, [arg_x, arg_y, arg_z]]
- [boundary, arg]
- ['*', arg]
- [node, arg]
- [[marker, ...], arg] (REMOVE ME because of bad design)
- [[boundary,...], arg] (REMOVE ME because of bad design)
- [marker, callable, *kwargs] (REMOVE ME because of bad design)
- [[marker, ...], callable, *kwargs] (REMOVE ME because of bad design)
arg will be parsed by
:py:mod:`pygimli.solver.solver.generateBoundaryValue`
and distributed to each boundary.
Callable functions will be executed at run time.
'*' will be interpreted as all boundary elements with one neighboring cell
mesh: :gimliapi:`GIMLI::Mesh`
Used to find boundaries by marker.
Returns
-------
bc: list()
[:gimliapi:`GIMLI::Boundary`, value|callable]
"""
bc = []
bounds = []
if isinstance(pair[1], list):
# [marker, [callable, *kwargs]]
if callable(pair[1][0]):
pair = [pair[0]] + pair[1]
if pair[0] == '*':
mesh.createNeighborInfos()
for b in mesh.boundaries():
if b.leftCell() is not None and b.rightCell() is None:
bounds.append(b)
elif isinstance(pair[0], int):
bounds = mesh.findBoundaryByMarker(pair[0])
elif isinstance(pair[0], pg.core.Node):
bc.append(pair)
return bc
####### bad Design .. need to remove
elif isinstance(pair[0], list):
print(pair[0], pair[0][0])
pg.deprecated('bad design')
# [[,,..], ]
for b in pair[0]:
for bi in mesh.boundaries(pg.find(mesh.boundaryMarkers() == b)):
bounds.append(bi)
elif isinstance(pair[0], pg.core.stdVectorBounds):
pg.deprecated('bad design')
pg.warn('in use? pair[0], pg.core.stdVectorBounds)')#20200115
bounds = pair[0]
elif isinstance(pair[0], pg.core.Boundary):
pg.warn('in use? isinstance(pair[0], pg.core.Boundary)')#20200115
bc.append(pair)
return bc
####### bad Design .. need to remove
for b in bounds:
val = None
if len(pair) > 2:
val = pair[1:]
else:
val = pair[1]
bc.append([b, val])
# print('-'*50)
# print(b, pair[1], callable(pair[1]))
# print('+'*50)
# if callable(pair[1]):
# # don't execute the callable here
# # we want to call them at runtime
# if len(pair) > 2:
# val = pair[1:]
# else:
# val = pair[1]
# else:
# this will be executed
#val = generateBoundaryValue(b, pair[1])
#print('#'*30)
return bc
| 5,348,166 |
def get_niter(outcarfile):
"""
Get the number of ionic steps that were run
Args:
outcarfile (string): full path to OUTCAR file
Returns:
niter (int): number of ionic iterations
"""
with open(outcarfile,'r') as rf:
for line in rf:
if '- Iteration' in line:
niter = line.split('(')[0].split('n')[-1].strip()
niter = int(niter)
return niter
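# A minimal usage sketch: write a tiny fake OUTCAR with two ionic iteration markers
# and check that the last iteration number is returned.
import tempfile

fake_outcar = (
    "----- Iteration      1(   1)  -----\n"
    "  some electronic step output\n"
    "----- Iteration      2(   1)  -----\n"
)
with tempfile.NamedTemporaryFile("w", suffix="OUTCAR", delete=False) as tmp:
    tmp.write(fake_outcar)
print(get_niter(tmp.name))  # 2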
| 5,348,167 |
def bucket_contvar(ex, ctrl, num_buckets):
"""
Given ex, which contains a continuous value for a particular control variable,
return the bucketed version of that control value.
Inputs:
ex: message dictionary. Assume it has key ctrl, mapping to the value.
ctrl: string. The name of the CT control.
num_buckets: int. The number of buckets for this control variable.
"""
if ctrl not in ex.keys():
raise ValueError(
"Control %s not found in example. Available keys in "
"this example: %s" % (ctrl, ', '.join(ex.keys()))
)
# Get the control variable value
ctrl_val = ex[ctrl] # string. the value of the control variable for this example
if ctrl == 'avg_nidf':
ctrl_val = float(ctrl_val)
assert ctrl_val >= 0
assert ctrl_val <= 1
elif ctrl == 'lastuttsim':
if ctrl_val == 'None': # bot goes first in conversation
assert num_buckets == 11
return 10 # The last bucket is for when the bot goes first
else:
ctrl_val = float(ctrl_val)
assert ctrl_val >= -1
assert ctrl_val <= 1
else:
raise ValueError('Unexpected CT ctrl: %s' % ctrl)
# Get the bucket lowerbounds
bucket_lbs = CONTROL2BUCKETLBS[(ctrl, num_buckets)] # lst len num_buckets of floats
if ctrl == 'lastuttsim':
# The 'bot goes first' bucket 10 has no lower bound
assert len(bucket_lbs) == num_buckets - 1
else:
assert len(bucket_lbs) == num_buckets
# Determine the correct bucket and return the bucket id
return sort_into_bucket(ctrl_val, bucket_lbs)
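# A minimal sketch of the final bucketing step. The real `sort_into_bucket` and
# `CONTROL2BUCKETLBS` are defined elsewhere in the original module; this toy version
# assumes buckets are defined by sorted lower bounds.
from bisect import bisect_right

def _toy_sort_into_bucket(ctrl_val, bucket_lbs):
    # Return the index of the last lower bound that does not exceed ctrl_val.
    return max(bisect_right(bucket_lbs, ctrl_val) - 1, 0)

print(_toy_sort_into_bucket(0.37, [0.0, 0.25, 0.5, 0.75]))  # 1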
| 5,348,168 |
def handle_exception(error):
"""
Flask error handler for Exception
Parameters
----------
error : Exception
An Exception error
Returns
-------
string
A JSON string of the Exception error response
"""
response = create_error_response(error)
return response, 500
| 5,348,169 |
def isphone(value, locale='en-US'):
"""
Return whether or not given value is valid mobile number according to given locale. Default locale is 'en-US'.
If the value is valid mobile number, this function returns ``True``, otherwise ``False``.
Supported locales are: ``ar-DZ``, ``ar-SY``, ``ar-SA``, ``en-US``, ``en-CA``, ``cs-CZ``, ``de-DE``, ``da-DK``
``el-GR``, ``en-AU``, ``en-GB``, ``en-HK``, ``zh-HK``, ``en-IN``, ``en-NG``, ``en-NZ``, ``en-ZA``, ``en-ZM``
``es-ES``, ``fi-FI``, ``fr-FR``, ``he-IL``, ``hu-HU``, ``id-ID``, ``it-IT``, ``ja-JP``, ``ms-MY``, ``nb-NO``
``nl-BE``, ``fr-BE``, ``nn-NO``, ``pl-PL``, ``pt-BR``, ``pt-PT``, ``ro-RO``, ``en-PK``, ``ru-RU``, ``sr-RS``
``tr-TR``, ``vi-VN``, ``zh-CN``, ``zh-TW``, ``bn-BD``
Examples::
>>> isphone('+15673628910', 'en-US')
True
>>> isphone('+10345672645', 'en-US')
False
:param value: string to validate mobile number
:param locale: locale of mobile number to validate
"""
phones = {
'ar-DZ': r'^(\+?213|0)(5|6|7)\d{8}$',
'ar-SY': r'^(!?(\+?963)|0)?9\d{8}$',
'ar-SA': r'^(!?(\+?966)|0)?5\d{8}$',
'bn-BD': r'^(\+?88)?(01[56789]\d{2}(\s|\-)?\d{6})$',
'en-US': r'^(\+?1)?[2-9]\d{2}[2-9](?!11)\d{6}$',
'cs-CZ': r'^(\+?420)? ?[1-9][0-9]{2} ?[0-9]{3} ?[0-9]{3}$',
'de-DE': r'^(\+?49[ \.\-])?([\(]{1}[0-9]{1,6}[\)])?([0-9 \.\-\']{3,20})((x|ext|extension)[ ]?[0-9]{1,4})?$',
'da-DK': r'^(\+?45)?(\d{8})$',
'el-GR': r'^(\+?30)?(69\d{8})$',
'en-AU': r'^(\+?61|0)4\d{8}$',
'en-GB': r'^(\+?44|0)7\d{9}$',
'en-HK': r'^(\+?852\-?)?[569]\d{3}\-?\d{4}$',
'en-IN': r'^(\+?91|0)?[789]\d{9}$',
'en-NG': r'^(\+?234|0)?[789]\d{9}$',
'en-NZ': r'^(\+?64|0)2\d{7,9}$',
'en-ZA': r'^(\+?27|0)\d{9}$',
'en-ZM': r'^(\+?26)?09[567]\d{7}$',
'es-ES': r'^(\+?34)?(6\d{1}|7[1234])\d{7}$',
'fi-FI': r'^(\+?358|0)\s?(4(0|1|2|4|5)?|50)\s?(\d\s?){4,8}\d$',
'fr-FR': r'^(\+?33|0)[67]\d{8}$',
'he-IL': r'^(\+972|0)([23489]|5[0248]|77)[1-9]\d{6}',
'hu-HU': r'^(\+?36)(20|30|70)\d{7}$',
'id-ID': r'^(\+?62|0[1-9])[\s|\d]+$',
'it-IT': r'^(\+?39)?\s?3\d{2} ?\d{6,7}$',
'ja-JP': r'^(\+?81|0)\d{1,4}[ \-]?\d{1,4}[ \-]?\d{4}$',
'ms-MY': r'^(\+?6?01){1}(([145]{1}(\-|\s)?\d{7,8})|([236789]{1}(\s|\-)?\d{7}))$',
'nb-NO': r'^(\+?47)?[49]\d{7}$',
'nl-BE': r'^(\+?32|0)4?\d{8}$',
'nn-NO': r'^(\+?47)?[49]\d{7}$',
'pl-PL': r'^(\+?48)? ?[5-8]\d ?\d{3} ?\d{2} ?\d{2}$',
'pt-BR': r'^(\+?55|0)\-?[1-9]{2}\-?[2-9]{1}\d{3,4}\-?\d{4}$',
'pt-PT': r'^(\+?351)?9[1236]\d{7}$',
'ro-RO': r'^(\+?4?0)\s?7\d{2}(\'|\s|\.|\-)?\d{3}(\s|\.|\-)?\d{3}$',
'en-PK': r'^((\+92)|(0092))-{0,1}\d{3}-{0,1}\d{7}$|^\d{11}$|^\d{4}-\d{7}$',
'ru-RU': r'^(\+?7|8)?9\d{9}$',
'sr-RS': r'^(\+3816|06)[- \d]{5,9}$',
'tr-TR': r'^(\+?90|0)?5\d{9}$',
'vi-VN': r'^(\+?84|0)?((1(2([0-9])|6([2-9])|88|99))|(9((?!5)[0-9])))([0-9]{7})$',
'zh-CN': r'^(\+?0?86\-?)?1[345789]\d{9}$',
'zh-TW': r'^(\+?886\-?|0)?9\d{8}$'
}
phones['en-CA'] = phones['en-US']
phones['fr-BE'] = phones['nl-BE']
phones['zh-HK'] = phones['en-HK']
loc = phones.get(locale)
if loc is None:
raise ValueError('Please provide a supported locale.')
else:
loc_pattern = re.compile(loc)
return bool(loc_pattern.match(value))
| 5,348,170 |
def _darken(color):
"""
    Takes a hexadecimal color and makes it a shade darker
    :param color: The hexadecimal color to darken
    :return: A darkened version of the hexadecimal color
"""
# Get the edge color
darker = "#"
hex1 = color[1:3]
hex2 = color[3:5]
hex3 = color[5:7]
for val in [hex1, hex2, hex3]:
if val == "00":
darker += "00"
else:
x = int(val, base=16)
x -= int("11", base=16)
x = str(hex(x))[2:].upper()
darker += x
return darker
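# A minimal usage sketch: each non-zero RGB channel is reduced by 0x11.
print(_darken("#AABBCC"))  # #99AABB
print(_darken("#00FF88"))  # #00EE77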
| 5,348,171 |
def commit_datetime(author_time: str, author_tz: str):
"""
Convert a commit's timestamp to an aware datetime object.
Args:
author_time: Unix timestamp string
author_tz: string in the format +hhmm
Returns:
datetime.datetime object with tzinfo
"""
# timezone info looks like +hhmm or -hhmm
tz_hours = int(author_tz[:3])
th_minutes = int(author_tz[0] + author_tz[3:])
return datetime.fromtimestamp(
int(author_time), timezone(timedelta(hours=tz_hours, minutes=th_minutes))
)
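# A minimal usage sketch: 1609459200 is 2021-01-01 00:00:00 UTC, rendered here in a
# +0530 offset.
dt = commit_datetime("1609459200", "+0530")
print(dt.isoformat())  # 2021-01-01T05:30:00+05:30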
| 5,348,172 |
def flatten_acfg_list(acfg_list):
"""
Returns a new config where subconfig params are prefixed by subconfig keys
"""
flat_acfg_list = []
for acfg in acfg_list:
flat_dict = {
prefix + '_' + key: val
for prefix, subdict in acfg.items()
for key, val in subdict.items()
}
flat_acfg_list.append(flat_dict)
return flat_acfg_list
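# A minimal usage sketch: sub-config keys are prefixed by their section names.
acfg_list = [{"model": {"lr": 0.1}, "data": {"batch": 32}}]
print(flatten_acfg_list(acfg_list))  # [{'model_lr': 0.1, 'data_batch': 32}]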
| 5,348,173 |
def happy_birthday(name: hug.types.text, age: hug.types.number):
"""Says happy birthday to a user"""
return "Happy {0} Birthday {1}!".format(name, age)
| 5,348,174 |
def train(epoch):
"""
    Train the model for one epoch, lowering the learning rate at epochs 6 and 16.
"""
model.train()
if epoch == 6:
for param_group in optimizer.param_groups:
param_group['lr'] = 0.001
if epoch == 16:
for param_group in optimizer.param_groups:
param_group['lr'] = 0.0001
for data in train_loader:
data = data.to(device)
optimizer.zero_grad()
torch.nn.functional.nll_loss(model(data), data.y).backward()
optimizer.step()
| 5,348,175 |
def emit_end_time_duration(start_time, activity_name, signals):
"""Emits the end time and duration of something that has started before."""
end_time = time.time()
signals.message.emit(
"End time: {}".format(time.strftime("%X", time.localtime(end_time))), True
)
signals.message.emit(
"{} took (hh:mm:ss): {}.".format(
activity_name, time.strftime("%H:%M:%S", time.gmtime(end_time - start_time))
),
True,
)
| 5,348,176 |
def _format_d10_singlecell(row):
"""
Format the D10 input data for a single cell (corresponds to a single row
in the input csv file).
"""
nlayers = int(row['nlayer'])
if nlayers == 0:
# This means this cell cannot be run in HELP.
return None
try:
title = str(int(row['cid']))
except ValueError:
title = str(row['cid'])
iu10 = 2
ipre = 0
irun = 1
osno = 0 # initial snow water
area = 6.25 # area projected on horizontal plane
frunof = 100
runof = float(row['CN'])
d10dat = []
# READ (10, 5070) TITLE
# 5070 FORMAT(A60)
d10dat.append(['{0:<60}'.format(title)])
# READ (10, 5080) IU10, IPRE, OSNO, AREA, FRUNOF, IRUN
# 5080 FORMAT(I2,I2,2F10.0,F6.0,I2)
d10dat.append(['{0:>2}'.format(iu10) +
'{0:>2}'.format(ipre) +
'{0:>10.0f}'.format(osno) +
'{0:>10.0f}'.format(area) +
'{0:>6.0f}'.format(frunof) +
'{0:>2}'.format(irun)])
# IF (IRUN .EQ. 1) READ (10, 5090) CN2
# 5090 FORMAT(F7.0)
d10dat.append(['{0:>7.0f}'.format(runof)])
# Format the layer properties.
for i in range(nlayers):
lay = str(i+1)
layer = int(row['lay_type'+lay])
thick = max(float(row['thick'+lay]), MINTHICK)
isoil = 0
poro = float(row['poro'+lay])
fc = float(row['fc'+lay])
wp = float(row['wp'+lay])
sw = ''
rc = float(row['ksat'+lay])
xleng = float(row['dist_dr'+lay])
slope = float(row['slope'+lay])
# Check that all values are valid for the layer.
check = [val == -9999 for val in
(thick, poro, fc, wp, rc, xleng, slope)]
if any(check):
return None
# READ (10, 5120) LAYER (J), THICK (J), ISOIL (J),
# PORO (J), FC (J), WP (J), SW (J), RC (J)
# 5120 FORMAT(I2,F7.0,I4,4F6.0,F16.0)
d10dat.append(['{0:>2}'.format(layer) +
'{0:>7.0f}'.format(thick) +
'{0:>4}'.format(isoil) +
'{0:>6.3f}'.format(poro) +
'{0:>6.3f}'.format(fc) +
'{0:>6.3f}'.format(wp) +
'{0:>6}'.format(sw) +
'{0:>16.14f}'.format(rc)])
recir = subin = phole = defec = ipq = trans = ''
layr = 0
# READ (10, 5130) XLENG (J), SLOPE (J), RECIR (J), LAYR (J),
# SUBIN (J), PHOLE (J), DEFEC (J), IPQ (J), TRANS (J)
# 5130 FORMAT(F7.0,2F6.0,I3,F13.0,2F7.0,I2,G14.6)
d10dat.append(['{0:>7.0f}'.format(xleng) +
'{0:>6.2f}'.format(slope) +
'{0:>6}'.format(recir) +
'{0:>3}'.format(layr) +
'{0:>13}'.format(subin) +
'{0:>7}'.format(phole) +
'{0:>7}'.format(defec) +
'{0:>2}'.format(ipq) +
'{0:>14}'.format(trans)])
return d10dat
| 5,348,177 |
def logical_and(x, y, out=None, name=None):
"""
:alias_main: paddle.logical_and
:alias: paddle.logical_and,paddle.tensor.logical_and,paddle.tensor.logic.logical_and
:old_api: paddle.fluid.layers.logical_and
logical_and Operator
It operates element-wise on X and Y, and returns the Out. X, Y and Out are N-dim boolean LoDTensor or Tensor.
Each element of Out is calculated by
.. math::
Out = X \land Y
Args:
x(${x_type}): ${x_comment}
y(${y_type}): ${y_comment}
out(LoDTensor or Tensor): The LoDTensor or Tensor that specifies the output of the operator, which can be any Variable that has been created in the program. The default value is None, and a new Variable will be created to save the output.
name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# Graph organizing
x = fluid.layers.data(name='x', shape=[2], dtype='bool')
y = fluid.layers.data(name='y', shape=[2], dtype='bool')
res = fluid.layers.logical_and(x=x, y=y)
# The comment lists another available method.
# res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0)
# fluid.layers.logical_and(x=x, y=y, out=res)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1, 0], [0, 1]]).astype(np.bool)
y_i = np.array([[1, 1], [0, 0]]).astype(np.bool)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[res])
print(res_val) # [[True, False], [False, False]]
"""
return _logical_op(
op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True)
| 5,348,178 |
def _create_tileZeros():
""" Create a function mapping to the Scala implementation."""
def _(cols, rows, cellType = 'float64'):
jfcn = RFContext.active().lookup('tile_zeros')
return Column(jfcn(cols, rows, cellType))
_.__name__ = 'tile_zeros'
_.__doc__ = "Create column of constant tiles of zero"
_.__module__ = THIS_MODULE
return _
| 5,348,179 |
def load_modules():
"""
Dynamically loads all the modules in the modules folder and sorts
them by the PRIORITY key. If no PRIORITY is defined for a given
module, a priority of 0 is assumed.
"""
# logger = logging.getLogger(__name__)
locations = [marvin.support.path.PLUGIN_PATH]
modules = []
for finder, name, ispkg in pkgutil.walk_packages(locations):
try:
loader = finder.find_module(name)
mod = loader.load_module(name)
except:
Log.warn("Skipped loading module '{0}' due to an error.", name)
else:
# if hasattr(mod, 'WORDS'):
modules.append(mod)
# else:
# Log.warn("Skipped loading module '{0}' because it misses " +
# "the WORDS constant.", name)
modules.sort(key=lambda mod: mod.PRIORITY if hasattr(mod, 'PRIORITY')
else 0, reverse=True)
return modules
| 5,348,180 |
def get_merge_image(location_list, url):
"""
    Reassemble and restore the captcha image according to the slice positions.
    :param location_list: list of image slice positions
    :param url: image URL
:return:
"""
save_path = os.path.abspath('...') + '\\' + 'images'
if not os.path.exists(save_path):
os.mkdir(save_path)
filename = _pic_download(url, 'all')
im = Image.open(filename)
width, height = im.size
# print(width, height)
big = im.crop((0, 0, 260, height))
captcha_path = save_path + '\\' + 'captcha.jpg'
slider_path = save_path + '\\' + 'slider.jpg'
big.convert('RGB').save(captcha_path)
small = im.crop((260, 0, width, height))
small.convert('RGB').save(slider_path)
new_im = Image.new('RGB', (260, height))
upper_list = location_list[:20]
lower_list = location_list[20:]
x_offset = 0
for location in upper_list:
imgcrop = big.crop((abs(location['x']), abs(location['y']), abs(location['x']) + 13, abs(location['y']) + 60))
new_im.paste(imgcrop, (x_offset, 0))
x_offset += 13
x_offset = 0
for location in lower_list:
imgcrop = big.crop((abs(location['x']), abs(location['y']), abs(location['x']) + 13, abs(location['y']) + 60))
new_im.paste(imgcrop, (x_offset, 60))
x_offset += 13
new_im.show()
new_im.save(captcha_path)
return captcha_path, slider_path
| 5,348,181 |
def _rbe_autoconfig_impl(ctx):
"""Core implementation of _rbe_autoconfig repository rule."""
bazel_version_debug = "Bazel %s" % ctx.attr.bazel_version
if ctx.attr.bazel_rc_version:
bazel_version_debug += " rc%s" % ctx.attr.bazel_rc_version
print("%s is used in rbe_autoconfig." % bazel_version_debug)
name = ctx.attr.name
image_name = None
if ctx.attr.digest:
image_name = ctx.attr.registry + "/" + ctx.attr.repository + "@" + ctx.attr.digest
else:
image_name = ctx.attr.registry + "/" + ctx.attr.repository + ":" + ctx.attr.tag
# Use l.gcr.io registry to pull marketplace.gcr.io images to avoid auth
# issues for users who do not do gcloud login.
image_name = image_name.replace("marketplace.gcr.io", "l.gcr.io")
docker_tool_path = None
# Resolve the project_root
project_root, use_default_project = resolve_project_root(ctx)
# Check if pulling a container will be needed and pull it if so
if pull_container_needed(ctx):
ctx.report_progress("validating host tools")
docker_tool_path = validate_host(ctx)
# Pull the image using 'docker pull'
pull_image(ctx, docker_tool_path, image_name)
# If tag is specified instead of digest, resolve it to digest in the
# image_name as it will be used later on in the platform targets.
if ctx.attr.tag:
result = ctx.execute([docker_tool_path, "inspect", "--format={{index .RepoDigests 0}}", image_name])
print_exec_results("Resolve image digest", result, fail_on_error = True)
image_name = result.stdout.splitlines()[0]
print("Image with given tag `%s` is resolved to %s" %
(ctx.attr.tag, image_name))
# Create a default BUILD file with the platform + toolchain targets that
# will work with RBE with the produced toolchain
ctx.report_progress("creating platform")
create_platform(
ctx,
# Use "marketplace.gcr.io" instead of "l.gcr.io" in platform targets.
image_name = image_name.replace("l.gcr.io", "marketplace.gcr.io"),
name = name,
)
# If user picks rbe-ubuntu 16_04 container and
# a config exists for the current version of Bazel, create aliases and return
if ctx.attr.config_version and not ctx.attr.config_repos:
use_standard_config(ctx)
# Copy all outputs to the test directory
if ctx.attr.create_testdata:
copy_to_test_dir(ctx)
return
# Get the value of JAVA_HOME to set in the produced
# java_runtime
if ctx.attr.create_java_configs:
java_home = get_java_home(ctx, docker_tool_path, image_name)
create_java_runtime(ctx, java_home)
config_repos = []
if ctx.attr.create_cc_configs:
config_repos.extend(_CONFIG_REPOS)
if ctx.attr.config_repos:
config_repos.extend(ctx.attr.config_repos)
if config_repos:
# run the container and extract the autoconf directory
run_and_extract(
ctx,
bazel_version = ctx.attr.bazel_version,
bazel_rc_version = ctx.attr.bazel_rc_version,
config_repos = config_repos,
docker_tool_path = docker_tool_path,
image_name = image_name,
project_root = project_root,
use_default_project = use_default_project,
)
ctx.report_progress("expanding outputs")
# Expand outputs to project dir if user requested it
if ctx.attr.output_base:
expand_outputs(
ctx,
bazel_version = ctx.attr.bazel_version,
project_root = project_root,
)
# TODO(nlopezgi): refactor call to _copy_to_test_dir
# so that its not needed to be duplicated here and
# above.
# Copy all outputs to the test directory
if ctx.attr.create_testdata:
copy_to_test_dir(ctx)
| 5,348,182 |
def cli():
"""
Entrypoint for Zelt.
"""
# Disable deprecation warning coming from Kubernetes client's YAML loading.
# See https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
yaml.warnings({"YAMLLoadWarning": False})
config = _load_config(docopt(__doc__, version=_version()))
logging.basicConfig(level=config.logging)
if config.from_har:
config = config._replace(
locustfile=zelt.invoke_transformer(
paths=config.har_files, plugin_names=config.transformer_plugins
)
)
_deploy(config)
if config.from_locustfile:
_deploy(config)
if config.rescale:
_rescale(config)
if config.delete:
_delete(config)
| 5,348,183 |
def test_update_market_value_of_asset_earlier_date():
"""
Test update_market_value_of_asset for asset
with current_trade_date in past
"""
start_dt = pd.Timestamp('2017-10-05 08:00:00', tz=pytz.UTC)
earlier_dt = pd.Timestamp('2017-10-04 08:00:00', tz=pytz.UTC)
later_dt = pd.Timestamp('2017-10-06 08:00:00', tz=pytz.UTC)
port = Portfolio(start_dt, portfolio_id='1234')
asset = 'EQ:AAA'
port.subscribe_funds(later_dt, 100000.0)
tn_asset = Transaction(
asset=asset,
quantity=100,
dt=later_dt,
price=567.0,
order_id=1,
commission=15.78
)
port.transact_asset(tn_asset)
with pytest.raises(ValueError):
port.update_market_value_of_asset(
asset, 50.23, earlier_dt
)
| 5,348,184 |
def property_fragment():
"""Builds and returns a random Property init fragment."""
return _build_property_fragment()
| 5,348,185 |
def create_person_node(author):
"""
Parameters
----------
author : dict
author field of JSON file.
Returns
-------
ID : str
Document _id from 'Person' collection.
"""
given = author.get('given', '')
family = author.get('family', '')
ID = search_person(given, family)
if ID == 0:
collec = db.collection('Person')
para = {'URI': given+'_'+family, 'type': subject_type_author, 'sameas': '', 'given': given, 'family': family}
metadata = collec.insert(para)
ID = metadata['_id']
memo_name_ID.update({str((given, family)): ID})
print(ID, "created")
return ID
else:
return ID
| 5,348,186 |
def test_reaction_pattern_match_complex_pattern_ordering():
"""Ensure CP equivalence is insensitive to MP order."""
Monomer('A', ['s1', 's2'])
cp0 = A(s1=1, s2=2) % A(s1=2, s2=1)
cp1 = A(s1=2, s2=1) % A(s1=1, s2=2)
rp0 = cp0 + cp1
rp1 = cp1 + cp0
rp2 = cp0 + cp0
assert rp0.matches(rp1)
assert rp1.matches(rp0)
assert rp2.matches(rp0)
| 5,348,187 |
def create_date_list(startDt='2020-11-01', endDt='2020-12-01'):
"""
Create a date list ranging from start to end dates. Date output format = yyyy_mm
:startDt = beginning date for the range
:endDt = end date for the range
    Note: the current implementation requires at least a one-month difference between the dates.
FUTURE: Could provide more of the common date movements e.g. (M, Q, Y), and have these
added to the functionality with a keyword parameter
"""
dates = pd.date_range(startDt, endDt, freq='1M') - pd.offsets.MonthBegin(1)
listDates = [str(x.year)+"_"+str(x.month).zfill(2) for x in dates]
return listDates
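# A minimal usage sketch: month-start labels between the two dates.
print(create_date_list("2020-11-01", "2021-02-01"))  # ['2020_11', '2020_12', '2021_01']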
| 5,348,188 |
def create_sequences(data,
seq_len,
forward,
stride,
debug=False):
""" Create training and test sequences.
Args:
data (numpy.array): Assumed to be of shape (N, T, M).
seq_len (int): Sequence length.
forward (int): Predict forward N periods.
stride (int): Shift by k amounts.
"""
X = []
y = []
N, T, M = data.shape
for i in range(seq_len, T - forward + 1, stride):
X.append(data[:, i - seq_len:i, :])
# -1 because python slicing excludes end point
y.append(data[:, i + forward - 1, :])
if debug:
print(f'X from {i - seq_len} to {i - 1}, y at {i + forward - 1}')
return np.concatenate(X), np.concatenate(y)
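# A minimal usage sketch: 2 series of length 10 with 3 features, windows of length 4
# predicting 1 step ahead, shifted by 2.
data = np.arange(2 * 10 * 3).reshape(2, 10, 3).astype(float)
X, y = create_sequences(data, seq_len=4, forward=1, stride=2)
print(X.shape, y.shape)  # (6, 4, 3) (6, 3)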
| 5,348,189 |
def batch_distance_metrics_from_coords(coords, mask):
"""
Given coordinates of neighboring atoms, compute bond
distances and 2-hop distances in local neighborhood
"""
d_mat_mask = mask.unsqueeze(1) * mask.unsqueeze(2)
if coords.dim() == 4:
two_dop_d_mat = torch.square(coords.unsqueeze(1) - coords.unsqueeze(2) + 1e-10).sum(dim=-1).sqrt() * d_mat_mask.unsqueeze(-1)
one_hop_ds = torch.linalg.norm(torch.zeros_like(coords[0]).unsqueeze(0) - coords, dim=-1)
elif coords.dim() == 5:
two_dop_d_mat = torch.square(coords.unsqueeze(2) - coords.unsqueeze(3) + 1e-10).sum(dim=-1).sqrt() * d_mat_mask.unsqueeze(-1).unsqueeze(1)
one_hop_ds = torch.linalg.norm(torch.zeros_like(coords[0]).unsqueeze(0) - coords, dim=-1)
return one_hop_ds, two_dop_d_mat
| 5,348,190 |
def ComputeHash256(buf: bytes) -> bytes:
"""ComputeHash256 Compute a cryptographically strong 256 bit hash of the input byte slice."""
return ComputeHash256Array(buf)
| 5,348,191 |
def acf_std(x, maxlag=None, periodogram=True,
confidence=0.6826895, simplified=True, acf_cached=None):
"""Computes the approximate standard deviation of the autocorrelation
coefficients.
Parameters
----------
x : ndarray
Input data.
maxlag : {None, int} optional
Maximum lag beyond which the ACF coefficient can be considered as null.
periodogram : {True, False}
Whether to use a periodogram-like estimate of the ACF or not.
confidence : {0.6826895, float} optional
Confidence level. The default value returns the standard deviation.
simplified : {True, False} optional
Whether to use a simplified or more complex approximation.
acf_cached : {ndarray} optional
Pre-computed acf coefficients.
Notes
-----
When simplified is True, the standard error is computed as:
    \begin{equation}
    var[r_k] \approx \frac{1}{N} \left\{ 1 + 2 \sum_{j=1}^{q}{ r_{j}^2 } \right\}
    \end{equation}
    Otherwise, it is computed as:
    \begin{equation}
    \begin{split}
    var[r_k] &\approx
        \frac{1}{N} \sum_{j=-\infty}^{+\infty}{ \left\{
        r_{j}^2 + r_{j+k} r_{j-k} - 4 r_{k} r_{j} r_{j-k} + 2 r_{j}^2 r_{k}^2
        \right\} } \\
    &= \frac{1}{N} \sum_{j=-\infty}^{+\infty}{ \left\{
        r_{j}^2 \left[ 1 + 2 r_{k}^2 \right] + r_{j+k} r_{j-k} - 4 r_{k} r_{j} r_{j-k}
        \right\} }
    \end{split}
    \end{equation}
References
----------
Hippel & McLeod 1994: Time series modeling.
"""
if acf_cached is None:
acfx = acf(x,periodogram)
else:
acfx = acf_cached
n = x.size
r_i = acfx[:n]
rr_i = (r_i)**2
    # Artificially set the ACF coefficients to 0 beyond lag maxlag
    if maxlag is not None and maxlag > 0:
rr_i[maxlag:] = 0
# Compute the variance of the ACF coeffs
if simplified:
var_i = 1 + 2*rr_i.cumsum()
else:
var_i = (1 + 2 * rr_i) * rr_i.sum()
cov_ = np.correlate(r_i,r_i,'full')[n-1:]
var_i[:n//2] = cov_[::2]
var_i -= (4*r_i*cov_)
var_i /= float(n)
var_i[0] = 0
#....
std_i = np.sqrt(var_i)
std_i = np.concatenate([std_i, std_i[n-1:0:-1]])
#....
if confidence < 0.5:
confidence = 1.-confidence
thresh = norm.isf((1.-confidence)/2.)
std_i *= thresh
return std_i
| 5,348,192 |
def traverse_map(map, x_step, y_step):
"""
iterates over a "map" (array of strings) starting at the top left until reaching the
bottom of the map. every iteration advances position by <x_step,y_step> and checks if
a tree is hit
returns: the total number of Trees hit
rtype: int
"""
trees_hit = 0
map_depth = len(map)
y_steps = range(0,map_depth,y_step)
for j,step in enumerate(y_steps):
trees_hit += 1 if tree_in_path(map[step],j*x_step) else 0
return trees_hit
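# A minimal usage sketch. The original module defines its own `tree_in_path` elsewhere;
# this toy stand-in assumes '#' marks a tree and that columns wrap around.
def tree_in_path(row, x):
    return row[x % len(row)] == "#"

toy_map = [
    "..##.........",
    "#...#...#....",
    ".#....#..#...",
    "..#.#...#.#..",
]
print(traverse_map(toy_map, x_step=3, y_step=1))  # 1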
| 5,348,193 |
def q_to_res(Q: float) -> Optional[float]:
"""
:param Q: Q factor
:return: res, or None if Q < 0.25
"""
res = 1 - 1.25 / (Q + 1)
if res < 0.0:
return None
return res
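# A minimal usage sketch.
print(q_to_res(3.0))  # 0.6875
print(q_to_res(0.1))  # None (Q below 0.25)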
| 5,348,194 |
def list_document_classifier():
"""[Lists Document Classifiers for Text Classification on AWS]
Raises:
        error: any exception raised by the Comprehend client, re-raised after logging.
Returns:
[list]: [List of Document Classifiers]
"""
try:
logging.info(f"List Document Classifiers")
return client.list_document_classifiers()
except Exception as error:
logging.error(f"{error=}")
raise error
| 5,348,195 |
def id_str_to_bytes(id_str: str) -> bytes:
"""Convert a 40 characters hash into a byte array.
The conversion results in 160 bits of information (20-bytes array). Notice
that this operation is reversible (using `id_bytes_to_str`).
Args:
id_str: Hash string containing 40 characters.
Returns:
bytes: The ID converted to bytes.
"""
return int(id_str, 16).to_bytes(20, byteorder='big')
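# A minimal usage sketch: a 40-character SHA-1 style hash becomes a 20-byte array,
# and the conversion can be reversed with format(..., '040x').
hash_str = "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"
hash_bytes = id_str_to_bytes(hash_str)
print(len(hash_bytes))  # 20
print(format(int.from_bytes(hash_bytes, "big"), "040x") == hash_str)  # True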
| 5,348,196 |
def facts_domain(junos, facts):
"""
The following facts are required:
facts['hostname']
The following facts are assigned:
facts['domain']
facts['fqdn']
"""
# changes done to fix issue #332
domain_filter_xml = E('configuration', E('system', E('domain-name')))
domain = junos.rpc.get_config(domain_filter_xml)
domain_name = domain.xpath('.//domain-name')
if len(domain_name) > 0:
facts['domain'] = domain_name[0].text
facts['fqdn'] = facts['hostname'] + '.' + facts['domain']
return
fs = FS(junos)
file_content = fs.cat('/etc/resolv.conf') or fs.cat('/var/etc/resolv.conf')
words = file_content.split() if file_content is not None else ''
if 'domain' not in words:
facts['domain'] = None
facts['fqdn'] = facts['hostname']
else:
idx = words.index('domain') + 1
facts['domain'] = words[idx]
facts['fqdn'] = facts['hostname'] + '.' + facts['domain']
| 5,348,197 |
def add_or_update_user(username):
"""
Takes a username and adds them to our DB from the twitter DB.
Get user and get up to 2000 of their tweets and add to our
SQLAlchemy database.
"""
# Error handling
# How do we deal with the possibility of getting a user that doesn't exist?
# Will break our code! We can handle that by using a try statement!
try:
twitter_user = api.get_user(username)
# Where we decide whether or not to add or update.
# By prefacing code with a db, that means we're adding it to our database
# .get will be grabbing our twitter users by their id. If that user is in our database? Grab that user and assign it to db_user.
# If that user isn't in our database, it'll go with the second argument where it will CREATE a user
db_user = (User.query.get(twitter_user.id)) or User(
id=twitter_user.id, username=username)
DB.session.add(db_user)
# TODO: grab same number of tweets for each user. Use tweepy documentation to figure this out with pre-filtering to counter the current post-filtering we have written here
tweets = twitter_user.timeline(
count=2000,
exclude_replies=True,
include_rts=False,
# Returns everything about a tweet, including emojis or whatever else
tweet_mode="extended",
since_id=db_user.newest_tweet_id # This is where the updating happens
)
if tweets:
db_user.newest_tweet_id = tweets[0].id
for tweet in tweets:
# Run vectorize_tweet function
tweet_vector = vectorize_tweet(tweet.full_text)
# Creating a Tweet object to add to our DB
db_tweet = Tweet(
id=tweet.id, text=tweet.full_text, vect=tweet_vector)
# Connects the tweet to the user through this tweets list (user.tweets)
# SQLAlchemy will make that connection between user and tweets
db_user.tweets.append(db_tweet)
# Note: If we added before appending we would likely get an error
DB.session.add(db_tweet)
except Exception as e:
# This will be returned as the reason the non-existent user cannot be added
print(f"Error Processing {username}: {e}")
raise e
else:
DB.session.commit()
| 5,348,198 |
def transform(data):
"""Transform words and tags to ids
"""
new_data = []
unknown_word_count = 0
total_word_count = 0
for words, tags in data:
word_ids = [word_to_ix.get(w, word_to_ix[UNK]) for w in words]
tag_ids = [tag_to_ix.get(t) for t in tags]
new_data.append((word_ids, tag_ids))
# count
total_word_count += len(words)
for w in words:
if w not in word_to_ix:
unknown_word_count += 1
unknown_proportion = unknown_word_count / total_word_count
return new_data, unknown_proportion
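# A minimal usage sketch. `word_to_ix`, `tag_to_ix` and `UNK` are module-level globals
# in the original code; toy stand-ins are defined here for illustration.
UNK = "<unk>"
word_to_ix = {UNK: 0, "the": 1, "dog": 2}
tag_to_ix = {"DET": 0, "NOUN": 1}
new_data, unk_prop = transform([(["the", "cat"], ["DET", "NOUN"])])
print(new_data)  # [([1, 0], [0, 1])]
print(unk_prop)  # 0.5 ("cat" is out of vocabulary)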
| 5,348,199 |