content (string, lengths 22-815k) | id (int64, 0-4.91M)
---|---
def _make_relative_path(base_path, full_path):
"""
Strip out the base_path from full_path and make it relative.
"""
    flask.current_app.logger.debug(
        'got base_path: %s and full_path: %s', base_path, full_path)
if base_path in full_path:
# Get the common prefix
        common_prefix = os.path.commonprefix([base_path, full_path])
rel_path = full_path[len(common_prefix):]
# Remove '/' from the beginning
if os.path.isabs(rel_path):
rel_path = rel_path[1:]
return rel_path
| 5,346,900 |
def md5_hash_file(fh):
"""Return the md5 hash of the given file-object"""
md5 = hashlib.md5()
while True:
data = fh.read(8192)
if not data:
break
md5.update(data)
return md5.hexdigest()
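# Usage sketch -- 'archive.bin' is an illustrative path, not part of the original
# snippet: open the file in binary mode and print its 32-character hex digest.
with open('archive.bin', 'rb') as fh:
    print(md5_hash_file(fh))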
| 5,346,901 |
def rule(request):
"""Administration rule content"""
if request.user.is_authenticated():
return helpers.render_page('rule.html', show_descriptions=True)
else:
return redirect('ahmia.views_admin.login')
| 5,346,902 |
def get_tenant_info(schema_name):
"""
    Return the first tenant (Pharmacy) object matching schema_name.
"""
with schema_context(schema_name):
return Pharmacy.objects.filter(schema_name=schema_name).first()
| 5,346,903 |
def get_lr(curr_epoch, hparams, iteration=None):
"""Returns the learning rate during training based on the current epoch."""
assert iteration is not None
batches_per_epoch = int(hparams.train_size / hparams.batch_size)
if 'svhn' in hparams.dataset and 'wrn' in hparams.model_name:
lr = step_lr(hparams.lr, curr_epoch)
elif 'cifar' in hparams.dataset or ('svhn' in hparams.dataset and
'shake_shake' in hparams.model_name):
lr = cosine_lr(hparams.lr, curr_epoch, iteration, batches_per_epoch,
hparams.num_epochs)
else:
lr = hparams.lr
tf.logging.log_first_n(tf.logging.WARN, 'Default not changing learning rate.', 1)
return lr
| 5,346,904 |
def delete_map():
"""
Delete all maps
Returns
-------
(response, status_code): (dict, int)
The response is a dictionary with the keys -> status, message and
result.
The status is a bool that says if the operation was successful.
The message is a str with comments for the user.
The result is an empty list.
The status_code is always 204 (deleted).
"""
    # Delete all metadata_map figures:
    # loop through the figures folder and delete each matching file one by one
for file_map in glob.glob(f'{fig_folder}/metadata_map*'):
os.remove(file_map)
response = {
'status': True,
'message': 'All maps deleted',
'result': []
}
status_code = 204
return response, status_code
| 5,346,905 |
def test_building_scenarioloop_scenarios(mocker):
"""
Test building Scenarios from a Scenario Loop
"""
# given
scenario_loop = ScenarioLoop(1, 'Scenario Loop', 'Iterations', 'I am a Scenario Loop', 'foo.feature', 1, parent=None,
tags=None, preconditions=None, background=None)
# add steps
scenario_loop.steps.extend([
mocker.MagicMock(sentence='Given I have 1', path='foo.feature'),
mocker.MagicMock(sentence='And I have 2', path='foo.feature'),
mocker.MagicMock(sentence='When I add those', path='foo.feature')
])
# set iterations
scenario_loop.iterations = 2
# when - build the scenarios
scenario_loop.build_scenarios()
# then - expect 2 built Scenarios
assert len(scenario_loop.scenarios) == 2
    # then - expect that Scenarios are of type IterationScenario
    assert all(isinstance(x, IterationScenario) for x in scenario_loop.scenarios)
    # then - expect correct IterationScenario sentences
assert scenario_loop.scenarios[0].sentence == 'I am a Scenario Loop - iteration 0'
assert scenario_loop.scenarios[1].sentence == 'I am a Scenario Loop - iteration 1'
# then - expect correctly replaced Step sentences
assert scenario_loop.scenarios[0].steps[0].sentence == 'Given I have 1'
assert scenario_loop.scenarios[0].steps[1].sentence == 'And I have 2'
assert scenario_loop.scenarios[0].steps[2].sentence == 'When I add those'
assert scenario_loop.scenarios[1].steps[0].sentence == 'Given I have 1'
assert scenario_loop.scenarios[1].steps[1].sentence == 'And I have 2'
assert scenario_loop.scenarios[1].steps[2].sentence == 'When I add those'
| 5,346,906 |
def delete_page(shortname):
"""Delete page from the database."""
    # Check that the page exists
if get_page(shortname) is None:
abort(404)
if shortname is None:
flash("No parameters for page deletion!")
return redirect(url_for("admin"))
else:
query_db("DELETE FROM pages WHERE shortname = ?", (shortname,))
commit_db()
flash("Page '" + shortname + "' deleted!")
return redirect(url_for("admin"))
| 5,346,907 |
def has_property(name, match=None):
"""Matches if object has a property with a given name whose value satisfies
a given matcher.
:param name: The name of the property.
:param match: Optional matcher to satisfy.
This matcher determines if the evaluated object has a property with a given
name. If no such property is found, ``has_property`` is not satisfied.
If the property is found, its value is passed to a given matcher for
evaluation. If the ``match`` argument is not a matcher, it is implicitly
wrapped in an :py:func:`~hamcrest.core.core.isequal.equal_to` matcher to
check for equality.
If the ``match`` argument is not provided, the
:py:func:`~hamcrest.core.core.isanything.anything` matcher is used so that
``has_property`` is satisfied if a matching property is found.
Examples::
has_property('name', starts_with('J'))
has_property('name', 'Jon')
has_property('name')
"""
if match is None:
match = anything()
return IsObjectWithProperty(name, wrap_shortcut(match))
| 5,346,908 |
def gain_stability_task(run, det_name, fe55_files):
"""
This task fits the Fe55 clusters to the cluster data from each frame
sequence and writes a pickle file with the gains as a function of
sequence number and MJD-OBS.
Parameters
----------
run: str
Run number.
det_name: str
Sensor name in the focal plane, e.g., 'R22_S11'.
fe55_files: list
        Raw Fe55 files for the sensor being considered. The MJD-OBS values
will be extracted from these files.
Returns:
(pandas.DataFrame, str), i.e., a tuple of the data frame containing
the gain sequence and the file name of the output pickle file.
"""
file_prefix = make_file_prefix(run, det_name)
# Extract MJD-OBS values into a dict to provide look up table in
# case there are missing sequence frames in the psf results table.
mjd_obs = dict()
for item in fe55_files:
with fits.open(item) as hdus:
mjd_obs[hdus[0].header['SEQNUM']] = hdus[0].header['MJD-OBS']
psf_results_file = sorted(glob.glob(f'{file_prefix}_psf_results*.fits'))[0]
try:
df = sensorTest.gain_sequence(det_name, psf_results_file)
except ValueError as eobj:
print("ValueError in gain_stability_task:", eobj)
return None
df['mjd'] = [mjd_obs[seqnum] for seqnum in df['seqnum']]
outfile = f'{file_prefix}_gain_sequence.pickle'
df.to_pickle(outfile)
return df, outfile
| 5,346,909 |
def get_datetime(timestamp):
"""Parse several representations of time into a datetime object"""
if isinstance(timestamp, datetime.datetime):
# Timestamp is already a datetime object.
return timestamp
elif isinstance(timestamp, (int, float)):
try:
# Handle Unix timestamps.
return datetime.datetime.fromtimestamp(timestamp)
except ValueError:
pass
try:
# Handle Unix timestamps in milliseconds.
return datetime.datetime.fromtimestamp(timestamp / 1000)
except ValueError:
pass
elif isinstance(timestamp, string_types):
try:
timestamp = float(timestamp)
except (ValueError, TypeError):
pass
else:
# Timestamp is probably Unix timestamp given as string.
return get_datetime(timestamp)
try:
# Try to parse as string date in common formats.
return iso8601.parse_date(timestamp)
    except Exception:
        pass
    # Nothing worked; give up on parsing.
raise ValueError("Couldn't extract date object from %r" % timestamp)
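# Usage sketch (illustrative values; `datetime` and `iso8601` are assumed to be
# imported by the surrounding module, as the function above requires):
print(get_datetime(1609459200))                      # Unix seconds
print(get_datetime("2021-01-01T00:00:00Z"))          # ISO 8601 string
print(get_datetime(datetime.datetime(2021, 1, 1)))   # passed through unchanged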
| 5,346,910 |
def conda_installed_files(prefix, exclude_self_build=False):
"""
Return the set of files which have been installed (using conda) into
a given prefix.
"""
res = set()
for dist in install.linked(prefix):
meta = install.is_linked(prefix, dist)
if exclude_self_build and 'file_hash' in meta:
continue
res.update(set(meta['files']))
return res
| 5,346,911 |
def computeStarsItembased(corated, target_bid, model):
    """
    Predict the star rating of target_bid via item-based collaborative filtering.
    corated - {bid: star, ...}: the user's co-rated businesses and their stars
    """
    if corated is None:
return None
corated.pop(target_bid, None)
bid_cor = list(corated.keys())
collect = []
for b in bid_cor:
pair = None
if b < target_bid:
pair = (b, target_bid)
else:
pair = (target_bid, b)
# if b == target_bid:
# print('same:', pair)
w = model.get(pair)
        if w is not None:
            # the pair may not have a value in the model;
            # when b == target_bid, the pair has no value either
collect.append((pair, w, b))
# else:
# collect.append((pair, 0, b))
# print(collect)
collect.sort(key=lambda x: x[1], reverse=True)
neighbors = collect[:N_NEIGHBORS_ITEMBASED]
sum_w = 0
n = 0
for p, w, b in neighbors:
star = corated[b]
n += star * w
sum_w += w
if sum_w == 0:
return None
else:
return n / sum_w
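# Minimal worked example with toy data. N_NEIGHBORS_ITEMBASED is assumed to be a
# module-level constant in the original code; it is defined here only so the
# sketch runs on its own. The model maps sorted (bid, bid) pairs to similarity weights.
N_NEIGHBORS_ITEMBASED = 5
toy_model = {('b1', 'b3'): 0.9, ('b2', 'b3'): 0.5}
toy_corated = {'b1': 4.0, 'b2': 2.0}
# Weighted average: (4.0*0.9 + 2.0*0.5) / (0.9 + 0.5) ~= 3.29
print(computeStarsItembased(toy_corated, 'b3', toy_model))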
| 5,346,912 |
def print_json_errors(res):
"""
from a dimcli.DslDataset object, print out an errors summary
"""
if "errors" in res.json.keys():
if "query" in res.json["errors"]:
print(res.json["errors"]["query"]["header"].strip("\n"))
for key in res.json["errors"]["query"]["details"]:
print(key)
else:
print(res.json["errors"])
| 5,346,913 |
def main():
"""
    This script creates a new fabric at the site specified in the 'fabric_operations.yml' parameters file.
"""
# logging, debug level, to file {application_run.log}
logging.basicConfig(
filename='application_run.log',
level=logging.DEBUG,
format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
current_time = str(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
print('\nCreate Fabric App Start, ', current_time)
with open('fabric_operations.yml', 'r') as file:
project_data = yaml.safe_load(file)
print('\n\nProject Details:\n')
pprint(project_data)
# parse the input data
area_name = project_data['area_info']['name']
area_hierarchy = project_data['area_info']['hierarchy']
building_name = project_data['building_info']['name']
building_address = project_data['building_info']['address']
building_lat = project_data['building_info']['lat']
building_long = project_data['building_info']['long']
floor_name = project_data['floor_info']['name']
floor_number = project_data['floor_info']['number']
floor_rf_model = project_data['floor_info']['rf_model']
floor_width = project_data['floor_info']['width']
floor_length = project_data['floor_info']['length']
floor_height = project_data['floor_info']['height']
site_hierarchy = 'Global/' + area_name + '/' + building_name + '/' + floor_name
dhcp_server = project_data['network_settings']['dhcp_server']
dns_server = project_data['network_settings']['dns_server']
syslog_server = project_data['network_settings']['syslog_server']
ntp_server = project_data['network_settings']['ntp_server']
device_ips = project_data['devices_info']['device_ips']
ip_pool_name = project_data['ip_pool']['name']
ip_pool_type = project_data['ip_pool']['type']
ip_pool_cidr = project_data['ip_pool']['subnet']
ip_pool_gateway = project_data['ip_pool']['gateway']
ip_pool_dhcp_server = project_data['ip_pool']['dhcp_server']
ip_pool_dns_server = project_data['ip_pool']['dns_server']
ip_pool_address_space = project_data['ip_pool']['address_family']
ip_sub_pool_name = project_data['ip_sub_pool']['name']
ip_sub_pool_type = project_data['ip_sub_pool']['type']
ip_sub_pool_cidr = project_data['ip_sub_pool']['subnet']
ip_sub_pool_gateway = project_data['ip_sub_pool']['gateway']
ip_sub_pool_dhcp_server = project_data['ip_sub_pool']['dhcp_server']
ip_sub_pool_dns_server = project_data['ip_sub_pool']['dns_server']
ip_sub_pool_address_space = project_data['ip_sub_pool']['address_family']
ip_transit_pool_name = project_data['ip_transit_pool']['name']
ip_transit_pool_type = project_data['ip_transit_pool']['type']
ip_transit_pool_cidr = project_data['ip_transit_pool']['subnet']
ip_transit_pool_gateway = project_data['ip_transit_pool']['gateway']
ip_transit_pool_dhcp_server = project_data['ip_transit_pool']['dhcp_server']
ip_transit_pool_address_space = project_data['ip_transit_pool']['address_family']
l3_vn_name = project_data['l3_vn']['name']
border_device_ip = project_data['border_devices']['ip'][0]
routing_protocol = project_data['border_devices']['routing_protocol']
    internal_bgp_as = str(project_data['border_devices']['internal_bgp_as'])
    external_bgp_as = str(project_data['border_devices']['external_bgp_as'])
external_interface_name = project_data['border_devices']['external_interface']
transit_network = project_data['border_devices']['transit_network']
transit_vlan = str(project_data['border_devices']['transit_vlan'])
control_plane_device_ips = project_data['control_plane_devices']['ip']
edge_device_ips = project_data['edge_devices']['ip']
default_auth_profile = project_data['auth_profile']['name']
# Create a DNACenterAPI "Connection Object"
dnac_api = DNACenterAPI(username=DNAC_USER, password=DNAC_PASS, base_url=DNAC_URL, version='2.2.2.3', verify=False)
# get Cisco DNA Center Auth token
dnac_auth = get_dnac_token(DNAC_AUTH)
# create a new area
print('\nCreating a new area:', area_name)
area_payload = {
"type": "area",
"site": {
"area": {
"name": area_name,
"parentName": area_hierarchy
}
}
}
response = dnac_api.sites.create_site(payload=area_payload)
time_sleep(10)
# create a new building
print('\n\nCreating a new building:', building_name)
building_payload = {
'type': 'building',
'site': {
'building': {
'name': building_name,
'parentName': 'Global/' + area_name,
'address': building_address,
'latitude': building_lat,
'longitude': building_long
}
}
}
response = dnac_api.sites.create_site(payload=building_payload)
print(response.text)
time_sleep(10)
# create a new floor
print('\n\nCreating a new floor:', floor_name)
floor_payload = {
'type': 'floor',
'site': {
'floor': {
'name': floor_name,
'parentName': 'Global/' + area_name + '/' + building_name,
'height': floor_height,
'length': floor_length,
'width': floor_width,
'rfModel': floor_rf_model
}
}
}
response = dnac_api.sites.create_site(payload=floor_payload)
time_sleep(10)
# create site network settings
network_settings_payload = {
'settings': {
'dhcpServer': [
dhcp_server
],
'dnsServer': {
'domainName': '',
'primaryIpAddress': dns_server,
},
'syslogServer': {
'ipAddresses': [
syslog_server
],
'configureDnacIP': True
},
'ntpServer': [
ntp_server
]
}
}
# get the site_id
print('\n\nConfiguring Network Settings:')
pprint(project_data['network_settings'])
response = dnac_api.sites.get_site(name=site_hierarchy)
site_id = response['response'][0]['id']
response = dnac_api.network_settings.create_network(site_id=site_id, payload=network_settings_payload)
time_sleep(10)
# add devices to inventory
print('\n\nAdding devices to inventory: ')
for ip_address in device_ips:
add_device_payload = {
"cliTransport": "ssh",
"enablePassword": "apiuser123!",
"ipAddress": [
ip_address
],
"password": "apiuser123!",
"snmpRWCommunity": "wr!t3",
"snmpVersion": "v2",
"userName": "dnacenter"
}
response = dnac_api.devices.add_device(payload=add_device_payload)
time.sleep(5)
time_sleep(120)
# add devices to site
print('\n\nAssigning devices to site:', site_hierarchy)
for ip_address in device_ips:
assign_device_payload = {
'device': [
{
'ip': ip_address
}
]
}
response = dnac_api.sites.assign_device_to_site(site_id=site_id, payload=assign_device_payload)
time_sleep(60)
# create a new Global Pool
print('\n\nCreating the Global Pool: ', ip_pool_name)
global_pool_payload = {
'settings': {
'ippool': [
{
'ipPoolName': ip_pool_name,
'type': ip_pool_type,
'ipPoolCidr': ip_pool_cidr,
'gateway': ip_pool_gateway,
'dhcpServerIps': [
ip_pool_dhcp_server
],
'dnsServerIps': [
ip_pool_dns_server
],
'IpAddressSpace': ip_pool_address_space
}
]
}
}
response = dnac_api.network_settings.create_global_pool(payload=global_pool_payload)
time_sleep(10)
# create an IP sub_pool for site_hierarchy
ip_sub_pool_subnet = ip_sub_pool_cidr.split('/')[0]
ip_sub_pool_mask = int(ip_sub_pool_cidr.split('/')[1])
    print('\n\nCreating the IP subpool: ', ip_sub_pool_cidr)
sub_pool_payload = {
'name': ip_sub_pool_name,
'type': ip_sub_pool_type,
'ipv4GlobalPool': ip_pool_cidr,
'ipv4Prefix': True,
'ipv6AddressSpace': False,
'ipv4PrefixLength': ip_sub_pool_mask,
'ipv4Subnet': ip_sub_pool_subnet,
'ipv4GateWay': ip_sub_pool_gateway,
'ipv4DhcpServers': [
ip_sub_pool_dhcp_server
],
'ipv4DnsServers': [
ip_sub_pool_dns_server
],
'ipv6Prefix': True,
'ipv6GlobalPool': '2001:2021::1000/64',
'ipv6PrefixLength': 96,
'ipv6Subnet': '2001:2021::1000'
}
response = dnac_api.network_settings.reserve_ip_subpool(site_id=site_id, payload=sub_pool_payload)
time_sleep(10)
# create an IP transit pool for site_hierarchy
print('\n\nCreating the IP transit pool: ', ip_transit_pool_cidr)
ip_transit_pool_subnet = ip_transit_pool_cidr.split('/')[0]
ip_transit_pool_mask = int(ip_transit_pool_cidr.split('/')[1])
transit_pool_payload = {
'name': ip_transit_pool_name,
'type': ip_transit_pool_type,
'ipv4GlobalPool': ip_pool_cidr,
'ipv4Prefix': True,
'ipv6AddressSpace': False,
'ipv4PrefixLength': ip_transit_pool_mask,
'ipv4Subnet': ip_transit_pool_subnet,
'ipv4GateWay': ip_transit_pool_gateway,
'ipv4DhcpServers': [
ip_transit_pool_dhcp_server
],
'ipv6Prefix': True,
'ipv6GlobalPool': '2001:2021::1000/64',
'ipv6PrefixLength': 96,
'ipv6Subnet': '2001:2021::1000'
}
response = dnac_api.network_settings.reserve_ip_subpool(site_id=site_id, payload=transit_pool_payload)
time_sleep(10)
# create a new fabric at site
print('\n\nCreating new fabric at site:', site_hierarchy)
response = create_fabric_site(site_hierarchy, dnac_auth)
time_sleep(15)
# provision devices
print('\n\nProvisioning devices to site:', site_hierarchy)
for ip_address in device_ips:
response = provision_device(ip_address, site_hierarchy, dnac_auth)
time_sleep(120)
# create L3 VN at global level
print('\n\nCreating new L3 Virtual Network: ', l3_vn_name)
l3_vn_payload = {
'virtualNetworkName': l3_vn_name,
"isGuestVirtualNetwork": False,
}
response = dnac_api.sda.add_virtual_network_with_scalable_groups(payload=l3_vn_payload)
time_sleep(5)
# assign Layer 3 VN to fabric
print('\n\nAssign L3 Virtual Network: ', l3_vn_name)
response = create_l3_vn(l3_vn_name, site_hierarchy, dnac_auth)
time_sleep(5)
# add auth profile to fabric
print('\n\nAdding default auth profile to fabric: ', default_auth_profile)
response = create_auth_profile(default_auth_profile, site_hierarchy, dnac_auth)
time_sleep(5)
# add control-plane node to fabric
print('\n\nAdding control-plane devices to fabric: ', control_plane_device_ips)
for device_ip in control_plane_device_ips:
response = add_control_plane_node(device_ip, site_hierarchy, dnac_auth)
time.sleep(2)
time_sleep(5)
# add border node to fabric
print('\n\nAdding a border node device: ', border_device_ip)
border_payload = {
'deviceManagementIpAddress': border_device_ip,
'siteNameHierarchy': site_hierarchy,
'externalDomainRoutingProtocolName': routing_protocol,
'externalConnectivityIpPoolName': ip_transit_pool_name,
        'internalAutonomouSystemNumber': internal_bgp_as,
'borderSessionType': 'External',
'connectedToInternet': True,
'externalConnectivitySettings': [
{
'interfaceName': external_interface_name,
                'externalAutonomouSystemNumber': external_bgp_as,
'l3Handoff': [
{
'virtualNetwork': {
'virtualNetworkName': l3_vn_name,
'vlanId': transit_vlan
}
}
]
}
]
}
response = add_border_device(border_payload, dnac_auth)
time_sleep(5)
# add edge devices to fabric
print('\n\nAdding edge devices to fabric: ', edge_device_ips)
for device_ip in edge_device_ips:
response = add_edge_device(device_ip, site_hierarchy, dnac_auth)
time.sleep(2)
time_sleep(5)
current_time = str(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
print('\n\nCreate Fabric App Run End, ', current_time)
| 5,346,914 |
def beacon(config):
"""
Monitor the memory usage of the minion
Specify thresholds for percent used and only emit a beacon
if it is exceeded.
.. code-block:: yaml
beacons:
memusage:
- percent: 63%
"""
ret = []
_config = {}
list(map(_config.update, config))
_current_usage = psutil.virtual_memory()
current_usage = _current_usage.percent
monitor_usage = _config["percent"]
if isinstance(monitor_usage, str) and "%" in monitor_usage:
monitor_usage = re.sub("%", "", monitor_usage)
monitor_usage = float(monitor_usage)
if current_usage >= monitor_usage:
ret.append({"memusage": current_usage})
return ret
| 5,346,915 |
def copy_params(config_file, new_dir):
"""
copy config_file to new location
"""
name = os.path.basename(config_file)
new_dir = os.path.join(new_dir, 'params', name)
shutil.copyfile(config_file, new_dir)
| 5,346,916 |
def dump_bdb(db_dump_name, repo_path, dump_dir):
"""Dump all the known BDB tables in the repository at REPO_PATH into a
single text file in DUMP_DIR. Omit any "next-key" records."""
dump_file = dump_dir + "/all.bdb"
file = open(dump_file, 'w')
for table in ['revisions', 'transactions', 'changes', 'copies', 'nodes',
'node-origins', 'representations', 'checksum-reps', 'strings',
'locks', 'lock-tokens', 'miscellaneous', 'uuids']:
file.write(table + ":\n")
next_key_line = False
for line in db_dump(db_dump_name, repo_path, table):
# Omit any 'next-key' line and the following line.
if next_key_line:
next_key_line = False
continue
if line == ' next-key\n':
next_key_line = True
continue
# The line isn't necessarily a skel, but pretty_print_skel() shouldn't
# do too much harm if it isn't.
file.write(pretty_print_skel(line))
file.write("\n")
file.close()
| 5,346,917 |
def write_config(data, path):
"""
Takes a dict data and writes it in json format to path.
Args:
data (dict[str: dict[str: str]]): The data to be written to path
        path (PosixPath): Path to the file the data will be written to
"""
with open(path, "w+") as file:
json.dump(data, indent=4, sort_keys=True, fp=file)
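# Usage sketch ('settings.json' is an illustrative path): dump a nested dict as
# pretty-printed, key-sorted JSON. `json` is assumed to be imported by the module.
from pathlib import Path
write_config({"database": {"host": "localhost", "port": "5432"}}, Path("settings.json"))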
| 5,346,918 |
def get_surrounding_points(search_values, point_set):
"""
    For each value p[i] in search_values, returns a pair of surrounding points from point_set.
    The surrounding points are a tuple of the form (lb[i], ub[i]) where
    - lb[i] < p[i] <= ub[i] if p[i] falls inside the range of point_set
    - lb[i] == ub[i] (the nearest endpoint) if p[i] is below min(point_set) or above max(point_set)
    :param search_values: points that need neighbors
    :param point_set: points to search in; must be sorted in ascending order
    :return: list of (lower, upper) pairs from point_set that surround search_values
"""
# http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
upper_indices = np.searchsorted(point_set, search_values, side="left")
n_points = len(point_set)
n_search = len(search_values)
neighbors = []
for i in range(n_search):
idx = upper_indices[i]
val = search_values[i]
if idx == 0:
n = (point_set[0], point_set[0])
elif idx == n_points:
n = (point_set[-1], point_set[-1])
else:
n = (point_set[idx-1], point_set[idx])
neighbors.append(n)
return neighbors
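# Worked example (illustrative values; `np` is the numpy import the function relies on).
# 0.5 falls between grid points, 2.0 is itself a grid point, and 5.0 lies above the
# maximum, so it is clamped to the last point.
points = np.array([0.0, 1.0, 2.0, 3.0])
queries = np.array([0.5, 2.0, 5.0])
print(get_surrounding_points(queries, points))
# -> [(0.0, 1.0), (1.0, 2.0), (3.0, 3.0)]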
| 5,346,919 |
def _scriptable_get(obj, name):
""" The getter for a scriptable trait. """
global _outermost_call
saved_outermost = _outermost_call
_outermost_call = False
try:
result = getattr(obj, '_' + name, None)
if result is None:
result = obj.trait(name).default
finally:
_outermost_call = saved_outermost
if saved_outermost:
get_script_manager().record_trait_get(obj, name, result)
return result
| 5,346,920 |
def Iq(q, intercept, slope):
"""
:param q: Input q-value
    :param intercept: Intercept in linear model
:param slope: Slope in linear model
:return: Calculated Intensity
"""
inten = intercept + slope*q
return inten
| 5,346,921 |
def read_sph(input_file_name, mode='p'):
"""
Read a SPHERE audio file
:param input_file_name: name of the file to read
:param mode: specifies the following (\* =default)
.. note::
- Scaling:
- 's' Auto scale to make data peak = +-1 (use with caution if reading in chunks)
- 'r' Raw unscaled data (integer values)
- 'p' Scaled to make +-1 equal full scale
- 'o' Scale to bin centre rather than bin edge (e.g. 127 rather than 127.5 for 8 bit values,
can be combined with n+p,r,s modes)
- 'n' Scale to negative peak rather than positive peak (e.g. 128.5 rather than 127.5 for 8 bit values,
can be combined with o+p,r,s modes)
- Format
- 'l' Little endian data (Intel,DEC) (overrides indication in file)
- 'b' Big endian data (non Intel/DEC) (overrides indication in file)
- File I/O
- 'f' Do not close file on exit
- 'd' Look in data directory: voicebox('dir_data')
- 'w' Also read the annotation file \*.wrd if present (as in TIMIT)
- 't' Also read the phonetic transcription file \*.phn if present (as in TIMIT)
- NMAX maximum number of samples to read (or -1 for unlimited [default])
- NSKIP number of samples to skip from start of file (or -1 to continue from previous read when FFX
is given instead of FILENAME [default])
    :return: a tuple such that (Y, FS)
.. note::
- Y data matrix of dimension (samples,channels)
- FS sample frequency in Hz
- WRD{\*,2} cell array with word annotations: WRD{\*,:)={[t_start t_end],'text'} where times are in seconds
only present if 'w' option is given
- PHN{\*,2} cell array with phoneme annotations: PHN{\*,:)={[t_start t_end],'phoneme'} where times
are in seconds only present if 't' option is present
- FFX Cell array containing
1. filename
2. header information
1. first header field name
2. first header field value
3. format string (e.g. NIST_1A)
4.
1. file id
2. current position in file
3. dataoff byte offset in file to start of data
4. order byte order (l or b)
5. nsamp number of samples
6. number of channels
7. nbytes bytes per data value
8. bits number of bits of precision
9. fs sample frequency
10. min value
11. max value
12. coding 0=PCM,1=uLAW + 0=no compression, 0=shorten,20=wavpack,30=shortpack
13. file not yet decompressed
5. temporary filename
If no output parameters are specified,
header information will be printed.
The code to decode shorten-encoded files, is
not yet released with this toolkit.
"""
codings = dict([('pcm', 1), ('ulaw', 2)])
compressions = dict([(',embedded-shorten-', 1),
(',embedded-wavpack-', 2),
(',embedded-shortpack-', 3)])
byteorder = 'l'
endianess = dict([('l', '<'), ('b', '>')])
if not mode == 'p':
mode = [mode, 'p']
k = list((m >= 'p') & (m <= 's') for m in mode)
# scale to input limits not output limits
mno = all([m != 'o' for m in mode])
sc = ''
if k[0]:
sc = mode[0]
# Get byte order (little/big endian)
if any([m == 'l' for m in mode]):
byteorder = 'l'
elif any([m == 'b' for m in mode]):
byteorder = 'b'
ffx = ['', '', '', '', '']
if isinstance(input_file_name, str):
if os.path.exists(input_file_name):
fid = open(input_file_name, 'rb')
elif os.path.exists("".join((input_file_name, '.sph'))):
input_file_name = "".join((input_file_name, '.sph'))
fid = open(input_file_name, 'rb')
else:
raise Exception('Cannot find file {}'.format(input_file_name))
ffx[0] = input_file_name
elif not isinstance(input_file_name, str):
ffx = input_file_name
else:
fid = input_file_name
# Read the header
if ffx[3] == '':
        fid.seek(0, 0)  # go to the beginning of the file
l1 = fid.readline().decode("utf-8")
l2 = fid.readline().decode("utf-8")
if not (l1 == 'NIST_1A\n') & (l2 == ' 1024\n'):
logging.warning('File does not begin with a SPHERE header')
ffx[2] = l1.rstrip()
hlen = int(l2[3:7])
hdr = {}
while True: # Read the header and fill a dictionary
st = fid.readline().decode("utf-8").rstrip()
if st[0] != ';':
elt = st.split(' ')
if elt[0] == 'end_head':
break
if elt[1][0] != '-':
logging.warning('Missing ''-'' in SPHERE header')
break
if elt[1][1] == 's':
hdr[elt[0]] = elt[2]
elif elt[1][1] == 'i':
hdr[elt[0]] = int(elt[2])
else:
hdr[elt[0]] = float(elt[2])
if 'sample_byte_format' in list(hdr.keys()):
if hdr['sample_byte_format'][0] == '0':
bord = 'l'
else:
bord = 'b'
if (bord != byteorder) & all([m != 'b' for m in mode]) \
& all([m != 'l' for m in mode]):
byteorder = bord
icode = 0 # Get encoding, default is PCM
if 'sample_coding' in list(hdr.keys()):
icode = -1 # unknown code
for coding in list(codings.keys()):
if hdr['sample_coding'].startswith(coding):
# is the signal compressed
# if len(hdr['sample_coding']) > codings[coding]:
if len(hdr['sample_coding']) > len(coding):
for compression in list(compressions.keys()):
if hdr['sample_coding'].endswith(compression):
icode = 10 * compressions[compression] \
+ codings[coding] - 1
break
else: # if the signal is not compressed
icode = codings[coding] - 1
break
# initialize info of the files with default values
info = [fid, 0, hlen, ord(byteorder), 0, 1, 2, 16, 1, 1, -1, icode]
# Get existing info from the header
if 'sample_count' in list(hdr.keys()):
info[4] = hdr['sample_count']
if not info[4]: # if no info sample_count or zero
# go to the end of the file
            fid.seek(0, 2)  # go to the end of the file
# get the sample count
info[4] = int(math.floor((fid.tell() - info[2]) / (info[5] * info[6]))) # get the sample_count
if 'channel_count' in list(hdr.keys()):
info[5] = hdr['channel_count']
if 'sample_n_bytes' in list(hdr.keys()):
info[6] = hdr['sample_n_bytes']
if 'sample_sig_bits' in list(hdr.keys()):
info[7] = hdr['sample_sig_bits']
if 'sample_rate' in list(hdr.keys()):
info[8] = hdr['sample_rate']
if 'sample_min' in list(hdr.keys()):
info[9] = hdr['sample_min']
if 'sample_max' in list(hdr.keys()):
info[10] = hdr['sample_max']
ffx[1] = hdr
ffx[3] = info
info = ffx[3]
ksamples = info[4]
if ksamples > 0:
fid = info[0]
if (icode >= 10) & (ffx[4] == ''): # read compressed signal
# need to use a script with SHORTEN
raise Exception('compressed signal, need to unpack in a script with SHORTEN')
info[1] = ksamples
# use modes o and n to determine effective peak
        pk = 2 ** (8 * info[6] - 1) * (
            1 + (float(mno) / 2 - int(all([m != 'b' for m in mode]))) / 2 ** info[7])
fid.seek(1024) # jump after the header
nsamples = info[5] * ksamples
if info[6] < 3:
if info[6] < 2:
logging.debug('Sphere i1 PCM')
y = numpy.fromfile(fid, endianess[byteorder]+"i1", -1)
if info[11] % 10 == 1:
if y.shape[0] % 2:
y = numpy.frombuffer(audioop.ulaw2lin(
numpy.concatenate((y, numpy.zeros(1, 'int8'))), 2),
numpy.int16)[:-1]/32768.
else:
y = numpy.frombuffer(audioop.ulaw2lin(y, 2), numpy.int16)/32768.
pk = 1.
else:
y = y - 128
else:
logging.debug('Sphere i2')
y = numpy.fromfile(fid, endianess[byteorder]+"i2", -1)
else: # non verifie
if info[6] < 4:
y = numpy.fromfile(fid, endianess[byteorder]+"i1", -1)
y = y.reshape(nsamples, 3).transpose()
y = (numpy.dot(numpy.array([1, 256, 65536]), y) - (numpy.dot(y[2, :], 2 ** (-7)).astype(int) * 2 ** 24))
else:
y = numpy.fromfile(fid, endianess[byteorder]+"i4", -1)
if sc != 'r':
if sc == 's':
if info[9] > info[10]:
info[9] = numpy.min(y)
info[10] = numpy.max(y)
sf = 1 / numpy.max(list(list(map(abs, info[9:11]))), axis=0)
else:
sf = 1 / pk
y = sf * y
if info[5] > 1:
y = y.reshape(ksamples, info[5])
else:
y = numpy.array([])
if mode != 'f':
fid.close()
info[0] = -1
if not ffx[4] == '':
pass # VERIFY SCRIPT, WHICH CASE IS HANDLED HERE
return y.astype(numpy.float32), int(info[8]), int(info[6])
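# Usage sketch ('utterance.sph' is an illustrative path): read a SPHERE file with
# the default scaling mode and unpack the signal, sample rate and bytes per sample.
sig, sample_rate, n_bytes = read_sph('utterance.sph', mode='p')
print(sig.shape, sample_rate, n_bytes)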
| 5,346,922 |
def test_bulk_disable():
"""
Test for disable all the given workers in the specific load balancer
"""
with patch.object(modjk, "_do_http", return_value={"worker.result.type": "OK"}):
assert modjk.bulk_disable(["node1", "node2", "node3"], "loadbalancer1")
| 5,346,923 |
def unpack(path, catalog_name):
"""
Place a catalog configuration file in the user configuration area.
Parameters
----------
path: Path
Path to output from pack
    catalog_name: str
A unique name for the catalog
Returns
-------
config_path: Path
Location of new catalog configuration file
"""
# Validate user input.
if not os.path.isdir(path):
raise ValueError(f"{path} is not a directory")
source_catalog_file_path = pathlib.Path(path, "catalog.yml")
if not os.path.isfile(source_catalog_file_path):
        raise ValueError(f"Could not find 'catalog.yml' in {path}")
if catalog_name in databroker.utils.list_configs():
raise CatalogNameExists(catalog_name)
config_dir = databroker.catalog_search_path()[0]
with open(source_catalog_file_path) as file:
catalog = yaml.safe_load(file)
source = catalog["sources"].pop("packed_catalog")
# Handle temporary condition where 'pack' puts absolute paths in "args"
# and puts relative paths off to the side.
if any(pathlib.Path(p).is_absolute() for p in source["args"]["paths"]):
relative_paths = source["metadata"]["relative_paths"]
new_paths = [str(pathlib.Path(path, rel_path)) for rel_path in relative_paths]
source["args"]["paths"] = new_paths
# The root_map values may be relative inside a pack, given relative to the
# catalog file. Now that we are going to use a catalog file in a config
# directory, we need to make these paths absolute.
for k, v in source["args"].get("root_map", {}).items():
if not pathlib.Path(v).is_absolute():
source["args"]["root_map"][k] = str(pathlib.Path(path, v))
catalog["sources"][catalog_name] = source
config_filename = f"databroker_unpack_{catalog_name}.yml"
config_path = pathlib.Path(config_dir, config_filename)
os.makedirs(config_dir, exist_ok=True)
with open(config_path, "xt") as file:
yaml.dump(catalog, file)
return config_path
| 5,346,924 |
def psi4ToStrain(mp_psi4, f0):
"""
Convert the input mp_psi4 data to the strain of the gravitational wave
mp_psi4 = Weyl scalar result from simulation
f0 = cutoff frequency
return = strain (h) of the gravitational wave
"""
#TODO: Check for uniform spacing in time
t0 = mp_psi4[:, 0]
list_len = len(t0)
complexPsi = np.zeros(list_len, dtype=np.complex_)
complexPsi = mp_psi4[:, 1]+1.j*mp_psi4[:, 2]
freq, psif = myFourierTransform(t0, complexPsi)
dhf = ffi(freq, psif, f0)
hf = ffi(freq, dhf, f0)
time, h = myFourierTransformInverse(freq, hf, t0[0])
hTable = np.column_stack((time, h))
return hTable
| 5,346,925 |
def get_optimizer(library, solver):
    """Constructs an Optimizer given an optimization library and an optimization
    solver specification."""
options = {
'maxiter': 100
}
if library == 'scipy':
optimizer = optimize.ScipyOptimizer(method=solver, options=options)
elif library == 'ipopt':
optimizer = optimize.IpoptOptimizer()
elif library == 'dlib':
optimizer = optimize.DlibOptimizer(options=options)
elif library == 'pyswarm':
optimizer = optimize.PyswarmOptimizer(options=options)
elif library == 'cmaes':
optimizer = optimize.CmaesOptimizer(options=options)
elif library == 'scipydiffevolopt':
optimizer = optimize.ScipyDifferentialEvolutionOptimizer(
options=options)
elif library == 'pyswarms':
optimizer = optimize.PyswarmsOptimizer(options=options)
elif library == 'nlopt':
optimizer = optimize.NLoptOptimizer(method=solver, options=options)
elif library == 'fides':
options[fides.Options.SUBSPACE_DIM] = solver[1]
optimizer = optimize.FidesOptimizer(options=options,
hessian_update=solver[0])
else:
raise ValueError(f"Optimizer not recognized: {library}")
return optimizer
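# Usage sketch (assumes the pyPESTO-style `optimize` module imported by the
# surrounding code): build a SciPy-backed optimizer capped at 100 iterations.
optimizer = get_optimizer('scipy', 'L-BFGS-B')
# For the 'fides' branch, `solver` is expected to be a (hessian_update, subspace_dim) tuple.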
| 5,346,926 |
def use_profile(profile_name):
"""Make Yolo use an AWS CLI named profile."""
client.YoloClient().use_profile(profile_name)
| 5,346,927 |
def _cast_query(query, col):
"""
    Allow different query types (e.g. numerical, list, str)
"""
query = query.strip()
if col in {"t", "d"}:
return query
if query.startswith("[") and query.endswith("]"):
if "," in query:
            query = query[1:-1].split(",")
return [i.strip() for i in query]
if query.isdigit():
return int(query)
try:
return float(query)
except Exception:
return query
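# Illustrative casts (column names are arbitrary; 't' and 'd' columns stay as text):
print(_cast_query(" 42 ", "n"))       # -> 42 (int)
print(_cast_query("3.14", "n"))       # -> 3.14 (float)
print(_cast_query("[a, b, c]", "n"))  # -> ['a', 'b', 'c']
print(_cast_query("hello", "t"))      # -> 'hello'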
| 5,346,928 |
def test_unpack_raw_package_input():
    Test disassembly of the user's raw package input.
# test for sources with branch
source = "someuser/repository:devel_branch"
extras = "[pre-commit, testing, docs]"
full_input = source + extras
out_source, out_extras = utils.unpack_raw_package_input(full_input)
assert out_source == source
assert out_extras == extras
# test also for source without branch definition
source = "someuser/repository"
extras = "[pre-commit, testing, docs]"
full_input = source + extras
out_source, out_extras = utils.unpack_raw_package_input(full_input)
assert out_source == source
assert out_extras == extras
# check if no extras are given
source = "someuser/repository:devel_branch"
out_source, out_extras = utils.unpack_raw_package_input(source)
assert out_source == source
assert out_extras == ''
| 5,346,929 |
def binary_loss(pred_raw,
label_raw,
loss_func,
weight=None,
class_weight=None,
class_weight_norm=False,
reduction='mean',
avg_factor=None,
smooth=1.0):
"""
:param pred: [N, C, *] scores without softmax
:param label: [N, *] in [0, C], 0 stands for background, 1~C stands for pred in 0~C-1
:return: reduction([N])
"""
pred = pred_raw.clone()
label = label_raw.clone()
num_classes = pred.shape[1]
if class_weight is not None:
class_weight = class_weight.float()
if pred.shape != label.shape:
label = _make_one_hot(label, num_classes)
pred = torch.sigmoid(pred)
loss = 0.
for i in range(num_classes):
if isinstance(loss_func, tuple):
loss_function = loss_func[i]
else:
loss_function = loss_func
class_loss = loss_function(pred[:, i], label[:, i], smooth=smooth)
if class_weight is not None:
class_loss *= class_weight[i]
loss += class_loss
if class_weight is not None and class_weight_norm:
loss = loss / torch.sum(class_weight)
else:
loss = loss / num_classes
loss = weight_reduce_loss(loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
| 5,346,930 |
def picture_upload_to(instance, filename):
"""
Returns a unique filename for picture which is hard to guess.
    Uses uuid.uuid4(), so the chances of collision are vanishingly low.
"""
ext = os.path.splitext(filename)[1].strip('.')
if not ext:
ext = 'jpg'
filename = '%s.%s' % (uuid.uuid4(), ext)
return os.path.join(UPLOAD_TO, filename)
| 5,346,931 |
def showerActivityModel(sol, flux_max, b, sol_max):
""" Activity model taken from: Jenniskens, P. (1994). Meteor stream activity I. The annual streams.
Astronomy and Astrophysics, 287., equation 8.
Arguments:
sol: [float] Solar longitude for which the activity is computed (radians).
flux_max: [float] Peak relative flux.
b: [float] Slope of the shower.
sol_max: [float] Solar longitude of the peak of the shower (radians).
"""
# Compute the flux at given solar longitude
flux = flux_max*10**(-b*np.degrees(np.abs(sol - sol_max)))
return flux
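# Worked example (illustrative, roughly Perseid-like values): peak relative flux 1.0,
# slope b = 0.2 per degree, peak at solar longitude 140 deg, evaluated 2 deg later.
sol_max = np.radians(140.0)
sol = np.radians(142.0)
print(showerActivityModel(sol, 1.0, 0.2, sol_max))  # 10**(-0.2*2) ~= 0.398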
| 5,346,932 |
def create_directories(directory_name):
"""
Create directories
"""
# Create directory
try:
# Create target Directory
os.mkdir(directory_name)
logger.info("Directory %s Created", directory_name)
except FileExistsError:
logger.info("Directory %s already exists", directory_name)
exit(1)
subdirectory_name = os.path.join(directory_name, 'res')
# Create target directory & all intermediate directories if don't exists
try:
os.makedirs(subdirectory_name)
logger.info("Directory %s Created", subdirectory_name)
except FileExistsError:
logger.info("Directory %s already exists", subdirectory_name)
return subdirectory_name
| 5,346,933 |
def setup_loggers():
"""
Configure loggers.
"""
# Create logging path
if not os.path.isdir(Config.LOGGING_PATH):
os.makedirs(Config.LOGGING_PATH)
# Set log format
default_log_format = (
"[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d]"
" %(message)s"
)
log_date_format = "%Y-%m-%d %H:%M:%S"
# Set log level
log_level = getattr(logging, Config.LOG_LEVEL)
formatter = logging.Formatter(default_log_format)
# ----------------------------------------------------------------------- #
# Handlers
# ----------------------------------------------------------------------- #
# Console
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
# File
log_file_path = os.path.join(Config.LOGGING_PATH, Config.LOGGING_FILE)
file_handler = RotatingFileHandler(
log_file_path,
maxBytes=Config.LOGGING_FILE_MAX_BYTES,
backupCount=Config.LOGGING_FILE_BACKUP_COUNT,
)
file_handler.setFormatter(formatter)
# CSV
csv_log_format = (
"%(asctime)s,%(levelname)s,%(name)s,"
"%(funcName)s,%(lineno)d,%(message)s"
)
csv_log_file_path = os.path.join(Config.CSV_LOGGING_PATH, "log.csv")
csv_log_header = [
"asctime",
"levelname",
"name",
"funcName",
"lineno",
"message",
]
csv_handler = CsvRotatingFileHandler(
csv_log_format,
log_date_format,
csv_log_file_path,
Config.LOGGING_FILE_MAX_BYTES,
Config.LOGGING_FILE_BACKUP_COUNT,
csv_log_header,
)
# ----------------------------------------------------------------------- #
# Loggers
# ----------------------------------------------------------------------- #
handlers = [stream_handler, file_handler, csv_handler]
set_logger_config("workflower", log_level, handlers)
| 5,346,934 |
def new_update_template(args):
"""
Command line function that creates or updates an assignment template
repository. Implementation of
both the new_template and update_template console scripts (which perform
the same basic functions but with different command line arguments and
defaults).
Creates an assignment entry in the config file if one does not already
exist.
Parameters
----------
args : command line arguments
"""
try:
create_template(
mode=args.mode,
push_to_github=args.github,
custom_message=args.custom_message,
assignment_name=args.assignment,
)
except FileNotFoundError as fnfe:
# if the assignment does not exist in course_materials/release
print(fnfe)
except FileExistsError as fee:
# if mode = fail and assignment repository already exists
print(fee)
| 5,346,935 |
def download_azure_storage_blob_file(
file_name,
container_name,
connection_string,
destination_file_name=None):
"""
    Download a selected file from Azure Blob Storage to local storage in
    the current working directory.
"""
local_path = os.path.normpath(f'{os.getcwd()}/{destination_file_name}')
blob = BlobClient.from_connection_string(
conn_str=connection_string,
container_name=container_name,
blob_name=file_name)
with open(local_path, 'wb') as new_blob:
blob_data = blob.download_blob()
blob_data.readinto(new_blob)
print(f'{container_name}/{file_name} successfully downloaded to {local_path}')
return
| 5,346,936 |
def test_from_int_error():
"""Verify from_int error
"""
x = uut.FixedPoint(1)
# Verify that passing in something other than an int throws an error
errmsg = re.escape(f'Expected {type(1)}; got {type(13.0)}.')
with nose.tools.assert_raises_regex(TypeError, errmsg):
x.from_int(13.0)
| 5,346,937 |
def request_slow_log(db_cluster_id, start_datetime, end_datetime, page_number, page_size):
"""
    Request the slow SQL log records.
:param db_cluster_id:
:param start_datetime:
:param end_datetime:
:param page_number:
:param page_size:
:return:
"""
request = DescribeSlowLogRecordsRequest()
request.set_accept_format('json')
request.set_DBClusterId(db_cluster_id)
    # Format the date of the previous day
request.set_StartTime(start_datetime)
request.set_EndTime(end_datetime)
request.set_PageNumber(page_number)
request.set_PageSize(page_size)
response = client.do_action_with_exception(request)
response = str(response, encoding='utf-8')
resp_result = json.loads(response)
return resp_result
| 5,346,938 |
def pad_omni_image(image, pad_size, image_dims=None):
"""Pad an omni-directional image with the correct image wrapping at the edges.
Parameters
----------
image
Image to perform the padding on *[batch_shape,h,w,d]*
pad_size
Number of pixels to pad.
image_dims
Image dimensions. Inferred from Inputs if None. (Default value = None)
Returns
-------
ret
New padded omni-directional image *[batch_shape,h+ps,w+ps,d]*
"""
if image_dims is None:
image_dims = image.shape[-3:-1]
# BS x PS x W/2 x D
top_left = image[..., 0:pad_size, int(image_dims[1] / 2):, :]
top_right = image[..., 0:pad_size, 0:int(image_dims[1] / 2), :]
# BS x PS x W x D
top_border = _ivy.flip(_ivy.concatenate((top_left, top_right), -2), -3)
# BS x PS x W/2 x D
bottom_left = image[..., -pad_size:, int(image_dims[1] / 2):, :]
bottom_right = image[..., -pad_size:, 0:int(image_dims[1] / 2), :]
# BS x PS x W x D
bottom_border = _ivy.flip(_ivy.concatenate((bottom_left, bottom_right), -2), -3)
# BS x H+2PS x W x D
image_expanded = _ivy.concatenate((top_border, image, bottom_border), -3)
# BS x H+2PS x PS x D
left_border = image_expanded[..., -pad_size:, :]
right_border = image_expanded[..., 0:pad_size, :]
# BS x H+2PS x W+2PS x D
return _ivy.concatenate((left_border, image_expanded, right_border), -2)
| 5,346,939 |
def add_film(
film: FilmCreate,
db: Session = Depends(get_db),
user: User = Depends(get_current_user),
) -> Any:
"""
Add new film
"""
if not user.role.can_add_films:
raise ForbiddenAction
db_film = db.query(Film).filter(Film.name == film.name).first()
if db_film is not None:
raise FilmAlreadyExists
db_film = Film(
name=film.name,
released_year=film.released_year,
owner_user=user,
)
db.add(db_film)
db.flush()
film_model = FilmGet.from_orm(db_film)
db.commit()
return {
'status': 'ok',
'data': film_model,
}
| 5,346,940 |
def load_cube_file(lines, target_mode=None, cls=ImageFilter.Color3DLUT):
"""Loads 3D lookup table from .cube file format.
:param lines: Filename or iterable list of strings with file content.
:param target_mode: Image mode which should be after color transformation.
The default is None, which means mode doesn't change.
:param cls: A class which handles the parsed file.
Default is ``ImageFilter.Color3DLUT``.
"""
name, size = None, None
channels = 3
    file = None
    if isinstance(lines, str):
        file = lines = open(lines, 'rt')
try:
iterator = iter(lines)
for i, line in enumerate(iterator, 1):
line = line.strip()
if line.startswith('TITLE "'):
name = line.split('"')[1]
continue
if line.startswith('LUT_3D_SIZE '):
size = [int(x) for x in line.split()[1:]]
if len(size) == 1:
size = size[0]
continue
if line.startswith('CHANNELS '):
channels = int(line.split()[1])
if line.startswith('LUT_1D_SIZE '):
raise ValueError("1D LUT cube files aren't supported")
try:
float(line.partition(' ')[0])
except ValueError:
pass
else:
# Data starts
break
if size is None:
raise ValueError('No size found in the file')
table = []
for i, line in enumerate(chain([line], iterator), i):
line = line.strip()
if not line or line.startswith('#'):
continue
try:
pixel = [float(x) for x in line.split()]
except ValueError:
raise ValueError("Not a number on line {}".format(i))
if len(pixel) != channels:
raise ValueError(
"Wrong number of colors on line {}".format(i))
table.extend(pixel)
finally:
if file is not None:
file.close()
instance = cls(size, table, channels=channels,
target_mode=target_mode, _copy_table=False)
if name is not None:
instance.name = name
return instance
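# Usage sketch ('teal_orange.cube' and 'photo.jpg' are illustrative paths; ImageFilter
# and chain are assumed imported by the surrounding module): load a 3D LUT from a
# .cube file and apply it to a Pillow image.
from PIL import Image
lut = load_cube_file('teal_orange.cube')
with Image.open('photo.jpg') as im:
    im.filter(lut).save('photo_graded.jpg')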
| 5,346,941 |
def vector_to_Hermitian(vec):
"""Construct a Hermitian matrix from a vector of N**2 independent
real-valued elements.
Args:
vec (torch.Tensor): (..., N ** 2)
Returns:
mat (ComplexTensor): (..., N, N)
""" # noqa: H405, D205, D400
N = int(np.sqrt(vec.shape[-1]))
mat = torch.zeros(size=vec.shape[:-1] + (N, N, 2), device=vec.device)
# real component
triu = np.triu_indices(N, 0)
triu2 = np.triu_indices(N, 1) # above main diagonal
tril = (triu2[1], triu2[0]) # below main diagonal; for symmetry
mat[(...,) + triu + (np.zeros(triu[0].shape[0]),)] = vec[..., : triu[0].shape[0]]
start = triu[0].shape[0]
mat[(...,) + tril + (np.zeros(tril[0].shape[0]),)] = mat[
(...,) + triu2 + (np.zeros(triu2[0].shape[0]),)
]
# imaginary component
mat[(...,) + triu2 + (np.ones(triu2[0].shape[0]),)] = vec[
..., start : start + triu2[0].shape[0]
]
mat[(...,) + tril + (np.ones(tril[0].shape[0]),)] = -mat[
(...,) + triu2 + (np.ones(triu2[0].shape[0]),)
]
return ComplexTensor(mat[..., 0], mat[..., 1])
| 5,346,942 |
def new_recipe(argv):
"""Makes a new recipe template"""
verb = argv[1]
parser = gen_common_parser()
parser.set_usage(
f"Usage: %prog {verb} [options] recipe_pathname\n" "Make a new template recipe."
)
# Parse arguments
parser.add_option("-i", "--identifier", help="Recipe identifier")
parser.add_option(
"-p", "--parent-identifier", help="Parent recipe identifier for this recipe."
)
parser.add_option(
"--format",
action="store",
default="plist",
help=(
"The format of the new recipe to be created. "
"Valid options include: 'plist' (default) or 'yaml'"
),
)
(options, arguments) = common_parse(parser, argv)
if len(arguments) != 1:
log_err("Must specify exactly one recipe pathname!")
log_err(parser.get_usage())
return -1
filename = arguments[0]
name = os.path.basename(filename).split(".")[0]
identifier = options.identifier or "local." + name
recipe = {
"Description": "Recipe description",
"Identifier": identifier,
"Input": {"NAME": name},
"MinimumVersion": "1.0",
"Process": [
{
"Arguments": {"Argument1": "Value1", "Argument2": "Value2"},
"Processor": "ProcessorName",
}
],
}
if options.parent_identifier:
recipe["ParentRecipe"] = options.parent_identifier
try:
if options.format == "yaml" or filename.endswith(".recipe.yaml"):
# Yaml recipes require AutoPkg 2.3 or later.
recipe["MinimumVersion"] = "2.3"
with open(filename, "wb") as f:
yaml.dump(recipe, f, encoding="utf-8")
else:
with open(filename, "wb") as f:
plistlib.dump(recipe, f)
log(f"Saved new recipe to {filename}")
except Exception as err:
log_err(f"Failed to write recipe: {err}")
| 5,346,943 |
def view_payment(request):
""" A view that renders the payment page template """
user = request.user
# Check if user has already paid and redirect them to definitions app.
if user.has_perm('definitionssoftware.access_paid_definitions_app'):
return redirect(reverse('view_definitionssoftware'))
# Get stripe environment variables
stripe_public_key = settings.STRIPE_PUBLIC_KEY
stripe_secret_key = settings.STRIPE_SECRET_KEY
if request.method == 'POST':
request.session['payment_successful'] = True
return redirect(reverse('payment_success'))
# Create Stripe Payment Intent
stripe_total = 2500
stripe.api_key = stripe_secret_key
intent = stripe.PaymentIntent.create(
amount=stripe_total,
currency=settings.STRIPE_CURRENCY,
)
print(intent)
if not stripe_public_key:
messages.warning(request, 'Stripe public key is missing. \
Did you forget to set it in your environment?')
template = 'payment/payment.html'
context = {
'stripe_public_key': stripe_public_key,
'client_secret': intent.client_secret,
}
return render(request, template, context)
| 5,346,944 |
def augment_bag(store, bag, username=None):
"""
Augment a bag object with information about it's policy type.
"""
if not bag.store:
bag = store.get(bag)
if not username:
username = bag.policy.owner
policy_type = determine_tank_type(bag, username)
bag.icon = POLICY_ICONS[policy_type]
bag.type = policy_type
return bag
| 5,346,945 |
async def test_login_refresh_token_row_na_401():
"""Test the login flow using refresh_token."""
with account_mock() as mock_api:
account = MyBMWAccount(TEST_USERNAME, TEST_PASSWORD, get_region_from_name(TEST_REGION_STRING))
await account.get_vehicles()
with mock.patch(
"bimmer_connected.api.authentication.MyBMWAuthentication._refresh_token_row_na",
wraps=account.mybmw_client_config.authentication._refresh_token_row_na, # pylint: disable=protected-access
) as mock_listener:
mock_api.get("/eadrax-vcs/v1/vehicles").mock(
side_effect=[httpx.Response(401), *([httpx.Response(200, json=[])] * 10)]
)
mock_listener.reset_mock()
await account.get_vehicles()
assert mock_listener.call_count == 1
assert account.mybmw_client_config.authentication.refresh_token is not None
| 5,346,946 |
async def test_track_template_rate_limit(hass):
"""Test template rate limit."""
template_refresh = Template("{{ states | count }}", hass)
refresh_runs = []
@ha.callback
def refresh_listener(event, updates):
refresh_runs.append(updates.pop().result)
info = async_track_template_result(
hass,
[TrackTemplate(template_refresh, None, timedelta(seconds=0.1))],
refresh_listener,
)
await hass.async_block_till_done()
info.async_refresh()
await hass.async_block_till_done()
assert refresh_runs == [0]
hass.states.async_set("sensor.one", "any")
await hass.async_block_till_done()
assert refresh_runs == [0]
info.async_refresh()
assert refresh_runs == [0, 1]
hass.states.async_set("sensor.two", "any")
await hass.async_block_till_done()
assert refresh_runs == [0, 1]
next_time = dt_util.utcnow() + timedelta(seconds=0.125)
with patch(
"homeassistant.helpers.ratelimit.dt_util.utcnow", return_value=next_time
):
async_fire_time_changed(hass, next_time)
await hass.async_block_till_done()
assert refresh_runs == [0, 1, 2]
hass.states.async_set("sensor.three", "any")
await hass.async_block_till_done()
assert refresh_runs == [0, 1, 2]
hass.states.async_set("sensor.four", "any")
await hass.async_block_till_done()
assert refresh_runs == [0, 1, 2]
next_time = dt_util.utcnow() + timedelta(seconds=0.125 * 2)
with patch(
"homeassistant.helpers.ratelimit.dt_util.utcnow", return_value=next_time
):
async_fire_time_changed(hass, next_time)
await hass.async_block_till_done()
assert refresh_runs == [0, 1, 2, 4]
hass.states.async_set("sensor.five", "any")
await hass.async_block_till_done()
assert refresh_runs == [0, 1, 2, 4]
| 5,346,947 |
def clear_pristine_meshes() -> None:
"""Clear the pristine bmesh data."""
global pristine_meshes
for bm in pristine_meshes.values():
bm.free()
pristine_meshes.clear()
| 5,346,948 |
def province_id_to_home_sc_power() -> Dict[utils.ProvinceID, int]:
"""Which power is this a home sc for?"""
content = get_mdf_content(MapMDF.STANDARD_MAP)
home_sc_line = content.splitlines()[2]
tag_to_id = _tag_to_id(get_mdf_content(MapMDF.STANDARD_MAP))
# Assume powers are ordered correctly
id_to_power = {}
power = -1
words = str(home_sc_line).split(' ')
for w in words:
if w in ['(', ')']:
pass
elif w in tag_to_id: # Is a province
id_to_power[tag_to_id[w]] = power
else: # Must be a power tag
power += 1
return id_to_power
| 5,346,949 |
def evaluate_cubic_spline(x, y, r, t):
"""Evaluate cubic spline at points.
Parameters:
x : rank-1 np.array of np.float64
data x coordinates
y : rank-1 np.array of np.float64
data y coordinates
r : rank-1 np.array of np.float64
output of solve_coeffs() for your data
t : rank-1 np.array of np.float64
points where to evaluate. Must satisfy (x[0] <= t <= x[-1]).all().
Returns:
s : rank-1 np.array of np.float64
Value of the spline at the points t.
"""
return _evaluate_generic(x,y,r,t, _evaluate_cubic_spline_one)
| 5,346,950 |
async def test_iter_pulls_from_buffer():
"""Check that the EventIterable emits buffered data if available."""
e = emitter.EventEmitter()
i = iterable.EventIterable(e, 'test')
iterator = await i.__aiter__()
sentinel = object()
e.emit('test', sentinel, test=sentinel)
await asyncio.sleep(0)
args, kwargs = await iterator.__anext__()
assert sentinel in args
assert 'test' in kwargs
assert kwargs['test'] is sentinel
| 5,346,951 |
def get_username(host, meta_host, config):
"""Find username from sources db/metadata/config."""
username = host.username or meta_host.get("username")
if is_windows_host(meta_host):
username = username or "Administrator"
default_user = get_config_value(config["users"], meta_host["os"])
username = username or default_user
return username
| 5,346,952 |
def get_all_tests():
"""
Collect all tests and return them
:return: A test suite as returned by xunitparser with all the tests
available in the w3af framework source code, without any selectors.
"""
return _get_tests('all.xml')
| 5,346,953 |
def spinner_clear():
"""
Get rid of any spinner residue left in stdout.
"""
sys.stdout.write("\b \b")
sys.stdout.flush()
| 5,346,954 |
def main():
"""Builds OSS-Fuzz project's fuzzers for CI tools.
Note: The resulting fuzz target binaries of this build are placed in
the directory: ${GITHUB_WORKSPACE}/out
Returns:
0 on success or nonzero on failure.
"""
return build_fuzzers_entrypoint()
| 5,346,955 |
def find_first_img_dim(import_gen):
"""
Loads in the first image in a provided data set and returns its dimensions
Intentionally returns on first iteration of the loop
:param import_gen: PyTorch DataLoader utilizing ImageFolderWithPaths for its dataset
:return: dimensions of image
"""
for x, _, _ in import_gen:
return x[0].shape[-2], x[0].shape[-1]
| 5,346,956 |
def trib(x1,y1,x2,y2,x3,y3,color):
"""
Usage:
trib x1 y1 x2 y2 x3 y3 color
Parameters:
x1, y1 : the coordinates of the first triangle corner
x2, y2 : the coordinates of the second corner
x3, y3 : the coordinates of the third corner
color: the index of the desired color in the current palette
Description:
This function draws a triangle border with color, using the supplied vertices.
"""
pygame.draw.polygon(screen,TIC["PALETTE"][color%len(TIC["PALETTE"])],[(x1, y1), (x2, y2), (x3, y3)],1)
| 5,346,957 |
def restaurantJSON():
""" Returns all restaurants by JSON call """
restaurants = session.query(Restaurant)
return jsonify(Restaurants=[r.serialize for r in restaurants])
| 5,346,958 |
def _to_histogram_plotgroup(use_spec, plotgroup_id, plot_id, read_type, bincounts, output_dir, png_name):
"""
Create a histogram of length distribution.
"""
plot_spec = use_spec.get_plot_spec(plotgroup_id, plot_id)
png_file = op.join(output_dir, png_name)
png, thumb = plot_read_lengths_binned(bincounts,
png_file,
read_type=read_type,
title=plot_spec.title,
color=get_blue(3),
edgecolor=get_blue(2))
return to_plotgroup(plotgroup_id, plot_id, png, thumb)
| 5,346,959 |
def prepare_string(x, max_length=None):
""" Converts a string from LaTeX escapes to UTF8 and truncates it to max_length """
# data = latex2text(x, tolerant_parsing=True)
try:
data = latex_to_unicode(filter_using_re(x))
if max_length is not None:
data = (data[:max_length-5] + '[...]') if len(data) > max_length else data
return smart_text(data)
except TypeError:
logger.warning("Encountered a TypeError which may be linked to unicode handling "
"in bibtexparser when processing the following string: %s."%x)
return ""
| 5,346,960 |
def q_b(m0, m1, m2, n0, n1, n2):
"""Stretch"""
return math.sqrt((m0 - n0)**2 + (m1 - n1)**2 + (m2 - n2)**2)
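
# Worked example (added for illustration): q_b is just the Euclidean distance
# between (m0, m1, m2) and (n0, n1, n2), so a displacement of (1, 2, 2) gives
# sqrt(1 + 4 + 4) = 3.
assert q_b(0.0, 0.0, 0.0, 1.0, 2.0, 2.0) == 3.0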
| 5,346,961 |
def poly_union(poly_det, poly_gt):
"""Calculate the union area between two polygon.
Args:
poly_det (Polygon): A polygon predicted by detector.
poly_gt (Polygon): A gt polygon.
Returns:
union_area (float): The union area between two polygons.
"""
assert isinstance(poly_det, plg.Polygon)
assert isinstance(poly_gt, plg.Polygon)
area_det = poly_det.area()
area_gt = poly_gt.area()
area_inters, _ = poly_intersection(poly_det, poly_gt)
return area_det + area_gt - area_inters
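
# Usage sketch (illustrative; assumes the Polygon3 package is imported as plg
# and that poly_intersection from the same module is available, as above).
# Two unit squares overlapping over half their area give 1 + 1 - 0.5 = 1.5.
square_a = plg.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
square_b = plg.Polygon([(0.5, 0), (1.5, 0), (1.5, 1), (0.5, 1)])
union_area = poly_union(square_a, square_b)  # expected: 1.5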
| 5,346,962 |
def get_stoich(geom_i, geom_j):
""" get the overall combined stoichiometry
"""
form_i = automol.geom.formula(geom_i)
form_j = automol.geom.formula(geom_j)
form = automol.formula.join(form_i, form_j)
stoich = ''
for key, val in form.items():
stoich += key + str(val)
return stoich
| 5,346,963 |
async def test_export_chat_invite_link(bot: Bot):
""" exportChatInviteLink method test """
from .types.dataset import CHAT, INVITE_LINK
chat = types.Chat(**CHAT)
async with FakeTelegram(message_data=INVITE_LINK):
result = await bot.export_chat_invite_link(chat_id=chat.id)
assert result == INVITE_LINK
| 5,346,964 |
def get_all_text(url):
"""Retrieves all text in paragraphs.
    :param str url: The URL to scrape.
    :rtype: str
    :return: The text found at the URL, or None if an error occurred.
"""
try:
response = requests.get(url)
# If the response was successful, no Exception will be raised
response.raise_for_status()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}') # Python 3.6
return None
# sys.exit()
except Exception as err:
print(f'Other error occurred: {err}') # Python 3.6
return None
# sys.exit()
soup = BeautifulSoup(response.text, "lxml")
text = ""
for i in soup.find_all('p'): # soup.select
# i.encode("utf-8") # default
# Delete citations (e.g. "The Alhambra is a UNESCO World Heritage Site.[2]")
text += i.get_text() + '\n'
text = clean_text.del_nonAscii(clean_text.del_refs(text))
return text
| 5,346,965 |
def metadata_partitioner(rx_txt: str) -> List[str]:
"""Extract Relax program and metadata section.
Parameters
----------
rx_txt : str
The input relax text.
Returns
-------
output : List[str]
The result list of partitioned text, the first element
is the relax program, and the second is metadata section.
"""
partitions = []
left_curly = 0
meta_start = 0
meta_end = 0
for i, char in enumerate(rx_txt):
if i < 0:
raise ValueError("The program is invalid.")
if char == "{":
if meta_start == 0:
meta_start = i
left_curly += 1
elif char == "}":
left_curly -= 1
if left_curly == 0:
meta_end = i + 1
break
if meta_end == 0:
raise ValueError("The metadata section was not found.")
metadata = rx_txt[meta_start:meta_end]
rx_program = rx_txt[meta_end:-1]
partitions.append(rx_program)
partitions.append(metadata)
return partitions
| 5,346,966 |
def make_aware(value, timezone):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if hasattr(timezone, 'localize'):
# available for pytz time zones
return timezone.localize(value, is_dst=None)
else:
# may be wrong around DST changes
return value.replace(tzinfo=timezone)
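
# Usage sketch (illustrative; assumes the optional pytz dependency is
# installed). pytz zones expose localize(), so the first branch is taken.
import pytz
from datetime import datetime
paris = pytz.timezone("Europe/Paris")
aware = make_aware(datetime(2021, 6, 1, 12, 0), paris)
# aware.utcoffset() -> datetime.timedelta(seconds=7200), i.e. CEST (UTC+2)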
| 5,346,967 |
def to_dict(funs):
"""Convert an object to a dict using a dictionary of functions.
to_dict(funs)(an_object) => a dictionary with keys calculated from functions on an_object
Note the dictionary is copied, not modified in-place.
If you want to modify a dictionary in-place, do adict.update(to_dict(funs)(a_dict))
Use to_dict(funs) in a map, and you can generate a list of dictionaries from a list of objects (which could also be dictionaries).
:: K is hashable type => {K: (X -> V)} -> [X] -> {K: V}
Equivalent to the following in Python 3:
{k: f(an_object) for (k, f) in funs.items()}
>>> from operator import itemgetter
>>> funs = {'id': itemgetter('id'), 'fullname': lambda x: '%(forename)s %(surname)s' % x}
>>> an_object = {'id': 1, 'forename': 'Fred', 'surname': 'Bloggs'}
>>> result = to_dict(funs)(an_object)
>>> result['id']
1
>>> result['fullname']
'Fred Bloggs'
>>> 'forename' in result # Original keys are left out
False
"""
def to_dict_funs(an_object):
return dict((k, f(an_object)) for (k, f) in funs.items())
return to_dict_funs
| 5,346,968 |
def parse_duration_string_ms(duration):
"""Parses a duration string of the form 1h2h3m4s5.6ms4.5us7.8ns into milliseconds."""
pattern = r'(?P<value>[0-9]+\.?[0-9]*?)(?P<units>\D+)'
matches = list(re.finditer(pattern, duration))
assert matches, 'Failed to parse duration string %s' % duration
times = {'h': 0, 'm': 0, 's': 0, 'ms': 0}
for match in matches:
parsed = match.groupdict()
times[parsed['units']] = float(parsed['value'])
return (times['h'] * 60 * 60 + times['m'] * 60 + times['s']) * 1000 + times['ms']
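
# Worked example (added for illustration): hours, minutes and seconds are
# converted to milliseconds and the ms component is added on top:
# (1*3600 + 2*60 + 3) * 1000 + 4.5 = 3723004.5
assert parse_duration_string_ms("1h2m3s4.5ms") == 3723004.5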
| 5,346,969 |
def test_get_file_cancelled(cbcsdk_mock, connection_mock):
"""Test the response to the 'get file' command."""
cbcsdk_mock.mock_request('POST', '/appservices/v6/orgs/test/liveresponse/sessions', SESSION_INIT_RESP)
cbcsdk_mock.mock_request('GET', '/appservices/v6/orgs/test/liveresponse/sessions/1:2468', SESSION_POLL_RESP)
cbcsdk_mock.mock_request('GET', '/appservices/v6/orgs/test/devices/2468', DEVICE_RESPONSE)
cbcsdk_mock.mock_request('POST', '/appservices/v6/orgs/test/liveresponse/sessions/1:2468/commands',
GET_FILE_COMMAND_RESP)
cbcsdk_mock.mock_request('GET', '/appservices/v6/orgs/test/liveresponse/sessions/1:2468/commands/7',
GET_FILE_CANCELLED_RESP)
cbcsdk_mock.mock_request('DELETE', '/appservices/v6/orgs/test/liveresponse/sessions/1:2468', None)
manager = LiveResponseSessionManager(cbcsdk_mock.api)
with manager.request_session(2468) as session:
with pytest.raises(ApiError) as ex:
session.get_file('c:\\\\test.txt')
assert 'The command has been cancelled.' in str(ex.value)
| 5,346,970 |
def fin(activity):
"""Return the end time of the activity. """
return activity.finish
| 5,346,971 |
def add_outgoing_flow(node_id, successor_node_id, bpmn_diagram):
"""
    :param node_id: ID of the node the new sequence flow starts from.
    :param successor_node_id: ID of the node the new sequence flow points to.
    :param bpmn_diagram: the BPMN diagram graph object to modify.
"""
if bpmn_diagram.diagram_graph.node[node_id].get(consts.Consts.outgoing_flow) is None:
bpmn_diagram.diagram_graph.node[node_id][consts.Consts.outgoing_flow] = []
    bpmn_diagram.diagram_graph.node[node_id][consts.Consts.outgoing_flow].append(
        get_flow_id(node_id, successor_node_id))
| 5,346,972 |
def test_work_dbpedia_query():
"""CreativeWork - dbpedia_fr : Should pass"""
work1 = CreativeWork(
title="Arrival",
author="ABBA",
author_is_organisation=True,
endpoints=[Endpoint.dbpedia_fr],
query_language=Lang.French)
work1.query(strict_mode=True, check_type=True)
work2 = CreativeWork(
title="Arrival",
author="ABBA",
author_is_organisation=True,
endpoints=[Endpoint.dbpedia_fr],
query_language=Lang.French)
work2.query(strict_mode=True, check_type=False)
work3 = CreativeWork(
title="Arrival",
author="ABBA",
author_is_organisation=True,
endpoints=[Endpoint.dbpedia_fr],
query_language=Lang.French)
work3.query(strict_mode=False, check_type=True)
work4 = CreativeWork(
title="Arrival",
author="ABBA",
author_is_organisation=True,
endpoints=[Endpoint.dbpedia_fr],
query_language=Lang.French)
work4.query(strict_mode=False, check_type=False)
assert work1.attributes
assert work2.attributes
assert work3.attributes
assert work4.attributes
| 5,346,973 |
def unfreeze_map(obj):
"""
    Unfreezes all values of a mapping, returning a plain dict.
"""
return {key: unfreeze(value) for key, value in obj.items()}
| 5,346,974 |
def iterate_entries(incident_id: Optional[str], query_filter: Dict[str, Any],
entry_filter: Optional[EntryFilter] = None) -> Iterator[Entry]:
"""
Iterate war room entries
:param incident_id: The incident ID to search entries from.
:param query_filter: Filters to search entries.
:param entry_filter: Filters to filter entries.
:return: An iterator to retrieve entries.
"""
query_filter = dict(**query_filter)
first_id = 1
while True:
query_filter['firstId'] = str(first_id)
ents = demisto.executeCommand('getEntries', assign_params(
id=incident_id,
filter=query_filter
))
if not ents:
break
if is_error(ents[0]):
if first_id == 1:
return_error('Unable to retrieve entries')
break
for ent in ents:
if not entry_filter:
yield Entry(ent, None, None)
else:
match = entry_filter.match(ent)
if match:
yield Entry(ent, match[0], match[1])
# Set the next ID
last_id = ent['ID']
m = re.match('([0-9]+)', last_id)
if not m:
raise ValueError(f'Invalid entry ID: {last_id}')
next_id = int(m[1]) + 1
if next_id <= first_id:
break
first_id = next_id
| 5,346,975 |
def get_date(
value: Optional[Union[date, datetime, str]],
raise_error=False
) -> Optional[date]:
"""
Convert a given value to a date.
Args:
raise_error: flag to raise error if return is None or not
value: to be converted. Can be date/datetime obj as well as str formatted in date/datetime
Returns:
date obj
Raises:
ValueError: If raise_error flag is True and parsed_date is None
Examples:
>>> get_date(date(2021, 1, 1))
datetime.date(2021, 1, 1)
>>> get_date(datetime(2021, 1, 1, 0, 2))
datetime.date(2021, 1, 1)
>>> get_date('2020-01-01 13:12:13')
datetime.date(2020, 1, 1)
>>> get_date('sadasadasdas') is None
True
>>> get_date(None) is None
True
>>> get_date('2021-20-20-20-20', raise_error=True)
Traceback (most recent call last):
...
ValueError: Invalid date 2021-20-20-20-20
"""
if isinstance(value, datetime):
return value.date()
if isinstance(value, date):
return value
if value is not None:
# A common date is in the form "2020-01-01", 10 characters
if len(value) > 10:
parsed_date = parse_datetime(value)
parsed_date = parsed_date.date() if parsed_date else None
else:
parsed_date = parse_date(value)
else:
parsed_date = None
if parsed_date is None and raise_error:
raise ValueError(f"Invalid date {value}")
return parsed_date
| 5,346,976 |
def make_working_directories ():
""" Creates directories that we will be working in.
In particular, we will have DOC_ROOT/stage-PID and
    DOC_ROOT/package-PID """
global doc_root
import os.path, os
stage_dir = os.path.join (doc_root, "stage-" + str (os.getpid ()))
package_dir = os.path.join (doc_root, "package-" + str (os.getpid ()))
os.mkdir (stage_dir)
os.mkdir (package_dir)
return (stage_dir, package_dir)
| 5,346,977 |
def k_param(kguess, s):
"""
Finds the root of the maximum likelihood estimator
for k using Newton's method. Routines for using Newton's method
exist within the scipy package but they were not explored. This
function is sufficiently well behaved such that we should not
have problems solving for k, especially since we have a good
estimate of k to use as a starting point.
"""
k = kguess
val = np.log(k) - sps.psi(k) - s
counter = 0
while np.abs(val) >= 0.0001:
k = k - (np.log(k)-sps.psi(k)-s)/(1/k-sps.polygamma(1, k))
val = np.log(k) - sps.psi(k) - s
# sps.polygamma(1,k) is first derivative of sps.psi(k)
counter += 1
if counter > MAX_NEWTON_ITERATIONS:
raise Exception("Max Newton's method iterations exceeded")
return k
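
# Usage sketch (added for illustration; np, sps and MAX_NEWTON_ITERATIONS are
# assumed to be in scope, as in the module above). For gamma-distributed data
# the statistic is s = ln(mean(x)) - mean(ln(x)), and a standard closed-form
# approximation provides the starting guess for Newton's method.
data = np.random.gamma(2.5, 1.0, size=10_000)
s = np.log(data.mean()) - np.log(data).mean()
kguess = (3.0 - s + np.sqrt((s - 3.0) ** 2 + 24.0 * s)) / (12.0 * s)
k_hat = k_param(kguess, s)  # should land close to the true shape of 2.5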
| 5,346,978 |
def hex_to_bin(value: int) -> str:
    """
    Convert an integer (e.g. written as a hexadecimal literal) to its binary
    string representation: 0xf -> '0b1111'.
    """
    return bin(value)
| 5,346,979 |
def four_oneports_2_twoport(s11: Network, s12: Network, s21: Network, s22: Network, *args, **kwargs) -> Network:
"""
Builds a 2-port Network from list of four 1-ports
Parameters
----------
s11 : one-port :class:`Network`
s11
s12 : one-port :class:`Network`
s12
s21 : one-port :class:`Network`
s21
s22 : one-port :class:`Network`
s22
\*args, \*\*kwargs :
passed to :func:`Network.__init__` for the twoport
Returns
-------
twoport : two-port :class:`Network`
result
See Also
--------
n_oneports_2_nport
three_twoports_2_threeport
"""
return n_oneports_2_nport([s11, s12, s21, s22], *args, **kwargs)
| 5,346,980 |
def find_xml_command(rvt_version, xml_path):
"""
    Finds the index, source path and group of each named Command in the RevitPythonShell.xml configuration.
:param rvt_version: rvt version to find the appropriate RevitPythonShell.xml.
:param xml_path: path where RevitPythonShell.xml resides.
:return: Commands dictionary: {com_name:[index, src_path, group]}
"""
if not xml_path:
xml_path = op.join(op.expanduser("~"),
"AppData\\Roaming\\RevitPythonShell{0}\\RevitPythonShell.xml").format(rvt_version)
xml_tree = ETree.parse(xml_path)
xml_root = xml_tree.getroot()
commands = defaultdict(list)
for child in xml_root:
if child.tag == 'Commands':
            com_children = list(child)  # Element.getchildren() was removed in Python 3.9
for i, com_child in enumerate(com_children):
com_name = com_child.attrib["name"]
commands[com_name].append(i)
commands[com_name].append(com_child.attrib["src"])
commands[com_name].append(com_child.attrib["group"])
return commands
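
# Illustrative input/output (hypothetical names, added for clarity): given a
# RevitPythonShell.xml containing
#   <Commands>
#     <Command name="MyScript" src="C:\scripts\my_script.py" group="Tools"/>
#   </Commands>
# the function returns {'MyScript': [0, 'C:\\scripts\\my_script.py', 'Tools']}.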
| 5,346,981 |
def put_data_to_s3(data, bucket, key, acl=None):
"""data is bytes not string"""
content_type = mimetypes.guess_type(key)[0]
if content_type is None:
content_type = 'binary/octet-stream'
put_object_args = {'Bucket': bucket, 'Key': key, 'Body': data,
'ContentType': content_type}
if acl:
put_object_args.update({'ACL': acl})
return boto3.client('s3').put_object(**put_object_args)
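
# Usage sketch (illustrative; bucket and key names are hypothetical and boto3
# credentials must already be configured).
response = put_data_to_s3(b"hello world", "my-example-bucket",
                          "exports/hello.txt", acl="public-read")
# For a .txt key the guessed ContentType is "text/plain".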
| 5,346,982 |
def join_analysis_json_path(data_path: Path, analysis_id: str, sample_id: str) -> Path:
"""
Join the path to an analysis JSON file for the given sample-analysis ID combination.
Analysis JSON files are created when the analysis data is too large for a MongoDB document.
:param data_path: the path to the application data
:param analysis_id: the ID of the analysis
:param sample_id: the ID of the sample
:return: a path
"""
return join_analysis_path(data_path, analysis_id, sample_id) / "results.json"
| 5,346,983 |
def user_profile(uname=None):
"""
    Gets a user's profile by user name (GET) or modifies the user profile (POST, not yet implemented).
    Returns the user's complete profile together with the events recommended for them (brief events).
:param uname: user's name, a string
:return: a json structured as {'user': [(0, 'who', 'password', '[email protected]', 'address', 'Limoges')],
'event': [{'event_id': 1234, 'title': '...', ...},{'event_id': 2345, ...}, ...]}
"""
verify_headers(request.headers)
if request.method == 'GET':
user = user_manager.return_user_data(uname)
if len(user) == 0:
abort(404)
preferred_events_id = rcmd_manager.get_recommendations_for_user(user_manager.return_user_id(uname))
preferred_events = []
for pair in preferred_events_id:
preferred_events.append({'activity': event_manager.get_event_with_nearest(pair[0]),
'score': pair[1]})
return jsonify({'user': user, 'event': preferred_events})
elif request.method == 'POST':
if not request.json:
abort(400)
# to do for user profile modification
| 5,346,984 |
def circuit_status(self, handle: ResultHandle) -> CircuitStatus:
"""
Return a CircuitStatus reporting the status of the circuit execution
corresponding to the ResultHandle
"""
if handle in self._cache:
return CircuitStatus(StatusEnum.COMPLETED)
raise CircuitNotRunError(handle)
| 5,346,985 |
def run_users(mode: str, email: Optional[str] = None, username: Optional[str] = None, password: Optional[str] = ''):
"""Users CLI entrypoint"""
if mode not in MODES:
raise ValueError(f"`mode` must be one of `{MODES}`. Value `{mode}` is given")
if mode == LIST_USERS:
run_list_users()
elif mode == CREATE_USER:
run_create_user(email, username, password)
elif mode == ACTIVATE_USER:
run_set_activation(username, True)
elif mode == DEACTIVATE_USER:
run_set_activation(username, False)
| 5,346,986 |
def _add_enum(params, name, enum):
"""Adds enum information to our template parameters."""
enum_info = {
'name': name,
'identname': _ident(name),
'description': _format_comment(name, _get_desc(enum)),
'members': []}
for en in enum.get('enum', []):
member = {'identname': _ident(en), 'name': en}
if enum.get('enumLongDescriptions', {}).get(en):
desc = enum.get('enumLongDescriptions', {}).get(en)
else:
desc = enum.get('enumDescriptions', {}).get(en, '')
member['description'] = _format_comment(
'%s%s' % (en, name), desc, cutpoint='shall', add='')
enum_info['members'].append(member)
params['enums'].append(enum_info)
| 5,346,987 |
def run_pyfunnel(test_dir):
"""Run pyfunnel compareAndReport function.
The test is run:
* with the parameters, reference and test values from the test directory
passed as argument;
* from current directory (to which output directory path is relative).
Args:
test_dir (str): path of test directory
Returns:
int: exit status of funnel binary
"""
with open(os.path.join(test_dir, 'param.json'), 'r') as f:
par = json.load(f)
ref = pd.read_csv(os.path.join(test_dir, par['reference']))
test = pd.read_csv(os.path.join(test_dir, par['test']))
par['outputDirectory'] = par['output']
for t in ['atolx', 'atoly', 'ltolx', 'ltoly', 'rtolx', 'rtoly']:
try:
par[t]
except KeyError:
par[t] = None
rc = pyfunnel.compareAndReport(
ref.iloc(axis=1)[0],
ref.iloc(axis=1)[1],
test.iloc(axis=1)[0],
test.iloc(axis=1)[1],
**{k: par[k] for k in ['outputDirectory', 'atolx', 'atoly', 'ltolx', 'ltoly', 'rtolx', 'rtoly']}
)
return rc
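
# Illustrative test-directory layout (hypothetical values, added for clarity):
# param.json names the reference/test CSV files, the output directory and any
# optional tolerances; tolerances left out default to None above.
#
#   my_case/
#     param.json  -> {"reference": "ref.csv", "test": "test.csv",
#                     "output": "results", "atoly": 0.002}
#     ref.csv, test.csv  (two columns each: x, y)
#
# rc = run_pyfunnel("tests/my_case")  # returns 0 on success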
| 5,346,988 |
def get_portfolio() -> pd.DataFrame:
"""
Get complete user portfolio
Returns:
pd.DataFrame: complete portfolio
"""
portfolio = get_simple_portfolio()
full_portfolio = pd.DataFrame()
for ticket in portfolio.index:
        full_portfolio = pd.concat(  # DataFrame.append was removed in pandas 2.0
            [full_portfolio, _clear_individual_information(get_individual_information(ticket))])
return full_portfolio
| 5,346,989 |
def create():
"""Create and scaffold application, module"""
pass
| 5,346,990 |
def findZeros( vec, tol = 0.00001 ):
"""Given a vector of a data, finds all the zeros
returns a Nx2 array of data
each row is a zero, first column is the time of the zero, second column indicates increasing
or decreasing (+1 or -1 respectively)"""
zeros = []
for i in range( vec.size - 1 ):
a = float( vec[ i ] )
b = float( vec[ i + 1] )
increasing = 1
if ( b < a ):
increasing = -1
if ( a * b < 0 ):
t = -a / ( b - a )
zeros.append( ( i + t, increasing ) )
if ( abs( vec[ -1 ] ) < tol ):
if ( vec[-1] > vec[-2] ):
zeros.append( ( vec.size - 1, 1 ) )
else:
zeros.append( ( vec.size - 1, -1 ) )
    return np.array( zeros, dtype=np.float64 )  # np.int was removed from NumPy and would truncate the interpolated times
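
# Worked example (illustrative, using the float dtype above): the sign change
# from -1 to 1 between samples 0 and 1 is located by interpolation at t = 0.5.
vec = np.array([-1.0, 1.0, -1.0])
findZeros(vec)
# -> array([[ 0.5,  1. ],
#           [ 1.5, -1. ]])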
| 5,346,991 |
def test_multiply(time_data: TimeData, mult_arg: Union[float, Dict[str, float]]):
"""Test multiply"""
from resistics.time import Multiply
time_data_new = Multiply(multiplier=mult_arg).run(time_data)
assert time_data_new.data.dtype == time_data.data.dtype
to_mult = mult_arg
if isinstance(to_mult, (float, int)):
to_mult = {x: to_mult for x in time_data.metadata.chans}
for chan in time_data.metadata.chans:
mult_val = to_mult[chan] if chan in to_mult else 1
np.testing.assert_array_equal(time_data_new[chan], time_data[chan] * mult_val)
| 5,346,992 |
def test_notice():
""" NOTICE command """
assert like("NOTICE #ch :hello, world", pack_command(
"NOTICE", target="#ch", message="hello, world"))
assert like("NOTICE WiZ :hello, world", pack_command(
"NOTICE", target="WiZ", message="hello, world"))
| 5,346,993 |
def get_index(square_num: int) -> List[int]:
"""
Gets the indices of a square given the square number
:param square_num: An integer representing a square
    :return: A list [row, col] giving the position of the square, or None if it is not found
"""
for i in range(4):
for j in range(4):
if puzzle_state[i][j] == square_num:
return [i, j]
| 5,346,994 |
def _ResolveName(item):
"""Apply custom name info if provided by metadata"""
# ----------------------------------------------------------------------
def IsValidName(value):
return bool(value)
# ----------------------------------------------------------------------
if Attributes.UNIVERSAL_NAME_OVERRIDE_ATTRIBUTE_NAME in item.metadata.Values:
metadata_value = item.metadata.Values[Attributes.UNIVERSAL_NAME_OVERRIDE_ATTRIBUTE_NAME]
if not IsValidName(metadata_value.Value):
raise Exceptions.ResolveInvalidCustomNameException(
metadata_value.Source,
metadata_value.Line,
metadata_value.Column,
name=metadata_value.Value,
)
item.name = metadata_value.Value
del item.metadata.Values[Attributes.UNIVERSAL_NAME_OVERRIDE_ATTRIBUTE_NAME]
| 5,346,995 |
def extract_errno(errstr):
"""
Given an error response from a proxyfs RPC, extracts the error number
from it, or None if the error isn't in the usual format.
"""
# A proxyfs error response looks like "errno: 18"
m = re.match(PFS_ERRNO_RE, errstr)
if m:
return int(m.group(1))
| 5,346,996 |
def index():
""" Index page """
return render_template("index.html");
| 5,346,997 |
def solve(in_array):
"""
Similar to 46442a0e, but where new quadrants are flips of the original array rather than rotations
:param in_array: input array
:return: expected output array
"""
array_edgelength = len(in_array[0]) # input array edge length
opp_end = array_edgelength*2-1 # used for getting opposite end of array
prediction = [[-1]*array_edgelength*2 for i in range(array_edgelength*2)] # init 2d array
# iterate through all values
for y in range(len(in_array)):
for x in range(len(in_array[0])):
val = in_array[y][x]
prediction[y][x] = val
# other 3 quadrants are flips
prediction[y][opp_end-x] = val
prediction[opp_end-y][opp_end-x] = val
prediction[opp_end-y][x] = val
return prediction
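
# Worked example (added for illustration): each quadrant of the output is a
# horizontal and/or vertical flip of the 2x2 input.
assert solve([[1, 2], [3, 4]]) == [
    [1, 2, 2, 1],
    [3, 4, 4, 3],
    [3, 4, 4, 3],
    [1, 2, 2, 1],
]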
| 5,346,998 |
def trim_all(audio, rate, frame_duration, ambient_power=1e-4):
"""Trims ambient silence in the audio anywhere.
params:
audio: A numpy ndarray, which has 1 dimension and values within
-1.0 to 1.0 (inclusive)
rate: An integer, which is the rate at which samples are taken
frame_duration: A float, which is the duration of each frame
to check
ambient_power: A float, which is the Root Mean Square of ambient noise
return: A numpy ndarray, which has 1 dimension and values within
-1.0 to 1.0 (inclusive)
"""
new_audio = []
powers, fr = for_each_frame(audio, rate, frame_duration, calc_rms)
frame_length = round(rate / fr)
for ndx, power in enumerate(powers):
if power > ambient_power:
new_audio += audio[ndx*frame_length:(ndx+1)*frame_length].tolist()
return np.array(new_audio)
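
# Usage sketch (illustrative; relies on the for_each_frame and calc_rms
# helpers used above). Silence padded around a 440 Hz tone is trimmed away,
# leaving roughly the tone itself.
rate = 16000
t = np.linspace(0, 1, rate, endpoint=False)
tone = 0.5 * np.sin(2 * np.pi * 440 * t)
audio = np.concatenate([np.zeros(rate // 2), tone, np.zeros(rate // 2)])
trimmed = trim_all(audio, rate, frame_duration=0.02)
# len(trimmed) should be close to len(tone)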
| 5,346,999 |