content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M)
---|---
def validate_policy_spec(policy_spec):
"""
:param PolicySpecification policy_spec:
"""
# validate policy values
if policy_spec.policy:
p = policy_spec.policy
# validate key pair values
if policy_spec.policy.key_pair:
if len(policy_spec.policy.key_pair.key_types) > 1:
raise VenafiError("Key Type values exceeded. Only one Key Type is allowed by VaaS")
if policy_spec.policy.key_pair.key_types \
and policy_spec.policy.key_pair.key_types[0].lower() != KeyType.RSA:
raise VenafiError(f"Key Type [{p.key_pair.key_types[0]}] is not supported by VaaS")
if len(policy_spec.policy.key_pair.rsa_key_sizes) > 0:
invalid_value = get_invalid_cloud_rsa_key_size_value(policy_spec.policy.key_pair.rsa_key_sizes)
if invalid_value:
raise VenafiError(f"The Key Size [{invalid_value}] is not supported by VaaS")
# validate subject CN and SAN regexes
if p.subject_alt_names:
sans = get_sans(policy_spec.policy.subject_alt_names)
if len(sans) > 0:
for k, v in sans.items():
if v is True and not (k == RPA.TPP_DNS_ALLOWED):
raise VenafiError(f"Subject Alt name [{k}] is not allowed by VaaS")
# validate default subject values against policy values
if policy_spec.defaults and policy_spec.defaults.subject and policy_spec.policy.subject:
ds = policy_spec.defaults.subject
s = policy_spec.policy.subject
if ds.org and len(s.orgs) > 0:
if not is_valid_policy_value(s.orgs, ds.org):
raise VenafiError(default_error_msg.format('Organization', ds.org, s.orgs))
if ds.org_units and len(ds.org_units) > 0 and len(s.org_units) > 0:
if not member_of(ds.org_units, s.org_units):
raise VenafiError(default_error_msg.format('Org Units', ds.org_units, s.org_units))
if ds.locality and len(s.localities) > 0:
if not is_valid_policy_value(s.localities, ds.locality):
raise VenafiError(default_error_msg.format('Localities', ds.locality, s.localities))
if ds.state and len(s.states) > 0:
if not is_valid_policy_value(s.states, ds.state):
raise VenafiError(default_error_msg.format('States', ds.state, s.states))
if ds.country and len(s.countries) > 0:
if not is_valid_policy_value(s.countries, ds.country):
raise VenafiError(default_error_msg.format('Countries', ds.country, s.countries))
# validate default key pair values against policy values
if policy_spec.defaults and policy_spec.defaults.key_pair and policy_spec.policy.key_pair:
dkp = policy_spec.defaults.key_pair
kp = policy_spec.policy.key_pair
if dkp.key_type and len(kp.key_types) > 0:
if dkp.key_type not in kp.key_types:
raise VenafiError(default_error_msg.format('Key Types', dkp.key_type, kp.key_types))
if dkp.rsa_key_size and len(kp.rsa_key_sizes) > 0:
if dkp.rsa_key_size not in kp.rsa_key_sizes:
raise VenafiError(default_error_msg.format('RSA Key Sizes', dkp.rsa_key_size, kp.rsa_key_sizes))
if dkp.elliptic_curve and len(kp.elliptic_curves) > 0:
if dkp.elliptic_curve not in kp.elliptic_curves:
raise VenafiError(default_error_msg.format('Elliptic Curves', dkp.elliptic_curve,
kp.elliptic_curves))
if dkp.service_generated is not None and kp.service_generated is not None:
if dkp.service_generated != kp.service_generated:
raise VenafiError(
default_error_msg.format('Service Generated', dkp.service_generated, kp.service_generated))
else:
policy_spec.policy = Policy()
# validate default values when policy is not defined
if policy_spec.defaults and policy_spec.defaults.key_pair:
dkp = policy_spec.defaults.key_pair
if dkp.key_type and dkp.key_type != "RSA":
raise VenafiError(f"Default Key Type [{dkp.key_type}] is not supported by VaaS")
if dkp.rsa_key_size:
invalid_value = get_invalid_cloud_rsa_key_size_value([dkp.rsa_key_size])
if invalid_value:
raise VenafiError(f"Default Key Size [{invalid_value}] is not supported by VaaS")
| 5,346,400 |
def zeros(shape, dtype=None):
"""
Create a Tensor filled with zeros, closer to Numpy's syntax than ``alloc``.
"""
if dtype is None:
dtype = config.floatX
return alloc(numpy.array(0, dtype=dtype), *shape)
| 5,346,401 |
def station_location_from_rinex(rinex_path: str) -> Optional[types.ECEF_XYZ]:
"""
Opens a RINEX file and looks in the headers for the station's position
Args:
rinex_path: the path to the rinex file
Returns:
        XYZ ECEF coords in meters for the approximate receiver location
        (approximate meaning it may be off by a meter or so),
        or None if ECEF coords could not be found
"""
xyz = None
lat = None
lon = None
height = None
with open(rinex_path, "rb") as filedat:
for _ in range(50):
linedat = filedat.readline()
if b"POSITION XYZ" in linedat:
xyz = numpy.array([float(x) for x in linedat.split()[:3]])
elif b"Monument location:" in linedat:
lat, lon, height = [float(x) for x in linedat.split()[2:5]]
elif b"(latitude)" in linedat:
lat = float(linedat.split()[0])
elif b"(longitude)" in linedat:
lon = float(linedat.split()[0])
elif b"(elevation)" in linedat:
height = float(linedat.split()[0])
if lat is not None and lon is not None and height is not None:
xyz = coordinates.geodetic2ecef((lat, lon, height))
if xyz is not None:
return cast(types.ECEF_XYZ, xyz)
return None
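
# Usage sketch (illustrative): the file name is hypothetical. RINEX headers
# usually carry an "APPROX POSITION XYZ" record, which the b"POSITION XYZ"
# branch above picks up; otherwise lat/lon/height lines are converted through
# coordinates.geodetic2ecef.
ecef = station_location_from_rinex("data/stat0010.21o")
if ecef is not None:
    print("approximate receiver ECEF position (m):", ecef)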
| 5,346,402 |
def UpdatePDB(pdb_filename, verbose=True, build_dir=None, toolchain_dir=None):
"""Update a pdb file with source information."""
dir_blacklist = { }
if build_dir:
# Blacklisting the build directory allows skipping the generated files, for
# Chromium this makes the indexing ~10x faster.
build_dir = (os.path.normpath(build_dir)).lower()
for directory, _, _ in os.walk(build_dir):
dir_blacklist[directory.lower()] = True
dir_blacklist[build_dir.lower()] = True
if toolchain_dir:
# Blacklisting the directories from the toolchain as we don't have revision
# info for them.
toolchain_dir = (os.path.normpath(toolchain_dir)).lower()
    for directory, _, _ in os.walk(toolchain_dir):
dir_blacklist[directory.lower()] = True
dir_blacklist[toolchain_dir.lower()] = True
# Writes the header of the source index stream.
#
# Here's the description of the variables used in the SRC_* macros (those
# variables have to be defined for every source file that we want to index):
# var1: The file path.
# var2: The name of the file without its path.
# var3: The revision or the hash of this file's repository.
# var4: The URL to this file.
# var5: (optional) The python method to call to decode this file, e.g. for
# a base64 encoded file this value should be 'base64.b64decode'.
lines = [
'SRCSRV: ini ------------------------------------------------',
'VERSION=1',
'INDEXVERSION=2',
'VERCTRL=Subversion',
'DATETIME=%s' % time.asctime(),
'SRCSRV: variables ------------------------------------------',
'SRC_EXTRACT_TARGET_DIR=%targ%\%fnbksl%(%var2%)\%var3%',
'SRC_EXTRACT_TARGET=%SRC_EXTRACT_TARGET_DIR%\%fnfile%(%var1%)',
'SRC_EXTRACT_CMD=cmd /c "mkdir "%SRC_EXTRACT_TARGET_DIR%" & python -c '
'"import urllib2, base64;'
'url = \\\"%var4%\\\";'
'u = urllib2.urlopen(url);'
'print %var5%(u.read());" > "%SRC_EXTRACT_TARGET%""',
'SRCSRVTRG=%SRC_EXTRACT_TARGET%',
'SRCSRVCMD=%SRC_EXTRACT_CMD%',
'SRCSRV: source files ---------------------------------------',
]
if ReadSourceStream(pdb_filename):
raise Exception("PDB already has source indexing information!")
filelist = ExtractSourceFiles(pdb_filename)
number_of_files = len(filelist)
indexed_files_total = 0
while filelist:
filename = next(iter(filelist))
filedir = os.path.dirname(filename)
if verbose:
print "[%d / %d] Processing: %s" % (number_of_files - len(filelist),
number_of_files, filename)
# This directory is blacklisted, either because it's not part of a
# repository, or from one we're not interested in indexing.
if dir_blacklist.get(filedir, False):
if verbose:
print " skipping, directory is blacklisted."
filelist.remove(filename)
continue
# Skip the files that don't exist on the current machine.
if not os.path.exists(filename):
filelist.remove(filename)
continue
# Try to index the current file and all the ones coming from the same
# repository.
indexed_files = IndexFilesFromRepo(filename, filelist, lines)
if not indexed_files:
if not DirectoryIsUnderPublicVersionControl(filedir):
dir_blacklist[filedir] = True
if verbose:
print "Adding %s to the blacklist." % filedir
filelist.remove(filename)
continue
indexed_files_total += indexed_files
if verbose:
print " %d files have been indexed." % indexed_files
lines.append('SRCSRV: end ------------------------------------------------')
WriteSourceStream(pdb_filename, '\r\n'.join(lines))
if verbose:
print "%d / %d files have been indexed." % (indexed_files_total,
number_of_files)
| 5,346,403 |
def get_profiles():
"""Return the paths to all profiles in the local library"""
paths = APP_DIR.glob("profile_*")
return sorted(paths)
| 5,346,404 |
def main(ctx, *args, **kwargs):
"""
This script generate a Dockerfile from Dockertemplate (jinja template).
"""
if ctx.invoked_subcommand is None:
ctx.forward(generate)
| 5,346,405 |
def split(array, nelx, nely, nelz, dof):
"""
Splits an array of boundary conditions into an array of collections of
elements. Boundary conditions that are more than one node in size are
grouped together. From the nodes, the function returns the neighboring
elements inside the array.
"""
if len(array) == 0:
return []
array.sort()
connected_nodes = [array[0]]
nlist = []
tmp = _get_elem(array[0], nelx, nely, nelz, dof)
for i in range(1, len(array)):
if _nodes_connected(connected_nodes, array[i], nelx, nely, nelz, dof):
tmp = tmp.union(_get_elem(array[i], nelx, nely, nelz, dof))
connected_nodes.append(array[i])
else:
nlist.append(list(tmp))
tmp = _get_elem(array[i], nelx, nely, nelz, dof)
nlist.append(list(tmp))
return nlist
| 5,346,406 |
def test_no_source(test_dir: str) -> None:
"""cp d_file -> d_file.back should fail if d_file does not exist"""
d_file = os.path.join(test_dir, "d_file")
sys.argv = ["pycp", d_file, "d_file.back"]
with pytest.raises(SystemExit):
pycp_main()
| 5,346,407 |
async def test_setup(hass):
"""Test the general setup of the integration."""
# Set up some mock feed entries for this test.
mock_entry_1 = _generate_mock_feed_entry(
"1234",
"Title 1",
15.5,
(38.0, -3.0),
locality="Locality 1",
attribution="Attribution 1",
time=datetime.datetime(2018, 9, 22, 8, 0, tzinfo=datetime.timezone.utc),
magnitude=5.7,
mmi=5,
depth=10.5,
quality="best",
)
mock_entry_2 = _generate_mock_feed_entry(
"2345", "Title 2", 20.5, (38.1, -3.1), magnitude=4.6
)
mock_entry_3 = _generate_mock_feed_entry(
"3456", "Title 3", 25.5, (38.2, -3.2), locality="Locality 3"
)
mock_entry_4 = _generate_mock_feed_entry("4567", "Title 4", 12.5, (38.3, -3.3))
# Patching 'utcnow' to gain more control over the timed update.
utcnow = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
"aio_geojson_client.feed.GeoJsonFeed.update"
) as mock_feed_update:
mock_feed_update.return_value = "OK", [mock_entry_1, mock_entry_2, mock_entry_3]
assert await async_setup_component(hass, geonetnz_quakes.DOMAIN, CONFIG)
await hass.async_block_till_done()
# Artificially trigger update and collect events.
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
# 3 geolocation and 1 sensor entities
assert (
len(hass.states.async_entity_ids("geo_location"))
+ len(hass.states.async_entity_ids("sensor"))
== 4
)
entity_registry = er.async_get(hass)
assert len(entity_registry.entities) == 4
state = hass.states.get("geo_location.title_1")
assert state is not None
assert state.name == "Title 1"
assert state.attributes == {
ATTR_EXTERNAL_ID: "1234",
ATTR_LATITUDE: 38.0,
ATTR_LONGITUDE: -3.0,
ATTR_FRIENDLY_NAME: "Title 1",
ATTR_LOCALITY: "Locality 1",
ATTR_ATTRIBUTION: "Attribution 1",
ATTR_TIME: datetime.datetime(
2018, 9, 22, 8, 0, tzinfo=datetime.timezone.utc
),
ATTR_MAGNITUDE: 5.7,
ATTR_DEPTH: 10.5,
ATTR_MMI: 5,
ATTR_QUALITY: "best",
ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
ATTR_SOURCE: "geonetnz_quakes",
ATTR_ICON: "mdi:pulse",
}
assert float(state.state) == 15.5
state = hass.states.get("geo_location.title_2")
assert state is not None
assert state.name == "Title 2"
assert state.attributes == {
ATTR_EXTERNAL_ID: "2345",
ATTR_LATITUDE: 38.1,
ATTR_LONGITUDE: -3.1,
ATTR_FRIENDLY_NAME: "Title 2",
ATTR_MAGNITUDE: 4.6,
ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
ATTR_SOURCE: "geonetnz_quakes",
ATTR_ICON: "mdi:pulse",
}
assert float(state.state) == 20.5
state = hass.states.get("geo_location.title_3")
assert state is not None
assert state.name == "Title 3"
assert state.attributes == {
ATTR_EXTERNAL_ID: "3456",
ATTR_LATITUDE: 38.2,
ATTR_LONGITUDE: -3.2,
ATTR_FRIENDLY_NAME: "Title 3",
ATTR_LOCALITY: "Locality 3",
ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
ATTR_SOURCE: "geonetnz_quakes",
ATTR_ICON: "mdi:pulse",
}
assert float(state.state) == 25.5
# Simulate an update - two existing, one new entry, one outdated entry
mock_feed_update.return_value = "OK", [mock_entry_1, mock_entry_4, mock_entry_3]
async_fire_time_changed(hass, utcnow + DEFAULT_SCAN_INTERVAL)
await hass.async_block_till_done()
assert (
len(hass.states.async_entity_ids("geo_location"))
+ len(hass.states.async_entity_ids("sensor"))
== 4
)
# Simulate an update - empty data, but successful update,
# so no changes to entities.
mock_feed_update.return_value = "OK_NO_DATA", None
async_fire_time_changed(hass, utcnow + 2 * DEFAULT_SCAN_INTERVAL)
await hass.async_block_till_done()
assert (
len(hass.states.async_entity_ids("geo_location"))
+ len(hass.states.async_entity_ids("sensor"))
== 4
)
# Simulate an update - empty data, removes all entities
mock_feed_update.return_value = "ERROR", None
async_fire_time_changed(hass, utcnow + 3 * DEFAULT_SCAN_INTERVAL)
await hass.async_block_till_done()
assert (
len(hass.states.async_entity_ids("geo_location"))
+ len(hass.states.async_entity_ids("sensor"))
== 1
)
assert len(entity_registry.entities) == 1
| 5,346,408 |
def sanitize_app_name(app):
"""Sanitize the app name and build matching path"""
app = "".join(c for c in app if c.isalnum() or c in ('.', '_')).rstrip().lstrip('/')
return app
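
# Usage sketch (illustrative): the join already drops anything that is not
# alphanumeric, '.' or '_', so slashes and trailing punctuation disappear and
# the final rstrip()/lstrip('/') act only as a safety net.
assert sanitize_app_name("/my_app.v2!") == "my_app.v2"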
| 5,346,409 |
def send_email(ses_connection, signed_url, email_from, email_subject, email_to,
email_cc, email_rt, email_date):
"""
    Function designed to send an email
    :param ses_connection: The SES connection object used to send the email
    :param signed_url: The Signed URL
:param email_from: The "from" email address
:param email_subject: The Subject of the email
:param email_to: The "to" email address
:param email_cc: The "cc" email address/es
:param email_rt: the "Reply-To" email addresses.
:param email_date: The date that the original email was sent.
:return: Null.
"""
# print "Setting up email Body"
email_body = "Hi,\n\nThe Blink Mobility Platform attempted to " \
"process this email (Sent: %s), however the email size " \
"(including attachments) " \
"exceeded the 10MB limit.\n\n" \
"To access the email, please click the following link " \
"(which will expire in 14 days): \n\n" \
"%s" \
"\n\n" \
"Opening Instructions:\n\n " \
"1. Download the file\n " \
"2. Open the file in your chosen mail client " \
"(e.g. Apple Mail, Microsoft Outlook, etc)\n " \
"3. The email should open displaying the " \
"email body and all attachments \n\n " \
"Thanks,\n " \
"BlinkMobile Interactive" % (email_date, signed_url)
# print emailBody
print "SENDING EMAIL -- From: %s To: %s CC: %s Subject: %s" \
% (email_from, email_to, email_cc, email_subject)
ses_connection.send_email(
source=email_from,
subject=email_subject,
body=email_body,
to_addresses=email_to,
cc_addresses=email_cc,
reply_addresses=email_rt
)
| 5,346,410 |
def get_rinex_file_version(file_path: pathlib.PosixPath) -> str:
""" Get RINEX file version for a given file path
Args:
file_path: File path.
Returns:
RINEX file version
"""
with files.open(file_path, mode="rt") as infile:
try:
version = infile.readline().split()[0]
except IndexError:
log.fatal(f"Could not find Rinex version in file {file_path}")
return version
| 5,346,411 |
def get_hm_port(identity_service, local_unit_name, local_unit_address,
host_id=None):
"""Get or create a per unit Neutron port for Octavia Health Manager.
A side effect of calling this function is that a port is created if one
does not already exist.
:param identity_service: reactive Endpoint of type ``identity-service``
:type identity_service: RelationBase class
:param local_unit_name: Name of juju unit, used to build tag name for port
:type local_unit_name: str
:param local_unit_address: DNS resolvable IP address of unit, used to
build Neutron port ``binding:host_id``
:type local_unit_address: str
:param host_id: Identifier used by SDN for binding the port
:type host_id: Option[None,str]
:returns: Port details extracted from result of call to
neutron_client.list_ports or neutron_client.create_port
:rtype: dict
:raises: api_crud.APIUnavailable, api_crud.DuplicateResource
"""
session = session_from_identity_service(identity_service)
try:
nc = init_neutron_client(session)
resp = nc.list_networks(tags='charm-octavia')
except NEUTRON_TEMP_EXCS as e:
raise APIUnavailable('neutron', 'networks', e)
network = None
n_resp = len(resp.get('networks', []))
if n_resp == 1:
network = resp['networks'][0]
elif n_resp > 1:
raise DuplicateResource('neutron', 'networks', data=resp)
else:
ch_core.hookenv.log('No network tagged with `charm-octavia` exists, '
'deferring port setup awaiting network and port '
'(re-)creation.', level=ch_core.hookenv.WARNING)
return
health_secgrp = None
try:
resp = nc.list_security_groups(tags='charm-octavia-health')
except NEUTRON_TEMP_EXCS as e:
raise APIUnavailable('neutron', 'security_groups', e)
n_resp = len(resp.get('security_groups', []))
if n_resp == 1:
health_secgrp = resp['security_groups'][0]
elif n_resp > 1:
raise DuplicateResource('neutron', 'security_groups', data=resp)
else:
ch_core.hookenv.log('No security group tagged with '
'`charm-octavia-health` exists, deferring '
'port setup awaiting network and port '
'(re-)creation...',
level=ch_core.hookenv.WARNING)
return
try:
resp = nc.list_ports(tags='charm-octavia-{}'
.format(local_unit_name))
except NEUTRON_TEMP_EXCS as e:
raise APIUnavailable('neutron', 'ports', e)
port_template = {
'port': {
# avoid race with OVS agent attempting to bind port
# before it is created in the local units OVSDB
'admin_state_up': False,
'binding:host_id': host_id or socket.gethostname(),
# NOTE(fnordahl): device_owner has special meaning
# for Neutron [0], and things may break if set to
            # an arbitrary value. Using a value known by Neutron
            # is_dvr_serviced() function [1] gets us the correct
            # rules applied to the port to allow IPv6 Router
# Advertisement packets through LP: #1813931
# 0: https://github.com/openstack/neutron/blob/
# 916347b996684c82b29570cd2962df3ea57d4b16/
# neutron/plugins/ml2/drivers/openvswitch/
# agent/ovs_dvr_neutron_agent.py#L592
# 1: https://github.com/openstack/neutron/blob/
# 50308c03c960bd6e566f328a790b8e05f5e92ead/
# neutron/common/utils.py#L200
'device_owner': (
neutron_lib.constants.DEVICE_OWNER_LOADBALANCERV2),
'security_groups': [
health_secgrp['id'],
],
'name': 'octavia-health-manager-{}-listen-port'
.format(local_unit_name),
'network_id': network['id'],
},
}
n_resp = len(resp.get('ports', []))
if n_resp == 1:
hm_port = resp['ports'][0]
        # Ensure binding:host_id is up to date on an existing port
#
# In the event of a need to update it, we bring the port down to make
# sure Neutron rebuilds the port correctly.
#
# Our caller, ``setup_hm_port``, will toggle the port admin status.
if hm_port and hm_port.get(
'binding:host_id') != port_template['port']['binding:host_id']:
try:
nc.update_port(hm_port['id'], {
'port': {
'admin_state_up': False,
'binding:host_id': port_template['port'][
'binding:host_id'],
}
})
except NEUTRON_TEMP_EXCS as e:
raise APIUnavailable('neutron', 'ports', e)
elif n_resp > 1:
raise DuplicateResource('neutron', 'ports', data=resp)
else:
# create new port
try:
resp = nc.create_port(port_template)
hm_port = resp['port']
ch_core.hookenv.log('Created port {}'.format(hm_port['id']),
ch_core.hookenv.INFO)
# unit specific tag is used by each unit to load their state
nc.add_tag('ports', hm_port['id'],
'charm-octavia-{}'
.format(local_unit_name))
# charm-wide tag is used by leader to load cluster state and build
# ``controller_ip_port_list`` configuration property
nc.add_tag('ports', hm_port['id'], 'charm-octavia')
except NEUTRON_TEMP_EXCS as e:
raise APIUnavailable('neutron', 'ports', e)
return hm_port
| 5,346,412 |
def benchmark(ctx, report=False):
"""
Run and generate benchmarks for current code
"""
import benchmarks
benchmarks.main(report=report)
| 5,346,413 |
def total_length(neurite):
"""Neurite length. For a morphology it will be a sum of all neurite lengths."""
return sum(s.length for s in neurite.iter_sections())
| 5,346,414 |
def _solarize(img, magnitude):
"""solarize"""
return ImageOps.solarize(img, magnitude)
| 5,346,415 |
def calculateCurvature(yRange, left_fit_cr):
"""
Returns the curvature of the polynomial `fit` on the y range `yRange`.
"""
return ((1 + (2 * left_fit_cr[0] * yRange * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
2 * left_fit_cr[0])
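
# Note and usage sketch (illustrative): the expression above is the radius of
# curvature R = (1 + (2*A*y + B)**2)**1.5 / |2*A| for a quadratic fit
# x = A*y**2 + B*y + C evaluated at y = yRange, with the derivative term scaled
# by the module-level ym_per_pix constant (metres per pixel, assumed defined).
left_fit_cr = np.array([2.5e-4, -0.12, 380.0])  # hypothetical fit coefficients
radius_m = calculateCurvature(719, left_fit_cr)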
| 5,346,416 |
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
    # First try an O(n) time, O(1) space solution for the common case that
    # everything is finite; fall back to O(n) space np.isfinite to
    # prevent false positives from overflow in the sum method.
_assert_all_finite(X.data if sparse.issparse(X) else X)
| 5,346,417 |
def find_file_in_sequence(file_root: str, file_number: int = 1) -> tuple:
"""
    Returns the Nth file in an image sequence where N is file_number (1 for the first file).
Args:
file_root: image file root name.
file_number: image file number in sequence.
Returns:
tuple (filename,sequencenumber).
"""
currentfolder = azcam.utils.curdir()
for _, _, files in os.walk(currentfolder):
break
for f in files:
if f.startswith(file_root):
break
try:
if not f.startswith(file_root):
raise azcam.AzcamError("image sequence not found")
except Exception:
raise azcam.AzcamError("image sequence not found")
firstfile = azcam.utils.fix_path(os.path.join(currentfolder, f))
firstsequencenumber = firstfile[-9:-5]
firstnum = firstsequencenumber
firstsequencenumber = int(firstsequencenumber)
sequencenumber = firstsequencenumber + file_number - 1
newnum = "%04d" % sequencenumber
filename = firstfile.replace(firstnum, newnum)
return (filename, sequencenumber)
| 5,346,418 |
def testing_server_error_view(request):
"""Displays a custom internal server error (500) page"""
return render(request, '500.html', {})
| 5,346,419 |
def main_epilog() -> str:
"""
This method builds the footer for the main help screen.
"""
msg = "To get help on a specific command, see `conjur <command> -h | --help`\n\n"
msg += "To start using Conjur with your environment, you must first initialize " \
"the configuration. See `conjur init -h` for more information."
return msg
| 5,346,420 |
def apt_update(fatal=False):
"""Update local apt cache"""
cmd = ['apt-get', 'update']
_run_apt_command(cmd, fatal)
| 5,346,421 |
def sigma_M(n):
"""boson lowering operator, AKA sigma minus"""
return np.diag([np.sqrt(i) for i in range(1, n)], k=1)
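
# Quick check (illustrative): for n = 3 the operator has sqrt(1), sqrt(2) on
# the first superdiagonal, so it maps |1> -> |0> and |2> -> sqrt(2)|1>.
a_low = sigma_M(3)
print(a_low @ np.array([0.0, 1.0, 0.0]))  # [1., 0., 0.]
print(a_low @ np.array([0.0, 0.0, 1.0]))  # [0., 1.41421356..., 0.]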
| 5,346,422 |
def protected_view(context, request):
""" """
raise HTTPForbidden()
| 5,346,423 |
def windowing_is(root, *window_sys):
"""
    Check the current windowing system.
:param root: A tk widget to be used as reference
:param window_sys: if any windowing system provided here is the current
windowing system `True` is returned else `False`
:return: boolean
"""
windowing = root.tk.call('tk', 'windowingsystem')
return windowing in window_sys
| 5,346,424 |
def init_columns_entries(variables):
"""
    Making sure we have `columns` & `entries` to return, without affecting the original objects.
"""
columns = variables.get('columns')
if columns is None:
columns = [] # Relevant columns in proper order
if isinstance(columns, str):
columns = [columns]
else:
columns = list(columns)
entries = variables.get('entries')
if entries is None:
entries = [] # Entries of dict with relevant columns
elif isinstance(entries, dict):
entries = [entries]
else:
entries = list(entries)
return columns, entries
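
# Usage sketch (illustrative): scalar inputs are normalised to lists and the
# objects in `variables` are left untouched.
cols, rows = init_columns_entries({"columns": "name", "entries": {"name": "a"}})
# cols -> ["name"], rows -> [{"name": "a"}]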
| 5,346,425 |
def _run_ic(dataset: str, name: str) -> Tuple[int, float, str]:
"""Run iterative compression on all datasets.
Parameters
----------
dataset : str
Dataset name.
name : str
FCL name.
Returns
-------
Tuple[int, float, str]
Solution size, time, and certificate.
"""
# Execute
time, size, certificate = solve_ic(
str(FCL_DATA_DIR / dataset / 'huffner' / (name + HUFFNER_DATA_EXT)),
timeout=EXACT_TIMEOUT,
preprocessing=2,
htime=min(0.3 * EXACT_TIMEOUT, 1)
)
# Return
return size, time, str(certificate)
| 5,346,426 |
def test_report_scaled_data(dials_data, tmpdir):
"""Test that dials.report works on scaled data."""
result = procrunner.run(
[
"dials.report",
dials_data("l_cysteine_4_sweeps_scaled") / "scaled_30.refl",
dials_data("l_cysteine_4_sweeps_scaled") / "scaled_30.expt",
],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("dials-report.html").check()
| 5,346,427 |
def notch(Wn, Q=10, analog=False, output="ba"):
"""
Design an analog or digital biquad notch filter with variable Q.
The notch differs from a peaking cut filter in that the gain at the
notch center frequency is 0, or -Inf dB.
Transfer function: H(s) = (s**2 + 1) / (s**2 + s/Q + 1)
    Parameters
----------
Wn : float
Center frequency of the filter.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
Q : float
Quality factor of the filter. Examples:
* sqrt(2) is 1 octave wide
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'ss'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
state-space ('ss').
Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
"""
# H(s) = (s**2 + 1) / (s**2 + s/Q + 1)
b = np.array([1, 0, 1])
a = np.array([1, 1 / Q, 1])
return _transform(b, a, Wn, analog, output)
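
# Usage sketch (illustrative): design a digital notch centred at 0.1 of the
# Nyquist frequency with Q = 30; per the docstring, output='ba' should yield
# the (b, a) coefficient pair via the module-level _transform helper.
b_notch, a_notch = notch(0.1, Q=30, output="ba")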
| 5,346,428 |
def dict_to_unyt(dict_obj) -> None:
"""Recursively convert values of dictionary containing units information to unyt quantities"""
for key, value in dict_obj.items():
if isinstance(value, dict):
if "array" not in value and "unit" not in value:
dict_to_unyt(value)
else:
np_array = np.array(value["array"], dtype=float)
if np_array.shape == tuple():
unyt_func = u.unyt_quantity
else:
unyt_func = u.unyt_array
dict_obj[key] = unyt_func(np_array, value["unit"])
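
# Usage sketch (illustrative): leaf dicts carrying "array" and "unit" keys are
# converted in place into unyt arrays (or unyt quantities for 0-d values),
# while plain nested dicts are recursed into.
config_dict = {"box": {"length": {"array": [1.0, 2.0, 3.0], "unit": "nm"}}}
dict_to_unyt(config_dict)
print(config_dict["box"]["length"])  # unyt_array([1., 2., 3.], 'nm')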
| 5,346,429 |
def dvpool(name: str) -> None:
"""
Delete a variable from the kernel pool.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dvpool_c.html
:param name: Name of the kernel variable to be deleted.
"""
name = stypes.string_to_char_p(name)
libspice.dvpool_c(name)
| 5,346,430 |
def test_config_key_error():
"""An unknown key should throw a key error"""
c = core.Config()
with pytest.raises(KeyError):
c['doesNotExist']
| 5,346,431 |
def test_BBPSSW_psi_minus_psi_minus():
"""
psi- psi-
0b0
[0. +0.j 0.5+0.j 0.5+0.j 0. +0.j]
0b1
[0.+0.j 0.+0.j 0.+0.j 0.+0.j]
0b10
[0.+0.j 0.+0.j 0.+0.j 0.+0.j]
0b11
[ 0. +0.j -0.5+0.j -0.5+0.j 0. +0.j]
"""
counter = 0
for i in range(100):
tl, kept1, kept2, meas1, meas2, ep1, ep2 = create_scenario(psi_minus, psi_minus, i)
assert kept1.entangled_memory == {'node_id': 'a2', 'memo_id': 'kept2'}
assert kept2.entangled_memory == {'node_id': 'a1', 'memo_id': 'kept1'}
assert ep1.meas_res == ep2.meas_res
ket1 = tl.quantum_manager.get(kept1.qstate_key)
ket2 = tl.quantum_manager.get(kept2.qstate_key)
assert id(ket1) == id(ket2)
assert kept1.qstate_key in ket1.keys and kept2.qstate_key in ket1.keys
state = correct_order(ket1.state, ket1.keys)
if ep1.meas_res == 0:
counter += 1
assert complex_array_equal(phi_plus, state)
else:
assert complex_array_equal([0, -sqrt_2, -sqrt_2, 0], state)
assert abs(counter - 50) < 10
| 5,346,432 |
def Torus(radius=(1, 0.5), tile=(20, 20), device='cuda:0'):
"""
Creates a torus quad mesh
Parameters
----------
radius : (float,float) (optional)
radii of the torus (default is (1,0.5))
tile : (int,int) (optional)
the number of divisions of the cylinder (default is (20,20))
device : str or torch.device (optional)
the device the tensors will be stored to (default is 'cuda:0')
Returns
-------
(Tensor,LongTensor,Tensor)
the point set tensor, the topology tensor, the vertex normals
"""
T, P = grid2mesh(*tuple(TorusPatch(radius=radius, tile=tile, device=device)))
N = vertex_normal(P, quad2tri(T))
return P, T, N
| 5,346,433 |
def check_new_value(new_value: str, definition) -> bool:
"""
checks with definition if new value is a valid input
:param new_value: input to set as new value
:param definition: valid options for new value
:return: true if valid, false if not
"""
if type(definition) is list:
if new_value in definition:
return True
else:
return False
elif definition is bool:
if new_value == "true" or new_value == "false":
return True
else:
return False
elif definition is int:
try:
int(new_value)
return True
except ValueError:
return False
elif definition is float:
try:
float(new_value)
return True
except ValueError:
return False
elif definition is str:
return True
else:
# We could not validate the type or values so we assume it is incorrect
return False
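
# Usage sketch (illustrative): validation against a list of options and against
# bool/int/float type definitions.
assert check_new_value("yes", ["yes", "no"]) is True
assert check_new_value("true", bool) is True
assert check_new_value("5", int) is True
assert check_new_value("5.x", float) is False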
| 5,346,434 |
def covid_API(cases_and_deaths: dict) -> dict:
"""
Imports Covid Data
    :param cases_and_deaths: The structure dictionary (from the config file) describing which metrics to request
:return: A dictionary of covid information
"""
api = Cov19API(filters=england_only, structure=cases_and_deaths)
data = api.get_json()
return data
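
# Usage sketch (illustrative): the structure dict maps output field names to
# uk_covid19 metric names (the metric names below are typical examples, and
# england_only is the module-level filter already used above).
structure = {
    "date": "date",
    "newCasesByPublishDate": "newCasesByPublishDate",
}
covid_data = covid_API(structure)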
| 5,346,435 |
def cnn(train_x, train_y, test1_x, test1_y, test2_x, test2_y):
"""
    Train and evaluate a convolutional neural network.
"""
# Add a single "channels" dimension at the end
trn_x = train_x.reshape([-1, 30, 30, 1])
tst1_x = test1_x.reshape([-1, 30, 30, 1])
tst2_x = test2_x.reshape([-1, 30, 30, 1])
# First layer will need argument `input_shape=(30,30,1)`
model = Sequential([
# TODO: add your implementation here
Conv2D(32, kernel_size = (5, 5), strides=(1, 1), activation='relu', input_shape=(30,30,1)),
MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
Conv2D(64, kernel_size = (5, 5), activation='relu'),
MaxPooling2D(pool_size=(2, 2)),
Flatten(),
Dense(512, activation='relu'),
Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(trn_x, train_y, epochs=5)
print("Evaluating CNN on test set 1")
model.evaluate(tst1_x, test1_y)
print("Evaluating CNN on test set 2")
return model.evaluate(tst2_x, test2_y)
| 5,346,436 |
def gather_gltf2(export_settings):
"""
Gather glTF properties from the current state of blender.
:return: list of scene graphs to be added to the glTF export
"""
scenes = []
animations = [] # unfortunately animations in gltf2 are just as 'root' as scenes.
active_scene = None
for blender_scene in bpy.data.scenes:
scenes.append(__gather_scene(blender_scene, export_settings))
if export_settings[gltf2_blender_export_keys.ANIMATIONS]:
animations += __gather_animations(blender_scene, export_settings)
if bpy.context.scene.name == blender_scene.name:
            active_scene = len(scenes) - 1
return active_scene, scenes, animations
| 5,346,437 |
def getOneRunMountainCarFitness_modifiedReward(tup):
"""Get one fitness from the MountainCar or MountainCarContinuous
environment while modifying its reward function.
The MountainCar environments reward only success, not progress towards
success. This means that individuals that are trying to drive up the
hill, but not succeeding will get the exact same fitness as individuals
that do nothing at all. This function provides some reward to the
individual based on the maximum distance it made it up the hill.
Parameters: A tuple expected to contain the following:
0: individual - The model,
1: continuous - True if using MountainCarContinuous, false to use
MountainCar.
2: renderSpeed - None to not render, otherwise the number of seconds to
sleep between each frame; this can be a floating point
value."""
individual, continuous, renderSpeed = tup[0], tup[1], tup[2]
env = None
if continuous:
env = gym.make('MountainCarContinuous-v0')
else:
env = gym.make('MountainCar-v0')
maxFrames = 2000
runReward = 0
maxPosition = -1.2 # 1.2 is the minimum for this environment.
observation = env.reset()
individual.resetForNewTimeSeries()
for j in range(maxFrames):
# The continuous version doesn't required argmax, but it does need
# a conversion from a single value to the list that the environment
# expects:
if continuous:
action = [individual.calculateOutputs(observation)]
else:
action = np.argmax(individual.calculateOutputs(observation))
if renderSpeed is not None:
env.render()
if renderSpeed != 0:
time.sleep(renderSpeed)
observation, reward, done, info = env.step(action)
runReward += reward
# Record the furthest we made it up the hill:
maxPosition = max(observation[0], maxPosition)
if done:
break
env.close()
# Return the fitness, modified by the maxPosition attained. The position
# weighs heavier with the continuous version:
if continuous:
return runReward + (1000.0 * maxPosition)
else:
return runReward + (10.0 * maxPosition)
| 5,346,438 |
def ArtToModel(art, options):
"""Convert an Art object into a Model object.
Args:
art: geom.Art - the Art object to convert.
options: ImportOptions - specifies some choices about import
Returns:
(geom.Model, string): if there was a major problem, Model may be None.
The string will be errors and warnings.
"""
pareas = art2polyarea.ArtToPolyAreas(art, options.convert_options)
if not pareas:
return (None, "No visible faces found")
if options.scaled_side_target > 0:
pareas.scale_and_center(options.scaled_side_target)
m = model.PolyAreasToModel(pareas, options.bevel_amount,
options.bevel_pitch, options.quadrangulate)
if options.extrude_depth > 0:
model.ExtrudePolyAreasInModel(m, pareas, options.extrude_depth,
options.cap_back)
return (m, "")
| 5,346,439 |
def simulate_data(N, intercept, slope, nu, sigma2=1, seed=None):
"""Simulate noisy linear model with t-distributed residuals.
Generates `N` samples from a one-dimensional linear regression with
residuals drawn from a t-distribution with `nu` degrees of freedom, and
scaling-parameter `sigma2`. The true parameters of the linear model are
specified by the `intercept` and `slope` parameters.
Args:
N, int: Number of samples.
intercept, float: The intercept of the linear model.
slope, float: The slope of the linear model.
nu, float (>0): The degrees of freedom of the t-distribution.
sigma2, float (>0): The scale-parameter of the t-distribution.
seed, int: Set random seed for repeatability.
Return:
DataFrame containing N samples from noisy linear model.
"""
np.random.seed(seed)
# x ~ Uniform(0,1)
interval = np.linspace(0,1, num=2*N)
sample = np.random.choice(interval, size=N, replace=False)
df = pd.DataFrame({"x": sample})
# generate y values using linear model
linear_map = lambda x: intercept + slope*x
df['y'] = linear_map(df['x']) + sigma2*np.random.standard_t(nu, N)
return df
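
# Usage sketch (illustrative): draw 100 points from y = 1 + 2x with
# heavy-tailed (nu = 3) residuals and a fixed seed for repeatability.
df_sim = simulate_data(N=100, intercept=1.0, slope=2.0, nu=3, sigma2=0.5, seed=42)
print(df_sim.head())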
| 5,346,440 |
def _link_irods_folder_to_django(resource, istorage, foldername, exclude=()):
"""
Recursively Link irods folder and all files and sub-folders inside the folder to Django
Database after iRODS file and folder operations to get Django and iRODS in sync
:param resource: the BaseResource object representing a HydroShare resource
:param istorage: REDUNDANT: IrodsStorage object
:param foldername: the folder name, as a fully qualified path
:param exclude: UNUSED: a tuple that includes file names to be excluded from
linking under the folder;
:return: List of ResourceFile of newly linked files
"""
if __debug__:
assert(isinstance(resource, BaseResource))
if istorage is None:
istorage = resource.get_irods_storage()
res_files = []
if foldername:
store = istorage.listdir(foldername)
# add files into Django resource model
for file in store[1]:
if file not in exclude:
file_path = os.path.join(foldername, file)
# This assumes that file_path is a full path
res_files.append(link_irods_file_to_django(resource, file_path))
# recursively add sub-folders into Django resource model
for folder in store[0]:
res_files = res_files + \
_link_irods_folder_to_django(resource, istorage,
os.path.join(foldername, folder), exclude)
return res_files
| 5,346,441 |
def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
"""
52ms 93.76%
13.1MB 83.1%
:param self:
:param head:
:param n:
:return:
"""
if not head:
return head
dummy = ListNode(0)
dummy.next = head
fast = dummy
while n:
fast = fast.next
n -= 1
slow = dummy
while fast and fast.next:
fast = fast.next
slow = slow.next
slow.next = slow.next.next
return dummy.next
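
# Usage sketch (illustrative): assumes the usual LeetCode ListNode(val) node
# with a .next field and that the method above lives on a Solution class.
values = [1, 2, 3, 4, 5]
nodes = [ListNode(v) for v in values]
for first, second in zip(nodes, nodes[1:]):
    first.next = second
new_head = Solution().removeNthFromEnd(nodes[0], 2)  # drops the node holding 4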
| 5,346,442 |
def validate_template(
template: Any, allow_deprecated_identifiers: bool = False
) -> Tuple[PackageRef, str]:
"""
Return a module and type name component from something that can be interpreted as a template.
:param template:
Any object that can be interpreted as an identifier for a template.
:param allow_deprecated_identifiers:
Allow deprecated identifiers (:class:`UnresolvedTypeReference` and a period delimiter
instead of a colon between module names and entity names).
:return:
A tuple of package ID and ``Module.Name:EntityName`` (the package-scoped identifier for the
type). The special value ``'*'`` is used if either the package ID, module name, or both
should be wildcarded.
:raise ValueError:
If the object could not be interpreted as a thing referring to a template.
"""
if template == "*" or template is None:
return STAR, "*"
if allow_deprecated_identifiers:
warnings.warn(
"validate_template(..., allow_deprecated_identifiers=True) will be removed in dazl v8",
DeprecationWarning,
stacklevel=2,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
# noinspection PyDeprecation
from ..model.types import UnresolvedTypeReference
if isinstance(template, UnresolvedTypeReference):
template = template.name
if isinstance(template, str):
components = template.split(":")
if len(components) == 3:
# correct number of colons for a fully-qualified name
pkgid, m, e = components
return PackageRef(pkgid), f"{m}:{e}"
elif len(components) == 2:
# one colon, so assume the package ID is unspecified UNLESS the second component is a
# wildcard; then we assume the wildcard means any module name and entity name
m, e = components
if m == STAR and e != STAR and not allow_deprecated_identifiers:
# strings that look like "*:SOMETHING" are explicitly not allowed unless deprecated
# identifier support is requested; this is almost certainly an attempt to use
# periods instead of colons as a delimiter between module name and entity name
raise ValueError("string must be in the format PKG_REF:MOD:ENTITY or MOD:ENTITY")
return (STAR, f"{m}:{e}") if e != "*" else (PackageRef(m), "*")
elif len(components) == 1 and allow_deprecated_identifiers:
# no colon whatsoever
# TODO: Add a deprecation warning in the appropriate place
m, _, e = components[0].rpartition(".")
return STAR, f"{m}:{e}"
else:
raise ValueError("string must be in the format PKG_REF:MOD:ENTITY or MOD:ENTITY")
if isinstance(template, TypeConName):
return package_ref(template), package_local_name(template)
else:
raise ValueError(f"Don't know how to convert {template!r} into a template")
| 5,346,443 |
def call_status():
"""
    Route that receives webhook status callbacks about a call
"""
if 'mocean-call-uuid' in request.form:
call_uuid = request.form.get('mocean-call-uuid')
logging.info(f'### Call status received [{call_uuid}] ###')
for k, v in request.form.items():
logging.info(f'\t{k}:{v}')
if request.form.get('mocean-call-uuid') in calls \
and request.form.get('mocean-status') == 'HANGUP':
logging.debug(f'Deleting call-uuid[{call_uuid}] from calls dict')
del calls[call_uuid]
call_ended.append(call_uuid)
return Response('', status=204, mimetype='text/plain')
else:
return invalid_response()
| 5,346,444 |
def read_data(filename,**kwargs):
"""Used to read a light curve or curve object in pickle format.
Either way, it'll come out as a curve object.
Parameters
----------
filename : str
Name of the file to be read (ascii or pickle)
Returns
-------
curve : :class:`~sntd.curve` or :class:`~sntd.curveDict`
"""
return(_switch(os.path.splitext(filename)[1])(filename,**kwargs))
| 5,346,445 |
def test_invalid_unit_spec_nonalphabetic_unit_name():
"""Tests a unit specification file with a unit name 's3cond'.
"""
this_dir = get_cwd()
path = os.path.join(this_dir, "test_files", "non_alphabetic_unit_name.txt")
with pytest.raises(SyntaxError):
up = unit_parser(path)
| 5,346,446 |
def get_masters(domain):
""" """
content = request.get_json()
conf = {
'check_masters' : request.headers.get('check_masters'),
'remote_api' : request.headers.get('remote_api'),
'remote_api_key' : request.headers.get('remote_api_key')
}
masters = pdns_get_masters(
remote_api=conf['remote_api'],
remote_api_key=conf['remote_api_key'],
domain=domain
)
logging.info("masters: {}".format(masters))
return jsonify(masters)
| 5,346,447 |
def draw_all_objects():
"""For all objects, draw theirs sprites"""
window.clear()
for x_offset in (-window.width, 0, window.width):
for y_offset in (-window.height, 0, window.height):
# Remember the current state
gl.glPushMatrix()
# Move everything drawn from now on by (x_offset, y_offset, 0)
gl.glTranslatef(x_offset, y_offset, 0)
# Draw
batch.draw()
# Restore remembered state (this cancels the glTranslatef)
gl.glPopMatrix()
| 5,346,448 |
async def reddit(ctx,subreddit='wholesome'):
"""posts random reddit post with given argument"""
await ctx.send(embed=await pyrandmeme(subreddit))
| 5,346,449 |
def SplitGeneratedFileName(fname):
"""Reverse of GetGeneratedFileName()
"""
return tuple(fname.split('x',4))
| 5,346,450 |
def test_env_lag_14(request, env_complex):
"""Verify env_lag fixture adds LAGs into LagsAdmin table in complex setup.
"""
fixtures.env_lag(request, env_complex)
for _switch in env_complex.switch.values():
_switch.clearconfig()
lags = set(x['lagId'] for x in env_complex.switch[1].ui.get_table_lags())
assert lags == {3800, 3801, 3802, 3803, 3804}
lags = set(x['lagId'] for x in env_complex.switch[2].ui.get_table_lags())
assert lags == {3800, 3801, 3803, 3804}
| 5,346,451 |
def save_questions(questions, seperator="\n=========\n"):
"""
save all the question variations for future reference
"""
with open("questions.txt", "w") as f:
question_variations = [seperator.join(question) for question in questions]
output = seperator.join(question_variations)
f.write(output)
question_size = len(questions)
question_variations = sum([len(question) for question in questions])
template = "{} questions saved.\n{} total variations created."
output = template.format(question_size, question_variations)
print(output)
| 5,346,452 |
def pre_deploy_synapses(event):
"""
    Create an empty vector of neurons.
"""
agent = event.pool.context['agent']
net = agent.context['net']
net.pre_deploy_synapses()
| 5,346,453 |
def test_folder_with_multiple_images():
"""[summary]
"""
# Empty dir
empty_dir(TEST_DIRECTORY_PATH)
# Build catalog
catalog_path, label_paths, img_paths = build_catalog(TEST_DIRECTORY_PATH)
# Get all images from folder
paths = get_all_images_from_folder(TEST_DIRECTORY_PATH)
# Assert
assert len(paths) == len(img_paths)
for img_path in img_paths:
assert img_path in paths
# Delete
delete_catalog(TEST_DIRECTORY_PATH,
catalog_path,
label_paths,
img_paths)
| 5,346,454 |
def chooseBestFeatureToSplit(dataSet):
"""
选择最优划分特征
输入: 数据集
输出: 最优特征
"""
numFeatures = len(dataSet[0])-1
baseEntropy = calcShannonEnt(dataSet) #原始数据的熵
bestInfoGain = 0
bestFfeature = -1
for i in range(numFeatures): #循环所有特征
featList = [example[i] for example in dataSet]
uniqueVals = set(featList) #某个特征的取值,如[long,short]
newEntropy = 0
for value in uniqueVals:
subDataSet = splitDataSet(dataSet,i,value) #按某一特征的取值分类,如Long
prob = len(subDataSet)/float(len(dataSet))
newEntropy += prob*calcShannonEnt(subDataSet) #计算按该特征分类的熵,如DATASET(LONG)和DATASET(Short)的熵
infoGain = baseEntropy - newEntropy #计算增益,原始熵-Dataset(long)的熵-Dataset(short)的熵
if (infoGain>bestInfoGain):
bestInfoGain = infoGain
bestFfeature = i #选出最优分类特征
return bestFfeature
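
# Usage sketch (illustrative): a toy ID3 dataset whose last column is the class
# label; calcShannonEnt and splitDataSet are assumed to be the usual helpers
# defined elsewhere in this module.
toy_data = [
    ["long", "thick", "man"],
    ["short", "thick", "man"],
    ["long", "thin", "woman"],
    ["short", "thin", "woman"],
]
best_idx = chooseBestFeatureToSplit(toy_data)  # feature 1 separates the labels perfectly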
| 5,346,455 |
def evaluate_model(model: torch.nn.Module, dataloader: torch.utils.data.DataLoader, device: torch.device):
"""Function for evaluation of a model `model` on the data in `dataloader` on device `device`"""
# Define a loss (mse loss)
mse = torch.nn.MSELoss()
# We will accumulate the mean loss in variable `loss`
loss = torch.tensor(0., device=device)
with torch.no_grad(): # We do not need gradients for evaluation
# Loop over all samples in `dataloader`
for data in tqdm(dataloader, desc="scoring", position=0):
# Get a sample and move inputs and targets to device
inputs, targets, mask = data
inputs = inputs.to(device)
targets = targets.to(device)
mask = mask.to(device)
# mask = mask.to(dtype=torch.bool)
# Get outputs for network
outputs = model(inputs) * mask
# predictions = [outputs[i, mask[i]] for i in range(len(outputs))]
# Here we could clamp the outputs to the minimum and maximum values of inputs for better performance
# Calculate mean mse loss over all samples in dataloader (accumulate mean losses in `loss`)
# losses = torch.stack([mse(prediction, target.reshape((-1,))) for prediction, target in zip(predictions, targets)])
# loss = losses.mean()
loss = mse(outputs, targets)
return loss
| 5,346,456 |
def get_DOE_quantity_byfac(DOE_xls, fac_xls, facilities='selected'):
"""
Returns total gallons of combined imports and exports
by vessel type and oil classification to/from WA marine terminals
used in our study.
DOE_xls[Path obj. or string]: Path(to Dept. of Ecology transfer dataset)
facilities [string]: 'all' or 'selected'
"""
# convert inputs to lower-case
#transfer_type = transfer_type.lower()
facilities = facilities.lower()
# Import Department of Ecology data:
print('get_DOE_quantity_byfac: not yet tested with fac_xls as input')
df = get_DOE_df(DOE_xls, fac_xls)
# get list of oils grouped by our monte_carlo oil types
oil_types = [
'akns', 'bunker', 'dilbit',
'jet', 'diesel', 'gas', 'other'
]
# names of oil groupings that we want for our output/graphics
oil_types_graphics = [
'ANS', 'Bunker-C', 'Dilbit',
'Jet Fuel', 'Diesel', 'Gasoline',
'Other'
]
oil_classification = get_DOE_oilclassification(DOE_xls)
# SELECTED FACILITIES
exports={}
imports={}
combined={}
if facilities == 'selected':
# The following list includes facilities used in Casey's origin/destination
# analysis with names matching the Dept. of Ecology (DOE) database.
# For example, the shapefile "Maxum Petroleum - Harbor Island Terminal" is
# labeled as 'Maxum (Rainer Petroleum)' in the DOE database. I use the
# Ecology language here and will need to translate to Shapefile speak
# If facilities are used in output to compare with monte-carlo transfers
# then some terminals will need to be grouped, as they are in the monte carlo.
# Terminal groupings in the voyage joins are: (1)
# 'Maxum (Rainer Petroleum)' and 'Shell Oil LP Seattle Distribution Terminal'
# are represented in
# ==>'Kinder Morgan Liquids Terminal - Harbor Island', and
# (2) 'Nustar Energy Tacoma' => 'Phillips 66 Tacoma Terminal'
facility_names = [
'Alon Asphalt Company (Paramount Petroleum)',
'Andeavor Anacortes Refinery (formerly Tesoro)',
'BP Cherry Point Refinery',
'Kinder Morgan Liquids Terminal - Harbor Island' ,
'Maxum (Rainer Petroleum)',
'Naval Air Station Whidbey Island (NASWI)',
'NAVSUP Manchester',
'Nustar Energy Tacoma',
'Phillips 66 Ferndale Refinery',
'Phillips 66 Tacoma Terminal',
'SeaPort Sound Terminal',
'Shell Oil LP Seattle Distribution Terminal',
'Shell Puget Sound Refinery',
'Tesoro Port Angeles Terminal','U.S. Oil & Refining',
'Tesoro Pasco Terminal', 'REG Grays Harbor, LLC',
'Tesoro Vancouver Terminal',
'Tidewater Snake River Terminal',
'Tidewater Vancouver Terminal',
'TLP Management Services LLC (TMS)'
]
for vessel_type in ['atb','barge','tanker']:
exports[vessel_type]={}
imports[vessel_type]={}
combined[vessel_type]={}
if vessel_type == 'barge':
print('Tallying barge quantities')
# get transfer quantities by oil type
type_description = ['TANK BARGE','TUGBOAT']
for oil in oil_types:
# exports
exports[vessel_type][oil] = df.loc[
(df.TransferType == 'Cargo') &
(df.ReceiverTypeDescription.isin(type_description)) &
(~df.Receiver.str.contains('ITB')) &
(~df.Receiver.str.contains('ATB')) &
(df.Deliverer.isin(facility_names)) &
(df.Product.isin(oil_classification[oil])),
['TransferQtyInGallon', 'Product']
].TransferQtyInGallon.sum()
# imports
imports[vessel_type][oil] = df.loc[
(df.TransferType == 'Cargo') &
(df.DelivererTypeDescription.isin(type_description)) &
(~df.Deliverer.str.contains('ITB')) &
(~df.Deliverer.str.contains('ATB')) &
(df.Receiver.isin(facility_names)) &
(df.Product.isin(oil_classification[oil])),
['TransferQtyInGallon', 'Product']
].TransferQtyInGallon.sum()
elif vessel_type == 'tanker':
print('Tallying tanker quantities')
# get transfer quantities by oil type
type_description = ['TANK SHIP']
for oil in oil_types:
# exports
exports[vessel_type][oil] = df.loc[
(df.TransferType == 'Cargo') &
(df.ReceiverTypeDescription.isin(type_description)) &
(df.Deliverer.isin(facility_names)) &
(df.Product.isin(oil_classification[oil])),
['TransferQtyInGallon', 'Product']
].TransferQtyInGallon.sum()
# imports
imports[vessel_type][oil] = df.loc[
(df.TransferType == 'Cargo') &
(df.DelivererTypeDescription.isin(type_description)) &
(df.Receiver.isin(facility_names)) &
(df.Product.isin(oil_classification[oil])),
['TransferQtyInGallon', 'Product']
].TransferQtyInGallon.sum()
elif vessel_type == 'atb':
print('Tallying atb quantities')
# get transfer quantities by oil type
type_description = ['TANK BARGE','TUGBOAT']
for oil in oil_types:
# exports
exports[vessel_type][oil] = df.loc[
(df.TransferType == 'Cargo') &
(df.ReceiverTypeDescription.isin(type_description)) &
(df.Receiver.str.contains('ITB') |
df.Receiver.str.contains('ATB')) &
(df.Deliverer.isin(facility_names))&
(df.Product.isin(oil_classification[oil])),
['TransferQtyInGallon', 'Product']
].TransferQtyInGallon.sum()
# imports
imports[vessel_type][oil] = df.loc[
(df.TransferType == 'Cargo') &
(df.DelivererTypeDescription.isin(type_description)) &
(df.Deliverer.str.contains('ITB') |
df.Deliverer.str.contains('ATB')) &
(df.Receiver.isin(facility_names))&
(df.Product.isin(oil_classification[oil])),
['TransferQtyInGallon', 'Product']
].TransferQtyInGallon.sum()
# combine imports and exports and convert oil type names to
# those we wish to use for graphics/presentations
# The name change mostly matters for AKNS -> ANS.
for idx,oil in enumerate(oil_types):
# convert names
exports[vessel_type][oil_types_graphics[idx]] = (
exports[vessel_type][oil]
)
imports[vessel_type][oil_types_graphics[idx]] = (
imports[vessel_type][oil]
)
# remove monte-carlo names
exports[vessel_type].pop(oil)
imports[vessel_type].pop(oil)
# combine imports and exports
combined[vessel_type][oil_types_graphics[idx]] = (
imports[vessel_type][oil_types_graphics[idx]] + \
exports[vessel_type][oil_types_graphics[idx]]
)
return exports, imports, combined
| 5,346,457 |
def number_finder(page, horse):
"""Extract horse number with regex."""
if 'WinPlaceShow' in page:
return re.search('(?<=WinPlaceShow\\n).[^{}]*'.format(horse), page).group(0)
elif 'WinPlace' in page:
return re.search('(?<=WinPlace\\n).[^{}]*'.format(horse), page).group(0)
| 5,346,458 |
def ssd_300_mobilenet0_25_coco(pretrained=False, pretrained_base=True, **kwargs):
"""SSD architecture with mobilenet0.25 base networks for COCO.
Parameters
----------
pretrained : bool or str
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
pretrained_base : bool or str, optional, default is True
Load pretrained base network, the extra layers are randomized.
norm_layer : object
Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
norm_kwargs : dict
Additional `norm_layer` arguments, for example `num_devices=4`
for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
Returns
-------
HybridBlock
A SSD detection network.
"""
from ...data import COCODetection
classes = COCODetection.CLASSES
return get_ssd('mobilenet0.25', 300,
features=['relu22_fwd', 'relu26_fwd'],
filters=[256, 256, 128, 128],
sizes=[21, 45, 99, 153, 207, 261, 315],
ratios=[[1, 2, 0.5]] + [[1, 2, 0.5, 3, 1.0/3]] * 3 + [[1, 2, 0.5]] * 2,
steps=[8, 16, 32, 64, 100, 300],
classes=classes, dataset='coco', pretrained=pretrained,
pretrained_base=pretrained_base, **kwargs)
| 5,346,459 |
async def all_items(connection: asyncpg.Connection, /) -> AsyncIterator[types.Item]:
"""Returns an :class:`AsyncGenerator` of all :class:`types.Item` in the database."""
for record in await tables.Items.fetch(connection):
yield await _item(connection, record)
| 5,346,460 |
async def test_exclude_filters(hass):
"""Test exclusion filters."""
request = get_new_request("Alexa.Discovery", "Discover")
# setup test devices
hass.states.async_set("switch.test", "on", {"friendly_name": "Test switch"})
hass.states.async_set("script.deny", "off", {"friendly_name": "Blocked script"})
hass.states.async_set("cover.deny", "off", {"friendly_name": "Blocked cover"})
alexa_config = MockConfig(hass)
alexa_config.should_expose = entityfilter.generate_filter(
include_domains=[],
include_entities=[],
exclude_domains=["script"],
exclude_entities=["cover.deny"],
)
msg = await smart_home.async_handle_message(hass, alexa_config, request)
await hass.async_block_till_done()
msg = msg["event"]
assert len(msg["payload"]["endpoints"]) == 1
| 5,346,461 |
def test_get_instance_handle(participant, publisher):
"""
This test checks:
- Publisher::get_instance_handle
- Publisher::guid
"""
ih = publisher.get_instance_handle()
assert(ih is not None)
assert(ih.isDefined())
guid = participant.guid()
assert(guid is not None)
assert(ih != fastdds.c_InstanceHandle_Unknown)
assert(guid != fastdds.c_Guid_Unknown)
for i in range(0, 12):
assert(guid.guidPrefix.value[i] == ih.value[i])
| 5,346,462 |
def retry( delays=(0, 1, 1, 4, 16, 64), timeout=300, predicate=never ):
"""
Retry an operation while the failure matches a given predicate and until a given timeout
expires, waiting a given amount of time in between attempts. This function is a generator
that yields contextmanagers. See doctests below for example usage.
    :param Iterable[float] delays: an iterable yielding the time in seconds to wait before each
           retried attempt; the last element of the iterable will be repeated.
    :param float timeout: an overall timeout that should not be exceeded for all attempts together.
           This is a best-effort mechanism only and it won't abort an ongoing attempt, even if the
           timeout expires during that attempt.
:param Callable[[Exception],bool] predicate: a unary callable returning True if another
attempt should be made to recover from the given exception. The default value for this
parameter will prevent any retries!
:return: a generator yielding context managers, one per attempt
:rtype: Iterator
Retry for a limited amount of time:
>>> true = lambda _:True
>>> false = lambda _:False
>>> i = 0
>>> for attempt in retry( delays=[0], timeout=.1, predicate=true ):
... with attempt:
... i += 1
... raise RuntimeError('foo')
Traceback (most recent call last):
...
RuntimeError: foo
>>> i > 1
True
If timeout is 0, do exactly one attempt:
>>> i = 0
>>> for attempt in retry( timeout=0 ):
... with attempt:
... i += 1
... raise RuntimeError( 'foo' )
Traceback (most recent call last):
...
RuntimeError: foo
>>> i
1
Don't retry on success:
>>> i = 0
>>> for attempt in retry( delays=[0], timeout=.1, predicate=true ):
... with attempt:
... i += 1
>>> i
1
    Don't retry unless predicate returns True:
>>> i = 0
>>> for attempt in retry( delays=[0], timeout=.1, predicate=false):
... with attempt:
... i += 1
... raise RuntimeError( 'foo' )
Traceback (most recent call last):
...
RuntimeError: foo
>>> i
1
"""
if timeout > 0:
go = [ None ]
@contextmanager
def repeated_attempt( delay ):
try:
yield
except Exception as e:
if time.time( ) + delay < expiration and predicate( e ):
log.info( 'Got %s, trying again in %is.', e, delay )
time.sleep( delay )
else:
raise
else:
go.pop( )
delays = iter( delays )
expiration = time.time( ) + timeout
delay = next( delays )
while go:
yield repeated_attempt( delay )
delay = next( delays, delay )
else:
@contextmanager
def single_attempt( ):
yield
yield single_attempt( )
| 5,346,463 |
def get_project_by_id(client: SymphonyClient, id: str) -> Project:
"""Get project by ID
:param id: Project ID
:type id: str
:raises:
* FailedOperationException: Internal symphony error
* :class:`~psym.exceptions.EntityNotFoundError`: Project does not exist
:return: Project
:rtype: :class:`~psym.common.data_class.Project`
**Example**
.. code-block:: python
project = client.get_project_by_id(
id="12345678",
)
"""
result = ProjectDetailsQuery.execute(client, id=id)
if result is None:
raise EntityNotFoundError(entity=Entity.Project, entity_id=id)
return format_to_project(project_fragment=result)
| 5,346,464 |
def write_trans_output(k, output_fname, output_steps_fname, x, u, time, nvar):
"""
Output transient step and spectral step in a CSV file"""
# Transient
if nvar > 1:
uvars = np.split(u, nvar)
results_u = [np.linalg.norm(uvar, np.inf) for uvar in uvars]
results = [
time,
]
results[1:1] = results_u
else:
results = [time, np.linalg.norm(u, np.inf)]
fmt = ["%1.4e"]
fmt_var = ["%1.4e"] * nvar
fmt[1:1] = fmt_var
with open(output_fname, "a+", newline="") as write_obj:
np.savetxt(
write_obj,
[results],
fmt=fmt,
comments="",
delimiter=",",
)
# Spectral
if bool(output_steps_fname): # string not empty
filename = output_steps_fname + str(k) + ".csv"
if nvar > 1:
uvars = np.split(u, nvar)
uvars = [np.concatenate([[0.0], uvar, [0.0]]) for uvar in uvars]
uvars = np.array(uvars)
header = ["x"]
header_var = ["u" + str(int(k)) for k in range(nvar)]
header[1:1] = header_var
header = ",".join(header)
data = np.column_stack((np.flip(x), uvars.transpose()))
else:
u = np.concatenate([[0.0], u, [0.0]])
header = "x,u"
data = np.column_stack((np.flip(x), u))
np.savetxt(
filename, data, delimiter=",", fmt="%1.4e", header=header, comments=""
)
return None
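# Hedged usage sketch (not part of the original module): writes the inf-norms of two
# stacked variables to a CSV and dumps the spectral step to step_0.csv. The file names
# and grid are illustrative only; assumes numpy is available as np at module level.
def _example_write_trans_output():
    import numpy as np
    x = np.linspace(0.0, 1.0, 10)            # grid including the two padded boundary points
    u = np.random.random(2 * (x.size - 2))   # two stacked variables, so nvar = 2
    write_trans_output(k=0, output_fname="norms.csv", output_steps_fname="step_",
                       x=x, u=u, time=0.1, nvar=2)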
| 5,346,465 |
def test_singular_dimensions_2d(periodic):
""" test grids with singular dimensions """
dim = np.random.randint(3, 5)
g1 = UnitGrid([dim], periodic=periodic)
g2a = UnitGrid([dim, 1], periodic=periodic)
g2b = UnitGrid([1, dim], periodic=periodic)
data = np.random.random(dim)
expected = g1.get_operator("laplace", "natural")(data)
for g in [g2a, g2b]:
res = g.get_operator("laplace", "natural")(data.reshape(g.shape))
np.testing.assert_allclose(expected.flat, res.flat)
| 5,346,466 |
def test_setext_headings_extra_16():
"""
Test case extra 16: SetExt heading containing an URI autolink
"""
# Arrange
source_markdown = """look at <http://www.google.com> for answers
---"""
expected_tokens = [
"[setext(2,1):-:3::(1,1)]",
"[text(1,1):look at :]",
"[uri-autolink(1,9):http://www.google.com]",
"[text(1,32): for answers:]",
"[end-setext::]",
]
expected_gfm = """<h2>look at <a href="http://www.google.com">http://www.google.com</a> for answers</h2>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens)
| 5,346,467 |
def flatten(l):
"""Recursively flatten a list of irregular lists.
Taken from: https://stackoverflow.com/questions/2158395/flatten-an-irregular-list-of-lists"""
for el in l:
        if isinstance(el, collections.abc.Iterable) and not isinstance(el, (str, bytes)):
yield from flatten(el)
else:
yield el
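# Hedged usage sketch (not in the original source): flatten() is a generator, so
# materialize it with list(); strings and bytes are yielded whole, not split up.
def _example_flatten():
    assert list(flatten([1, [2, [3, "ab"]], (4, 5)])) == [1, 2, 3, "ab", 4, 5]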
| 5,346,468 |
def browser(browserWsgiAppS):
"""Fixture for testing with zope.testbrowser."""
assert icemac.addressbook.testing.CURRENT_CONNECTION is not None, \
"The `browser` fixture needs a database fixture like `address_book`."
return icemac.ab.calendar.testing.Browser(wsgi_app=browserWsgiAppS)
| 5,346,469 |
def mixture_fit(samples,
model_components,
model_covariance,
tolerance,
em_iterations,
parameter_init,
model_verbosity,
model_selection,
kde_bandwidth):
"""Fit a variational Bayesian non-parametric Gaussian mixture model to samples.
This function takes the parameters described below to initialize and then fit a
model to a provided set of data points. It returns a Scikit-learn estimator object
that can then be used to generate samples from the distribution approximated by the
model and score the log-probabilities of data points based on the returned model.
Parameters:
-----------
samples : array-like
The set of provided data points that the function's model should be fitted to.
model_components : int, defaults to rounding up (2 / 3) * the number of dimensions
The maximum number of Gaussians to be fitted to data points in each iteration.
model_covariance : {'full', 'tied', 'diag', 'spherical'}
The type of covariance parameters the model should use for the fitting process.
tolerance : float
The model's convergence threshold at which the model's fit is deemed finalized.
em_iterations : int
The maximum number of expectation maximization iterations the model should run.
parameter_init : {'kmeans', 'random'}
The method used to initialize the model's weights, the means and the covariances.
model_verbosity : {0, 1, 2}
The amount of information that the model fitting should provide during runtime.
model_selection : {'gmm', 'kde'}
The selection of the type of model that should be used for the fitting process,
i.e. either a variational Bayesian non-parametric GMM or kernel density estimation.
kde_bandwidth : float
The kernel bandwidth that should be used in the case of kernel density estimation.
Returns:
--------
model : sklearn estimator
A variational Bayesian non-parametric Gaussian mixture model fitted to samples.
Attributes:
-----------
fit(X) : Estimate a model's parameters with the expectation maximization algorithm.
sample(n_samples=1) : Generate a new set of random data points from fitted Gaussians.
score_samples(X) : Calculate the weighted log-probabilities for each data point.
"""
# Check which type of model should be used for the iterative fitting process
if model_selection == 'gmm':
# Initialize a variational Bayesian non-parametric GMM for fitting
model = BGM(n_components = model_components,
covariance_type = model_covariance,
tol = tolerance,
max_iter = em_iterations,
init_params = parameter_init,
verbose = model_verbosity,
verbose_interval = 10,
warm_start = False,
random_state = 42,
weight_concentration_prior_type = 'dirichlet_process')
if model_selection == 'kde':
model = KD(bandwidth = kde_bandwidth,
kernel = 'gaussian',
metric = 'euclidean',
algorithm = 'auto',
breadth_first = True,
atol = 0.0,
rtol = tolerance)
# Fit the previously initialized model to the provided data points
model.fit(np.asarray(samples))
return model
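# Hedged usage sketch (not part of the original code): fit the variational GMM branch
# to random 2-D points, then sample from and score against the fit. Parameter values
# are illustrative; assumes BGM aliases sklearn's BayesianGaussianMixture as described
# in the docstring above.
def _example_mixture_fit():
    import numpy as np
    rng = np.random.default_rng(0)
    points = rng.normal(size=(200, 2))
    model = mixture_fit(points, model_components=3, model_covariance='full',
                        tolerance=1e-3, em_iterations=100, parameter_init='kmeans',
                        model_verbosity=0, model_selection='gmm', kde_bandwidth=0.5)
    new_points, _ = model.sample(n_samples=5)      # (samples, component labels)
    log_probs = model.score_samples(points[:5])    # weighted log-probabilities
    return new_points, log_probs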
| 5,346,470 |
def chunks(codes, n):
"""
Breaks a list of codes into roughly equal n-sized pieces.
"""
for i in xrange(0, len(codes), n):
yield codes[i:i+n]
| 5,346,471 |
def add_project_template_files():
"""Adds the project template files to the sample project.
Raises:
IOError: An error occurred copying the project template files.
"""
firebase_feature = FEATURE_ARGS_ARRAY[0].lower()
common_ios_project_file = os.path.join(
ROOT_DIRECTORY, "common/project_template_files/project.pbxproj")
project_ios_project_file = os.path.join(
ROOT_DIRECTORY, firebase_feature, "project_files/project.pbxproj")
common_android_makefile = os.path.join(
ROOT_DIRECTORY, "common/project_template_files/Android.mk")
project_android_makefile = os.path.join(
ROOT_DIRECTORY, firebase_feature, "project_files/Android.mk")
ios_dst_dir = os.path.join(IOS_PROJECT_DIR, "HelloCpp.xcodeproj")
android_dst_dir = os.path.join(ANDROID_PROJECT_DIR, "app/jni")
try:
if os.path.isfile(project_ios_project_file):
shutil.copy(project_ios_project_file, ios_dst_dir)
else:
shutil.copy(common_ios_project_file, ios_dst_dir)
if os.path.isfile(project_android_makefile):
shutil.copy(project_android_makefile, android_dst_dir)
else:
shutil.copy(common_android_makefile, android_dst_dir)
except IOError as e:
logging.exception("IOError: [Errno %d] %s: in %s", e.errno, e.strerror,
sys._getframe().f_code.co_name)
exit()
logging.info("Added the project template files to the sample project.")
| 5,346,472 |
def print_tab_seperated(results):
"""
prints the resulting dict
    @param results: dict (maps metadata_name to a list of (no. of correct results, no. of obtained
        results) pairs, one per threshold)
"""
for k, v in results.items():
print(k)
for correct, obtained in v:
print('{}\t{}'.format(obtained, correct))
| 5,346,473 |
def apply_k8s_specs(specs, mode=K8S_CREATE): # pylint: disable=too-many-branches,too-many-statements
"""Run apply on the provided Kubernetes specs.
Args:
specs: A list of strings or dicts providing the YAML specs to
apply.
mode: (Optional): Mode indicates how the resources should be created.
K8S_CREATE - Use the create verb. Works with generateName
K8S_REPLACE - Issue a delete of existing resources before doing a create
K8s_CREATE_OR_REPLACE - Try to create an object; if it already exists
replace it
"""
# TODO(jlewi): How should we handle patching existing updates?
results = []
if mode not in [K8S_CREATE, K8S_CREATE_OR_REPLACE, K8S_REPLACE]:
raise ValueError(f"Unknown mode {mode}")
for s in specs:
spec = s
if not isinstance(spec, dict):
spec = yaml.load(spec)
name = spec["metadata"]["name"]
namespace = spec["metadata"]["namespace"]
kind = spec["kind"]
kind_snake = camel_to_snake(kind)
plural = spec["kind"].lower() + "s"
result = None
if not "/" in spec["apiVersion"]:
group = None
else:
group, version = spec["apiVersion"].split("/", 1)
if group is None or group.lower() == "apps":
if group is None:
api = k8s_client.CoreV1Api()
else:
api = k8s_client.AppsV1Api()
create_method_name = f"create_namespaced_{kind_snake}"
create_method_args = [namespace, spec]
replace_method_name = f"delete_namespaced_{kind_snake}"
replace_method_args = [name, namespace]
else:
api = k8s_client.CustomObjectsApi()
create_method_name = f"create_namespaced_custom_object"
create_method = getattr(api, create_method_name)
create_method_args = [group, version, namespace, plural, spec]
delete_options = k8s_client.V1DeleteOptions()
replace_method_name = f"delete_namespaced_custom_object"
replace_method_args = [group, version, namespace, plural, name, delete_options]
create_method = getattr(api, create_method_name)
replace_method = getattr(api, replace_method_name)
if mode in [K8S_CREATE, K8S_CREATE_OR_REPLACE]:
try:
result = create_method(*create_method_args)
result_namespace, result_name = _get_result_name(result)
logging.info(f"Created {kind} {result_namespace}.{result_name}")
results.append(result)
continue
except k8s_rest.ApiException as e:
# 409 is conflict indicates resource already exists
if e.status == 409 and mode == K8S_CREATE_OR_REPLACE:
pass
else:
raise
# Using replace didn't work for virtualservices so we explicitly delete
# and then issue a create
result = replace_method(*replace_method_args)
logging.info(f"Deleted {kind} {namespace}.{name}")
result = create_method(*create_method_args)
result_namespace, result_name = _get_result_name(result)
logging.info(f"Created {kind} {result_namespace}.{result_name}")
# Now recreate it
results.append(result)
return results
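# Hedged usage sketch (not from the original repo): apply a plain ConfigMap with
# create-or-replace semantics. Assumes the Kubernetes client configuration
# (kubeconfig or in-cluster) has already been loaded before this is called.
def _example_apply_k8s_specs():
    configmap = {
        "apiVersion": "v1",
        "kind": "ConfigMap",
        "metadata": {"name": "demo-config", "namespace": "default"},
        "data": {"key": "value"},
    }
    return apply_k8s_specs([configmap], mode=K8S_CREATE_OR_REPLACE)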
| 5,346,474 |
def patch_dependencies(monkeypatch):
"""
This function is called before each test.
"""
monkeypatch.setattr(SpotifyClient, "init", new_initialize)
monkeypatch.setattr(
asyncio.subprocess, "create_subprocess_exec", fake_create_subprocess_exec
)
| 5,346,475 |
def local_coherence(Q, ds=1):
""" estimate the local coherence of a spectrum
Parameters
----------
Q : numpy.array, size=(m,n), dtype=complex
array with cross-spectrum, with centered coordinate frame
ds : integer, default=1
kernel radius to describe the neighborhood
Returns
-------
M : numpy.array, size=(m,n), dtype=float
vector coherence from no to ideal, i.e.: 0...1
See Also
--------
thresh_masking
Example
-------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from ..generic.test_tools import create_sample_image_pair
>>> # create cross-spectrum with random displacement
>>> im1,im2,_,_,_ = create_sample_image_pair(d=2**4, max_range=1)
>>> spec1,spec2 = np.fft.fft2(im1), np.fft.fft2(im2)
>>> Q = spec1 * np.conjugate(spec2)
>>> Q = normalize_spectrum(Q)
>>> Q = np.fft.fftshift(Q) # transform to centered grid
>>> C = local_coherence(Q)
    >>> plt.imshow(C, cmap='OrRd'), plt.colorbar(), plt.show()
    >>> plt.imshow(Q, cmap='twilight'), plt.colorbar(), plt.show()
"""
assert type(Q) == np.ndarray, ("please provide an array")
diam = 2 * ds + 1
C = np.zeros_like(Q)
(isteps, jsteps) = np.meshgrid(np.linspace(-ds, +ds, 2 * ds + 1, dtype=int), \
np.linspace(-ds, +ds, 2 * ds + 1, dtype=int))
IN = np.ones(diam ** 2, dtype=bool)
IN[diam ** 2 // 2] = False
isteps, jsteps = isteps.flatten()[IN], jsteps.flatten()[IN]
for idx, istep in enumerate(isteps):
jstep = jsteps[idx]
Q_step = np.roll(Q, (istep, jstep))
# if the spectrum is normalized, then no division is needed
C += Q * np.conj(Q_step)
C = np.abs(C) / np.sum(IN)
return C
| 5,346,476 |
def find_best_lexer(text, min_confidence=0.85):
"""
Like the built in pygments guess_lexer, except has a minimum confidence
level. If that is not met, it falls back to plain text to avoid bad
highlighting.
:returns: Lexer instance
"""
current_best_confidence = 0.0
current_best_lexer = None
for lexer in _iter_lexerclasses():
confidence = lexer.analyse_text(text)
if confidence == 1.0:
return lexer()
elif confidence > current_best_confidence:
current_best_confidence = confidence
current_best_lexer = lexer
if current_best_confidence >= min_confidence:
return current_best_lexer()
else:
return TextLexer()
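# Hedged usage sketch (illustrative only): guess a lexer for a snippet; when no lexer
# reaches the confidence threshold, the plain-text lexer is returned instead.
def _example_find_best_lexer():
    source = "def add(a, b):\n    return a + b\n"
    lexer = find_best_lexer(source, min_confidence=0.85)
    return lexer.name   # e.g. "Python", or "Text only" when nothing is confident enough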
| 5,346,477 |
def volta(contador, quantidade):
"""
    Goes back a given number of characters
    :param contador: integer used to mark a position in the string
    :param quantidade: integer used to determine the new position in the string
    :type contador: int
    :type quantidade: int
    :return: returns the new counter
:rtype: int
"""
return contador - quantidade
| 5,346,478 |
def p_relop_gt(p: yacc.YaccProduction):
"""REL_OP : GREATER_THAN"""
pass
| 5,346,479 |
def keep_room(session, worker_id, room_id):
"""Try to keep a room"""
# Update room current timestamp
query = update(
Room
).values({
Room.updated: func.now(),
}).where(
and_(Room.worker == worker_id,
Room.id == room_id)
)
proxy = session.execute(query)
session.commit()
return proxy.rowcount == 1
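# Hedged usage sketch (illustrative only): refresh the heartbeat of a room that a
# worker currently owns; returns False once another worker has taken it over.
#
#   still_owned = keep_room(session, worker_id=7, room_id=42)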
| 5,346,480 |
def get_rounds(number: int) -> List[int]:
"""
:param number: int - current round number.
:return: list - current round and the two that follow.
"""
return list(range(number, number + 3))
| 5,346,481 |
def parse_pasinobet(url):
"""
    Returns the odds available on pasinobet
"""
selenium_init.DRIVER["pasinobet"].get("about:blank")
selenium_init.DRIVER["pasinobet"].get(url)
match_odds_hash = {}
match = None
date_time = None
WebDriverWait(selenium_init.DRIVER["pasinobet"], 15).until(
EC.invisibility_of_element_located(
(By.CLASS_NAME, "skeleton-line")) or sportsbetting.ABORT
)
if sportsbetting.ABORT:
raise sportsbetting.AbortException
inner_html = selenium_init.DRIVER["pasinobet"].execute_script(
"return document.body.innerHTML")
soup = BeautifulSoup(inner_html, features="lxml")
date = ""
for line in soup.findAll():
if sportsbetting.ABORT:
raise sportsbetting.AbortException
if "class" in line.attrs and "category-date" in line["class"]:
date = line.text.lower()
date = date.replace("nov", "novembre")
date = date.replace("déc", "décembre")
if "class" in line.attrs and "event-title" in line["class"]:
match = " - ".join(map(lambda x: list(x.stripped_strings)[0],
line.findChildren("div", {"class": "teams-container"})))
if "class" in line.attrs and "time" in line["class"]:
try:
date_time = datetime.datetime.strptime(
date+line.text.strip(), "%A, %d %B %Y%H:%M")
except ValueError:
date_time = "undefined"
if "class" in line.attrs and "event-list" in line["class"]:
if "---" not in list(line.stripped_strings):
odds = list(map(float, line.stripped_strings))
match_odds_hash[match] = {}
match_odds_hash[match]["date"] = date_time
match_odds_hash[match]["odds"] = {"pasinobet": odds}
return match_odds_hash
| 5,346,482 |
def create_toolbutton(parent, icon=None, tip=None, triggered=None):
"""Create a QToolButton."""
button = QToolButton(parent)
if icon is not None:
button.setIcon(icon)
if tip is not None:
button.setToolTip(tip)
if triggered is not None:
button.clicked.connect(triggered)
return button
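# Hedged usage sketch (illustrative only): wire a tool button to a callback on an
# existing parent widget. The QIcon import path depends on which Qt binding the
# surrounding module uses (PyQt5/PySide2/qtpy); the icon file name is a placeholder.
def _example_create_toolbutton(parent_widget):
    from qtpy.QtGui import QIcon   # assumption: qtpy (or an equivalent binding) is installed
    return create_toolbutton(parent_widget,
                             icon=QIcon("open.png"),
                             tip="Open file",
                             triggered=lambda: print("clicked"))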
| 5,346,483 |
def for_logging(filename: str = 'logs/app.log',
filemode: str = 'w+',
level: int = logging.DEBUG,
log_format: str = '%(asctime)s :: %(levelname)-5s :: %(threadName)-24s :: %(name)-24s - %(message)s',
stdout: bool = True):
"""
Preconfigures the default python logger with the provided parameters.
:param filename: Log file name. Can include directory structure.
:param filemode: File opening mode for the logfile.
:param level: Default log level.
:param log_format: Log string format.
:param stdout: Flag to enable or disable logging to stdout
"""
os.makedirs(os.path.dirname(filename), exist_ok=True)
fileHandler = logging.FileHandler(filename, mode=filemode, encoding='UTF-8')
handlers = [fileHandler, logging.StreamHandler()] if stdout else [fileHandler]
# noinspection PyArgumentList
logging.basicConfig(handlers=handlers, format=log_format, level=level)
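# Hedged usage sketch (not part of the original module): configure logging once at
# startup, then log through the standard library as usual. The path is illustrative.
def _example_for_logging():
    for_logging(filename='logs/example.log', level=logging.INFO, stdout=True)
    logging.getLogger('example').info('logging configured')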
| 5,346,484 |
def __basic_query(model, verbose: bool = False) -> pd.DataFrame:
"""Execute and return basic query."""
stmt = select(model)
if verbose:
print(stmt)
return pd.read_sql(stmt, con=CONN, index_col="id")
| 5,346,485 |
def format_data_preprocessed(data, dtype = np.float):
"""
    Preprocess the input data and split it into training and validation sets.
    data    the input data frame
    dtype   the data type for the output ndarrays (Default: np.float)
"""
train_flag = np.array(data['train_flag'])
print 'Formatting input data, size: %d' % (len(train_flag))
# outputs, nans excluded
y = data.loc[ :,'y1':'y3']
# replace nans with 0
y.fillna(0, inplace=True)
# collect only train data
ytr = np.array(y)[train_flag]
# collect only validation data
yvl = np.array(y)[~train_flag]
print 'Train data outputs collected, size: %d' % (len(ytr))
print '\n\nData before encoding\n\n%s' % data.describe()
# dropping target and synthetic columns
data.drop(['y1','y2','y3','train_flag', 'COVAR_y1_MISSING', 'COVAR_y2_MISSING', 'COVAR_y3_MISSING'], axis=1, inplace=True)
print '\n\nData after encoding\n\n%s' % data.describe()
# split into training and test
X = np.array(data).astype(dtype)
Xtr = X[train_flag]
Xvl = X[~train_flag]
#print 'Train data first: %s' % (Xtr[0])
#print 'Evaluate data first: %s' % (Xvl[0])
return Xtr, ytr, Xvl, yvl
| 5,346,486 |
def run_SVM_KNN_thread(word2vec_src):
"""
Run SVM->KNN+word embedding experiment !
This is the baseline method.
:return:None
"""
classX1 = []
classX2 = []
classX3 = []
classX4 = []
classY1 = []
classY2 = []
classY3 = []
classY4 = []
classTX1 = []
classTX2 = []
classTX3 = []
classTX4 = []
classTY1 = []
classTY2 = []
classTY3 = []
classTY4 = []
TrainingSamplesX = []
TrainingSamplesY = []
models = []
predicted_F = []
finalY = []
# Create a subplot with 1 row and 2 columns
print("# word2vec:", word2vec_src)
clf = svm.SVC(kernel="rbf", gamma=0.005)
word2vec_model = gensim.models.Word2Vec.load(word2vec_src)
data = PaperData(word2vec=word2vec_model)
train_pd = load_vec(data, data.train_data, use_pkl=False)
test_pd = load_vec(data, data.test_data, use_pkl=False)
train_X = train_pd.loc[:, "Output"].tolist()
train_Y = train_pd.loc[:, "LinkTypeId"].tolist()
test_X = test_pd.loc[:, "Output"].tolist()
test_Y = test_pd.loc[:, "LinkTypeId"].tolist()
start0 = timeit.default_timer()
clf.fit(train_X, train_Y)
stop0 = timeit.default_timer()
predicted = clf.predict(train_X)
for i in range(len(predicted)):
if predicted[i] == '1':
classX1.append(train_X[i])
classY1.append(train_Y[i])
elif predicted[i] == '2':
classX2.append(train_X[i])
classY2.append(train_Y[i])
elif predicted[i] == '3':
classX3.append(train_X[i])
classY3.append(train_Y[i])
elif predicted[i] == '4':
classX4.append(train_X[i])
classY4.append(train_Y[i])
TrainingSamplesX.append(classX1)
TrainingSamplesY.append(classY1)
TrainingSamplesX.append(classX2)
TrainingSamplesY.append(classY2)
TrainingSamplesX.append(classX3)
TrainingSamplesY.append(classY3)
TrainingSamplesX.append(classX4)
TrainingSamplesY.append(classY4)
clf2 = neighbors.KNeighborsClassifier(n_neighbors = 5)
clf3 = neighbors.KNeighborsClassifier(n_neighbors = 5)
clf4 = neighbors.KNeighborsClassifier(n_neighbors = 5)
clf5 = neighbors.KNeighborsClassifier(n_neighbors = 5)
models.append(clf2)
models.append(clf3)
models.append(clf4)
models.append(clf5)
    start1 = timeit.default_timer()
    threads = []
    for i in range(len(TrainingSamplesX)):
        t = threading.Thread(target=models[i].fit, args=[TrainingSamplesX[i], TrainingSamplesY[i]])
        threads.append(t)
        t.start()
    # wait for all per-class KNN fits to finish before stopping the timer and predicting
    for t in threads:
        t.join()
    stop1 = timeit.default_timer()
predicted0 = clf.predict(test_X)
for i in range(len(predicted0)):
if predicted0[i] == '1':
classTX1.append(test_X[i])
classTY1.append(test_Y[i])
elif predicted0[i] == '2':
classTX2.append(test_X[i])
classTY2.append(test_Y[i])
elif predicted0[i] == '3':
classTX3.append(test_X[i])
classTY3.append(test_Y[i])
elif predicted0[i] == '4':
classTX4.append(test_X[i])
classTY4.append(test_Y[i])
predicted1 = clf2.predict(classTX1)
predicted2 = clf3.predict(classTX2)
predicted3 = clf4.predict(classTX3)
predicted4 = clf5.predict(classTX4)
finalY = np.append(classTY1, classTY2)
finalY = np.append(finalY, classTY3)
finalY = np.append(finalY, classTY4)
predicted_F = np.append(predicted1, predicted2)
predicted_F = np.append(predicted_F, predicted3)
predicted_F = np.append(predicted_F, predicted4)
print("+++++++++++++++++++Original Predcition Result+++++++++++++++++++++++++")
print(metrics.classification_report(test_Y, predicted0,
labels=["1", "2", "3", "4"],
digits=3))
#print("print classification data")
cm=metrics.confusion_matrix(test_Y, predicted0, labels=["1", "2", "3", "4"])
print("accuracy ", get_acc(cm))
print("+++++++++++++++++++2nd Layer 1st Prediction Model+++++++++++++++++++++++++")
print(metrics.classification_report(classTY1, predicted1,
labels=["1", "2", "3", "4"],
digits=3))
#print("print classification data")
cm=metrics.confusion_matrix(classTY1, predicted1, labels=["1", "2", "3", "4"])
print("+++++++++++++++++++2nd Layer 2nd Prediction Model+++++++++++++++++++++++++")
print(metrics.classification_report(classTY2, predicted2,
labels=["1", "2", "3", "4"],
digits=3))
#print("print classification data")
cm=metrics.confusion_matrix(classTY2, predicted2, labels=["1", "2", "3", "4"])
print("+++++++++++++++++++2nd Layer 3rd Prediction Model+++++++++++++++++++++++++")
print(metrics.classification_report(classTY3, predicted3,
labels=["1", "2", "3", "4"],
digits=3))
#print("print classification data")
cm=metrics.confusion_matrix(classTY3, predicted3, labels=["1", "2", "3", "4"])
print("+++++++++++++++++++2nd Layer 4th Prediction Model+++++++++++++++++++++++++")
print(metrics.classification_report(classTY4, predicted4,
labels=["1", "2", "3", "4"],
digits=3))
#print("print classification data")
cm=metrics.confusion_matrix(classTY4, predicted4, labels=["1", "2", "3", "4"])
print("+++++++++++++++++++combined result+++++++++++++++++++++++++")
print(metrics.classification_report(finalY, predicted_F,
labels=["1", "2", "3", "4"],
digits=3))
cm=metrics.confusion_matrix(finalY, predicted_F, labels=["1", "2", "3", "4"])
print("accuracy ", get_acc(cm))
print("1st Model training time: ", (stop0 - start0))
print("layer 2 Models training time: ", (stop1 - start1))
print("Total Model training time: ", (stop1 - start0))
| 5,346,487 |
def route_yo(oauth_client=None):
"""Sends a Yo!
We can defer sender lookup to the Yo class since it should be obtained
from the request context. Requiring an authenticated user reduces the
likelihood of accidental impersonation of senders.
Creating pseudo users is handled here. It should be limited to only
users on the app, as soon as we figure out how to do that.
"""
if 'polls' in request.user_agent.string.lower():
return route_polls_reply()
if 'status' in request.user_agent.string.lower():
return status.route_reply()
# TODO: since we weren't recording udids at signup
# record it here if provided. In the future this needs
# to be removed as it can pose a security risk.
user = g.identity.user
phone = request.json.get('phone_number')
recipients = request.json.get('to') or request.json.get('username')
if phone and not recipients:
to_user = upsert_pseudo_user(phone)
recipients = to_user.username if to_user else None
form_args = {'context': request.json.get('context') or None,
'header': request.json.get('header') or None,
'link': request.json.get('link') or None,
'location': request.json.get('location') or None,
'recipients': recipients,
'sound': request.json.get('sound'),
'yo_id': request.json.get('yo_id') or None
}
form = SendYoForm.from_json(form_args)
form.validate()
cover = request.json.get('cover')
photo = request.json.get('photo')
context_id = request.json.get('context_identifier')
reply_to = request.json.get('reply_to')
response_pair = request.json.get('response_pair')
text = request.json.get('text')
left_link = request.json.get('left_link')
right_link = request.json.get('right_link')
is_poll = request.json.get('is_poll')
region_name = request.json.get('region_name')
is_push_only = request.json.get('is_push_only')
if request.headers.get('X-APP-ID'):
app_id = request.headers.get('X-APP-ID')
sound = 'no.mp3'
else:
app_id = 'co.justyo.yoapp'
sound = form.sound.data
yo = send_yo(sender=user, recipients=form.recipients.data,
sound=sound, link=form.link.data,
location=form.location.data, header=form.header.data,
yo_id=form.yo_id.data, context=form.context.data,
cover=cover, photo=photo, context_id=context_id,
reply_to=reply_to, response_pair=response_pair,
oauth_client=oauth_client, text=text,
left_link=left_link, right_link=right_link,
is_poll=is_poll, region_name=region_name,
app_id=app_id, is_push_only=is_push_only)
contact, is_first_yo = upsert_yo_contact(yo)
#if context_id:
# mixpanel_yoapp.track(yo.recipient.user_id, 'Yo Sent', {'Type': context_id})
# Send response yo if needed.
# NOTE: By leaving this as-is groups are allowed to send
# welcome links.
if reply_to is None:
if is_first_yo and yo.recipient.welcome_link:
send_response_yo.delay(yo.yo_id, use_welcome_link=True)
elif yo.should_trigger_response():
send_response_yo.delay(yo.yo_id)
response = {'success': True, 'yo_id': yo.yo_id}
if yo.recipient:
recipient_dict = yo.recipient.get_public_dict(contact.get_name())
response.update({'recipient': recipient_dict})
if yo.not_on_yo:
response.update({'not_on_yo': yo.not_on_yo})
return make_json_response(response)
| 5,346,488 |
def get_word_idxs_1d(context, token_seq, char_start_idx, char_end_idx):
"""
0 based
:param context:
:param token_seq:
:param char_start_idx:
:param char_end_idx:
:return: 0-based token index sequence in the tokenized context.
"""
spans = get_1d_spans(context,token_seq)
idxs = []
for wordIdx, span in enumerate(spans):
if not (char_end_idx <= span[0] or char_start_idx >= span[1]):
idxs.append(wordIdx)
assert len(idxs) > 0, "{} {} {} {}".format(context, token_seq, char_start_idx, char_end_idx)
return idxs
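# Hedged usage sketch (illustrative only): map a character span back onto token
# indices. Assumes get_1d_spans returns the (start, end) character offsets of each
# token within the context string.
def _example_get_word_idxs_1d():
    context = "the quick brown fox"
    tokens = ["the", "quick", "brown", "fox"]
    # the span "quick brown" (characters 4..15) overlaps tokens 1 and 2
    return get_word_idxs_1d(context, tokens, 4, 15)   # -> [1, 2]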
| 5,346,489 |
def create_b64_from_private_key(private_key: X25519PrivateKey) -> bytes:
"""Create b64 ascii string from private key object"""
private_bytes = private_key_to_bytes(private_key)
b64_bytes = binascii.b2a_base64(private_bytes, newline=False)
return b64_bytes
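# Hedged usage sketch (not from the original source): generate a fresh X25519 key and
# encode it; assumes private_key_to_bytes() serializes the raw private key bytes.
def _example_create_b64_from_private_key():
    key = X25519PrivateKey.generate()
    return create_b64_from_private_key(key)   # ASCII base64 bytes without a trailing newline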
| 5,346,490 |
def safe_makedirs(path):
"""Safely make a directory, do not fail if it already exists or is created during execution.
:type path: string
:param path: a directory to create
"""
# prechecking for existence is faster than try/except
if not exists(path):
try:
makedirs(path)
except OSError as e:
# reviewing the source for Python 2.7, this would only ever happen for the last path element anyway so no
# need to recurse - this exception means the last part of the path was already in existence.
if e.errno != errno.EEXIST:
raise
| 5,346,491 |
def auto_declare_viewsets(serializers_module, context):
""" Automatically declares classes from serializers
:param serializers_module: Passes the module to search serializer classes.
:param context: Context module to export classes, should passes locals().
"""
for serializer in serializers_module.__dict__.values():
if not inspect.isclass(serializer) \
or not issubclass(serializer, serializers.ModelSerializer):
continue
model = getattr(serializer, 'Meta').model
# Skip inconsist app, specifically when you import a Model from another app.
if model._meta.app_label != context['__package__']:
continue
viewset_name = model.__name__ + 'ViewSet'
# Do not override
if viewset_name in context:
continue
# Dynamic declare the subclass
view_set = type(
viewset_name,
(viewsets.ModelViewSet,),
dict(
queryset=model.objects.all(),
serializer_class=serializer,
filter_fields='__all__',
ordering=['-pk']
)
)
logger.debug(f'>>> Automatically declared <class \'{view_set.__name__}\'>')
context[view_set.__name__] = view_set
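# Hedged usage sketch (illustrative only): call this at the bottom of an app's views
# module so every ModelSerializer in that app gets a matching ModelViewSet.
#
#   # myapp/views.py
#   from . import serializers
#   auto_declare_viewsets(serializers, locals())
#   # locals() now holds e.g. BookViewSet when serializers defines BookSerializer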
| 5,346,492 |
def create_fake_record(filename):
"""Create records for demo purposes."""
data_to_use = _load_json(filename)
data_acces = {
"access_right": fake_access_right(),
"embargo_date": fake_feature_date(),
}
service = Marc21RecordService()
draft = service.create(
data=data_to_use, identity=system_identity(), access=data_acces
)
record = service.publish(id_=draft.id, identity=system_identity())
return record
| 5,346,493 |
def update_file_contents(job_id: int, relative_file_path: str, contents: str) -> None:
"""
This function works only with Job Type
"""
path_to_files = get_path_to_files(Type.Job, job_id)
filepath = os.path.join(path_to_files, relative_file_path)
with open(filepath, 'w') as file_:
file_.write(contents)
handler = io.BytesIO()
with tarfile.open(fileobj=handler, mode='w:gz') as tar:
tar.add(path_to_files, arcname='.')
tar.close()
handler.seek(0)
_save_file_to_s3(handler, Type.Job, job_id)
current_file_version[Type.Job][job_id] = _get_md5sum_from_s3(Type.Job, job_id)
| 5,346,494 |
def load(filename):
"""Eval every expression from a file.
:param filename: the name of the file to load
"""
if not filename.endswith('.scm'):
filename = filename + '.scm'
inport = InPort(open(filename))
while True:
try:
x = parse(inport)
if x is eof_object: return
eval(x)
except Exception as e:
sys.print_exception(e)
| 5,346,495 |
def load_configuration():
"""
This function loads the configuration from the
config.json file and then returns it.
Returns: The configuration
"""
with open('CONFIG.json', 'r') as f:
return json.load(f)
| 5,346,496 |
def config_parse(profile_name):
"""Parse the profile entered with the command line. This profile is in the profile.cfg file.
These parameters are used to automate the processing
:param profile_name: Profile's name"""
import configparser
config = configparser.ConfigParser()
config.read(os.path.dirname(sys.argv[0]) + "\\profile.cfg")
folder_string = config.get(profile_name, "folder_names")
folder_string = [i.strip() for i in folder_string.split(",")]
cam_names = config.get(profile_name, "cam_names")
cam_names = [i.strip() for i in cam_names.split(",")]
cam_bearing = config.get(profile_name, "cam_bearing")
cam_bearing = [int(i.strip()) for i in cam_bearing.split(",")]
cam_log_count = int(config.get(profile_name, "cam_log_count"))
distance_from_center = float(config.get(profile_name, "distance_from_center"))
min_pic_distance = float(config.get(profile_name, "min_pic_distance"))
try:
cam_log_position = config.get(profile_name, "cam_log_position")
        cam_log_position = [int(i.strip()) for i in cam_log_position.split(",")]
except:
cam_log_position = list(range(len(cam_names)))
return folder_string, cam_names, cam_log_position, cam_bearing, cam_log_count, distance_from_center, min_pic_distance
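# Hedged sketch (illustrative only) of the profile.cfg layout the parser above expects;
# the section name and values are made up for demonstration.
#
#   [survey2023]
#   folder_names = cam_front, cam_rear
#   cam_names = front, rear
#   cam_bearing = 0, 180
#   cam_log_count = 2
#   distance_from_center = 1.5
#   min_pic_distance = 3.0
#
#   folders, names, positions, bearings, count, center_dist, min_dist = config_parse("survey2023")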
| 5,346,497 |
def scrape(webpage, linkNumber, extention):
"""
    scrapes the main page of a news website using Request and Beautiful Soup and
    returns a summary of the top article as a string
    Args:
        webpage: a string containing the URL of the main website
        linkNumber: an integer pointing to the URL of the top article from the list
            of all the URLs that have been scraped
        extention: a string containing the base URL prefix to be passed to the
            function sub_scrape()
    Returns:
        headline: a string containing the 500 word summary of the scraped article
"""
# returns the link to the top headline link
req = Request(webpage, headers={'User-Agent':'Mozilla/5.0'})
webpage = urlopen(req).read()
soup = bs.BeautifulSoup(webpage,'lxml')
link = soup.find_all('a')
if linkNumber > 0:
story = (link[linkNumber])
sub_soup = str(extention + '{}'.format(story['href']))
elif linkNumber == -1:
sub_soup = articles[0][5]
elif linkNumber == -2:
link = soup.find('a',{'class':'gs-c-promo-heading'})
sub_soup = 'https://www.bbc.co.uk{}'.format(link['href'])
headline = sub_scrape(sub_soup)
return headline
| 5,346,498 |
def Scheduler(type):
"""Instantiate the appropriate scheduler class for given type.
Args:
type (str): Identifier for batch scheduler type.
Returns:
Instance of a _BatchScheduler for given type.
"""
for cls in _BatchScheduler.__subclasses__():
if cls.is_scheduler_for(type):
return cls(type)
raise ValueError
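# Hedged usage sketch (not from the original source): the factory dispatches on a
# classmethod each _BatchScheduler subclass is expected to provide, e.g.:
#
#   class SlurmScheduler(_BatchScheduler):
#       @classmethod
#       def is_scheduler_for(cls, type):
#           return type == "slurm"
#
#   scheduler = Scheduler("slurm")   # returns a SlurmScheduler instance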
| 5,346,499 |