def __persistent_cache_manager(mode='r'):
"""
pds - persistent data structure
modes:
r - recover, s - save
"""
if not __PERSISTENT_CACHE:
return
global __BUZZER_CACHE
if mode == 's':
# SAVE CACHE
with open('buzzer.pds', 'w') as f:
f.write(','.join([str(k) for k in __BUZZER_CACHE]))
return
try:
# RESTORE CACHE
with open('buzzer.pds', 'r') as f:
__BUZZER_CACHE = [int(data) for data in f.read().strip().split(',')]
    except Exception:
        # No saved cache yet (or the file is unreadable) - start with an empty cache
        pass
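# Usage sketch (hedged): the manager persists the module-level __BUZZER_CACHE list,
# guarded by the __PERSISTENT_CACHE flag; the helper names below are illustrative,
# not taken from the original module.
__PERSISTENT_CACHE = True
__BUZZER_CACHE = []

def _remember_tone(freq):
    """Record a tone frequency and save the cache to buzzer.pds."""
    __BUZZER_CACHE.append(int(freq))
    __persistent_cache_manager(mode='s')

def _restore_tones():
    """Recover any previously saved tones (no-op if buzzer.pds is missing)."""
    __persistent_cache_manager(mode='r')
    return __BUZZER_CACHE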
| 5,349,400 |
def default_summary_collector():
"""
Get the :class:`SummaryCollector` object at the top of context stack.
Returns:
SummaryCollector: The summary collector.
"""
return _summary_collect_stack.top()
| 5,349,401 |
def norm(*args, **kwargs):
""" See https://www.tensorflow.org/versions/master/api_docs/python/tf/norm .
"""
return tensorflow.norm(*args, **kwargs)
| 5,349,402 |
def read_fasta(input_fasta):
"""Return a list of seqeunces from a given FASTA file."""
try:
seq = []
records = []
is_fasta = False
with open_fasta(input_fasta) as fasta_fh:
for line in fasta_fh:
line = line.rstrip()
if line.startswith('>'):
is_fasta = True
if seq:
records.append(''.join(seq))
seq = []
elif records:
print("ERROR: Found FASTA record without a sequence", file=sys.stderr)
return []
elif is_fasta:
seq.append(line)
if is_fasta:
records.append(''.join(seq))
return records
except IOError as error:
raise RuntimeError("Error opening assembly.") from error
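# Usage sketch (hedged): `open_fasta` is assumed to be a helper defined elsewhere in
# this module that opens plain or gzipped FASTA files; the path below is made up.
sequences = read_fasta('assembly.fasta')
print('parsed {} sequences'.format(len(sequences)))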
| 5,349,403 |
def get_steering_policies(compartment_id: Optional[str] = None,
display_name: Optional[str] = None,
display_name_contains: Optional[str] = None,
filters: Optional[Sequence[pulumi.InputType['GetSteeringPoliciesFilterArgs']]] = None,
health_check_monitor_id: Optional[str] = None,
id: Optional[str] = None,
state: Optional[str] = None,
template: Optional[str] = None,
time_created_greater_than_or_equal_to: Optional[str] = None,
time_created_less_than: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSteeringPoliciesResult:
"""
This data source provides the list of Steering Policies in Oracle Cloud Infrastructure DNS service.
Gets a list of all steering policies in the specified compartment.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_steering_policies = oci.dns.get_steering_policies(compartment_id=var["compartment_id"],
display_name=var["steering_policy_display_name"],
display_name_contains=var["steering_policy_display_name_contains"],
health_check_monitor_id=oci_health_checks_http_monitor["test_http_monitor"]["id"],
id=var["steering_policy_id"],
state=var["steering_policy_state"],
template=var["steering_policy_template"],
time_created_greater_than_or_equal_to=var["steering_policy_time_created_greater_than_or_equal_to"],
time_created_less_than=var["steering_policy_time_created_less_than"])
```
:param str compartment_id: The OCID of the compartment the resource belongs to.
:param str display_name: The displayName of a resource.
:param str display_name_contains: The partial displayName of a resource. Will match any resource whose name (case-insensitive) contains the provided value.
:param str health_check_monitor_id: Search by health check monitor OCID. Will match any resource whose health check monitor ID matches the provided value.
:param str id: The OCID of a resource.
:param str state: The state of a resource.
:param str template: Search by steering template type. Will match any resource whose template type matches the provided value.
:param str time_created_greater_than_or_equal_to: An [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) timestamp that states all returned resources were created on or after the indicated time.
:param str time_created_less_than: An [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) timestamp that states all returned resources were created before the indicated time.
"""
__args__ = dict()
__args__['compartmentId'] = compartment_id
__args__['displayName'] = display_name
__args__['displayNameContains'] = display_name_contains
__args__['filters'] = filters
__args__['healthCheckMonitorId'] = health_check_monitor_id
__args__['id'] = id
__args__['state'] = state
__args__['template'] = template
__args__['timeCreatedGreaterThanOrEqualTo'] = time_created_greater_than_or_equal_to
__args__['timeCreatedLessThan'] = time_created_less_than
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:dns/getSteeringPolicies:getSteeringPolicies', __args__, opts=opts, typ=GetSteeringPoliciesResult).value
return AwaitableGetSteeringPoliciesResult(
compartment_id=__ret__.compartment_id,
display_name=__ret__.display_name,
display_name_contains=__ret__.display_name_contains,
filters=__ret__.filters,
health_check_monitor_id=__ret__.health_check_monitor_id,
id=__ret__.id,
state=__ret__.state,
steering_policies=__ret__.steering_policies,
template=__ret__.template,
time_created_greater_than_or_equal_to=__ret__.time_created_greater_than_or_equal_to,
time_created_less_than=__ret__.time_created_less_than)
| 5,349,404 |
def res_input_matrix_random_sparse(idim = 1, odim = 1, density=0.1, dist = 'normal'):
"""reservoirs.res_input_matrix_random_sparse
Create a sparse reservoir input matrix. Wrapper for
create_matrix_sparse_random.
Arguments:
idim: input dimension
odim: hidden dimension
density: density
dist: distribution
Returns:
wi: input matrix
"""
# p_wi = density
# wi_ = spa.rand(odim, idim, p_wi)
# # print "sparse wi", wi_
# wi = wi_.todense()
# tmp_idx = wi != 0
# tmp = wi[tmp_idx]
# # tmp_r = np.random.normal(0, 1, size=(tmp.shape[1],))
# tmp_r = np.random.uniform(-1, 1, size=(tmp.shape[1],))
# wi[tmp_idx] = tmp_r
# # return dense repr
# return np.asarray(wi)
return create_matrix_sparse_random(odim, idim, density, dist = dist)
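# Hedged sketch of what `create_matrix_sparse_random` is assumed to do, reconstructed
# from the commented-out code above: draw a sparse (odim x idim) mask and fill its
# non-zero entries from the requested distribution.
import numpy as np
import scipy.sparse as spa

def _sparse_random_matrix_sketch(odim, idim, density, dist='normal'):
    wi = np.asarray(spa.rand(odim, idim, density).todense())
    idx = wi != 0
    n_nonzero = int(idx.sum())
    if dist == 'normal':
        wi[idx] = np.random.normal(0, 1, size=n_nonzero)
    else:
        wi[idx] = np.random.uniform(-1, 1, size=n_nonzero)
    return wi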
| 5,349,405 |
def _MakeApiMap(root_package, api_config):
"""Converts a map of api_config into ApiDef.
Args:
root_package: str, root path of where generate api will reside.
api_config: {api_name->api_version->{discovery,default,version,...}},
description of each api.
Returns:
{api_name->api_version->ApiDef()}.
Raises:
NoDefaultApiError: if for some api with multiple versions
default was not specified.
"""
apis_map = {}
apis_with_default = set()
for api_name, api_version_config in six.iteritems(api_config):
api_versions_map = apis_map.setdefault(api_name, {})
has_default = False
for api_version, api_config in six.iteritems(api_version_config):
if api_config.get('gcloud_gapic_library'):
gapic_client = _MakeGapicClientDef(root_package, api_name, api_version)
else:
gapic_client = None
default = api_config.get('default', len(api_version_config) == 1)
if has_default and default:
raise NoDefaultApiError(
'Multiple default client versions found for [{}]!'
.format(api_name))
has_default = has_default or default
enable_mtls = api_config.get('enable_mtls', True)
mtls_endpoint_override = api_config.get('mtls_endpoint_override', '')
api_versions_map[api_version] = api_def.APIDef(
_MakeApitoolsClientDef(root_package, api_name, api_version),
gapic_client,
default, enable_mtls, mtls_endpoint_override)
if has_default:
apis_with_default.add(api_name)
apis_without_default = set(apis_map.keys()).difference(apis_with_default)
if apis_without_default:
raise NoDefaultApiError('No default client versions found for [{0}]!'
.format(', '.join(sorted(apis_without_default))))
return apis_map
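# Hedged example of the `api_config` shape this function expects (API names and file
# names below are illustrative, not taken from a real configuration):
#
#   _MakeApiMap('googlecloudsdk.generated_clients.apis', {
#       'compute': {
#           'v1':   {'discovery_doc': 'compute_v1.json', 'default': True},
#           'beta': {'discovery_doc': 'compute_beta.json'},
#       },
#   })
#
# With a single version per API, 'default' may be omitted (it is inferred); with
# multiple versions, exactly one must carry default=True or NoDefaultApiError is raised.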
| 5,349,406 |
def make_election_frame(votes, shares=None, party_names=None, margin_idx=None):
"""
Constructs an election frame from at most two arrays.
    If provided, `shares` is used directly alongside the vote column; otherwise
    per-party shares are computed from the wide `votes` matrix, using `margin_idx`
    (or the row sums) as the totals column.
"""
if votes.ndim == 1:
votes = votes.reshape(-1,1)
if votes.shape[-1] == 1 and shares is not None:
votes, shares = votes, shares
elif votes.shape[-1] > 1 and shares is None:
if margin_idx is None:
totals = votes.sum(axis=1).reshape(-1,1)
else:
totals = votes[:,margin_idx].reshape(-1,1)
votes = np.delete(votes, margin_idx, axis=1)
shares = votes / totals
votes = totals
data = np.hstack((votes, shares))
if party_names is None:
party_names = ['Party_{}'.format(i) for i in range(data.shape[-1] - 1)]
return pd.DataFrame(data, columns=['Votes'] + list(party_names))
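# Usage sketch (hedged): three precincts with raw vote counts for two parties; the
# wide count matrix is collapsed into total votes plus per-party shares.
votes = np.array([[120,  80],
                  [300, 100],
                  [ 50,  50]])
frame = make_election_frame(votes, party_names=['A', 'B'])
print(frame)   # columns: Votes, A, B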
| 5,349,407 |
def test_cli_min(runner):
"""Test cli."""
result = runner.invoke(cli, ['-l', 'min', 'data/setup.py'])
assert result.exit_code == 0
if sys.version_info[:2] == (2, 7):
assert result.output == \
'CairoSVG==1.0.20\n' \
'click==5.0.0\n' \
'functools32==3.2.3-2\n' \
'invenio[auth,base,metadata]==3.0.0\n' \
'invenio-records==1.0.0\n' \
'ipaddr==2.1.11\n' \
'mock==1.3.0\n'
else:
assert result.output == \
'CairoSVG==1.0.20\n' \
'click==5.0.0\n' \
'invenio[auth,base,metadata]==3.0.0\n' \
'invenio-records==1.0.0\n' \
'mock==1.3.0\n'
| 5,349,408 |
def payload_from_api_post_event(event):
"""Maps an API event to the expected payload"""
# event = {
# 'timeserie1': [(1, 100), (2, 100)],
# 'timeserie2': [(3, 100), (4, 100)],
# }
body = json.loads(event['body'])
return body
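# Usage sketch (hedged): a minimal API-Gateway-style event whose 'body' is a JSON
# string, matching the commented example above.
event = {'body': json.dumps({
    'timeserie1': [[1, 100], [2, 100]],
    'timeserie2': [[3, 100], [4, 100]],
})}
payload = payload_from_api_post_event(event)
assert list(payload) == ['timeserie1', 'timeserie2']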
| 5,349,409 |
def delete(*filenames):
"""Permanently delete files.
Delete on non-finalized/non-existent files is a no-op.
Args:
    filenames: finalized file names as strings. Each filename should have the format
"/gs/bucket/filename" or "/blobstore/blobkey".
Raises:
InvalidFileNameError: Raised when any filename is not of valid format or
not a finalized name.
IOError: Raised if any problem occurs contacting the backend system.
"""
from google.appengine.api.files import blobstore as files_blobstore
from google.appengine.api.files import gs
from google.appengine.ext import blobstore
blobkeys = []
for filename in filenames:
if not isinstance(filename, six.string_types):
raise InvalidArgumentError('Filename should be a string, but is %s(%r)' %
(filename.__class__.__name__, filename))
if filename.startswith(files_blobstore._BLOBSTORE_DIRECTORY):
__checkIsFinalizedName(filename)
blobkey = files_blobstore.get_blob_key(filename)
if blobkey:
blobkeys.append(blobkey)
elif filename.startswith(gs._GS_PREFIX):
__checkIsFinalizedName(filename)
blobkeys.append(blobstore.create_gs_key(filename))
else:
raise InvalidFileNameError('Filename should start with /%s or /%s' %
(files_blobstore._BLOBSTORE_DIRECTORY,
gs._GS_PREFIX))
try:
blobstore.delete(blobkeys)
except Exception as e:
raise IOError('Blobstore failure.', e)
| 5,349,410 |
def format_link_header(link_header_data):
"""Return a string ready to be used in a Link: header."""
links = ['<{0}>; rel="{1}"'.format(data['link'], data['rel'])
for data in link_header_data]
return ', '.join(links)
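# Usage sketch: pagination links in the RFC 8288 style (URLs are illustrative).
link_data = [
    {'link': 'https://api.example.com/items?page=2', 'rel': 'next'},
    {'link': 'https://api.example.com/items?page=5', 'rel': 'last'},
]
print(format_link_header(link_data))
# <https://api.example.com/items?page=2>; rel="next", <https://api.example.com/items?page=5>; rel="last"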
| 5,349,411 |
def parse_collection_members(object_: dict) -> dict:
"""Parse the members of a collection to make it easier
to insert in database.
:param object_: The body of the request having object members
:type object_: dict
:return: Object with parsed members
:rtype: dict
"""
members = list()
for member in object_['members']:
# example member
# {
# "@id": "/serverapi/LogEntry/aab38f9d-516a-4bb2-ae16-068c0c5345bd",
# "@type": "LogEntry"
# }
member_id = member['@id'].split('/')[-1]
member_type = member['@type']
if crud.item_exists(member_type, member_id, get_session()):
members.append({
"id_": member_id,
"@type": member_type,
})
else:
error = HydraError(code=400, title="Data is not valid")
return error_response(error)
object_['members'] = members
return object_
| 5,349,412 |
def PoissonWasserstein_S2(tau, rho, function1, function2, numerical=False):
""" Computes the Poisson bracket of two linear functionals on the space P^{OO}(S^2), of measures with a smooth
positive density function on the 2-sphere S^2, at a measure in P^{OO}(S^2).
The Poisson bracket on P^{OO}(S^2) is induced by a Poisson bivector field pi_{tau} on S^2.
Let (theta, phi) be spherical coordinates on S^2 such that
(theta, phi) |-> (sin(theta) * cos(phi), sin(theta) * sin(phi), cos(theta)).
Then pi_{tau} has the following representation,
pi_{tau} = (tau / sin(theta)) * d/d{theta} ^ d/d{phi}, ----------> (1)
for some conformal factor tau on S^2. Hence, the Poisson bracket on P^{OO}(S^2) is given by
{F_{f}, F_{h}}(mu_{rho}) = 1/(4*pi) * int_{0}^{2*pi} int_{0}^{pi} (df/d{theta} * dh/d{phi}
    - dh/d{theta} * df/d{phi}) * tau * rho * sin(theta) d{theta}d{phi},
----------> (2)
where F_{f} and F_{h} are linear functionals on P^{OO}(S^2) induced by scalar functions f and h on S^2, and
mu_{rho} = rho * sin(theta) * |d{theta}^d{phi}| ----------> (3)
is a measure in P^{OO}(S^2).
Parameters
==========
tau: string literal expression
Represents the conformal factor tau in (1)
rho: string literal expression
Represents the density function rho in (1)
function1: string literal expression
Represents the function f in (2)
function2: string literal expression
Represents the function h in (2)
numerical: Boolean expression, optional
Indicates numerical computation. By default, numerical == False.
Returns: a symbolic expression or a tuple
=======
* A symbolic expression of the double integral in (2)
* A tuple (numerical approximation of the double integral in (2), estimated error)
"""
# Define the symbolic variables theta and phi
theta, phi = sym.symbols('theta phi')
# Convert the string literal expressions tau, rho, function1 and function2 into symbolic variables, in that order
tau = sym.sympify(tau)
rho = sym.sympify(rho)
ff = sym.sympify(function1)
hh = sym.sympify(function2)
# Compute the Poisson bracket of function1 and function2 induced by pi_{tau} in (1):
# (df/d{theta} * dh/d{phi} - dh/d{theta} * df/d{phi}) * tau
bracket_ff_hh = (sym.diff(ff, theta) * sym.diff(hh, phi) - sym.diff(hh, theta) * sym.diff(ff, phi)) * tau
# Compute the integrand of the double integral in (2)
integrand = bracket_ff_hh * rho * sym.sin(theta)
    if numerical:  # numerical computation requested
# Transform the symbolic variable 'integrand' into a NumPy function that allows a numerical evaluation
integrand = sym.lambdify([theta, phi], 1/(4*sym.pi) * integrand, 'numpy')
# Return a tuple: (numerical approximation of the double integral in (2), estimated error)
return dblquad(integrand, 0, 2*np.pi, lambda phi: 0, lambda phi: np.pi)
    # Compute the double integral in (2)
integrand = sym.integrate(integrand, (theta, 0, sym.pi))
integral = sym.integrate(integrand, (phi, 0, 2*sym.pi))
# Return a symbolic expression of the double integral in (2)
return 1/(4*sym.pi) * integral
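# Usage sketch (hedged): the standard factor tau = 1, uniform density rho = 1, and the
# coordinate functions f = cos(theta), h = phi, all passed as string expressions.
bracket_sym = PoissonWasserstein_S2('1', '1', 'cos(theta)', 'phi')
print(bracket_sym)                 # exact symbolic value of the integral in (2)
bracket_num, err = PoissonWasserstein_S2('1', '1', 'cos(theta)', 'phi', numerical=True)
print(bracket_num, err)            # quadrature approximation and estimated error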
| 5,349,413 |
def poly_in_gdf():
""" Fixture for a bounding box polygon. """
return make_poly_in_gdf()
| 5,349,414 |
def apply_inclusion_exclusion_criteria(
    df: pd.DataFrame, col: str, criteria: List[List[str]]
) -> pd.Series:
"""Filter out files based on `criteria`, a nested list of row values to include or exclude, respectively
:param df: dataframe to filter
:type df: pd.DataFrame
:param col: column to filter
:type col: str
:param criteria: nested list containing row values to include or exclude. May be `None` or `['all']` to indicate that all values are included.
:type criteria: List[List[str]]
:return: filtered column of `df`
:rtype: pd.Series
"""
if criteria is None:
return df.loc[:, col]
# copy dataframe to be filtered
out = df.copy()
# join criteria
masks = ["|".join(c) for c in criteria]
# inclusion
if masks[0] != "all":
out = out.loc[out[col].str.contains(masks[0], na=False)]
# exclusion
out = out.loc[~out[col].str.contains(masks[1], na=False)]
return out
| 5,349,415 |
def prune_string(string):
"""Prune a string.
- Replace multiple consecutive spaces with a single space.
- Remove spaces after open brackets.
- Remove spaces before close brackets.
"""
return re.sub(
r" +(?=[\)\]\}])",
"",
re.sub(r"(?<=[\(\[\{]) +", "", re.sub(r" +", " ", string)),
)
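# Usage sketch: collapse repeated spaces and trim padding next to brackets.
assert prune_string('f( a,  b )  ->  [ 1, 2 ]') == 'f(a, b) -> [1, 2]'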
| 5,349,416 |
def detect_face(MaybeImage):
"""
Take an image and return positional information for the largest face in it.
Args:
MaybeImage: An image grabbed from the local camera.
Returns:
Maybe tuple((bool, [int]) or (bool, str)): True and list of positional
coordinates of the largest face found. False and an error string if no
faces are found.
"""
if MaybeImage.success:
image = MaybeImage.result
else:
return MaybeImage
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') # Load face classifier
major_ver = (cv2.__version__).split('.')[0]
if int(major_ver) < 3:
flag_for_detect = cv2.cv.CV_HAAR_SCALE_IMAGE
else:
flag_for_detect = cv2.CASCADE_SCALE_IMAGE
# Detect faces in the image
# faces will be an iterable object
faces = faceCascade.detectMultiScale(
image=image,
scaleFactor=1.1,
minNeighbors=5,
minSize=(40, 40),
flags = flag_for_detect
)
try: # Assume largest face is the subject
face = faces[0] # [0] index is largest face.
return Maybe(True, face)
except IndexError:
return Maybe(False, "No faces detected. This may be due to low or uneven \
lighting.")
| 5,349,417 |
def test_wrong_relations():
"""docstring for test_wrong_relations"""
# GIVEN a individual with correct family info
sample_info = {
"sample_id": "1",
"sex": "male",
"phenotype": "affected",
"mother": "3",
"father": "2",
}
mother_info = {
"sample_id": "3",
"sex": "female",
"phenotype": "unaffected",
"mother": "0",
"father": "0",
}
father_info = {
"sample_id": "2",
"sex": "male",
"phenotype": "unaffected",
"mother": "0",
"father": "0",
}
samples = [sample_info, mother_info, father_info]
    # Nothing should happen here
assert parse_individuals(samples)
# WHEN changing mother id in proband
sample_info["mother"] = "5"
# THEN a PedigreeError should be raised
with pytest.raises(PedigreeError):
parse_individuals(samples)
| 5,349,418 |
def compute_TVL1(prev, curr, bound=15):
"""Compute the TV-L1 optical flow."""
TVL1 = cv2.DualTVL1OpticalFlow_create()
flow = TVL1.calc(prev, curr, None)
assert flow.dtype == np.float32
flow = (flow + bound) * (255.0 / (2 * bound))
flow = np.round(flow).astype(int)
flow[flow >= 255] = 255
flow[flow <= 0] = 0
return flow
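# Usage sketch (hedged): flow between two consecutive grayscale frames; the file names
# are illustrative. Note that in some OpenCV builds the factory used above lives in the
# optflow contrib module rather than directly under cv2.
prev = cv2.imread('frame_0001.png', cv2.IMREAD_GRAYSCALE)
curr = cv2.imread('frame_0002.png', cv2.IMREAD_GRAYSCALE)
flow = compute_TVL1(prev, curr)
print(flow.shape)   # (H, W, 2), quantised to the range [0, 255]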
| 5,349,419 |
def draw_3d(xyz=None, species=None, project_directory=None, save_only=False):
"""
Draws the molecule in a "3D-balls" style.
Saves an image if a species and ``project_directory`` are provided.
Args:
xyz (str, dict, optional): The coordinates to display.
species (ARCSpecies, optional): xyz coordinates will be taken from the species.
project_directory (str): ARC's project directory to save the image in.
save_only (bool): Whether to only save an image without plotting it, ``True`` to only save.
"""
xyz = check_xyz_species_for_drawing(xyz, species)
ase_atoms = list()
for symbol, coord in zip(xyz['symbols'], xyz['coords']):
ase_atoms.append(Atom(symbol=symbol, position=coord))
ase_mol = Atoms(ase_atoms)
if not save_only:
display(view(ase_mol, viewer='x3d'))
if project_directory is not None and species is not None:
folder_name = 'rxns' if species.is_ts else 'Species'
geo_path = os.path.join(project_directory, 'output', folder_name, species.label, 'geometry')
if not os.path.exists(geo_path):
os.makedirs(geo_path)
ase_write(filename=os.path.join(geo_path, 'geometry.png'), images=ase_mol, scale=100)
| 5,349,420 |
def _un_partial_ize(func):
"""
    Decorator for functions whose first argument is a callable: if that argument is a
    functools.partial, unwrap it to the underlying function before calling.
"""
@wraps(func)
def wrapper(fn, *args, **kw):
if isinstance(fn, (partial, partialmethod)):
fn = fn.func
return func(fn, *args, **kw)
return wrapper
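# Usage sketch (hedged): a decorated introspection helper keeps working when handed
# a functools.partial instead of the bare callable.
from functools import partial

@_un_partial_ize
def qual_name(fn):
    return fn.__qualname__

print(qual_name(partial(print, 'hello')))   # -> 'print', the unwrapped function's name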
| 5,349,421 |
def search_full_text(text, ipstreet_api_key):
"""sends input text to /full_text semantic search endpoint. returns json results"""
endpoint = 'https://api.ipstreet.com/v2/full_text'
headers = {'x-api-key': ipstreet_api_key}
payload = json.dumps({'raw_text': str(text),
'q': {
'start_date': '1976-01-01',
'start_date_type': 'application_date',
'end_date': '2017-03-10',
'end_date_type': 'application_date',
'applied': True,
'granted': True,
'expired': True,
'max_expected_results': 500,
'page_size': 500,
}
})
r = requests.post(endpoint, headers=headers, data=payload)
return r.json()
| 5,349,422 |
def main(filename, plotDir='Plots/'):
"""
"""
# Which pixels and sidebands?
pixelOffsets = Pointing.GetPixelOffsets('COMAP_FEEDS.dat')
# READ IN THE DATA
d = h5py.File(filename)
tod = d['spectrometer/tod']
mjd = d['spectrometer/MJD'][:]
if len(d['pointing/az'].shape) > 1:
az = d['pointing/az'][0,:]
el = d['pointing/el'][0,:]
else:
az = d['pointing/az'][:]
el = d['pointing/el'][:]
mjdpoint = d['pointing/MJD'][:]
slewDist = SlewDistance(az)
ra, dec, pa, az, el, mjd = Pointing.GetPointing(az, el, mjd,
mjdpoint, pixelOffsets,
lon=Pointing.comap_lon,
lat=Pointing.comap_lat)
# Calculate data sizes:
nHorns = tod.shape[0]
nSBs = tod.shape[1]
nFreqs = tod.shape[2]
nSamps = tod.shape[3]
# Calculate the position of Jupiter
clon, clat, diam = EphemNew.rdplan(mjd[0:1], 5,
Pointing.comap_lon*np.pi/180.,
Pointing.comap_lat*np.pi/180.)
EphemNew.precess(clon, clat, mjd[0:1])
# Loop over horns/SBs
P1out = None
prefix = filename.split('/')[-1].split('.')[0]
for iHorn in range(nHorns):
print('Processing Horn {:d}'.format(iHorn+1))
_tod = np.nanmean(np.nanmean(tod[iHorn,:,5:-5,:],axis=0),axis=0)
#Tim: Pass this function whatever chunk of time-ordered data you have in memory
P1, P1e, cross, mweight, weight, model = FitSource.FitTOD(_tod,
ra[0,:], # horn 0 because we want the relative offset from Focal Plane
dec[0,:],
clon*180./np.pi,
clat*180./np.pi,
pa[0,:],
prefix='{}_Horn{}'.format(prefix, iHorn+1),
plotDir=plotDir)
if isinstance(P1out, type(None)):
P1out = np.zeros((nHorns, len(P1)))
Peout = np.zeros((nHorns, len(P1e)))
mout = np.zeros(mweight.shape)
hout = np.zeros(weight.shape)
if not isinstance(P1, type(None)):
P1out[iHorn, :] = P1
Peout[iHorn, :] = P1e
mout += mweight*(model+1)**2
hout += weight*(model+1)**2
pyplot.imshow(mout/hout, extent=[-100/2. * 1.5, 100/2.*1.5,-100/2. * 1.5, 100/2.*1.5] )
pyplot.xlabel('Az offset (arcmin)')
pyplot.ylabel('EL offset (arcmin)')
pyplot.title('{}'.format(prefix))
pyplot.grid(True)
pyplot.savefig('{}/FeedPositions_{}.png'.format(plotDir, prefix), bbox_inches='tight')
pyplot.clf()
meanMJD = np.mean(mjd)
meanEl = np.median(el)
meanAz = np.median(az)
d.close()
print('SLEW DISTANCE', slewDist)
return P1out, Peout, mout/hout, meanMJD, meanEl, meanAz
| 5,349,423 |
def load_args_from_config(args, cfg):
"""Override the values of arguments from a config file
Args:
args: ArgumentParser object
cfg: a dictionary with arguments to be overridden
"""
for key in cfg:
if key in args:
type_of_key = type(args.__getattribute__(key))
args.__setattr__(key, type_of_key(cfg[key]))
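# Usage sketch (hedged): override parsed defaults with values from a config mapping
# (e.g. loaded from YAML/JSON); keys that are not attributes of `args` are ignored.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--epochs', type=int, default=10)
args = parser.parse_args([])

load_args_from_config(args, {'lr': '0.01', 'epochs': 20, 'unknown_key': 1})
print(args.lr, args.epochs)   # 0.01 20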
| 5,349,424 |
def add_rows(df, row_list=[], column_list=[], append=False):
"""
add a list of rows by index number for a wide form dataframe
"""
df = df.filter(items=row_list, axis=0)
df = pd.DataFrame(df.sum()).T
return df
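# Usage sketch: keep rows 0 and 2 of a wide-form frame and collapse them into a single
# summed row (note that the `column_list` and `append` arguments are currently unused).
df = pd.DataFrame({'a': [1, 2, 3], 'b': [10, 20, 30]})
print(add_rows(df, row_list=[0, 2]))
#    a   b
# 0  4  40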
| 5,349,425 |
def coerce_data_type_value(context, presentation, data_type, entry_schema, constraints, value, # pylint: disable=unused-argument
aspect):
"""
Handles the ``_coerce_data()`` hook for complex data types.
There are two kinds of handling:
    1. If we have a primitive type as our great ancestor, then we do primitive type coercion, and
just check for constraints.
2. Otherwise, for normal complex data types we return the assigned property values while making
sure they are defined in our type. The property definition's default value, if available,
will be used if we did not assign it. We also make sure that required definitions indeed end
up with a value.
"""
primitive_type = data_type._get_primitive_ancestor(context)
if primitive_type is not None:
# Must be coercible to primitive ancestor
value = coerce_to_primitive(context, presentation, primitive_type, constraints, value,
aspect)
else:
definitions = data_type._get_properties(context)
if isinstance(value, dict):
temp = OrderedDict()
# Fill in our values, but make sure they are defined
for name, v in value.iteritems():
if name in definitions:
definition = definitions[name]
definition_type = definition._get_type(context)
definition_entry_schema = definition.entry_schema
definition_constraints = definition._get_constraints(context)
temp[name] = coerce_value(context, presentation, definition_type,
definition_entry_schema, definition_constraints, v,
aspect)
else:
context.validation.report(
'assignment to undefined property "%s" in type "%s" in "%s"'
% (name, data_type._fullname, presentation._fullname),
locator=get_locator(v, value, presentation), level=Issue.BETWEEN_TYPES)
# Fill in defaults from the definitions, and check if required definitions have not been
# assigned
for name, definition in definitions.iteritems():
if (temp.get(name) is None) and hasattr(definition, 'default') \
and (definition.default is not None):
definition_type = definition._get_type(context)
definition_entry_schema = definition.entry_schema
definition_constraints = definition._get_constraints(context)
temp[name] = coerce_value(context, presentation, definition_type,
definition_entry_schema, definition_constraints,
definition.default, 'default')
if getattr(definition, 'required', False) and (temp.get(name) is None):
context.validation.report(
'required property "%s" in type "%s" is not assigned a value in "%s"'
% (name, data_type._fullname, presentation._fullname),
locator=presentation._get_child_locator('definitions'),
level=Issue.BETWEEN_TYPES)
value = temp
elif value is not None:
context.validation.report('value of type "%s" is not a dict in "%s"'
% (data_type._fullname, presentation._fullname),
locator=get_locator(value, presentation),
level=Issue.BETWEEN_TYPES)
value = None
return value
| 5,349,426 |
def Inst2Vec(
bytecode: str, vocab: vocabulary.VocabularyZipFile, embedding
) -> np.ndarray:
"""Transform an LLVM bytecode to an array of embeddings.
Args:
bytecode: The input bytecode.
vocab: The vocabulary.
embedding: The embedding.
Returns:
An array of embeddings.
"""
embed = lambda x: EmbedEncoded(x, embedding)
encode = lambda x: EncodeLlvmBytecode(x, vocab)
return embed(encode(PreprocessLlvmBytecode(bytecode)))
| 5,349,427 |
def extract_from_code(code, gettext_functions):
"""Extract strings from Python bytecode.
>>> from genshi.template.eval import Expression
>>> expr = Expression('_("Hello")')
>>> list(extract_from_code(expr, GETTEXT_FUNCTIONS))
[('_', u'Hello')]
>>> expr = Expression('ngettext("You have %(num)s item", '
... '"You have %(num)s items", num)')
>>> list(extract_from_code(expr, GETTEXT_FUNCTIONS))
[('ngettext', (u'You have %(num)s item', u'You have %(num)s items', None))]
:param code: the `Code` object
:type code: `genshi.template.eval.Code`
:param gettext_functions: a sequence of function names
:since: version 0.5
"""
def _walk(node):
if isinstance(node, _ast.Call) and isinstance(node.func, _ast.Name) \
and node.func.id in gettext_functions:
strings = []
def _add(arg):
if isinstance(arg, _ast_Str) \
and isinstance(_ast_Str_value(arg), unicode):
strings.append(_ast_Str_value(arg))
elif isinstance(arg, _ast_Str):
strings.append(unicode(_ast_Str_value(arg), 'utf-8'))
elif arg:
strings.append(None)
[_add(arg) for arg in node.args]
if hasattr(node, 'starargs'):
_add(node.starargs)
if hasattr(node, 'kwargs'):
_add(node.kwargs)
if len(strings) == 1:
strings = strings[0]
else:
strings = tuple(strings)
yield node.func.id, strings
elif node._fields:
children = []
for field in node._fields:
child = getattr(node, field, None)
if isinstance(child, list):
for elem in child:
children.append(elem)
elif isinstance(child, _ast.AST):
children.append(child)
for child in children:
for funcname, strings in _walk(child):
yield funcname, strings
return _walk(code.ast)
| 5,349,428 |
def sample_raw_locations(stacking_program, address_suffix=""):
"""
Samples the (raw) horizontal location of blocks in the stacking program.
p(raw_locations | stacking_program)
Args
stacking_program [num_blocks]
Returns [num_blocks]
"""
device = stacking_program[0].device
dist = pyro.distributions.Independent(
pyro.distributions.Normal(torch.zeros((len(stacking_program),), device=device), 1),
reinterpreted_batch_ndims=1,
)
return pyro.sample(f"raw_locations{address_suffix}", dist)
| 5,349,429 |
def get_raw_data(params, data_type=1):
"""Method to filter which report user wants."""
# class="table table-bordered"
data = None
raw_data = []
td_zeros = '<td>0</td>' * 12
tblanks = ['M', 'F'] * 6
blanks = ['0'] * 13
csvh = ['0 - 5 yrs', '', '6 - 10 yrs', '', '11 - 15 yrs', '',
'16 - 17 yrs', '', '18+ yrs', '', 'Sub-Total', '', '']
try:
report_type = int(params['report_id'])
if report_type == 6:
data, raw_data = get_ovc_values(params)
elif report_type == 5:
data, raw_data = get_raw_values(params)
elif report_type == 4:
# Other values
otherd, vls_ids = {}, {}
idata, idatas = {}, {}
otherd[1] = 'ALL OTHER DISEASES'
otherd[2] = 'FIRST ATTENDANCES'
otherd[3] = 'RE-ATTENDANCES'
otherd[4] = 'REFERRALS IN'
otherd[5] = 'REFERRALS OUT'
vals = get_dict(field_name=['new_condition_id'])
for vls in vals:
vls_ids[vls] = vls
if 'NCOD' in vals:
del vals['NCOD']
r_title = "{period_name}ly Health Report {unit_type}"
dt = '<table class="table table-bordered"><thead>'
dt += "<tr><th colspan='16'>%s" % (r_title)
dt += '</th></tr>'
dt += "<tr><th colspan='16'>{cci_si_name}</th></tr>"
dt += case_load_header(report_type=3)
dt += "</thead>"
# Fetch raw data
rdatas = get_institution_data(params, report_type)
rdata = get_totals(rdatas['data'], vls_ids)
if rdata:
idata = write_row(rdata)
idatas = write_row(rdata, is_raw=True)
# Get totals
itotals = col_totals(rdata)
itotal = write_row([itotals])
# Show the values
total_h = 0
diss_vals = {}
hel_head = ['', 'Health Report'] + [''] * 13
hel_title = ['', 'List of Diseases'] + tblanks + ['Total']
if rdata:
raw_data.append(hel_head)
raw_data.append(['', ''] + csvh)
raw_data.append(hel_title)
cnt = 1
other_items = {1: 'NCOD'}
for val in vals:
val_name = vals[val]
val_data = diss_vals[val] if val in diss_vals else 0
total_h += val_data
dt += '<tr><td>%s</td><td>%s</td>' % (str(cnt) + '.', val_name)
if val in idata:
dt += '%s' % (idata[val])
else:
dt += '<td></td>%s<td>0</td></tr>' % (td_zeros)
if val in idatas:
rd = idatas[val]
del rd[0:2]
else:
rd = blanks
if rdata:
raw_data.append([str(cnt) + '.', val_name] + rd)
cnt += 1
for oval in otherd:
oval_name = otherd[oval]
sval = other_items[oval] if oval in other_items else oval
dt += '<tr><td>%s</td><td>%s</td>' % (str(cnt) + '.', oval_name)
if sval in idata:
dt += '%s' % (idata[sval])
else:
dt += '<td></td>%s<td>0</td></tr>' % (td_zeros)
if sval in idatas:
rd = idatas[sval]
del rd[0:2]
else:
rd = blanks
if rdata:
raw_data.append([str(cnt) + '.', oval_name] + rd)
cnt += 1
if rdata:
del itotals[1]
raw_data.append([''] + itotals)
dt += '<tr><td></td><td>Total</td>'
dt += '%s' % (itotal['TOTAL'])
dt += '<table>'
data = dt
elif report_type == 1:
# KNBS List
ids = {}
ids['CSAB'] = 'Child offender'
ids['CCIP'] = 'Children on the streets'
ids['CSIC'] = 'Neglect'
ids['CIDC'] = 'Orphaned Child'
ids['CCIP'] = 'Children on the streets'
ids['CDIS'] = 'Abandoned'
ids['CHCP'] = 'Lost and found children'
ids['CSDS'] = 'Drug and Substance Abuse'
ids['CSNG'] = 'Physical abuse/violence'
ids['CDSA'] = 'Abduction'
ids['CCDF'] = 'Defilement'
ids['CTRF'] = 'Child Labour'
# Query just like case load
all_datas = get_data(params)
all_data = all_datas['data']
knb_ids, rdata, rdatas = {}, {}, {}
# Just send ids as ids for easier rendering later
# Have to get all ids else errors
case_categories = get_categories()
for knb_id in ids:
knb_ids[knb_id] = knb_id
data = get_totals(all_data, case_categories)
if data:
rdata = write_row(data)
rdatas = write_row(data, is_raw=True)
rtotals = col_totals(data)
rtotal = write_row([rtotals])
rtitle = 'KNBS REPORT %s %s' % (
params['month'], params['year'])
# Just add title whether there is data or not
knb_head = ['', rtitle.upper()] + [''] * 13
knb_title = ['', 'Case Category'] + tblanks + ['Total']
if data:
raw_data.append(knb_head)
raw_data.append(['', ''] + csvh)
raw_data.append(knb_title)
dt = '<table class="table table-bordered"><thead>'
dt += '<tr><th colspan="16">%s</th></tr>' % (rtitle.upper())
dt += case_load_header(report_type=4)
dt += "</thead>"
knbcnt = 1
if data:
for val in ids:
val_name = ids[val]
dt += '<tr><td>%s</td><td>%s</td>' % (knbcnt, val_name)
if val in rdata:
dt += '%s' % (rdata[val])
else:
dt += '<td></td>%s<td>0</td></tr>' % (td_zeros)
if val in rdatas:
rd = rdatas[val]
del rd[0:2]
else:
rd = blanks
raw_data.append([knbcnt, val_name] + rd)
knbcnt += 1
raw_data.append(rtotals)
dt += '<tr><td colspan="2"><b>Total</b></td>'
dt += '%s' % (rtotal['TOTAL'])
dt += '<table>'
data = dt
elif report_type == 3:
discs = {'AEES': 'AEES'}
dvals = {2: 'TANA', 4: 'TARR', 5: 'TRIN', 6: 'TARE'}
rvals = {4: 'TARR', 5: 'TRIN', 6: 'TARE'}
evals = {8: 'AEES', 9: 'AEAB', 10: 'TDER',
11: 'TDTR', 12: 'TDEX', 13: 'DTSI', 15: 'AEDE'}
svals = {8: 'AEES', 9: 'AEAB', 10: 'TDER',
11: 'TDTR', 12: 'TDEX', 13: 'DTSI', 14: '14'}
death_vals = {15: 'AEDE'}
# Get all types of discharges
discharges = get_dict(field_name=['discharge_type_id'])
for disc in discharges:
discs[disc] = disc
# This is it
pdatas = get_institution_data(params, report_type)
devals = {}
for dval in dvals:
deq = dvals[dval]
devals[deq] = deq
pdata = get_totals(pdatas['data'], devals)
odata = get_totals(pdatas['odata'], devals)
ddata = get_totals(pdatas['ddata'], discs)
edata = get_totals(pdatas['death'], death_vals)
ids = {2: 'New Admissions',
3: 'Returnees',
4: ' - Relapse',
5: ' - Transfer in',
6: ' - Return after escape',
7: 'Exits',
8: ' - Escapee',
9: ' - Abducted',
10: ' - Early Release',
11: ' - Released on License',
12: ' - Released on Expiry of Order',
13: ' - Transfer to another Institution',
14: ' - Other exits',
15: 'Death'}
r_title = "{cci_si_title} {period_name}ly Returns {unit_type}"
dt = '<table class="table table-bordered"><thead>'
dt += "<tr><th colspan='16'>%s" % (r_title)
dt += "</th></tr><tr><th colspan='16'>{cci_si_name}</th></tr>"
dt += case_load_header(report_type=2)
dt += "</thead>"
# This is it
popdata, popdatas = {}, {}
opdata, opdatas = {}, {}
dopdata, dopdatas = {}, {}
depdata, depdatas = {}, {}
osdata = []
if pdata:
all_returnees = get_others(pdata, rvals, 3, True)
pdata.append(all_returnees)
popdata = write_row(pdata)
popdatas = write_row(pdata, is_raw=True)
# Old data
if odata:
p1 = col_totals(odata)
osdata.append(p1)
opdata = write_row(osdata)
opdatas = write_row(osdata, is_raw=True)
# Discharge data
if ddata:
all_other = get_others(ddata, evals, 14)
ddata.append(all_other)
all_exits = get_others(ddata, svals, 7, True)
ddata.append(all_exits)
dopdata = write_row(ddata)
dopdatas = write_row(ddata, is_raw=True)
# Deaths as a type of discharge
if edata:
depdata = write_row(edata)
depdatas = write_row(edata, is_raw=True)
# Just add title whether there is data or not
pop_head = ['Institution Population'] + ['-'] * 14
pop_title = ['Category', 'Sub-category'] + tblanks + ['Total']
all_var = merge_two_dicts(popdata, dopdata)
all_rvar = merge_two_dicts(popdatas, dopdatas)
all_vars = merge_two_dicts(all_var, depdata)
all_rvars = merge_two_dicts(all_rvar, depdatas)
if pdata:
raw_data.append(pop_head)
raw_data.append(['', ''] + csvh)
raw_data.append(pop_title)
si_total = 0
# All totals
final_totals = get_final_totals(osdata, pdata, ddata)
ptotal = write_row([final_totals])
ptotal_raw = write_row([final_totals], is_raw=True)
td_pad = '</td><td>'
s_text = '<b>Total Children by End of Previous {period_name}</b>'
dt += '<tr><td colspan="3">%s' % (s_text)
if opdata:
o_data = opdata['TOTAL'].replace('<td></td>', '')
dt += o_data
raw_ol = opdatas['TOTAL']
del raw_ol[0:2]
raw_data.append(['From previous period', ''] + raw_ol)
else:
dt += '</td>%s<td>0</td></tr>' % (td_zeros)
ftotal = ptotal['TOTAL'].replace('<td></td>', '')
for val in ids:
vname = ids[val]
v_name = vname.replace(' - ', td_pad)
r_name = vname.replace(' - ', '')
val_name = v_name + td_pad if '<td>' not in v_name else v_name
vraw = [r_name, ''] if '<td>' not in v_name else ['', r_name]
val_data = 0
if val in dvals:
vd = dvals[val]
elif val in evals:
vd = evals[val]
else:
vd, val_data = str(val), 0
dt += '<tr><td width="1px"></td><td>%s</td>' % (val_name)
if vd in all_rvars:
rd = all_rvars[vd]
del rd[0:2]
else:
rd = blanks
if all_vars:
raw_data.append(vraw + rd)
if vd in all_vars:
my_val = all_vars[vd].replace('<td></td>', '')
dt += '%s' % (my_val)
else:
dt += '%s<td>0</td></tr>' % (td_zeros)
si_total += val_data
t_text = '<b>Total Children by End of Reporting {period_name}</b>'
dt += '<tr><td colspan="3">%s</td>' % (t_text)
dt += '%s' % (ftotal)
dt += '</table>'
if all_vars:
raw_data.append(ptotal_raw['TOTAL'])
data = dt
except Exception as e:
print('Error with raw data - %s' % (str(e)))
raise e
else:
return data, raw_data
| 5,349,430 |
def get_existing_cert(server, req_id, username, password, encoding='b64'):
"""
Gets a certificate that has already been created.
Args:
server: The FQDN to a server running the Certification Authority
Web Enrollment role (must be listening on https)
req_id: The request ID to retrieve
username: The username for authentication
        password: The password for authentication
encoding: The desired encoding for the returned certificate.
Possible values are "bin" for binary and "b64" for Base64 (PEM)
Returns:
The issued certificate
Raises:
CouldNotRetrieveCertificateException: If something went wrong while fetching the cert
"""
headers = {
# We need certsrv to think we are a browser, or otherwise the Content-Type will be wrong
'User-Agent': 'Mozilla/5.0 certsrv (https://github.com/magnuswatn/certsrv)',
'Authorization':'Basic %s' % urllib2.base64.b64encode('%s:%s' % (username, password))
}
cert_url = 'https://%s/certsrv/certnew.cer?ReqID=%s&Enc=%s' % (server, req_id, encoding)
cert_req = urllib2.Request(cert_url, headers=headers)
response = urllib2.urlopen(cert_req)
response_content = response.read()
if response.headers.type != 'application/pkix-cert':
# The response was not a cert. Something must have gone wrong
try:
error = re.search('Disposition message:[^\t]+\t\t([^\r\n]+)', response_content).group(1)
except AttributeError:
            error = 'An unknown error occurred'
raise CouldNotRetrieveCertificateException(error, response_content)
else:
return response_content
| 5,349,431 |
def test_show_config():
"""
Arrange/Act: Run the `show_config` command.
    Assert: The output includes the correct heading `COLUMN_NAMES`.
"""
runner: CliRunner = CliRunner()
result: Result = runner.invoke(cli.cli, ["show-config"])
assert result.exit_code == 0
assert "COLUMN_NAMES" in result.output.strip(), "COLUMN_NAMES should be in output."
| 5,349,432 |
def processCall(*aPositionalArgs, **dKeywordArgs):
"""
    Wrapper around subprocess.call to deal with its absence in older
python versions.
Returns process exit code (see subprocess.poll).
"""
assert dKeywordArgs.get('stdout') == None;
assert dKeywordArgs.get('stderr') == None;
_processFixPythonInterpreter(aPositionalArgs, dKeywordArgs);
oProcess = subprocess.Popen(*aPositionalArgs, **dKeywordArgs);
return oProcess.wait();
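# Usage sketch (hedged): `_processFixPythonInterpreter` is assumed to be defined
# elsewhere in this module; the call mirrors subprocess.call.
iRc = processCall(['python', '--version']);
print('exit code: %s' % (iRc,));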
| 5,349,433 |
def test_get_section_keys_invalid_section():
"""
    getting all keys of a section that does not exist in the specified
    config store should raise an error.
"""
with pytest.raises(ConfigurationStoreSectionNotFoundError):
config_services.get_section_keys('application', 'missing_section')
| 5,349,434 |
def as_sparse_variable(x, name=None):
"""
Wrapper around SparseVariable constructor.
@param x: A sparse matrix. as_sparse_variable reads dtype and format
properties out of this sparse matrix.
    @return: SparseVariable version of x.
    @todo Verify that x is sufficiently sparse, and raise a warning if it is not
"""
if isinstance(x, gof.Apply):
if len(x.outputs) != 1:
raise ValueError("It is ambiguous which output of a multi-output Op has to be fetched.", x)
else:
x = x.outputs[0]
if isinstance(x, gof.Variable):
if not isinstance(x.type, SparseType):
raise TypeError("Variable type field must be a SparseType.", x, x.type)
return x
try:
return constant(x, name=name)
except TypeError:
raise TypeError("Cannot convert %s to SparseType" % x, type(x))
| 5,349,435 |
def _get_go2parents(go2parents, goid, goterm):
"""Add the parent GO IDs for one GO term and their parents."""
if goid in go2parents:
return go2parents[goid]
parent_goids = set()
for parent_goterm in goterm.parents:
parent_goid = parent_goterm.id
parent_goids.add(parent_goid)
parent_goids |= _get_go2parents(go2parents, parent_goid, parent_goterm)
go2parents[goid] = parent_goids
return parent_goids
| 5,349,436 |
def _axhspan() -> None:
"""水平方向に背景色を変更する
"""
x, y = _create_data()
pos_begin = (max(y) - min(y)) * 0.3 + min(y)
pos_end = (max(y) - min(y)) * 0.7 + min(y)
_ = plt.figure()
plt.plot(x, y)
plt.axhspan(pos_begin, pos_end, color="green", alpha=0.3)
plt.savefig("data/axhspan.png")
_clear_plot()
| 5,349,437 |
def test_span_confidence_score_extension_added(he_vocab):
"""
Check that confidence_score extension is available
"""
new_head = NERHead(nlp=None, name="test")
mock_doc = Doc(he_vocab, words=["שלום", "כיתה", "אלף"])
assert Span.has_extension("confidence_score")
| 5,349,438 |
def compute_conditional_statistics(x_test, x, kernel, ind):
"""
This version uses cho_factor and cho_solve - much more efficient when using JAX
Predicts marginal states at new time points. (new time points should be sorted)
Calculates the conditional density:
p(xₙ|u₋, u₊) = 𝓝(Pₙ @ [u₋, u₊], Tₙ)
:param x_test: time points to generate observations for [N]
:param x: inducing state input locations [M]
:param kernel: prior object providing access to state transition functions
:param ind: an array containing the index of the inducing state to the left of every input [N]
:return: parameters for the conditional mean and covariance
P: [N, D, 2*D]
T: [N, D, D]
"""
dt_fwd = x_test[..., 0] - x[ind, 0]
dt_back = x[ind + 1, 0] - x_test[..., 0]
A_fwd = kernel.state_transition(dt_fwd)
A_back = kernel.state_transition(dt_back)
Pinf = kernel.stationary_covariance()
Q_fwd = Pinf - A_fwd @ Pinf @ A_fwd.T
Q_back = Pinf - A_back @ Pinf @ A_back.T
A_back_Q_fwd = A_back @ Q_fwd
Q_mp = Q_back + A_back @ A_back_Q_fwd.T
jitter = 1e-8 * np.eye(Q_mp.shape[0])
chol_Q_mp = cho_factor(Q_mp + jitter, lower=True)
Q_mp_inv_A_back = cho_solve(chol_Q_mp, A_back) # V = Q₋₊⁻¹ Aₜ₊
# The conditional_covariance T = Q₋ₜ - Q₋ₜAₜ₊ᵀQ₋₊⁻¹Aₜ₊Q₋ₜ == Q₋ₜ - Q₋ₜᵀAₜ₊ᵀL⁻ᵀL⁻¹Aₜ₊Q₋ₜ
T = Q_fwd - A_back_Q_fwd.T @ Q_mp_inv_A_back @ Q_fwd
# W = Q₋ₜAₜ₊ᵀQ₋₊⁻¹
W = Q_fwd @ Q_mp_inv_A_back.T
P = np.concatenate([A_fwd - W @ A_back @ A_fwd, W], axis=-1)
return P, T
| 5,349,439 |
def int_to_bytes(value: int) -> bytes:
"""
Encode an integer to an array of bytes.
:param value: any integer
:return: integer value representation as bytes
"""
return value.to_bytes(length=BYTES_LENGTH, byteorder=BYTES_ORDER)
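# Usage sketch (hedged): BYTES_LENGTH and BYTES_ORDER are module-level constants
# assumed to be defined elsewhere; the values below are only an illustration.
BYTES_LENGTH = 8
BYTES_ORDER = 'big'

assert int_to_bytes(1) == b'\x00\x00\x00\x00\x00\x00\x00\x01'
assert int.from_bytes(int_to_bytes(123456), byteorder=BYTES_ORDER) == 123456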
| 5,349,440 |
def check_invalid_args_sequential(config):
"""Sanity check for some command-line arguments specific to training on
the copy task.
Args:
config (argparse.Namespace): Parsed command-line arguments.
"""
if config.first_task_input_len <= 0:
raise ValueError('"first_task_input_len" must be a strictly positive '+
'integer.')
if config.input_len_step < 0:
raise ValueError('"input_len_step" must be a positive integer.')
if config.input_len_variability < 0:
raise ValueError('"input_len_variability" must be a positive integer.')
if config.seq_width <= 0:
raise ValueError('"seq_width" must be a strictly positive integer.')
if config.pat_len!=-1. and config.pat_len < 0:
raise ValueError('"pat_len" must be a positive integer.')
if config.permute_width or config.permute_time or config.scatter_pattern:
# Note, these are design choices that we made for this set of tasks.
# The code should not break if you deviate from these conditions.
if config.input_len_variability != 0:
warnings.warn('For permuted or scatter tasks, the lengths of the ' +
'sequences has to be identical. "input_len_variability" will '+
'automatically be set to zero.')
config.input_len_variability = 0
if config.input_len_step != 0:
warnings.warn('For permuted or scatter tasks, the lengths of the ' +
'sequences in different tasks has to be identical. '+
'"input_len_step" will automatically be set to zero.')
config.input_len_step = 0
if not (config.permute_width or config.permute_time) and \
hasattr(config, 'permute_xor') and config.permute_xor:
raise ValueError('Option "permute_xor" only applicable if ' +
'permutations are used.')
if config.scatter_pattern and config.pat_len == -1:
raise ValueError('"scatter_pattern" is not compatible with "pat_len' +
'==-1". Please provide a new "pat_len" to specify the length '+
'of the output patterns.')
if config.permute_xor_iter == 1 and config.permute_xor_separate:
warnings.warn('Option "permute_xor_separate" doesn\'t have an effect ' +
'if "permute_xor_iter" is not greater than 1.')
if config.random_pad and config.pat_len==-1.:
warnings.warn('The option "random_pad" has no effect if "pat_len" '+
'is equal to -1.')
if config.pad_after_stop and config.pat_len==-1.:
warnings.warn('The option "pad_after_stop" is not compatible with ' +
'"pat_len" equal to -1.')
if config.pairwise_permute:
if not (config.permute_time or config.permute_xor or \
config.permute_width):
raise ValueError('Option "pairwise_permute" only applicable if ' +
'permutations are used.')
if config.num_tasks > 1:
warnings.warn('The option "pairwise_permute" leads to ' +
'permutations that are identical for different tasks.')
if config.revert_output_seq:
if config.num_tasks > 1:
warnings.warn('The option "revert_output_seq" leads to ' +
'permutations that are identical for different tasks.')
if config.permute_time or config.permute_width:
raise ValueError('Option "revert_output_seq" is not compatible ' +
'with permutations.')
if config.input_len_variability != 0:
raise NotImplementedError('The current implementation only ' +
'supports reverting outputs when all sequences have the ' +
'same length.')
| 5,349,441 |
def generate_1d_trajectory_distribution(
n_demos, n_steps, initial_offset_range=3.0, final_offset_range=0.1,
noise_per_step_range=20.0, random_state=np.random.RandomState(0)):
"""Generates toy data for testing and demonstration.
Parameters
----------
n_demos : int
Number of demonstrations
n_steps : int
Number of steps
initial_offset_range : float, optional (default: 3)
Range of initial offset from cosine
final_offset_range : float, optional (default: 0.1)
Range of final offset from cosine
noise_per_step_range : float, optional (default: 20)
Factor for noise in each step
random_state : RandomState, optional (default: seed 0)
Random state
Returns
-------
T : array, shape (n_steps,)
Times
Y : array, shape (n_demos, n_steps, 1)
Demonstrations (positions)
"""
T = np.linspace(0, 1, n_steps)
Y = np.empty((n_demos, n_steps, 1))
A = create_finite_differences_matrix_1d(n_steps, dt=1.0 / (n_steps - 1))
cov = np.linalg.inv(A.T.dot(A))
L = np.linalg.cholesky(cov)
for demo_idx in range(n_demos):
Y[demo_idx, :, 0] = np.cos(2 * np.pi * T)
if initial_offset_range or final_offset_range:
initial_offset = initial_offset_range * (random_state.rand() - 0.5)
final_offset = final_offset_range * (random_state.rand() - 0.5)
Y[demo_idx, :, 0] += np.linspace(
initial_offset, final_offset, n_steps)
if noise_per_step_range:
noise_per_step = (noise_per_step_range
* L.dot(random_state.randn(n_steps)))
Y[demo_idx, :, 0] += noise_per_step
return T, Y
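# Usage sketch (hedged): draw ten noisy cosine-like demonstrations and plot them;
# `create_finite_differences_matrix_1d` is assumed to be defined in this module.
import matplotlib.pyplot as plt

T, Y = generate_1d_trajectory_distribution(n_demos=10, n_steps=101)
plt.plot(T, Y[:, :, 0].T, alpha=0.5)
plt.show()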
| 5,349,442 |
def encrypt(binary_plaintext, binary_key):
"""Generate binary ciphertext from binary plaintext with AES."""
padded_plaintext = pad_plaintext(binary_plaintext, 128)
subkeys = key_schedule(binary_key)
final_blocks = []
for block in block_split(padded_plaintext, 128):
block_matrix = binary_to_matrix(block)
block_matrix = add_round_key(block_matrix, subkeys[0])
for round in xrange(1, 10):
block_matrix = byte_sub(block_matrix)
block_matrix = shift_rows(block_matrix)
block_matrix = mix_columns(block_matrix, COLUMN_MIX)
block_matrix = add_round_key(block_matrix, subkeys[round])
block_matrix = byte_sub(block_matrix)
block_matrix = shift_rows(block_matrix)
block_matrix = add_round_key(block_matrix, subkeys[-1])
final_blocks.append(matrix_to_binary(block_matrix))
return ''.join(final_blocks)
| 5,349,443 |
def emit_event(project_slug, action_slug, payload, sender_name, sender_secret,
event_uuid=None):
"""Emit Event.
:param project_slug: the slug of the project
:param action_slug: the slug of the action
:param payload: the payload that emit with action
:param sender_name: name that identified the sender
    :param sender_secret: secret string
:return: dict with task_id and event_uuid
raise MissingSender if sender does not exist
raise WrongSenderSecret if sender_secret is wrong
raise NotAllowed if sender is not allowed to emit action to project
"""
project_graph = graph.get_project_graph(project_slug)
project_graph.verify_sender(sender_name, sender_secret)
action = project_graph.get_action(action_slug)
project = project_graph.project
# execute event
event_uuid = event_uuid or uuid4()
event = {'uuid': event_uuid, 'project': project['slug'], 'action': action['slug']}
res = exec_event(event, action['webhooks'], payload)
logger.info('EMIT %s "%s" "%s" %s',
event_uuid, project_slug, action_slug, json.dumps(payload))
return dict(
task=dict(
id=res.id,
),
event=dict(
uuid=event_uuid,
),
)
| 5,349,444 |
def add_item(cart_id: str, item: CartItem):
"""
Endpoint. Add item to cart.
:param str cart_id: cart id
:param CartItem item: pair of name and price
:return: dict with cart, item and price
:rtype: dict
"""
logger.info(f'Request@/add_item/{cart_id}')
return cart.add_item(cart_id=cart_id, item=item)
| 5,349,445 |
def _inst2earth(advo, reverse=False, rotate_vars=None, force=False):
"""
Rotate data in an ADV object to the earth from the instrument
frame (or vice-versa).
Parameters
----------
advo : The adv object containing the data.
reverse : bool (default: False)
If True, this function performs the inverse rotation
(earth->inst).
rotate_vars : iterable
The list of variables to rotate. By default this is taken from
advo.props['rotate_vars'].
force : Do not check which frame the data is in prior to
performing this rotation.
"""
if reverse: # earth->inst
# The transpose of the rotation matrix gives the inverse
# rotation, so we simply reverse the order of the einsum:
sumstr = 'jik,j...k->i...k'
cs_now = 'earth'
cs_new = 'inst'
else: # inst->earth
sumstr = 'ijk,j...k->i...k'
cs_now = 'inst'
cs_new = 'earth'
if rotate_vars is None:
if 'rotate_vars' in advo.attrs:
rotate_vars = advo.rotate_vars
else:
rotate_vars = ['vel']
cs = advo.coord_sys.lower()
if not force:
if cs == cs_new:
print("Data is already in the '%s' coordinate system" % cs_new)
return
elif cs != cs_now:
raise ValueError(
"Data must be in the '%s' frame when using this function" %
cs_now)
if hasattr(advo, 'orientmat'):
omat = advo['orientmat'].values
else:
if 'vector' in advo.inst_model.lower():
orientation_down = advo['orientation_down']
omat = _calc_omat(advo['heading'].values, advo['pitch'].values,
advo['roll'].values, orientation_down)
# Take the transpose of the orientation to get the inst->earth rotation
# matrix.
rmat = np.rollaxis(omat, 1)
_dcheck = rotb._check_rotmat_det(rmat)
if not _dcheck.all():
warnings.warn("Invalid orientation matrix (determinant != 1) at indices: {}."
.format(np.nonzero(~_dcheck)[0]), UserWarning)
for nm in rotate_vars:
n = advo[nm].shape[0]
if n != 3:
raise Exception("The entry {} is not a vector, it cannot "
"be rotated.".format(nm))
advo[nm].values = np.einsum(sumstr, rmat, advo[nm])
advo = rotb._set_coords(advo, cs_new)
return advo
| 5,349,446 |
def maja_get_subdirectories(search_directory):
"""
    Return all immediate sub-directories of the given search directory.
TODO: use logging
:param search_directory:
:type search_directory:
"""
return [os.path.join(search_directory, name) for name in os.listdir(search_directory)
if os.path.isdir(os.path.join(search_directory, name))]
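# Usage sketch: list the immediate sub-directories of the current working directory.
for subdir in maja_get_subdirectories('.'):
    print(subdir)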
| 5,349,447 |
def get_artist_songs_genius(artist_id=None):
"""
Wrapper for the /artists/:id/songs Genius API endpoint
Returns songs for a given artist ID or 1 if an error occurred.
"""
if artist_id is None:
logger.debug("get_artist_song_genius was not passed correct params.")
logger.debug("We are going to use the artist ID of the Gorillaz. 860.")
artist_id = "860"
search_url = base_url + "artists/" + str(artist_id) + "/songs"
headerz = {"Authorization": "Bearer " + str(api_token)}
r = requests.get(search_url, headers=headerz)
if r.status_code == 200:
logger.debug("The call to artists/id/songs was successful!\n")
results = r.json()
song_ids = []
if "next_page" in results["response"]:
logger.debug("Recursively trying to get all of artist's songs.\n")
# Start the recursion to get all the songs
next_page = results["response"]["next_page"]
songs_to_add = get_the_next_page_of_artist_songs(next_page,
artist_id)
# set song_ids to our initial results + the recursion results.
song_ids = results["response"]["songs"] + songs_to_add
return song_ids
else:
logger.debug("The call was not successful!")
logger.debug("The r.status_code is: " + str(r.status_code) + "\n")
logger.debug("The r.text is: " + str(r.text))
return 1
| 5,349,448 |
def _setup_api_changes():
"""
    Sets up *Colour* API changes.
"""
global API_CHANGES
for renamed in API_CHANGES['Renamed']:
name, access = renamed
API_CHANGES[name.split('.')[-1]] = Renamed(name, access) # noqa
API_CHANGES.pop('Renamed')
| 5,349,449 |
def example_broadcast_data(nm3_modem, message):
"""Example: $B - Broadcast Data."""
print('Example: Broadcast Data')
sent_bytes_count = nm3_modem.send_broadcast_message(message)
if sent_bytes_count == -1:
print(' Error')
else:
# Pause for the modem to finish the transmission
time.sleep(4.0)
print(' Bytes Transmitted={:02d}'.format(sent_bytes_count))
| 5,349,450 |
def kepler(k, r, v, tofs, numiter=350, **kwargs):
"""Propagates Keplerian orbit.
Parameters
----------
k : ~astropy.units.Quantity
Standard gravitational parameter of the attractor.
r : ~astropy.units.Quantity
Position vector.
v : ~astropy.units.Quantity
Velocity vector.
tofs : ~astropy.units.Quantity
Array of times to propagate.
numiter : int, optional
        Maximum number of iterations, defaults to 350.
Returns
-------
rr : ~astropy.units.Quantity
Propagated position vectors.
vv : ~astropy.units.Quantity
Propagated velocity vectors.
Raises
------
RuntimeError
If the algorithm didn't converge.
Note
-----
This algorithm is based on Vallado implementation, and does basic Newton
iteration on the Kepler equation written using universal variables. Battin
claims his algorithm uses the same amount of memory but is between 40 %
and 85 % faster.
"""
k = k.to(u.km ** 3 / u.s ** 2).value
r0 = r.to(u.km).value
v0 = v.to(u.km / u.s).value
tofs = tofs.to(u.s).value
results = [_kepler(k, r0, v0, tof, numiter=numiter) for tof in tofs]
# TODO: Rewrite to avoid iterating twice
return (
[result[0] for result in results] * u.km,
[result[1] for result in results] * u.km / u.s,
)
| 5,349,451 |
def get_latency_of_one_partition(
partition: Partition, node_to_latency_mapping: Dict[Node, NodeLatency]
) -> PartitionLatency:
"""Given a partiton and its nodes' latency, return a PartitionLatency for this partition"""
def get_top_nodes(partition: Partition) -> List[Node]:
"""Given a partition, return a list of nodes on the top bfs level"""
top_nodes: List[Node] = []
for node in partition.nodes:
# Skip placeholder and get_attr nodes
if node.op in {"placeholder", "get_attr"}:
continue
input_nodes: Dict[Node, None] = {}
map_arg(node.args, lambda n: input_nodes.setdefault(n))
map_arg(node.kwargs, lambda n: input_nodes.setdefault(n))
# If a node has no input nodes in this partition,
# or its input nodes in this partition are placeholders and get_attrs
# this node is on the top bfs level in this partition
if not any(
[
n in partition.nodes and n.op not in {"placeholder", "get_attr"}
for n in input_nodes
]
):
top_nodes.append(node)
return top_nodes
def dfs_helper(node: Node, partition_latency) -> PartitionLatency:
"""Given a top node of a partition, this function returns
the latency of the critical path in the partition
"""
node_latency = node_to_latency_mapping[node]
# Calculate the current overall latency of the partition
overall_latency_sec = partition_latency.overall_latency_sec + max(
node_latency.computer_latency_sec, node_latency.mem_latency_sec
)
# Update the mem latency of this path
mem_latency_sec = (
partition_latency.mem_latency_sec + node_latency.mem_latency_sec
)
# Update the compute latency of this path
computer_latency_sec = (
partition_latency.computer_latency_sec + node_latency.computer_latency_sec
)
# Get all users of this node that are in this partition
users = set(node.users).intersection(partition.nodes)
if users:
max_latency = PartitionLatency(
mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0
)
for n in users:
# Get new partition latency recursively
new_partition_latency = dfs_helper(
n,
PartitionLatency(
mem_latency_sec, computer_latency_sec, overall_latency_sec
),
)
if (
new_partition_latency.overall_latency_sec
> max_latency.overall_latency_sec
):
max_latency = new_partition_latency
return max_latency
# If there is no user, the node is at bottom of the partition
return PartitionLatency(
mem_latency_sec, computer_latency_sec, overall_latency_sec
)
# Main part starts
# Get all top level nodes of this partition
top_nodes = get_top_nodes(partition)
critical_path_latency = PartitionLatency(
mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0
)
    # Go through all top nodes and find the largest latency (critical path latency)
for node in top_nodes:
partition_latency = dfs_helper(
node,
PartitionLatency(
mem_latency_sec=0.0, computer_latency_sec=0.0, overall_latency_sec=0.0
),
)
if (
partition_latency.overall_latency_sec
> critical_path_latency.overall_latency_sec
):
critical_path_latency = partition_latency
return critical_path_latency
| 5,349,452 |
def test_multiple_sweeps(multi_sweep_type, ccp4, dials_data, run_in_tmpdir):
"""
Run xia2 with various different multiple-sweep options.
Run xia2 using procrunner and look for errors or timeouts. Run it with a reduced
number of reflections per degree required for profile modelling and turn off the
Xtriage analysis, since we won't have enough reflections for the default settings
of either to be successful.
Args:
multi_sweep_type: Parameter governing multiple-sweep behaviour to be set True.
dials_data: DIALS custom Pytest fixture for access to test data.
run_in_tmpdir: DIALS custom Pytest fixture to run this test in a temporary
directory.
"""
# Use as input the first fifteen images of the first two sweeps of a typical
# multiple-sweep data set.
data_dir = dials_data("l_cysteine_dials_output")
images = [data_dir / f"l-cyst_{sweep:02d}_00001.cbf:1:15" for sweep in (1, 2)]
command = [
# Obviously, we're going to run xia2.
"xia2",
# Set one of the multiple-sweep flags.
f"{multi_sweep_type}=True",
# Reduce the required number of reflections per degree for profile modelling
# because we don't have enough in these data.
"min_spots.per_degree=10",
# Don't run the Xtriage analysis — we don't have enough reflections overall.
"xtriage_analysis=False",
]
result = procrunner.run(command + [f"image={image}" for image in images])
assert not result.returncode
| 5,349,453 |
def nearest_dy(lon,lat,t,gs,dy,tr = [0,0],box = [0,0],time_vec = False,space_array = False):
"""
give this a dy object and a gs object,
the nearest point to the supplied lon lat will be returned
tr is a time range option [time points previous, after]
    if tr > 0, time_vec=True will return a vector over the time points, False is nanmean
box is a spatial range option [range in x, range in y]
if there is a box, space_array=True returns the whole box, False is nanmean
"""
y,x = nearest_xy(lon,lat,gs)
out_array = dy[t-tr[0]:t+tr[1]+1,x-box[0]:x+box[0]+1,y-box[1]:y+box[1]+1]
if time_vec and space_array:
return out_array
elif time_vec:
return np.nanmean(out_array,axis = (1,2))
elif space_array:
return np.nanmean(out_array,axis = 0)
else:
return np.nanmean(out_array)
| 5,349,454 |
def get_return_type() -> str:
"""Prompt the user for the return datatype of the function.
:return return_type: {str}
"""
return_type = None # function or method
while return_type is None or return_type == "":
return_type = prompt(
"return type? [bool|dict|float|int|list|str|tuple]: ",
completer=datatype_completer,
)
return_type = return_type.strip()
if return_type is None or return_type == "":
return_type = "None"
break
else:
if return_type.lower().startswith("b"):
return_type = "bool"
elif return_type.lower().startswith("d"):
return_type = "dict"
elif return_type.lower().startswith("f"):
return_type = "float"
elif return_type.lower().startswith("i"):
return_type = "int"
elif return_type.lower().startswith("l"):
return_type = "list"
elif return_type.lower().startswith("s"):
return_type = "str"
elif return_type.lower().startswith("t"):
return_type = "tuple"
else:
continue
break
return return_type
| 5,349,455 |
def solve_capcha(capcha_str):
"""Function which calculates the solution to part 1
Arguments
---------
capcha_str : str, a string of numbers
Returns
-------
total : int, the sum of adjacent matches
"""
capcha = [int(cc) for cc in list(capcha_str)]
total = 0
for ii in range(len(capcha)):
if capcha[ii] == capcha[ii - 1]:
total += capcha[ii]
return total
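# Example usage (illustrative sketch, not part of the original source): the circular
# comparison above means "1122" matches the repeated 1 and the repeated 2, giving 3,
# while "1111" gives 4.
assert solve_capcha("1122") == 3
assert solve_capcha("1111") == 4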
| 5,349,456 |
def compose(fs: Union[ModuleList, Sequence[Callable]]) -> F:
"""
Compose functions as a pipeline function.
Args:
fs (``Sequence[Callable]`` | :class:`~torch.nn.ModuleList`): The functions input for composition.
Returns:
:class:`~fn.func.F`: The composed output function.
Examples::
>>> f = lambda x: x + 1
>>> g = lambda x: x * 2
>>> h = lambda x: x ** 2
>>> x = 1
>>> h(g(f(x))) == compose([f, g, h])(x)
True
"""
return reduce(_ >> _, fs, F())
| 5,349,457 |
def nanargmin(a, axis=None):
"""
Return the indices of the minimum values in the specified axis ignoring
NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results
cannot be trusted if a slice contains only NaNs and Infs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmin, nanargmax
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmin(a)
0
>>> np.nanargmin(a)
2
>>> np.nanargmin(a, axis=0)
array([1, 1])
>>> np.nanargmin(a, axis=1)
array([1, 0])
"""
a, mask = _replace_nan(a, np.inf)
res = np.argmin(a, axis=axis)
if mask is not None:
mask = np.all(mask, axis=axis)
if np.any(mask):
raise ValueError("All-NaN slice encountered")
return res
| 5,349,458 |
def check_right_flank(seq_right, list_rep, verbose=False):
"""
Check if start of right flank sequence contains last repetitive sequence.
:param seq_right: str - right flank sequence
:param list_rep: list(Repetition) - list of Repetitions(seq, num)
:param verbose: bool - be verbose
:return: seq_right - str - updated right flank sequence
rep_list - list(Repetition) - updated list of Repetitions(seq, num)
"""
rep_list = deepcopy(list_rep)
last = rep_list[-1]
last_seq = re.sub('N', '.', last.seq)
if re.match(last_seq, seq_right):
if verbose:
print('Repetitive sequence find in right flank region. Adding this sequence into repetitions.')
while re.match(last_seq, seq_right):
# cut repetitive sequence from flank sequence and add it to the list of repetitive sequences
seq_right = seq_right[len(last_seq):]
last.num += 1
return seq_right, rep_list
| 5,349,459 |
def is_path_creatable(pathname):
"""
    returns True if the nearest existing ancestor folder of the path is writable, i.e. the path could be created
"""
if not is_path_valid(pathname):
return False
pathname = os.path.normpath(pathname)
pathname = os.path.dirname(os.path.abspath(pathname))
# recursively to find the previous level of parent folder existing
while not is_path_exists(pathname):
pathname_new = os.path.dirname(os.path.abspath(pathname))
if pathname_new == pathname:
return False
pathname = pathname_new
return os.access(pathname, os.W_OK)
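# Usage sketch (illustrative; relies on the module's companion helpers
# is_path_valid/is_path_exists): a path under the user's home directory is
# normally creatable, while one under a root-owned system directory is not
# for an unprivileged user.
print(is_path_creatable(os.path.expanduser("~/some/new/subdir")))  # typically True
print(is_path_creatable("/usr/lib/some/new/subdir"))               # typically False without root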
| 5,349,460 |
def forward_application_conj_reject():
"""forward application conjunction constraint -- parse should fail
>>> test(r"Fred|NNP|(NP\NP)/(NP\NP) and|CC|conj cars|NNS|NP")
>>>
"""
| 5,349,461 |
def readout_gg(_X, X, O):
"""
    Graph Gathering implementation (the one shown in the equation).
_X: final node embeddings.
X: initial node features.
O: desired output dimension.
"""
val1 = dense(tf.concat([_X, X], axis=2), O, use_bias=True)
val1 = tf.nn.sigmoid(val1)
val2 = dense(_X, O, use_bias=True)
out = tf.multiply(val1, val2)
out = tf.reduce_sum(out, axis=1)
out = tf.nn.relu(out)
return out
| 5,349,462 |
def plot_taylor_axes(axes, cax, option):
"""
Plot axes for Taylor diagram.
Plots the x & y axes for a Taylor diagram using the information
provided in the AXES dictionary returned by the
GET_TAYLOR_DIAGRAM_AXES function.
INPUTS:
axes : data structure containing axes information for target diagram
cax : handle for plot axes
option : data structure containing option values. (Refer to
GET_TAYLOR_DIAGRAM_OPTIONS function for more information.)
option['colcor'] : CORs grid and tick labels color (Default: blue)
option['colrms'] : RMS grid and tick labels color (Default: green)
option['colstd'] : STD grid and tick labels color (Default: black)
option['numberpanels'] : number of panels (quadrants) to use for Taylor
diagram
option['tickrms'] : RMS values to plot gridding circles from
observation point
option['titlecor'] : title for CORRELATION axis
option['titlerms'] : title for RMS axis
      option['titlestd'] : title for STD axis
OUTPUTS:
ax: returns a list of handles of axis labels
Author: Peter A. Rochford
Acorn Science & Innovation
[email protected]
Created on Dec 3, 2016
Author: Peter A. Rochford
Symplectic, LLC
www.thesymplectic.com
[email protected]
"""
ax = []
axlabweight = "bold"
if option["numberpanels"] == 1:
# Single panel
if option["titlestd"] == "on":
ttt = plt.ylabel("test", fontsize=14)
x = -0.15 * axes["rmax"]
y = 0.8 * axes["rmax"]
handle = plt.text(
x,
y,
"Standard Deviation",
rotation=90,
color=option["colstd"],
fontweight=axlabweight,
fontsize=plt.get(ttt, "fontsize"),
horizontalalignment="center",
)
ax.append(handle)
if option["titlecor"] == "on":
pos1 = 45
DA = 15
lab = "Correlation Coefficient"
c = np.fliplr([np.linspace(pos1 - DA, pos1 + DA, len(lab))])[0]
dd = 1.1 * axes["rmax"]
for ii, ith in enumerate(c):
handle = plt.text(
dd * np.cos(ith * np.pi / 180),
dd * np.sin(ith * np.pi / 180),
lab[ii],
)
handle.set(
rotation=ith - 90,
color=option["colcor"],
horizontalalignment="center",
verticalalignment="bottom",
fontsize=plt.get(ax[0], "fontsize"),
fontweight=axlabweight,
)
ax.append(handle)
if option["titlerms"] == "on":
pos1 = option["tickrmsangle"] + (180 - option["tickrmsangle"]) / 2
DA = 15
pos1 = 160
lab = "RMSD"
c = np.fliplr([np.linspace(pos1 - DA, pos1 + DA, len(lab))])[0]
# Find optimal placement of label
itick = -1
ratio = 1.0
while ratio > 0.7:
itick += 1
ratio = (option["axismax"] - option["tickrms"][itick]) / option[
"axismax"
]
dd = 0.7 * option["tickrms"][itick] + 0.3 * option["tickrms"][itick + 1]
# Write label in a circular arc
for ii, ith in enumerate(c):
xtextpos = axes["dx"] + dd * np.cos(ith * np.pi / 180)
ytextpos = dd * np.sin(ith * np.pi / 180)
handle = plt.text(xtextpos, ytextpos, lab[ii])
handle.set(
rotation=ith - 90,
color=option["colrms"],
horizontalalignment="center",
verticalalignment="top",
fontsize=plt.get(ax[0], "fontsize"),
fontweight=axlabweight,
)
ax.append(handle)
else:
# Double panel
if option["titlestd"] == "on":
ttt = plt.ylabel("test", fontsize=14)
x = 0
y = -0.15 * axes["rmax"]
handle = plt.text(
x,
y,
"Standard Deviation",
rotation=0,
color=option["colstd"],
fontweight=axlabweight,
fontsize=plt.get(ttt, "fontsize"),
horizontalalignment="center",
)
ax.append(handle)
if option["titlecor"] == "on":
pos1 = 90
DA = 25
lab = "Correlation Coefficient"
c = np.fliplr([np.linspace(pos1 - DA, pos1 + DA, len(lab))])[0]
dd = 1.1 * axes["rmax"]
for ii, ith in enumerate(c):
handle = plt.text(
dd * np.cos(ith * np.pi / 180),
dd * np.sin(ith * np.pi / 180),
lab[ii],
)
handle.set(
rotation=ith - 90,
color=option["colcor"],
horizontalalignment="center",
verticalalignment="bottom",
fontsize=plt.get(ax[0], "fontsize"),
fontweight=axlabweight,
)
ax.append(handle)
if option["titlerms"] == "on":
pos1 = 160
DA = 10
lab = "RMSD"
c = np.fliplr([np.linspace(pos1 - DA, pos1 + DA, len(lab))])[0]
dd = 1.05 * option["tickrms"][0]
for ii, ith in enumerate(c):
xtextpos = axes["dx"] + dd * np.cos(ith * np.pi / 180)
ytextpos = dd * np.sin(ith * np.pi / 180)
handle = plt.text(xtextpos, ytextpos, lab[ii])
handle.set(
rotation=ith - 90,
color=option["colrms"],
horizontalalignment="center",
verticalalignment="bottom",
fontsize=plt.get(ax[0], "fontsize"),
fontweight=axlabweight,
)
ax.append(handle)
# VARIOUS ADJUSTMENTS TO THE PLOT:
cax.set_aspect("equal")
plt.axis("off")
plt.gcf().patch.set_facecolor("w")
# set axis limits
if option["numberpanels"] == 2:
axislim = [axes["rmax"] * x for x in [-1.15, 1.15, 0, 1.15]]
plt.axis(axislim)
plt.plot([-axes["rmax"], axes["rmax"]], [0, 0], color=axes["tc"], linewidth=2)
plt.plot([0, 0], [0, axes["rmax"]], color=axes["tc"])
else:
axislim = [axes["rmax"] * x for x in [0, 1.15, 0, 1.15]]
plt.axis(axislim)
plt.plot([0, axes["rmax"]], [0, 0], color=axes["tc"], linewidth=2)
plt.plot([0, 0], [0, axes["rmax"]], color=axes["tc"], linewidth=2)
return ax
| 5,349,463 |
def list_timezones():
"""Return a list of all time zones known to the system."""
l=[]
for i in xrange(parentsize):
l.append(_winreg.EnumKey(tzparent, i))
return l
| 5,349,464 |
def keyword_decorator(deco):
"""Wrap a decorator to optionally takes keyword arguments."""
@functools.wraps(deco)
def new_deco(fn=None, **kwargs):
if fn is None:
@functools.wraps(deco)
def newer_deco(fn):
return deco(fn, **kwargs)
return newer_deco
else:
return deco(fn, **kwargs)
return new_deco
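# Usage sketch (illustrative, not from the original source): a decorator wrapped
# with keyword_decorator can be applied either bare or with keyword arguments.
@keyword_decorator
def tag(fn, label="untagged"):
    fn.label = label
    return fn
@tag
def plain():
    pass
@tag(label="special")
def fancy():
    pass
assert plain.label == "untagged" and fancy.label == "special"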
| 5,349,465 |
def get_permutations(expr, debug=False):
"""
Returns the permutations of a MatAdd expression for lengths 2 to len(expr).
For example, for A + B + C + D, we return:
[A+B, A+C, A+D, B+C, B+D, C+D, A+B+C, A+B+D, A+C+D, B+C+D, A+B+C+D]
"""
from symgp.superexpressions import SuperMatAdd
import itertools
if isinstance(expr, MatrixSymbol) or isinstance(expr, Transpose) or isinstance(expr, Inverse):
return [expr]
if not isinstance(expr, MatAdd):
raise Exception("Function only works for MatAdd expressions")
expr_args = expr.args
expr_perms = []
for i in range(2,len(expr_args)+1):
expr_perms.extend([SuperMatAdd(*e).doit() for e in itertools.combinations(expr_args,i)])
return expr_perms
| 5,349,466 |
def test_wrong_add_param(conn, ipaddr):
""" Test passing wrong parameter for add method. """
with pytest.raises(ClosedConnection):
cli = LDAPClient("ldap://%s" % ipaddr)
LDAPConnection(cli).add(bonsai.LDAPEntry("cn=dummy"))
with pytest.raises(TypeError):
conn.add("wrong")
| 5,349,467 |
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
"""Return spherical linear interpolation between two quaternions.
>>> q0 = random_quaternion()
>>> q1 = random_quaternion()
>>> q = quaternion_slerp(q0, q1, 0.0)
>>> numpy.allclose(q, q0)
True
>>> q = quaternion_slerp(q0, q1, 1.0, 1)
>>> numpy.allclose(q, q1)
True
>>> q = quaternion_slerp(q0, q1, 0.5)
>>> angle = math.acos(numpy.dot(q0, q))
>>> numpy.allclose(2.0, math.acos(numpy.dot(q0, q1)) / angle) or \
numpy.allclose(2.0, math.acos(-numpy.dot(q0, q1)) / angle)
True
"""
q0 = unit_vector(quat0[:4])
q1 = unit_vector(quat1[:4])
if fraction == 0.0:
return q0
elif fraction == 1.0:
return q1
d = numpy.dot(q0, q1)
if abs(abs(d) - 1.0) < _EPS:
return q0
if shortestpath and d < 0.0:
# invert rotation
d = -d
q1 *= -1.0
angle = math.acos(d) + spin * math.pi
if abs(angle) < _EPS:
return q0
isin = 1.0 / math.sin(angle)
q0 *= math.sin((1.0 - fraction) * angle) * isin
q1 *= math.sin(fraction * angle) * isin
q0 += q1
return q0
| 5,349,468 |
def doctest2md(lines):
"""
Convert the given doctest to a syntax highlighted markdown segment.
"""
is_only_code = True
lines = unindent(lines)
for line in lines:
if not line.startswith('>>> ') and not line.startswith('... ') and line not in ['>>>', '...']:
is_only_code = False
break
if is_only_code:
orig = lines
lines = []
for line in orig:
lines.append(line[4:])
return lines
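# Usage sketch (illustrative; assumes the module's `unindent` helper leaves
# already-flush lines unchanged): a block that is pure doctest input has its
# ">>> " prompts stripped.
print(doctest2md([">>> x = 1", ">>> x + 1"]))  # expected: ["x = 1", "x + 1"]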
| 5,349,469 |
def generate(epsilon=0.0, model=None, phase="dev", args=None):
"""Run one training session with a particular set of hyperparameters."""
args, device = parse_args()
criterion = nn.CrossEntropyLoss()
dataloaders, idx_to_class = get_cifar_loader()
# dataloaders, idx_to_class = get_loader(args)
model.to(device)
model.eval()
running_corrects = 0.0
adv_corrects = 0.0
counter = np.zeros(10)
# Iterate over data.
for inputs, labels in tqdm(dataloaders[phase]):
inputs = inputs.to(device)
labels = labels.to(device)
inputs.requires_grad = True
outputs = model(inputs)
        _, preds = torch.max(outputs, 1)
        loss = criterion(outputs, labels)
model.zero_grad()
loss.backward()
data_grad = inputs.grad.data
perturbed_data = fgsm_attack(inputs, epsilon, data_grad)
adv_outputs = model(perturbed_data)
_, adv_preds = torch.max(adv_outputs, 1)
save_images(
perturbed_data, labels, preds, adv_preds, idx_to_class, counter, phase
)
# statistics
rc = torch.sum(preds == labels.data)
running_corrects += rc.item() / preds.shape[0]
# adversarial statistics
arc = torch.sum(adv_preds == labels.data)
adv_corrects += arc.item() / preds.shape[0]
original_acc = running_corrects / len(dataloaders[phase])
adversarial_acc = adv_corrects / len(dataloaders[phase])
print(
"Original Accuracy: {:.4f} \t Adversarial Accuracy: {:.4f}".format(
original_acc, adversarial_acc
)
)
| 5,349,470 |
def get_return_value(total, cash_given):
"""show how much money you owe to customer after they give you a bill."""
return Decimal(Decimal(total) - Decimal(cash_given)).quantize(Decimal('.01'))
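# Illustrative example (not from the original source): both arguments may be strings
# or numbers accepted by Decimal, and the result is quantised to two decimal places.
assert get_return_value("10.00", "7.25") == Decimal("2.75")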
| 5,349,471 |
def initlog(logfile=None, level=None, log_stdout=True):
"""
Initialize the log, default log level is NOTSET, it will write the log
message into logfile, and also print onto the screen.
If set log_stdout to False, will not print the log message onto the screen.
"""
log_levels = {'debug': logging.DEBUG,
'info': logging.INFO,
'warn': logging.WARN,
'warning': logging.WARNING,
'error': logging.ERROR,
'fatal': logging.FATAL,
'critical': logging.CRITICAL}
log = logging.getLogger()
if level not in log_levels:
print("ERROR: Invalid log level specified")
print("ERROR: Try to use the default one: debug")
level = 'debug'
if logfile is None and not log_stdout:
print("ERROR: At least one of logfile and log_stdout is required")
raise Exception('Specify logfile or log_stdout for logging')
log_level = log_levels.get(level, logging.NOTSET)
log.setLevel(log_level)
# Customize the log format
fmt = logging.Formatter('%(asctime)s %(levelname)-5.5s: %(message)s',
'%Y-%m-%d %H:%M:%S')
# Write the log message into logfile
if logfile:
file_log_handler = logging.FileHandler(logfile)
log.addHandler(file_log_handler)
file_log_handler.setFormatter(fmt)
# Print the log message onto the screen
if log_stdout:
screen_log_handler = logging.StreamHandler()
log.addHandler(screen_log_handler)
screen_log_handler.setFormatter(fmt)
return log
| 5,349,472 |
def show_predictions(scores, target='y', threshold=0.5, path_out=False, verbose=True, figsize=(7, 200)):
"""This function will plot which have been correctly classified. The input is
single dict containing labels as keys and information on each model as values
in the order [auc_score, ids_test, y_true, y_pred].
all_ids: List, IDs of all samples as strings.
model_dict: Dict, containing model name as key and [auc_score, ids_test, y_true, y_pred] as value.
path_out: String, path where to save plot.
show: If True, show plot.
"""
all_ids = scores.index.tolist()
N, M = scores.shape
y_true = scores[target]
# Set up figure to hold IDs vs model type
f, id_fig = plt.subplots(figsize=figsize)
id_fig.margins(0.01, 0.01)
plt.ylabel('Samples (IDs)', fontsize=14)
plt.xlabel('Models', fontsize=14)
plt.title('Correctly classified samples', fontsize=20)
plt.yticks(range(len(all_ids)), all_ids, fontsize=12)
plt.grid(which='major', linestyle='dashed', linewidth=0.1)
plt.rc('axes', axisbelow=True)
cmap = plt.get_cmap('tab20', M)
# Coordinates and legend
counts = [0 for item in all_ids]
how_many_correct = dict(zip(all_ids, counts))
all_ids = dict(zip(all_ids, list(range(len(all_ids)))))
xticks = []
height = 0
legend = []
# Run through each model
missing_counts = {}
for col in scores.columns:
if col != target:
y_pred = scores[col].dropna(how='any')
# Find correct IDs
ids_test = []
pred_labels = [1 if v >= threshold else 0 for v in y_pred]
for ID, true, pred in zip(y_pred.index, y_true, pred_labels):
if true == round(pred):
ids_test.append(ID)
# Count item
how_many_correct[ID] += 1
# Get correct classifications
xticks.append(col)
y = [all_ids[i] for i in ids_test]
x = [height]*len(y)
# Plot correct IDs
plot_ids = id_fig.scatter(x=x, y=y, s=15, label=col)
# Plot x for missing IDs
missing = []
for ID in all_ids:
if ID not in missing_counts.keys():
missing_counts[ID] = 0
if ID not in y_pred.index:
missing.append(ID)
missing_counts[ID] += 1
if len(missing) > 0:
y = [all_ids[i] for i in missing]
x = [height]*len(y)
id_fig.scatter(x=x, y=y, marker='x', color='black')
legend.append(height)
height += 1
plt.xticks(legend, xticks, fontsize=12, rotation=90)
plt.tight_layout()
plt.show()
if path_out:
plt.savefig(path_out, dpi=1000, transparent=True)
return how_many_correct, missing_counts
| 5,349,473 |
def listen_remove(card, interval, card_id):
""" Listens for a card to be placed on the reader """
# Screen.wrapper(datascreen)
while 1:
screensaverstate = 1
if not card.select():
# data = json.dumps({"card_info":
# [{"card_id": card_id}, {"timedate": get_time()}, {"action": "Removed"}]})
# print(data)
break
#print "Waiting: Card Removal"
time.sleep(interval)
| 5,349,474 |
def det(a, **kwargs):
"""
Compute the determinant of arrays, with broadcasting.
Parameters
----------
a : (NDIMS, M, M) array
Input array. Its inner dimensions must be those of a square 2-D array.
Returns
-------
det : (NDIMS) array
Determinants of `a`
See Also
--------
slogdet : Another representation for the determinant, more suitable
for large matrices where underflow/overflow may occur
Notes
-----
Numpy broadcasting rules apply.
The determinants are computed via LU factorization using the LAPACK
routine _getrf.
Implemented for single, double, csingle and cdouble. Numpy conversion
rules apply.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.allclose(-2.0, det(a))
True
>>> a = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]] ])
>>> np.allclose(-2.0, det(a))
True
"""
return _impl.det(a, **kwargs)
| 5,349,475 |
def SegStart(ea):
"""
Get start address of a segment
@param ea: any address in the segment
@return: start of segment
BADADDR - the specified address doesn't belong to any segment
"""
seg = idaapi.getseg(ea)
if not seg:
return BADADDR
else:
return seg.startEA
| 5,349,476 |
def plot_lollipops(index, points, line, ax, ecolor='g', capsize=0, fmt='none'):
"""Make a floaters-and-sinkers plot.
`points` : The data points as a dataframe.
"""
delta = points - line
ax.errorbar(index, points, [delta, zeros(len(index))],
                ecolor=ecolor, capsize=capsize, fmt=fmt, linewidth=0.5)
| 5,349,477 |
def test_get_pipeline_yaml_simple():
"""Pipeline yaml loads with simple yaml."""
file = io.StringIO('1: 2\n2: 3')
pipeline = pypyr_yaml.get_pipeline_yaml(file)
assert pipeline == {1: 2, 2: 3}
| 5,349,478 |
def update_school_term(request):
"""
    Update the start and end time of a school term.
:param request:
:return:
"""
operation_object = None
try:
if request.method == 'POST':
object_form = SchoolTermUpdateForm(request.POST)
if object_form.is_valid():
update_id = object_form.cleaned_data.get('update_id', None)
if update_id:
school_terms = SchoolTerm.objects.filter(id=update_id)
if len(school_terms) == 1:
form_object_school_term = form_to_obj(object_form.cleaned_data, school_terms[0])
form_object_school_term.save()
operation_object = form_object_school_term
result['status'] = True
result['message'] = '修改成功!'
result['data'] = json.dumps({}, ensure_ascii=False)
else:
result['status'] = False
result['message'] = '系统异常,请稍后尝试或联系管理员!'
result['data'] = ''
else:
                print(type(object_form.errors), object_form.errors)  # errors is an ErrorDict containing ul/li tags
                result['status'] = False
                result['message'] = '系统异常,请稍后尝试或联系管理员!错误提示:' + str(type(object_form.errors)) + "," + str(object_form.errors)
result['data'] = ''
else:
result['status'] = False
result['message'] = '系统异常,请稍后尝试或联系管理员!'
result['data'] = ''
except Exception as e:
result['status'] = False
result['message'] = "系统异常:" + str(e)
result['data'] = ''
result["level"] = log_level_edit
save_operation_log(request, inspect.stack()[0][3], operation_object.__str__(True), result)
return JsonResponse(result, safe=False)
| 5,349,479 |
def set_simple_fault_geometry_3D(w, src):
"""
Builds a 3D polygon from a node instance
"""
assert "simpleFaultSource" in src.tag
geometry_node = src.nodes[get_taglist(src).index("simpleFaultGeometry")]
fault_attrs = parse_simple_fault_geometry(geometry_node)
build_polygon_from_fault_attrs(w, fault_attrs)
| 5,349,480 |
def main():
"""Run when called from the command line"""
parser = ArgumentParser(
prog="brp",
description="rename batches of files at one time",
)
parser.add_argument(
"-V",
"--version",
action="version",
version=f"%(prog)s {__version__}",
)
parser.add_argument("filename", nargs="+", help="list of files to rename")
parser.add_argument(
"-a",
"--auto",
dest="autofiles",
nargs="*",
help="automated file to run",
metavar="FILE",
)
cli_args = parser.parse_intermixed_args()
# pylint: disable=not-callable
renamer = BatchRenamer(*cli_args.filename, autofiles=cli_args.autofiles)
renamer()
| 5,349,481 |
def test_vectorized():
"""See that heatindex and windchill can do lists"""
temp = datatypes.temperature([0, 10], "F")
sknt = datatypes.speed([30, 40], "MPH")
val = meteorology.windchill(temp, sknt).value("F")
assert abs(val[0] - -24.50) < 0.01
| 5,349,482 |
def chown(
path: Pathable, owner: str, flags: t.Optional[str] = None, sudo: bool = False
) -> ChangeList:
"""Change a path's owner."""
path = _to_path(path)
needs_sudo_w = need_sudo_to_write(path)
needs_sudo_r = need_sudo_to_read(path)
if needs_sudo_r and not sudo:
raise NeedsSudoException(f"chown {path}")
    curr_owner = _run(
        f"stat -c '%U:%G' {path}", check=True, sudo=needs_sudo_r
    ).stdout.decode().strip()
    # If only a user (no ":group") was requested, compare the user part only.
    if ":" not in owner:
        curr_owner = curr_owner.split(":", 1)[0]
    if curr_owner != owner:
        if needs_sudo_w and not sudo:
            raise NeedsSudoException(f"chown {owner} {path}")
        flag_str = f"{flags} " if flags else ""
        _run(f"chown {flag_str}{owner} {path}", sudo=needs_sudo_w, check=True)
        return [cl(ChownModify, path, owner, curr_owner, flags)]
return []
| 5,349,483 |
def metric_dist(endclasses, metrics='all', cols=2, comp_groups={}, bins=10, metric_bins={}, legend_loc=-1,
xlabels={}, ylabel='count', title='', indiv_kwargs={}, figsize='default',
v_padding=0.4, h_padding=0.05, title_padding=0.1, **kwargs):
"""
Plots the histogram of given metric(s) separated by comparison groups over a set of scenarios
Parameters
----------
endclasses : dict
Dictionary of metrics with structure {'scen':{'metric':value}}
metrics : list, optional
list of metrics in the dictionary to plot
cols : int, optional
columns to use in the figure. The default is 2.
comp_groups : dict, optional
Dictionary for comparison groups (if more than one) with structure:
{'group1':('scen1', 'scen2'), 'group2':('scen3', 'scen4')} Default is {}
If a legend is shown, group names are used as labels.
bins : int
Number of bins to use (for all plots). Default is None
metric_bins : dict,
Dictionary of number of bins to use for each metric with structure {'metric':num}
Default is {}
legend_loc : int, optional
Specifies the plot to place the legend on, if runs are being compared. Default is -1 (the last plot)
To remove the legend, give a value of False
xlabels : dict, optional
Label for the x-axes with structure {'metric':'label'}
ylabel : str, optional
        Label for the y-axes. Default is 'count'
title : str, optional
overall title for the plot. Default is ''
indiv_kwargs : dict, optional
dict of kwargs with structure {comp1:kwargs1, comp2:kwargs2}, where
where kwargs is an individual dict of keyword arguments for the
comparison group comp (or scenario, if not aggregated) which overrides
the global kwargs (or default behavior).
figsize : tuple (float,float)
x-y size for the figure. The default is 'default', which dymanically gives 3 for each column and 2 for each row
v_padding : float
vertical padding between subplots as a fraction of axis height
h_padding : float
horizontal padding between subplots as a fraction of axis width
title_padding : float
padding for title as a fraction of figure height
**kwargs : kwargs
keyword arguments to mpl.hist e.g. bins, etc
"""
#Sort into comparison groups
if not comp_groups: groupmetrics = {'default':endclasses}
else: groupmetrics = {group:{ec:cl for ec,cl in endclasses.items() if ec in groupscens} for group, groupscens in comp_groups.items()}
template = [*endclasses.values()][0]
if metrics=='all': plot_values = [i for i in template.keys()]
else: plot_values = [i for i in template.keys() if i in metrics]
num_plots = len(plot_values)
if num_plots==1: cols=1
rows = int(np.ceil(num_plots/cols))
if figsize=='default': figsize=(cols*3, 2*rows)
fig, axs = plt.subplots(rows,cols, sharey=True, sharex=False, figsize=figsize)
if type(axs)==np.ndarray: axs = axs.flatten()
else: axs=[axs]
num_bins = bins
for i, plot_value in enumerate(plot_values):
ax = axs[i]
xlabel = xlabels.get(plot_value, plot_value)
if type(xlabel)==str: ax.set_xlabel(xlabel)
else: ax.set_xlabel(' '.join(xlabel))
ax.grid(axis='y')
fulldata = [ec[plot_value] for endc in groupmetrics.values() for ec in endc.values()]
bins = np.histogram(fulldata, metric_bins.get(plot_value, num_bins))[1]
if not i%cols: ax.set_ylabel(ylabel)
for group, endclasses in groupmetrics.items():
local_kwargs = {**kwargs, **indiv_kwargs.get(group,{})}
x = [ec[plot_value] for ec in endclasses.values()]
ax.hist(x, bins, label=group, **local_kwargs)
multiplot_legend_title(groupmetrics, axs, ax, legend_loc, title,v_padding, h_padding, title_padding)
return fig, axs
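# Usage sketch (illustrative data; assumes numpy/matplotlib are imported as in the
# rest of this module): plot the distribution of a 'cost' metric over 20 scenarios,
# split into two comparison groups.
endclasses_demo = {f"scen{i}": {"cost": float(i % 5), "time": float(i)} for i in range(20)}
groups_demo = {"early": [f"scen{i}" for i in range(10)],
               "late": [f"scen{i}" for i in range(10, 20)]}
fig, axs = metric_dist(endclasses_demo, metrics=["cost"], comp_groups=groups_demo, bins=5)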
| 5,349,484 |
def get_colors(df, colormap=None, vmin=None, vmax=None, axis=1):
"""
    Automatically get a colormap for the index or columns of a DataFrame, with
    the option to normalise the colormap.
    :params:
    df pd.DataFrame(): frame whose index (axis=0) or columns (axis=1) each need a colour mapped to them.
    For non-numeric labels, the try/except falls back to range(len()) to map the colours
    colormap cm(): type of colormap to use. All options can be found here:
    https://matplotlib.org/examples/color/colormaps_reference.html
    vmin, vmax int(): numbers used to normalise the colourmap, if a normalised colourmap is needed
    axis int(): 0 to map colours to the index, 1 to map colours to the columns
    :return:
    colormap cm.colormap(): An array of RGBA values
    Original version found on StackOverflow (w/o the try/except) but the link has since been lost
"""
if colormap is None:
colormap = plt.cm.RdBu
if axis == 0:
values = df.index
elif axis == 1:
values = df.columns
norm = plt.Normalize(vmin, vmax)
try:
return colormap(norm(values))
except (AttributeError, TypeError): # May happen when gives a list of categorical values
return colormap(norm(range(len(values))))
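# Usage sketch (illustrative; assumes pandas as pd and matplotlib.pyplot as plt are
# imported as in the rest of this module): one RGBA colour per row of the frame,
# normalised between 0 and 2.
df_demo = pd.DataFrame({"x": [1, 2, 3]}, index=[0, 1, 2])
row_colors = get_colors(df_demo, colormap=plt.cm.viridis, vmin=0, vmax=2, axis=0)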
| 5,349,485 |
def proxy_channels(subreddits):
"""
Helper function to proxy submissions and posts.
Args:
subreddits (list of praw.models.Subreddit):
A list of subreddits
Returns:
list of ChannelProxy: list of proxied channels
"""
channels = {
channel.name: channel
for channel in Channel.objects.filter(
name__in=[subreddit.display_name for subreddit in subreddits]
)
}
return [
ChannelProxy(subreddit, channels[subreddit.display_name])
for subreddit in subreddits
if subreddit.display_name in channels
]
| 5,349,486 |
def anno2map(anno):
"""
anno: {
'file' ==> file index
'instances': [
{ 'class_name':
'class_idx':
'silhouette':
'part': [(name, mask), ...]
},
...
]
}
"""
height, width = anno.instances[0].silhouette.shape
cls_mask = np.zeros((height, width), dtype=np.uint8)
inst_mask = np.zeros((height, width), dtype=np.uint8)
part_mask = np.zeros((height, width), dtype=np.uint8)
for i, inst in enumerate(anno.instances):
assert height == inst.silhouette.shape[0] and width == inst.silhouette.shape[1]
cls_mask[inst.silhouette.astype(np.bool)] = inst.class_idx
inst_mask[inst.silhouette.astype(np.bool)] = i
for pname, pmask in inst.part:
assert pname in PASCAL_PART2ID_[inst.class_idx-1], f'The part {pname} is undefined in {inst.class_name}'
assert inst.silhouette[pmask.astype(np.bool)].all(), 'The part region is not a complete subregion of the object'
# if not inst.silhouette[pmask].all():
# print(f'Warning: [{anno.file}: {pname}] The part region is not a complete subregion of the object')
pid = PASCAL_PART2ID_[inst.class_idx-1][pname]
part_mask[pmask.astype(np.bool)] = pid
return cls_mask, inst_mask, part_mask
| 5,349,487 |
def string_to_bool(val: str):
"""Convert a homie string bool to a python bool"""
return val == STATE_ON
| 5,349,488 |
def profile(request):
"""
Update a User profile using built in Django Users Model if the user is logged in
otherwise redirect them to registration version
"""
if request.user.is_authenticated():
obj = get_object_or_404(TolaUser, user=request.user)
form = RegistrationForm(request.POST or None, instance=obj,initial={'username': request.user})
if request.method == 'POST':
if form.is_valid():
form.save()
messages.error(request, 'Your profile has been updated.', fail_silently=False)
return render(request, "registration/profile.html", {
'form': form, 'helper': RegistrationForm.helper
})
else:
return HttpResponseRedirect("/accounts/register")
| 5,349,489 |
def run_kmeans(chunksets, cluster_options={}):
"""
Produces files {{prefix}}.clusters, and {{prefix}}.cluster.stats
:chunksets : list of named tuple with attributes 'directory' and 'prefix'
:returns name of the centroid for each cluster
"""
kmeans_options_dict={ "--nclusters" : 10, # number of output clusters
"--min_cluster_size" : 1, # minimum cluster size
"--max_iterations" : 9, # max iterations for each random starting partition to converge to a final partition
"--total_seeds" : 9, # number of different starting partitions to tr
"--nthreads": 4
}
#update kmeans_options_dict with argument cluster_options
kmeans_options_dict.update({k:v for k, v in cluster_options.items() if k in kmeans_options_dict})
kmeans_options = " ".join([str(item) for pair in kmeans_options_dict.items() for item in pair])
fixed_options = " --cluster_rmsd --fine_parallel --sse3 --binary_coords"
for chunkset in chunksets:
io_options = " -i {0} -o {0}".format(chunkset.prefix)
commands = """cd {}
fast_protein_cluster {} {} {}
""".format(chunkset.directory, fixed_options, kmeans_options, io_options)
subprocess.check_output(commands.replace('\n',';'), shell=True)
| 5,349,490 |
def conj(Q):
"""Returns the conjugate of a dual quaternion.
"""
res = cs.SX.zeros(8)
res[0] = -Q[0]
res[1] = -Q[1]
res[2] = -Q[2]
res[3] = Q[3]
res[4] = -Q[4]
res[5] = -Q[5]
res[6] = -Q[6]
res[7] = Q[7]
return res
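# Usage sketch (illustrative; assumes casadi is imported as cs, as in this module):
# conjugation negates the vector parts of both the real and the dual quaternion
# while keeping the two scalar parts.
Q_sym = cs.SX.sym("Q", 8)
Q_conj = conj(Q_sym)  # [-Q0, -Q1, -Q2, Q3, -Q4, -Q5, -Q6, Q7]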
| 5,349,491 |
def benchmark_summary(benchmark_snapshot_df):
"""Creates summary table for a benchmark snapshot with columns:
|fuzzer|time||count|mean|std|min|25%|median|75%|max|
"""
groups = benchmark_snapshot_df.groupby(['fuzzer', 'time'])
summary = groups['edges_covered'].describe()
summary.rename(columns={'50%': 'median'}, inplace=True)
return summary.sort_values(('median'), ascending=False)
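# Usage sketch (illustrative data; assumes pandas as pd is imported): the snapshot
# frame needs 'fuzzer', 'time' and 'edges_covered' columns; one summary row is
# produced per (fuzzer, time) pair, sorted by median coverage.
snapshot_demo = pd.DataFrame({
    "fuzzer": ["afl", "afl", "libfuzzer", "libfuzzer"],
    "time": [900, 900, 900, 900],
    "edges_covered": [120, 140, 100, 110],
})
print(benchmark_summary(snapshot_demo))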
| 5,349,492 |
def is_pio_job_running(*target_jobs: str) -> bool:
"""
pass in jobs to check if they are running
ex:
> result = is_pio_job_running("od_reading")
> result = is_pio_job_running("od_reading", "stirring")
"""
with local_intermittent_storage("pio_jobs_running") as cache:
for job in target_jobs:
if cache.get(job, b"0") == b"1":
return True
return False
| 5,349,493 |
def benchmark(skip, params=(3,2,0)):
"""Time computation of the image at various different resolutions"""
for res in range(100,1010,skip):
minComputeDuration = 100000000
minCopyDuration = 100000000
for itr in range(5):
(output, (computeDur, copyDur)) = renderOrbitals(params, res)
if max(output) > 0.0:
minComputeDuration = min(minComputeDuration, computeDur)
minCopyDuration = min(minCopyDuration, copyDur)
# At some resolutions, on my mobile GPU, I get a totally black image
# I have no idea why this happens, but this sits here to discard
# those results, since their timing seems to be somewhat inaccurate.
if minComputeDuration < 100000000:
print "{0},{1},{2}".format(res, minComputeDuration, minCopyDuration)
| 5,349,494 |
def encode(text, encoding='utf-8'):
"""
Returns a unicode representation of the string
"""
if isinstance(text, basestring):
if not isinstance(text, unicode):
text = unicode(text, encoding, 'ignore')
return text
| 5,349,495 |
def calculate_sparsity(df: pd.DataFrame) -> tuple:
"""Calculate the data sparsity based on ratings and reviews.
Args:
df ([pd.DataFrame]): DataFrame with counts of `overall` and `reviewText`
measured against total `reviewerID` * `asin`.
Returns:
[tuple]: Tuple of data sparsity wrt. ratings (`overall`) and reviews (`reviewText`).
"""
# no. of ratings
rating_numerator = df["overall"].count()
review_numerator = df["reviewText"].count()
# number of users and items
num_users = df["reviewerID"].nunique()
num_items = df["asin"].nunique()
denominator = num_users * num_items
rating_sparsity = (1.0 - (rating_numerator * 1.0) / denominator) * 100
review_sparsity = (1.0 - (review_numerator * 1.0) / denominator) * 100
return rating_sparsity, review_sparsity
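# Worked example (illustrative; assumes pandas as pd is imported): 2 users x 2 items
# gives 4 possible pairs; with 2 ratings and 2 review texts present, both sparsity
# figures come out at 50%.
reviews_demo = pd.DataFrame({
    "reviewerID": ["u1", "u1", "u2"],
    "asin": ["a", "b", "a"],
    "overall": [5.0, 4.0, None],
    "reviewText": ["great", None, "ok"],
})
rating_sparsity, review_sparsity = calculate_sparsity(reviews_demo)
assert rating_sparsity == 50.0 and review_sparsity == 50.0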
| 5,349,496 |
def send_multi_domain_data_request(req_session):
""" Group Market Data request by Domain type """
    # Note that I don't need to group by domain, I could just
# iterate through the file and request each one individually
# but I felt this was neater!
grouped = defaultdict(list)
# Create lists grouped by Domain Type
for domain, ric in domain_ric_list:
grouped[domain].append(ric)
# For each Domain type group, call the data request method
for i, (domain, rics) in enumerate(grouped.items()):
send_single_domain_data_request(req_session, domain, rics)
| 5,349,497 |
def char_fun_est(
train_data,
paras=[3, 20], n_trees = 200, uv = 0, J = 1, include_reward = 0, fixed_state_comp = None):
"""
    For each cross-fitting task, use QRF (quantile random forests) to do the prediction
    paras == "CV_once": use CV once to choose the fitting parameters
    get_CV_paras == True: just get the parameters by using CV
    Returns
    -------
    a list of the estimated characteristic functions (forward and backward)
"""
char_funs = []
X1, y1 = get_pairs(train_data, is_forward = 1, J = J,
include_reward = include_reward, fixed_state_comp = fixed_state_comp)
X2, y2 = get_pairs(train_data, is_forward = 0, J = J,
include_reward = include_reward, fixed_state_comp = fixed_state_comp)
X, y = [X1, X2], [y1, y2]
if paras in ["CV", "CV_once"]:
for i in range(2):
rfqr = RandomForestQuantileRegressor(random_state=0, n_estimators = n_trees)
gd = GridSearchCV(estimator = rfqr, param_grid = param_grid,
cv = 5, n_jobs = n_jobs, verbose=0)
gd.fit(X[i], y[i])
best_paras = gd.best_params_
if paras == "CV_once": # only return forward
return [best_paras['max_depth'], best_paras['min_samples_leaf']]
elif paras == "CV":
print("best_paras:", best_paras)
# use the optimal paras and the whole dataset
rfqr1 = RandomForestQuantileRegressor(
random_state=0,
n_estimators = n_trees,
max_depth=best_paras['max_depth'],
min_samples_leaf=best_paras['min_samples_leaf'],
n_jobs = n_jobs)
char_funs.append(rfqr1.fit(X[i], y[i]))
else: # pre-specified paras
max_depth, min_samples_leaf = paras
for i in range(2):
char_funs.append(
RandomForestQuantileRegressor(
random_state=0, n_estimators = n_trees,
max_depth = max_depth, min_samples_leaf = min_samples_leaf,
n_jobs = n_jobs).fit( X[i], y[i]))
return char_funs
| 5,349,498 |
def create_tables_for_import(volume_id, namespace):
"""Create the import or permanent obs_ tables and all the mult tables they
reference. This does NOT create the target-specific obs_surface_geometry
tables because we don't yet know what target names we have."""
volume_id_prefix = volume_id[:volume_id.find('_')]
instrument_name = VOLUME_ID_PREFIX_TO_INSTRUMENT_NAME[volume_id_prefix]
if instrument_name is None:
instrument_name = 'GB'
mission_abbrev = VOLUME_ID_PREFIX_TO_MISSION_ABBREV[volume_id_prefix]
mission_name = MISSION_ABBREV_TO_MISSION_TABLE_SFX[mission_abbrev]
mult_table_schema = import_util.read_schema_for_table('mult_template')
# This is an awful hack because this one mult table has an extra field
# in it. Yuck! XXX
mult_target_name_table_schema = (
import_util.read_schema_for_table(
'mult_target_name_template'))
table_schemas = {}
table_names_in_order = []
for table_name in TABLES_TO_POPULATE:
table_name = table_name.replace('<INST>', instrument_name.lower())
table_name = table_name.replace('<MISSION>', mission_name.lower())
if table_name.startswith('obs_surface_geometry__'):
# Note that we aren't replacing <TARGET> here because we don't know
# the target name! We're only using this schema to get field names,
# data source, source order, etc. The real use of the schema will be
# later when we finally create and insert into the correct table for
# each target.
table_schema = import_util.read_schema_for_table(
'obs_surface_geometry_target')
else:
table_schema = import_util.read_schema_for_table(table_name)
if table_schema is None:
continue
table_schemas[table_name] = table_schema
table_names_in_order.append(table_name)
if table_name.startswith('obs_surface_geometry__'):
# Skip surface geo tables until they are needed
continue
# Create the referenced mult_ tables
for table_column in table_schema:
if table_column.get('put_mults_here', False):
continue
field_name = table_column['field_name']
pi_form_type = table_column.get('pi_form_type', None)
if pi_form_type is not None and pi_form_type.find(':') != -1:
pi_form_type = pi_form_type[:pi_form_type.find(':')]
if pi_form_type in GROUP_FORM_TYPES:
mult_name = import_util.table_name_mult(table_name, field_name)
if mult_name in MULT_TABLES_WITH_TARGET_GROUPING:
schema = mult_target_name_table_schema
else:
schema = mult_table_schema
if (impglobals.DATABASE.create_table(namespace, mult_name,
schema) and
namespace == 'import'):
_CREATED_IMP_MULT_TABLES.add(mult_name)
impglobals.DATABASE.create_table(namespace, table_name,
table_schema)
return table_schemas, table_names_in_order
| 5,349,499 |