def request_user_input(prompt='> '):
"""Request input from the user and return what has been entered."""
return raw_input(prompt)
| 5,347,200 |
def get_clinic_qs():
""" Returns a list of clinic uuid values for clinics whose patients
will receive follow up reminder messages
"""
q = Q()
for clinic in MESSAGE_CLINICS:
q = q | Q(name__iexact=clinic)
return list(Clinic.objects.filter(q).values_list('uuid', flat=True))
| 5,347,201 |
def NPnm(n, m, x):
"""Eq:II.77 """
return sqrt( (2*n+1)/2 * abs(nmFactorial(n,m)) ) * lpmv(m, n, x)
| 5,347,202 |
def all_columns_empty():
"""All columns are empty ... test will demoonstrate this edge case can be handled"""
return [[] for i in range(0, 100)]
| 5,347,203 |
def ping(host, destination, repeat_count, vrf_name):
"""Execute Ping RPC over NETCONF."""
# create NETCONF provider
provider = NetconfServiceProvider(address=host,
port=830,
username='admin',
password='admin',
protocol='ssh')
executor = ExecutorService() # create executor service
ping = xr_ping_act.Ping() # create ping RPC object
ping.input.destination = ping.input.Destination()
ping.input.destination.destination = destination
ping.input.destination.repeat_count = repeat_count
ping.input.destination.vrf_name = vrf_name
ping.output = executor.execute_rpc(provider, ping, ping.output)
return dict(success_rate=int(str(ping.output.ping_response.ipv4[0].success_rate)),
rtt_min=int(str(ping.output.ping_response.ipv4[0].rtt_min)),
rtt_avg=int(str(ping.output.ping_response.ipv4[0].rtt_avg)),
rtt_max=int(str(ping.output.ping_response.ipv4[0].rtt_max)))
| 5,347,204 |
def compute_shape_index(mesh) -> np.ndarray:
"""
Computes shape index for the patches. Shape index characterizes the shape
around a point on the surface, computed using the local curvature around each
point. These values are derived using PyMesh's available geometric
processing functionality.
Parameters
----------
mesh: Mesh
Instance of the pymesh Mesh type. The mesh is constructed by using
information on vertices and faces.
Returns
-------
si: np.ndarray,
Shape index for each vertex
"""
n1 = mesh.get_attribute("vertex_nx")
n2 = mesh.get_attribute("vertex_ny")
n3 = mesh.get_attribute("vertex_nz")
normals = np.stack([n1, n2, n3], axis=1)
mesh.add_attribute("vertex_mean_curvature")
H = mesh.get_attribute("vertex_mean_curvature")
mesh.add_attribute("vertex_gaussian_curvature")
K = mesh.get_attribute("vertex_gaussian_curvature")
elem = np.square(H) - K
# In some cases this equation is less than zero, likely due to the method
# that computes the mean and gaussian curvature. set to an epsilon.
elem[elem < 0] = 1e-8
k1 = H + np.sqrt(elem)
k2 = H - np.sqrt(elem)
# Compute the shape index
si = (k1 + k2) / (k1 - k2)
si = np.arctan(si) * (2 / np.pi)
return si
| 5,347,205 |
def edit_frame(frame: ndarray, y: int) -> Tuple[ndarray, ndarray]:
"""
Parameters
----------
    frame : ndarray, BGR image (row-major)
    y : int, index of the row to extract as a grayscale cut
Returns
-------
(frame, cut)
"""
np.random.uniform(-1, 1, size=20000000) # 20000000@6cores
cut = cv.cvtColor(frame[[y], :], cv.COLOR_BGR2GRAY)[0, :]
# Convert OpenCV colors to PyQtGraph colors
frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
return frame, cut
| 5,347,206 |
def get_location(uniprot_id: str) -> Location: # pragma: no cover
"""Queries the UniProt database for a subcellular location with the id `uniprot_id`
and returns a `Location` object"""
g: LocationRDF = get_location_graph(uniprot_id)
return Location.from_location_rdf(g)
| 5,347,207 |
def clean(params):
"""
Clean current folder
remove saved model and training log
"""
if os.path.isfile(params.vocab_file):
os.remove(params.vocab_file)
if os.path.isfile(params.map_file):
os.remove(params.map_file)
if os.path.isdir(params.ckpt_path):
shutil.rmtree(params.ckpt_path)
if os.path.isdir(params.summary_path):
shutil.rmtree(params.summary_path)
if os.path.isdir(params.result_path):
shutil.rmtree(params.result_path)
if os.path.isdir("log"):
shutil.rmtree("log")
if os.path.isdir("__pycache__"):
shutil.rmtree("__pycache__")
if os.path.isfile(params.config_file):
os.remove(params.config_file)
if os.path.isfile(params.vocab_file):
os.remove(params.vocab_file)
| 5,347,208 |
def analyzeSentiment(folderNum):
"""Get the review and perform sentiment analysis."""
while True:
carMake, carModel, reviewPath = getReview()
if type(carMake) != Exception and carMake is not None:
if folderNum == 0:
carModel = '-'.join(carModel)
else:
carModel = carModel[0]
if folderNum == 2 and (reviewPath == '' or reviewPath[-1] == '0'):
continue
            carMake, carModel = carMake.lower(), carModel.lower()
with open(reviewPath) as fileHandle:
reviewText = fileHandle.read()
reviewText = reviewText.replace('\'', '')
reviewBlob = TextBlob(reviewText)
reviewPolarity = reviewBlob.sentiment.polarity
reviewSubjectivity = reviewBlob.sentiment.subjectivity
insertQry = """INSERT INTO reviews2 (car_make, car_model,
review_polarity, review_subjectivity, review_text)
VALUES ('{}', '{}', {}, {}, '{}')""".format(
carMake, carModel, reviewPolarity, reviewSubjectivity,
reviewText)
cur.execute(insertQry)
else:
break
| 5,347,209 |
def test_paper_size(pdf, option_type, config):
"""
    Check the page size.
"""
for pg in pdf:
assert point_to_mm(pg.rect.width) == dict_get(
config, [option_type, "paper_size", "width"]
)
assert point_to_mm(pg.rect.height) == dict_get(
config, [option_type, "paper_size", "height"]
)
| 5,347,210 |
def _gaussian2d_rot_no_bg(p,x,y):
"""
Required Arguments:
p -- (m) [A,x0,y0,FWHMx,FWHMy,theta]
x -- (n x o) ndarray of coordinate positions for dimension 1
y -- (n x o) ndarray of coordinate positions for dimension 2
Outputs:
f -- (n x o) ndarray of function values at positions (x,y)
"""
theta=p[5]
x0prime, y0prime, xprime, yprime=_2d_coord_transform(theta,p[1],p[2],x,y)
newp=copy.copy(p)#this copy was needed so original parameters set isn't changed
newp[1]=x0prime
newp[2]=y0prime
f=_gaussian2d_no_bg(newp[:5],xprime,yprime)
return f
| 5,347,211 |
def queue_get_all(q):
"""
    Used by report builder to extract all items from a queue
:param q: queue to get all items from
:return: hash of merged data from the queue by pid
"""
items = {}
    maxItemsToRetrieve = 10000
    for numOfItemsRetrieved in range(0, maxItemsToRetrieve):
        try:
            new = q.get_nowait()
pid = new.pid
ts = new.timestamp
msg = new.msg
if pid not in items:
items[pid] = ''
old = items[pid]
new = '{0}\n[{1}]{2}'.format(old, ts, msg)
items[pid] = new
except Empty:
break
return items
| 5,347,212 |
def sample_account(self, profile, company, **params):
"""Create and return a sample customer"""
defaults = {
"balance": 0,
"account_name": "string",
"account_color": "string"
}
defaults.update(params)
return Account.objects.create(
profile=profile,
company=company,
**defaults
)
| 5,347,213 |
def extract_acqtime_and_physio_by_slice(log_fname, nSlices, nAcqs, acqTime_firstImg, TR=1000):
"""
:param log_fname:
:param nSlices:
:param nAcqs:
    :return: repsAcqTime: (SC + all slices) x nAcqs x (PulseOx, Resp)
             timePhysio: N_pulseOx_points x (PulseOx, Resp)
             valuesPhysio: N_pulseOx_points x (PulseOx, Resp)
    """
    # repsAcqTime: (SC + all slices) x nAcqs x (PulseOx, Resp)
    # timePhysio: N_pulseOx_points x (PulseOx, Resp)
    # valuesPhysio: N_pulseOx_points x (PulseOx, Resp)
repsAcqTime = np.zeros((1+nSlices, nAcqs, 2))
# pulseOx ----------------------------
if os.path.exists(log_fname+'.puls'):
print('Processing pulseOx log: '+log_fname+'.puls')
if 'slr' in os.path.basename(log_fname):
print('\t[\'slr\'-type physiolog]')
time_puls, puls_values, epi_acqtime_puls, epi_event_puls, acq_window_puls = dsc_extract_physio.read_physiolog(log_fname+'.puls', sampling_period=20) # extract physio signal
reps_table_puls, slices_table_puls = dsc_extract_physio.sort_event_times(epi_acqtime_puls, epi_event_puls) # sort event times
nrep_pulseOxLog = np.sum(reps_table_puls[:, 1])
if nAcqs != nrep_pulseOxLog:
                raise os.error('Number of repetitions in image is different from the number of repetitions recorded in pulseOx physiolog.')
# get acquisition time for each slice
repsAcqTime[1:, :, 0] = np.squeeze(slices_table_puls[np.where(reps_table_puls[:, 1] == 1), :]).T
else:
print('\t[\'CMRR\'-type physiolog]')
time_puls, trigger_start_times_puls, trigger_end_times_puls, puls_values, acq_window_puls, acqStartTime_puls = dsc_extract_physio.read_physiolog_cmrr(log_fname+'.puls')
triggerStartTimes_imgOnly_puls = dsc_extract_physio.extract_acqTimes_cmrr(trigger_start_times_puls, acqTime_firstImg, acqStartTime_puls, trigger_end_times_puls)
repsAcqTime[1:, :, 0] = np.tile(triggerStartTimes_imgOnly_puls, (nSlices, 1)) + np.tile(TR/nSlices * np.arange(0, nSlices), (nAcqs, 1)).T
else:
print('\nNo log found for pulseOx.')
repsAcqTime[1:, :, 0] = TR*np.tile(np.arange(0, nAcqs), (nSlices, 1)) + np.tile(TR/nSlices*np.arange(0, nSlices), (nAcqs, 1)).T
time_puls = np.arange(np.min(repsAcqTime), np.max(repsAcqTime), step=20)
puls_values = None
# take the mean acquisition time across slices for the whole rep (SC)
repsAcqTime[0, :, 0] = np.mean(repsAcqTime[1:nSlices, :, 0], axis=0)
# respiration ----------------------------
if os.path.exists(log_fname+'.resp'):
print('Processing respiration log: '+log_fname+'.resp')
if 'slr' in os.path.basename(log_fname):
print('\t[\'slr\'-type physiolog]')
time_resp, resp_values, epi_acqtime_resp, epi_event_resp, acq_window_resp = dsc_extract_physio.read_physiolog(log_fname+'.resp', sampling_period=20) # extract physio signal
reps_table_resp, slices_table_resp = dsc_extract_physio.sort_event_times(epi_acqtime_resp, epi_event_resp) # sort event times
nrep_respLog = np.sum(reps_table_resp[:, 1])
if nAcqs != nrep_respLog:
                raise os.error('Number of repetitions in image is different from the number of repetitions recorded in respiration physiolog.')
# get acquisition time for each slice
repsAcqTime[1:, :, 1] = np.squeeze(slices_table_resp[np.where(reps_table_resp[:, 1] == 1), :]).T
else:
print('\t[\'CMRR\'-type physiolog]')
time_resp, trigger_start_times_resp, trigger_end_times_resp, resp_values, acq_window_resp, acqStartTime_resp = dsc_extract_physio.read_physiolog_cmrr(log_fname+'.resp')
else:
print('\nNo log found for respiration.\n')
repsAcqTime[1:, :, 1] = TR*np.tile(np.arange(0, nAcqs), (nSlices, 1)) + np.tile(TR/nSlices*np.arange(0, nSlices), (nAcqs, 1)).T
time_resp = np.arange(np.min(repsAcqTime), np.max(repsAcqTime), step=20)
resp_values = None
# take the mean acquisition time across slices for the whole rep (SC)
repsAcqTime[0, :, 1] = np.mean(repsAcqTime[1:nSlices, :, 1], axis=0)
# merge the two physiological signal into one array each (for time and physio values)
if time_puls.size > time_resp.size:
time_resp = np.hstack((time_resp, time_puls[time_resp.size:]))
resp_values = np.pad(resp_values, (0, puls_values.size - resp_values.size), 'reflect')
elif time_puls.size < time_resp.size:
time_puls = np.hstack((time_puls, time_resp[time_puls.size:]))
puls_values = np.pad(puls_values, (0, resp_values.size - puls_values.size), 'reflect')
timePhysio = np.vstack((time_puls, time_resp)).T
valuesPhysio = np.vstack((puls_values, resp_values)).T
return repsAcqTime, timePhysio, valuesPhysio
| 5,347,214 |
def get_funghi_type_dict(funghi_dict):
"""
Parameters
----------
funghi_dict: dict {str: list of strs}
is the name: html lines dict created by get_funghi_book_entry_dict_from_html()
Return
------------
dict {str: FunghiType}
each entry contains a mushroom name and the corresponding FunghiType created with generate_funghi()
"""
funghis = {}
for funghi_name in funghi_dict:
funghis[funghi_name] = generate_funghi(funghi_dict, funghi_name)
return funghis
| 5,347,215 |
def init(item):
"""
Initializes any data on the parent item if necessary
"""
for component in item.components:
if component.defines('init'):
component.init(item)
| 5,347,216 |
def default_error_mesg_fmt(exc, no_color=False):
"""Generate a default error message for custom exceptions.
Args:
exc (Exception): the raised exception.
no_color (bool): disable colors.
Returns:
str: colorized error message.
"""
return color_error_mesg('{err_name}: {err_mesg}', {
'err_name': Color(exc.__class__.__name__, '*red'),
'err_mesg': Color(str(exc), 'white')
}, no_color)
| 5,347,217 |
def fit_model(model, generator, n_epochs, batches_per_epoch):
"""Fit model with data generator
model : tf.keras.Model
generator : yield (batch_x, batch_y)
"""
model.fit(generator, epochs=n_epochs, steps_per_epoch=batches_per_epoch)
| 5,347,218 |
def test_get_package_energy_with_only_pkg_rapl_api_return_correct_value(fs_pkg_one_socket):
"""
    Create a RaplDevice instance on a machine exposing the package RAPL API with only one socket,
    configure it to monitor the package domain,
use the `get_energy` method and check if:
- the returned list contains one element
- this element is the power consumption of the package on socket 0
"""
device = RaplDevice()
device.configure([RaplPackageDomain(0)])
assert device.get_energy() == [fs_pkg_one_socket.domains_current_energy['package_0']]
| 5,347,219 |
def check_values_on_diagonal(matrix):
"""
Checks if a matrix made out of dictionary of dictionaries has values on diagonal
:param matrix: dictionary of dictionaries
:return: boolean
"""
for line in matrix.keys():
if line not in matrix[line].keys():
return False
return True
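# A small usage sketch with made-up dict-of-dicts matrices (outer keys are row
# labels, inner keys are column labels):
matrix_full_diagonal = {0: {0: 5, 1: 2}, 1: {0: 3, 1: 7}}
matrix_missing_diagonal = {0: {0: 5, 1: 2}, 1: {0: 3}}  # nothing stored at (1, 1)
print(check_values_on_diagonal(matrix_full_diagonal))     # True
print(check_values_on_diagonal(matrix_missing_diagonal))  # False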
| 5,347,220 |
def volat(path):
"""volat
Data loads lazily. Type data(volat) into the console.
A data.frame with 558 rows and 17 variables:
- date. 1947.01 to 1993.06
- sp500. S&P 500 index
- divyld. div. yield annualized rate
- i3. 3 mo. T-bill annualized rate
- ip. index of industrial production
- pcsp. pct chg, sp500, ann rate
- rsp500. return on sp500: pcsp + divyld
- pcip. pct chg, IP, ann rate
- ci3. i3 - i3[\_n-1]
- ci3\_1. ci3[\_n-1]
- ci3\_2. ci3[\_n-2]
- pcip\_1. pcip[\_n-1]
- pcip\_2. pcip[\_n-2]
- pcip\_3. pcip[\_n-3]
    - pcsp\_1. pcsp[\_n-1]
    - pcsp\_2. pcsp[\_n-2]
    - pcsp\_3. pcsp[\_n-3]
    https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `volat.csv`.
Returns:
Tuple of np.ndarray `x_train` with 558 rows and 17 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'volat.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/wooldridge/volat.csv'
maybe_download_and_extract(path, url,
save_file_name='volat.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
| 5,347,221 |
def read_quantity(string):
"""
convert a string to a quantity or vectorquantity
the string must be formatted as '[1, 2, 3] unit' for a vectorquantity,
or '1 unit' for a quantity.
"""
if "]" in string:
# It's a list, so convert it to a VectorQuantity.
# The unit part comes after the list.
# The list itself must consist of floats only!
values = list(
map(
float,
string[1:].split('] ')[0].split(',')
)
)
unit = find_unit(string.split('] ')[1].split(' '))
quantity = new_quantity(values, unit)
else:
value = float(string.split(' ')[0])
unit = find_unit(string.split(' ')[1:])
quantity = new_quantity(value, unit)
return quantity
| 5,347,222 |
def loop():
"""
The main loop for monitoring and alerting on services
"""
# Setup major objects
config = get_config('monitor')
logger = get_logger(level=config.grab('level', section='logging'),
location=config.grab('location', section='logging'),
max_size=config.grab('max_size', section='logging'),
rollover_count=config.grab('rollover_count', section='logging')
)
dispatcher = Dispatcher()
_monitor = config.grab_many(section='services')
services = {}
    for member in _monitor:
        services[member] = Service(name=member, processes=_monitor[member])
    # Setup looping & alerting parameters
events = {}
SEC_TO_MIN = 60
loop_run_frequency = config.grab('frequency')
alert_frequency = config.grab('rate') * SEC_TO_MIN
event_reset_period = config.grab('reset_after') * SEC_TO_MIN
# Start the dispatcher
child_pipe, pipe = Pipe(duplex=False)
dispatcher.run(config, logger, child_pipe)
# Run monitoring loop
start_alert_period = time.time()
while True:
start_run_time = time.time()
        for member in services:
            new_pids, dead_pids = services[member].status()
if dead_pids:
for name in dead_pids:
for pid in dead_pids[name]:
new_event = Event(member, name, pid)
if new_event in events:
                            # Event has overridden __hash__; that's why this works
                            events[new_event].bump()  # add occurrence to event
                        else:
                            events[new_event] = new_event  # start tracking this event
                            pipe.send(new_event)  # push to dispatcher for alerting
            if new_pids:
                for name in new_pids:
                    for pid in new_pids[name]:
                        # It's spam to notify of a new pid ASAP
                        new_event = Event(member, name, pid)
                        events.setdefault(new_event, new_event).bump()
# remove events that have been 'green' for long enough
        for event in list(events):
if time.time() - event.last_event >= event_reset_period:
events.pop(event, None)
# Send periodic alerts
if time.time() - start_alert_period >= alert_frequency:
for event in events:
pipe.send(event)
start_alert_period = time.time()
# time to nap
ran_for = time.time() - start_run_time
delta = loop_run_frequency - ran_for
time.sleep(max(0, delta))
| 5,347,223 |
async def stop_service(name: str) -> None:
""" stop service """
task = TASKS.get(name)
if task is None:
raise Exception(f"No such task {name}")
return task.cancel()
| 5,347,224 |
def rand_bbox(img_shape, lam, margin=0., count=None):
""" Standard CutMix bounding-box
Generates a random square bbox based on lambda value. This impl includes
support for enforcing a border margin as percent of bbox dimensions.
Args:
img_shape (tuple): Image shape as tuple
lam (float): Cutmix lambda value
margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image)
count (int): Number of bbox to generate
"""
ratio = np.sqrt(1 - lam)
img_h, img_w = img_shape[-2:]
cut_h, cut_w = int(img_h * ratio), int(img_w * ratio)
margin_y, margin_x = int(margin * cut_h), int(margin * cut_w)
cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count)
cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count)
yl = np.clip(cy - cut_h // 2, 0, img_h)
yh = np.clip(cy + cut_h // 2, 0, img_h)
xl = np.clip(cx - cut_w // 2, 0, img_w)
xh = np.clip(cx + cut_w // 2, 0, img_w)
bbox_area = (yh - yl) * (xh - xl)
lam = 1. - bbox_area / float(img_shape[-2] * img_shape[-1])
return yl, yh, xl, xh, lam
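# A hedged usage sketch: carve a CutMix box out of a (C, H, W) image shape.
# The image shape and lambda below are made up for illustration.
import numpy as np
np.random.seed(0)
yl, yh, xl, xh, lam_adj = rand_bbox(img_shape=(3, 224, 224), lam=0.7)
# Paste region [yl:yh, xl:xh] from the second image, then mix the labels with
# the corrected lambda (the fraction of the image left untouched).
print(yl, yh, xl, xh, round(lam_adj, 3))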
| 5,347,225 |
def main():
"""Main."""
parser = argparse.ArgumentParser(description='Get Redis master IP')
parser.add_argument('--cluster', '-c', help='cluster name', required=True)
parser.add_argument('--subcluster', '-s', help='subcluster name', default='redisdb')
parser.add_argument('--db', '-d', help='redis db name', required=True)
parser.add_argument('--debug', help='debug mode', action='store_true')
args = parser.parse_args()
global CONFIG_FILES, CONFIG, SECRETS, DEBUG
DEBUG = args.debug
CONFIG_FILES, CONFIG, SECRETS = common.read_redis_configs(DEBUG)
get_current_master(args.cluster, args.subcluster, args.db)
| 5,347,226 |
def get_current_dir():
"""
    Get the directory of the executed Python file (i.e. this file)
"""
# Resolve to get rid of any symlinks
current_path = Path(__file__).resolve()
current_dir = current_path.parent
return current_dir
| 5,347,227 |
def concat_output_files(
input_directory=None,
output_directory=None,
reference_sample=None,
project_id=None,
):
"""This function will concatenate all chromosome output files
into a single file for viewing on P-distance Graphing Tool.
It will also remove the reference sample data as it is only
    0's and uninformative."""
logging.debug(f"Starting concatenation process")
DATE = str(datetime.date.today()).replace('-', '_')
# set Paths()
input_directory = Path(input_directory)
if not output_directory:
output_directory = Path(input_directory) / "p_distance_output"
else:
output_directory = Path(output_directory)
# Collect output files
output_files = [f for f in output_directory.iterdir() if f.is_file()]
pprint_list = "\n".join([str(f) for f in output_files])
logging.debug(f"Files to concatenate:\n{pprint_list}\n")
# Read files into dataframe
dfs_to_concat = [pd.read_csv(f) for f in output_files]
# Concatenate the files into a single directory
concat_dfs = pd.concat(dfs_to_concat)
# Drop reference column from final dataset
concat_dfs.drop(columns=reference_sample, inplace=True)
# Create output filename and output to file
output_filename = output_directory / f"{reference_sample}_{project_id}_{DATE}.csv"
concat_dfs.to_csv(output_filename, index=False)
return
| 5,347,228 |
def build_gun_dictionary(filename):
"""Build a dictionary of gun parameters from an external CSV file:
- Key: the gun designation (e.g. '13.5 in V' or '12 in XI')
- Value: a list of parameters, in the order:
* caliber (in inches)
* maxrange (maximum range in yards)
* longtohit (chance to hit per gun and minute at long range)
* longmin (minimum range considered to be long)
* effectivetohit (chance to hit per gun and minute at effective range)
* effectivemin (minimum range considered to be effective)
* shorttohit (chance to hit per gun and minute at short range)
"""
gundict = {}
with open(filename) as sourcefile:
reader = csv.reader(sourcefile, delimiter=",")
next(reader)
for row in reader:
gundata = list(row)
gundict[gundata[0]] = list(map(float, gundata[1:]))
return gundict
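# A minimal end-to-end sketch; the CSV layout below is an assumption that
# simply mirrors the parameter order documented in the docstring.
import csv
with open('guns_example.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['designation', 'caliber', 'maxrange', 'longtohit',
                     'longmin', 'effectivetohit', 'effectivemin', 'shorttohit'])
    writer.writerow(['13.5 in V', 13.5, 23000, 0.02, 15000, 0.05, 8000, 0.1])
guns = build_gun_dictionary('guns_example.csv')
print(guns['13.5 in V'])  # [13.5, 23000.0, 0.02, 15000.0, 0.05, 8000.0, 0.1]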
| 5,347,229 |
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Test score")
plt.legend(loc="best")
return plt
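# A hedged usage sketch (assumes scikit-learn is installed and that the
# surrounding module already imports matplotlib.pyplot as plt, numpy as np and
# sklearn.model_selection.learning_curve, as the function body requires):
from sklearn.datasets import load_digits
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import ShuffleSplit
digits = load_digits()
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
plot_learning_curve(GaussianNB(), "Learning Curves (Naive Bayes)",
                    digits.data, digits.target, ylim=(0.7, 1.01),
                    cv=cv, n_jobs=1).show()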
| 5,347,230 |
def fetch_events_AHEAD(base_url='http://www.ahead-penn.org'):
"""
Penn Events for Penn AHEAD
"""
page_soup = BeautifulSoup(requests.get(
urljoin(base_url, '/events')).content, 'html.parser')
events = []
event_table = page_soup.find('div', attrs={'id': 'main-content'})
all_events = event_table.find_all('div', attrs={'class': 'views-row'})
for event in all_events:
event_url = urljoin(base_url, event.find('a')['href'])
event_soup = BeautifulSoup(requests.get(
event_url).content, 'html.parser')
title = event_soup.find('h1', attrs={'class': 'title'})
title = title.text.strip() if title is not None else ''
date = event_soup.find('span', attrs={'class': 'date-display-single'})
date = date.text.strip() if date is not None else ''
starttime, endtime = find_startend_time(date)
location = event_soup.find('div', attrs={
'class': 'field field-name-field-location field-type-text field-label-hidden'})
location = location.text.strip() if location is not None else ''
details = event_soup.find('div', attrs={
'class': 'field field-name-body field-type-text-with-summary field-label-hidden'})
details = details.text.strip() if details is not None else ''
events.append({
'title': title,
'speaker': '',
'date': date,
'location': location,
'description': details,
'starttime': starttime,
'endtime': endtime,
'url': event_url,
'owner': 'Penn AHEAD',
})
return events
| 5,347,231 |
def assert_records_equal_nonvolatile(first, second, volatile_fields, indent=0):
"""Compare two test_record tuples, ignoring any volatile fields.
'Volatile' fields include any fields that are expected to differ between
successive runs of the same test, mainly timestamps. All other fields
are recursively compared.
"""
if isinstance(first, dict) and isinstance(second, dict):
if set(first) != set(second):
logging.error('%sMismatching keys:', ' ' * indent)
logging.error('%s %s', ' ' * indent, first.keys())
logging.error('%s %s', ' ' * indent, second.keys())
assert set(first) == set(second)
for key in first:
if key in volatile_fields:
continue
try:
assert_records_equal_nonvolatile(first[key], second[key],
volatile_fields, indent + 2)
except AssertionError:
logging.error('%sKey: %s ^', ' ' * indent, key)
raise
elif hasattr(first, '_asdict') and hasattr(second, '_asdict'):
# Compare namedtuples as dicts so we get more useful output.
assert_records_equal_nonvolatile(first._asdict(), second._asdict(),
volatile_fields, indent)
elif hasattr(first, '__iter__') and hasattr(second, '__iter__'):
for idx, (fir, sec) in enumerate(itertools.izip(first, second)):
try:
assert_records_equal_nonvolatile(fir, sec, volatile_fields, indent + 2)
except AssertionError:
logging.error('%sIndex: %s ^', ' ' * indent, idx)
raise
elif (isinstance(first, records.RecordClass) and
isinstance(second, records.RecordClass)):
assert_records_equal_nonvolatile(
{slot: getattr(first, slot) for slot in first.__slots__},
{slot: getattr(second, slot) for slot in second.__slots__},
volatile_fields, indent)
elif first != second:
logging.error('%sRaw: "%s" != "%s"', ' ' * indent, first, second)
assert first == second
| 5,347,232 |
def forkpty(*args, **kwargs): # real signature unknown
"""
Fork a new process with a new pseudo-terminal as controlling tty.
Returns a tuple of (pid, master_fd).
Like fork(), return pid of 0 to the child process,
and pid of child to the parent process.
To both, return fd of newly opened pseudo-terminal.
"""
pass
| 5,347,233 |
def column_to_index(ref):
"""
    Convert a column reference given as letters to a 0-based ordinal.
    Params:
        ref(str): A, B, C, ... Z, AA, AB, ...
    Returns:
        int: 0-based column index
"""
column = 0
for i, ch in enumerate(reversed(ref)):
d = string.ascii_uppercase.index(ch) + 1
column += d * pow(len(string.ascii_uppercase),i)
return column-1
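# Quick sanity checks of the letter-to-index conversion (0-based):
assert column_to_index("A") == 0
assert column_to_index("Z") == 25
assert column_to_index("AA") == 26
assert column_to_index("AZ") == 51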
| 5,347,234 |
def cols_to_tanh(df, columns):
"""Transform column data with hyperbolic tangent and return new columns of prefixed data.
Args:
df: Pandas DataFrame.
columns: List of columns to transform.
Returns:
Original DataFrame with additional prefixed columns.
"""
for col in columns:
df['tanh_' + col] = np.tanh(df[col])
return df
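# A small self-contained sketch; the column names are made up for illustration.
import numpy as np
import pandas as pd
df = pd.DataFrame({'score': [0.0, 1.0, 2.0], 'other': [1, 2, 3]})
df = cols_to_tanh(df, ['score'])
print(df[['score', 'tanh_score']])  # 'tanh_score' holds np.tanh of 'score'; 'other' is untouched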
| 5,347,235 |
def draw_png_heatmap_graph(obs, preds_dict, gt, mixes, network_padding_logits, trackwise_padding, plt_size, draw_prediction_track, plot_directory, log_file_name,
multi_sample, global_step, graph_number, fig_dir, csv_name, rel_destination, parameters, padding_mask='None', distance=0):
"""
:param obs:
:param preds_dict:
:param gt:
:param mixes:
:param network_padding_logits:
:param trackwise_padding:
:param plt_size:
:param draw_prediction_track:
:param plot_directory:
:param log_file_name:
:param multi_sample:
:param global_step:
:param graph_number:
:param fig_dir:
:param csv_name:
:param rel_destination:
:param parameters:
:param padding_mask: "None", "GT" or "Network"
:return:
"""
##FIXME
gt_padding_bool = trackwise_padding
#
#padding_bool = np.argmax(padding_logits, axis=1) == 1
# 'results/20180412-104825/plots_img_final'
legend_str = []
fig = plt.figure(figsize=plt_size)
plt.plot(gt[:, 0], gt[:, 1], 'b-', zorder=3, label="Ground Truth")
plt.plot(gt[:, 0], gt[:, 1], 'bo', zorder=3, ms=2)
legend_str.append(['Ground Truth'])
plt.plot(obs[:, 0], obs[:, 1], 'g-', zorder=4, label="Observations")
plt.plot(obs[:, 0], obs[:, 1], 'go', zorder=4, ms=2)
legend_str.append(['Observations'])
plot_colors = ['r', 'c', 'm', 'y', 'k']
plot_colors_idx = 0
first_RNN = True
for name, preds in preds_dict.iteritems():
# The input is designed for multiple future tracks. If only 1 is produced, the axis is missing. So reproduce it.
# This is the most common case (one track)
if len(preds.shape) < 3:
preds = np.array([preds])
if name == 'RNN' and not draw_prediction_track:
continue
else:
for j in range(preds.shape[0]):
prediction = preds[j]
# `Real data'
if 'multipath' in name:
plot_color = 'w'
if first_RNN:
first_RNN = False
label_name = "RNN Proposed"
else:
label_name = None
else:
plot_color = plot_colors[plot_colors_idx]
plot_colors_idx += 1
label_name = name
                if len(prediction) != len(gt_padding_bool):
padding_amount = len(gt_padding_bool) - len(prediction)
if padding_amount < 0:
prediction = prediction[:len(gt_padding_bool), :]
else:
prediction = np.pad(prediction, [[0, padding_amount], [0, 0]], 'edge')
plt.plot(prediction[~gt_padding_bool, 0], prediction[~gt_padding_bool, 1],
plot_color + 'o', ms=2, zorder=5)
plt.plot(prediction[~gt_padding_bool, 0], prediction[~gt_padding_bool, 1],
plot_color + '-', ms=1, zorder=5, label=label_name)
# Padding `fake' data
#plt.plot(prediction[gt_padding_bool, 0], prediction[gt_padding_bool, 1],
# plot_color + 'x', ms=2, zorder=5)
#legend_str.append([name + ' Pred'])
plt.legend()
if 'relative' in parameters['ibeo_data_columns'][0]:
x_range = (-20, 20)
y_range = (-10, 30)
x_range = (-18, 18)
y_range = (-8, 28)
elif 'queen-hanks' in csv_name:
x_range = (3, 47)
y_range = (-17, 11)
elif 'leith-croydon' in csv_name:
x_range = (-35, 10)
y_range = (-30, 15)
elif 'roslyn-crieff' in csv_name:
x_range = (-31, -10)
y_range = (-15, 8)
elif 'oliver-wyndora' in csv_name:
x_range = (-28, -8)
y_range = (-12, 6)
elif 'orchard-mitchell' in csv_name:
x_range = (-32, -5)
y_range = (-23, 5)
dx, dy = 0.5, 0.5
x = np.arange(min(x_range), max(x_range), dx)
y = np.flip(np.arange(min(y_range), max(y_range), dy), axis=0) # Image Y axes are down positive, map axes are up positive.
xx, yy = np.meshgrid(x, y)
xxyy = np.c_[xx.ravel(), yy.ravel()]
extent = np.min(x), np.max(x), np.min(y), np.max(y)
# Return probability sum here.
heatmaps = None
plot_time = time.time()
for sampled_mix, sampled_padding_logits in zip(mixes, network_padding_logits):
# Sleep in process to improve niceness.
time.sleep(0.05)
#print "len sampled_mix: " + str(len(sampled_mix))
sample_time = time.time()
network_padding_bools = np.argmax(sampled_padding_logits, axis=1) == 1
timeslot_num = 0
for timeslot, n_padded, gt_padded in zip(sampled_mix, network_padding_bools, gt_padding_bool):
if 'Network' in padding_mask and n_padded:
continue
if 'GT' in padding_mask and gt_padded:
continue
#print "timeslot_num " + str(timeslot_num)
gaussian_heatmaps = []
gaus_num = 0
for gaussian in timeslot:
##FIXME does not check padding_logit
gaus_num += 1
#print gaus_num
pi, mu1, mu2, s1, s2, rho = gaussian
cov = np.array([[s1 * s1, rho * s1 * s2], [rho * s1 * s2, s2 * s2]])
norm = scipy.stats.multivariate_normal(mean=(mu1, mu2), cov=cov)
zz = norm.pdf(xxyy)
zz *= pi
zz = zz.reshape((len(xx), len(yy[0])))
gaussian_heatmaps.append(zz)
gaussian_heatmaps /= np.max(gaussian_heatmaps) # Normalize such that each timestep has equal weight
#heatmaps.extend(gaussian_heatmaps) # This explodes
#TODO Does not work!
save_each_timestep = False
if save_each_timestep:
import copy
timestep_plt = copy.deepcopy(plt)
timestep_plt.imshow(gaussian_heatmaps, cmap=plt.cm.viridis, alpha=.7, interpolation='bilinear', extent=extent,
zorder=1)
timestep_plt.legend()
distance_str = ('n' if distance < 0 else 'p') + "%02i" % abs(distance+50)
fig_name = padding_mask + '-' + str(graph_number) + '-' + distance_str + '-' + ("no_pred_track-" if draw_prediction_track is False else "") + str(
multi_sample) + "-" + log_file_name + '-' + str(global_step) + '-' + rel_destination + 't_' + str(timeslot_num) + '.png'
fig_path = os.path.join(fig_dir, fig_name)
timestep_plt.savefig(fig_path, bbox_inches='tight')
if heatmaps is None:
heatmaps = gaussian_heatmaps
else:
heatmaps += gaussian_heatmaps
timeslot_num += 1
#print "Time for this sample: " + str(time.time() - sample_time)
#print "Time for gaussian plot of one track: " + str(time.time() - plot_time)
# Its about 7 seconds per plot
final_heatmap = sum(heatmaps) if heatmaps is not None else None
if 'relative' in parameters['ibeo_data_columns'][0]:
_ = 0 # Blank line to preserve lower logic flow
image_filename = 'intersection_diagram_background.png'
background_img = plt.imread(os.path.join('images', image_filename))
plt.imshow(background_img, zorder=0, # x_range = (-20, 20) y_range = (-10, 30)
extent=extent)#[-20, 20, -10, 30])
elif 'queen-hanks' in csv_name:
x_range = (3, 47)
y_range = (-17, 11)
elif 'leith-croydon' in csv_name:
x_range = (-35, 10)
y_range = (-30, 15)
elif 'leith-croydon' in csv_name:
image_filename = 'leith-croydon.png'
background_img = plt.imread(os.path.join('images', image_filename))
plt.imshow(background_img, zorder=0,
extent=[-15.275 - (147.45 / 2), -15.275 + (147.45 / 2), -3.1 - (77 / 2), -3.1 + (77 / 2)])
if final_heatmap is not None:
plt.imshow(final_heatmap, cmap=plt.cm.viridis, alpha=.7, interpolation='bilinear', extent=extent, zorder=1)
plt.legend()
plt.xlabel("x (metres)")
plt.ylabel("y (metres)")
distance_str = ('n' if distance < 0 else 'p') + "%02i" % abs(distance+50)
fig_name = padding_mask + '-' + str(graph_number) + '-' + distance_str + '-' + ("no_pred_track-" if draw_prediction_track is False else "") + str(
multi_sample) + "-" + log_file_name + '-' + str(global_step) + '-' + rel_destination + '.png'
fig_path = os.path.join(fig_dir, fig_name)
plt.savefig(fig_path, bbox_inches='tight')
print "Finished plotting " + fig_name
# Now inject into tensorboard
fig.canvas.draw()
fig_s = fig.canvas.tostring_rgb()
fig_data = np.fromstring(fig_s, np.uint8)
fig_data = fig_data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
# This last string return allows images to be saved in tensorboard. I don't use it anymore, and I want the threads
# to run in the background, so I dropped the return value.
s = StringIO.StringIO()
plt.imsave(s, fig_data, format='png')
fig_data = s.getvalue()
plt.close()
return None
| 5,347,236 |
def main(mainscreen):
"""Sets up application settings and initializes windows objects"""
if (len(sys.argv) < 2):
exit("No input folder specified")
logger = utils.setup_logger('simple',
'simple.log',
extra={'filename': __file__})
# check yaml files
checker = YamlChecker(utils.format_directory_path(sys.argv[1]))
commits, deletes = checker.files_safe()
initialize_curses_settings()
# variables
pos = newpos = 0
hkeys = dict([
(49, 0),
(50, 1),
(51, 2)
])
vkeys = [
curses.KEY_UP,
curses.KEY_DOWN
]
# tabs and child windows
# tm = TabsManager(mainscreen)
# user input
'''
char = mainscreen.getch()
while char != ord('q'):
if char in keys.keys() or char in hkeys.keys():
if char in keys.keys():
newpos = pos + keys[char]
if newpos < 0:
newpos = tm.count - 1
if newpos > tm.count - 1:
newpos = 0
elif char in hkeys.keys():
newpos = hkeys[char]
tm.update(pos, newpos)
pos = newpos
elif char in vkeys:
if char == vkeys[0]:
tm.active.child.datahead.scroll_dn()
#tm.active.child.refresh(tm.active.child.datapos, -1)
tm.active.child.toggle_on()
if char == vkeys[1]:
tm.active.child.datahead.scroll_up()
#tm.active.child.refresh(tm.active.child.datapos, 1)
tm.active.child.toggle_on()
char = mainscreen.getch()
'''
char = mainscreen.getch()
# print(chr(27) + "[2J")
# sys.stderr.write("\x1b2J\x1b[H")
| 5,347,237 |
def stdev_time(arr1d, stdev):
"""
detects breakpoints through multiple standard deviations and divides breakpoints into timely separated sections
(wanted_parts)
- if sigma = 1 -> 68.3%
- if sigma = 2 -> 95.5%
- if sigma = 2.5 -> 99.0%
- if sigma = 3 -> 99.7%
- if sigma = 4 -> 99.9%
----------
arr1d: numpy.array
1D array representing the time series for one pixel
stdev: float
number multiplied with standard deviation to define the probability space for a breakpoint
Returns
----------
numpy.int32
0 = no breakpoint over time
15 = breakpoint in the 1st section
16 = breakpoint in the 2nd section
17 = breakpoint in the 3rd section
18 = breakpoint in the 4th section
19 = breakpoint in the 5th section
31 = breakpoint in the 1st AND 2nd section
32 = breakpoint in the 1st AND 3rd section
33 = breakpoint in the 1st AND 4th section OR breakpoint in the 2nd AND 3rd section
34 = breakpoint in the 1st AND 5th section OR 2nd AND 4th section
35 = breakpoint in the 2nd section AND 5th section OR 3rd AND 4th section
36 = breakpoint in the 3rd AND 5th section
37 = breakpoint in the 4th AND 5th section
48 = breakpoint in the 1st, 2nd AND 3rd section
49 = breakpoint in the 1st, 2nd AND 4th section
50 = breakpoint in the 1st, 2nd AND 5th section OR 1st, 3rd AND 4th section
51 = breakpoint in the 1st, 3rd AND 5th section OR 2nd, 3rd AND 4th section
52 = breakpoint in the 1st, 3rd AND 5th section OR 2nd, 3rd AND 5th section
53 = breakpoint in the 2nd, 4th AND 5th section
54 = breakpoint in the 3rd, 4th AND 5th section
66 = breakpoint in the 1st, 2nd, 3rd AND 4th section
67 = breakpoint in the 1st, 2nd, 3rd AND 5th section
68 = breakpoint in the 1st, 2nd, 4th AND 5th section
69 = breakpoint in the 1st, 3rd, 4th AND 5th section
70 = breakpoint in the 2nd, 3rd , 4th AND 5th section
85 = breakpoints in all section
"""
import numpy as np
time_series = arr1d
arr_shape = arr1d.shape[0]
time_series_index = np.indices((arr_shape,))[0]
# internal function to split time series in n sub time series
def split_list(alist, wanted_parts=1): # based on: https://stackoverflow.com/a/752562
length = len(alist)
return [alist[i * length // wanted_parts: (i + 1) * length // wanted_parts]
for i in range(wanted_parts)]
# split time series and list of time series indices in 4 subarrays
time_series_split = split_list(time_series, wanted_parts=5)
time_series_index_split = split_list(time_series_index, wanted_parts=5)
# calculate linear regression for each time series subarray
mini_list = []
sigma_list = []
for i in range(0, len(time_series_index_split)):
mea = np.mean(time_series_split[i])
std_mea = stdev * np.std(time_series_split[i])
mini = min(time_series_split[i])
sigma = mea - std_mea
i += 1
mini_list = [mini_list, mini]
sigma_list = [sigma_list, sigma] # weird list append, cause .append doesnt work with multiprocessing
# check for dropping slope values from one fifth of time series to next
temp = 0
if mini_list[0][0][0][0][1] < sigma_list[0][0][0][0][1]:
temp = temp + 15
if mini_list[0][0][0][1] < sigma_list[0][0][0][1]:
temp = temp + 16
if mini_list[0][0][1] < sigma_list[0][0][1]:
temp = temp + 17
if mini_list[0][1] < sigma_list[0][1]:
temp = temp + 18
if mini_list[1] < sigma_list[1]:
temp = temp + 19
if temp == 0:
return 0
return temp
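# A hedged usage sketch on synthetic data: a flat signal with a single sharp
# drop placed in the third of the five sections, so with a 2-sigma threshold
# the expected return code is 17 (breakpoint in the 3rd section only).
import numpy as np
signal = np.ones(100)
signal[45] = -10.0  # dip inside the 3rd fifth of the series
print(stdev_time(signal, stdev=2.0))  # -> 17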
| 5,347,238 |
def test_create_build(mocker, expected_class, build_object_type):
"""
Given:
- server_type of the server we run the build on: XSIAM or XSOAR.
When:
- Running 'configure_an_test_integration_instances' script and creating Build object
Then:
    - Assert that the right Build object is created: XSIAMBuild or XSOARBuild.
"""
build = create_build_object_with_mock(mocker, build_object_type)
assert isinstance(build, expected_class)
| 5,347,239 |
def main(_, **settings):
"""
This function returns a Pyramid WSGI application.
"""
config = Configurator(settings=settings, route_prefix="/api")
# Initialise the broadcast view before c2cwsgiutils is initialised. This allows to test the
# reconfiguration on the fly of the broadcast framework
config.add_route("broadcast", r"/broadcast", request_method="GET")
config.add_view(
lambda request: broadcast_view(), route_name="broadcast", renderer="fast_json", http_cache=0
)
config.include(c2cwsgiutils.pyramid.includeme)
models.init(config)
config.scan("c2cwsgiutils_app.services")
health_check = HealthCheck(config)
health_check.add_db_session_check(models.DBSession, at_least_one_model=models.Hello)
health_check.add_url_check("http://localhost:8080/api/hello")
health_check.add_url_check(name="fun_url", url=lambda _request: "http://localhost:8080/api/hello")
health_check.add_custom_check("fail", _failure, 2)
health_check.add_custom_check("fail_json", _failure_json, 2)
health_check.add_alembic_check(models.DBSession, "/app/alembic.ini", 1)
return config.make_wsgi_app()
| 5,347,240 |
def get_available_modules():
"""Return list of modules shipped with OnRamp.
Returns:
List of module shipped with OnRamp
"""
def verify_module_path(x):
return os.path.isdir(os.path.join(_shipped_mod_dir, x))
return [{
'mod_id': None,
'mod_name': name,
'installed_path': None,
'state': 'Available',
'error': None,
'source_location': {
'type': 'local',
'path': os.path.normpath(os.path.join(_shipped_mod_dir, name))
}
} for name in filter(verify_module_path,
os.listdir(_shipped_mod_dir))]
| 5,347,241 |
def GetVarLogMessages(max_length=256 * 1024,
path='/var/log/messages',
dut=None):
"""Returns the last n bytes of /var/log/messages.
Args:
max_length: Maximum characters of messages.
path: path to /var/log/messages.
dut: a cros.factory.device.device_types.DeviceInterface instance, None for
local.
"""
return file_utils.TailFile(path, max_length, dut)
| 5,347,242 |
def _write_file(response, writer, size_limit=None):
"""Write download results to disk.
"""
size = 0
with writer.open_file('wb') as fp:
for chunk in response.iter_content(chunk_size=4096):
if chunk: # filter out keep-alive chunks
fp.write(chunk)
size += len(chunk)
if size_limit is not None and size > size_limit:
raise DatasetTooBig(limit=size_limit)
| 5,347,243 |
def find_index(predicate, List):
"""
(a → Boolean) → [a] → [Number]
    Return the index of the first element that satisfies the
    predicate
"""
for i, x in enumerate(List):
if predicate(x):
return i
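# Small usage sketch; the function implicitly returns None when nothing matches.
print(find_index(lambda x: x > 10, [3, 8, 15, 2]))  # 2
print(find_index(lambda x: x < 0, [3, 8, 15, 2]))   # None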
| 5,347,244 |
def base_user():
"""Base user"""
User.query.delete()
user = User(
username="testuser",
name="testname",
email="[email protected]",
password="1234",
birth_date="2000-01-01",
)
user.save()
yield user
| 5,347,245 |
def logger(
wrapped: Callable[..., str], instance: Any, args: Any, kwargs: Dict[str, Any]
) -> str:
"""Handle logging for :class:`anndata.AnnData` writing functions of :class:`cellrank.estimators.BaseEstimator`."""
log, time = kwargs.pop("log", True), kwargs.pop("time", None)
msg = wrapped(*args, **kwargs)
if log:
logg.info(msg, time=time)
return msg
| 5,347,246 |
def search_front():
"""
Search engine v0.1
- arguments:
- q: query to search (required)
"""
q = request.args.get('q', None)
if not q:
return flask.jsonify({'status': 'error', 'message': 'Missing query'}), 400
res = dict()
cursor = db.run(r.table(PRODUCTS_TABLE).pluck('shop').distinct())
shops = [c for c in cursor]
reg = build_regex(q)
cursor = db.run(r.table(PRODUCTS_TABLE).filter(lambda doc:
doc['name'].match(reg.decode('utf-8'))
).order_by('price'))
data = [c for c in cursor]
d = {'shops': shops,'data': data}
return flask.jsonify({'status': 'ok', 'data': d}), 200
| 5,347,247 |
def assert_dir_structure(data_dir, out_dir):
""" Asserts that the data_dir exists and the out_dir does not """
if not os.path.exists(data_dir):
raise OSError("Invalid data directory '%s'. Does not exist." % data_dir)
if os.path.exists(out_dir):
raise OSError("Output directory at '%s' already exists." % out_dir)
| 5,347,248 |
def insert_rare_words(sentence: str) -> str:
"""
attack sentence by inserting a trigger token in the source sentence.
"""
words = sentence.split()
insert_pos = randint(0, len(words))
insert_token_idx = randint(0, len(WORDS)-1)
words.insert(insert_pos, WORDS[insert_token_idx])
return " ".join(words)
| 5,347,249 |
def _can_be_quoted(loan_amount, lent_amounts):
"""
Checks if the borrower can obtain a quote. To this aim, the loan amount should be less than or
equal to the total amounts given by lenders.
:param loan_amount: the requested loan amount
:param lent_amounts: the sum of the amounts given by lenders
:return: True if the borrower can get a quote, False otherwise
"""
    return sum(lent_amounts) - loan_amount >= 0
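# Two quick checks of the quoting rule:
print(_can_be_quoted(1000, [480, 520]))  # True  (lenders cover the amount exactly)
print(_can_be_quoted(1000, [480, 400]))  # False (lenders are 120 short)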
| 5,347,250 |
def load_specs_from_docstring(docstring):
"""Get dict APISpec from any given docstring."""
# character sequence used by APISpec to separate
# yaml specs from the rest of the method docstring
yaml_sep = "---"
if not docstring:
return {}
specs = yaml_utils.load_yaml_from_docstring(docstring)
# extract summary out of docstring and make it part of specs
summary = docstring.split(yaml_sep)[0] if yaml_sep in docstring else docstring
if (
summary
and not any(key in yaml_utils.PATH_KEYS for key in specs.keys())
and "summary" not in specs
):
specs["summary"] = summary.strip() # sanitize
return specs
| 5,347,251 |
def get_bt_mac_lsb_offset(any_path,config_file):
"""
    Obtains the offset of the BT_MAC LSB from the BASE_MAC LSB by sdkconfig inspection.
"""
mac_sdkconfig_string='CONFIG_NUMBER_OF_UNIVERSAL_MAC_ADDRESS'
sdkconfig=os.path.join(any_path,config_file)
config_lines=open(sdkconfig).readlines()
for line in config_lines:
if mac_sdkconfig_string in line:
split_line=line.split('=')
if '4' in split_line[1]:
return 2
elif '2' in split_line[1]:
return 1
else:
print("Unable to find valid value of sdkconfig variable {mac_var}"
.format(mac_var=mac_sdkconfig_string))
sys.exit(1)
| 5,347,252 |
def get_transform(V1, V2, pair_ix, transform=None, use_ransac=True):
"""
Estimate parameters of an `~skimage.transform` tranformation given
a list of coordinate matches.
Parameters
----------
V1, V2 : [N,2] arrays
Coordinate lists. The transform is applied to V1 to match V2.
pair_ix : [M,2] array
Indices of matched pairs.
transform : `~skimage.transform` transformation.
Transformation to fit to the matched pairs. If `None`, defaults to
`~skimage.transform.SimilarityTransform`.
Returns
-------
tf : `transform`
Fitted transformation.
dx : [M,2] array
X & Y differences between the transformed V1 list and V2.
rms : (float, float)
Standard deviation of the residuals in X & Y.
"""
import skimage.transform
from skimage.measure import ransac
if transform is None:
transform = skimage.transform.SimilarityTransform
if use_ransac:
tf, inliers = ransac((V1[pair_ix[:,0],:], V2[pair_ix[:,1],:]),
transform, min_samples=3,
residual_threshold=3, max_trials=100)
dx = tf(V1[pair_ix[:,0],:]) - V2[pair_ix[:,1],:]
rms = np.std(dx[inliers,:], axis=0)
else:
tf = transform()
tf.estimate(V1[pair_ix[:,0],:], V2[pair_ix[:,1],:])
dx = tf(V1[pair_ix[:,0],:]) - V2[pair_ix[:,1],:]
rms = np.std(dx, axis=0)
return tf, dx, rms
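# A hedged usage sketch: recover a known similarity transform from synthetic
# point lists (all names below are local to the example and assume numpy is
# available as np at module level, as the function itself requires).
import numpy as np
import skimage.transform
rng = np.random.RandomState(42)
V1 = rng.uniform(0, 100, size=(50, 2))
true_tf = skimage.transform.SimilarityTransform(scale=1.0, rotation=0.05,
                                                translation=(3.0, -2.0))
V2 = true_tf(V1)
pair_ix = np.stack([np.arange(50), np.arange(50)], axis=1)
tf, dx, rms = get_transform(V1, V2, pair_ix, use_ransac=True)
print(tf.rotation, tf.translation, rms)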
| 5,347,253 |
def no_trajectory_dct():
""" Dictionary expected answer """
return ()
| 5,347,254 |
def transform_fn(net, data, input_content_type, output_content_type):
"""
Transform a request using the Gluon model. Called once per request.
:param net: The Gluon model.
:param data: The request payload.
:param input_content_type: The request content type.
:param output_content_type: The (desired) response content type.
:return: response payload and content type.
"""
ctx = mx.cpu()
parsed = json.loads(data)
trained_net, customer_index, product_index = net
users = pd.DataFrame({'customer_id': parsed['customer_id']}).merge(customer_index, how='left')['user'].values
items = pd.DataFrame({'product_id': parsed['product_id']}).merge(product_index, how='left')['item'].values
predictions = trained_net(nd.array(users).as_in_context(ctx), nd.array(items).as_in_context(ctx))
response_body = json.dumps(predictions.asnumpy().tolist())
return response_body, output_content_type
| 5,347,255 |
def catergorizeItems(
actions: list[argparse.Action],
) -> Generator[c2gtypes.Item, None, None]:
"""Catergorise each action and generate json."""
for action in actions:
if isinstance(action, _MutuallyExclusiveGroup):
yield buildRadioGroup(action)
elif isinstance(action, (_StoreTrueAction, _StoreFalseAction)):
yield actionToJson(action, "Bool")
elif isinstance(action, _CountAction):
yield actionToJson(action, "Counter")
elif action.choices:
yield actionToJson(action, "Dropdown")
elif isinstance(action.type, argparse.FileType):
yield actionToJson(action, "File")
else:
yield actionToJson(action, "TextBox")
| 5,347,256 |
def print_pandas_dataset(d):
"""
    Given a pandas DataFrame, show its dimensions and first rows
    :param d: pandas DataFrame
:return: None
"""
print("rows = %d; columns=%d" % (d.shape[0], d.shape[1]))
print(d.head())
| 5,347,257 |
def superuser_required(method):
"""
Decorator to check whether user is super user or not
If user is not a super-user, it will raise PermissionDenied or
403 Forbidden.
"""
@wraps(method)
def _wrapped_view(request, *args, **kwargs):
if request.user.is_superuser is False:
raise PermissionDenied
return method(request, *args, **kwargs)
return _wrapped_view
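# Hypothetical usage sketch on a Django function-based view; the view name and
# response body are placeholders, not part of the original module.
from django.http import HttpResponse
@superuser_required
def staff_dashboard(request):
    return HttpResponse("superusers only")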
| 5,347,258 |
def _section_data_download(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
show_proctored_report_button = (
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and
course.enable_proctored_exams
)
section_key = 'data_download_2' if data_download_v2_is_enabled() else 'data_download'
section_data = {
'section_key': section_key,
'section_display_name': _('Data Download'),
'access': access,
'show_generate_proctored_exam_report_button': show_proctored_report_button,
'get_problem_responses_url': reverse('get_problem_responses', kwargs={'course_id': str(course_key)}),
'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': str(course_key)}),
'get_students_features_url': reverse('get_students_features', kwargs={'course_id': str(course_key)}),
'get_issued_certificates_url': reverse(
'get_issued_certificates', kwargs={'course_id': str(course_key)}
),
'get_students_who_may_enroll_url': reverse(
'get_students_who_may_enroll', kwargs={'course_id': str(course_key)}
),
'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': str(course_key)}),
'list_proctored_results_url': reverse(
'get_proctored_exam_results', kwargs={'course_id': str(course_key)}
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': str(course_key)}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': str(course_key)}),
'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': str(course_key)}),
'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': str(course_key)}),
'course_has_survey': True if course.course_survey_name else False, # lint-amnesty, pylint: disable=simplifiable-if-expression
'course_survey_results_url': reverse(
'get_course_survey_results', kwargs={'course_id': str(course_key)}
),
'export_ora2_data_url': reverse('export_ora2_data', kwargs={'course_id': str(course_key)}),
'export_ora2_submission_files_url': reverse(
'export_ora2_submission_files', kwargs={'course_id': str(course_key)}
),
'export_ora2_summary_url': reverse('export_ora2_summary', kwargs={'course_id': str(course_key)}),
}
if not access.get('data_researcher'):
section_data['is_hidden'] = True
return section_data
| 5,347,259 |
def reshape_kb_mask_to_keys_size(kb_mask, kb_keys, kb_total):
"""
TODO document TODO move to helpers
"""
if not isinstance(kb_keys, tuple):
kb_keys = (kb_keys,)
keys_dim = product([keys.shape[1] for keys in kb_keys])
kb_pad_len = kb_total - keys_dim
    assert kb_pad_len >= 0, f"kb dim of mask {kb_mask.shape}, with product of keys ={keys_dim} appears to be larger than kb_total={kb_total} => increase kb_total"
# FIXME why is this sometimes a tuple of filled pad tensors instead of one? TODO
kb_mask_padding = torch.full((kb_mask.shape[0], kb_pad_len), fill_value=False)
if type(kb_mask_padding) == tuple:
assert False, kb_mask_padding
if len(kb_mask_padding.shape) < 2:
assert False, ((kb_mask.shape[0], kb_pad_len), kb_mask_padding.shape)
assert len(kb_mask.shape) == 2, kb_mask.shape
kb_mask_padded = torch.cat([ kb_mask, kb_mask_padding.to( dtype = kb_mask.dtype, device = kb_mask.device )], dim=1)
kb_mask = kb_mask_padded.unsqueeze(1)
### end setup proj keys and mask dimensions
return kb_mask
| 5,347,260 |
def tcache(parser, token):
"""
This will cache the contents of a template fragment for a given amount
of time with support tags.
Usage::
{% tcache [expire_time] [fragment_name] [tags='tag1,tag2'] %}
.. some expensive processing ..
{% endtcache %}
This tag also supports varying by a list of arguments:
{% tcache [expire_time] [fragment_name] [var1] [var2] .. [tags=tags] %}
.. some expensive processing ..
{% endtcache %}
Each unique set of arguments will result in a unique cache entry.
"""
nodelist = parser.parse(('endtcache',))
parser.delete_first_token()
tokens = token.split_contents()
if len(tokens) < 3:
raise template.TemplateSyntaxError("'%r' tag requires at least 2 arguments." % tokens[0])
tags = None
if len(tokens) > 3 and 'tags=' in tokens[-1]:
tags = parser.compile_filter(tokens[-1][5:])
del tokens[-1]
return CacheNode(nodelist,
parser.compile_filter(tokens[1]),
tokens[2], # fragment_name can't be a variable.
[parser.compile_filter(token) for token in tokens[3:]],
tags
)
| 5,347,261 |
def bpg_compress(input_image_p, q, tmp_dir=None, chroma_fmt='444'):
""" Int -> image_out_path :: str """
assert 'png' in input_image_p
if tmp_dir:
input_image_name = os.path.basename(input_image_p)
output_image_bpg_p = os.path.join(tmp_dir, input_image_name).replace('.png', '_tmp_bpg.bpg')
else:
output_image_bpg_p = input_image_p.replace('.png', '_tmp_bpg.bpg')
subprocess.call([BPGENC, '-q', str(q), input_image_p, '-o', output_image_bpg_p, '-f', chroma_fmt])
return output_image_bpg_p
| 5,347,262 |
def location_edit(type_, id_, location_name, location_type, date, user,
description=None, latitude=None, longitude=None):
"""
Update a location.
:param type_: Type of TLO.
:type type_: str
:param id_: The ObjectId of the TLO.
:type id_: str
:param location_name: The name of the location to change.
:type location_name: str
:param location_type: The type of the location to change.
:type location_type: str
:param date: The location date to edit.
:type date: str
:param user: The user setting the new description.
:type user: str
:param description: The new description.
:type description: str
:param latitude: The new latitude.
:type latitude: str
:param longitude: The new longitude.
:type longitude: str
:returns: dict with key 'success' (boolean) and 'message' (str) if failed.
"""
crits_object = class_from_id(type_, id_)
if not crits_object:
return {'success': False, 'message': 'Cannot find %s.' % type_}
crits_object.edit_location(location_name,
location_type,
date,
description=description,
latitude=latitude,
longitude=longitude)
try:
crits_object.save(username=user)
return {'success': True}
except ValidationError, e:
return {'success': False, 'message': "Invalid value: %s" % e}
| 5,347,263 |
def G2(species_index, eta, Rs):
"""G2 function generator.
This is a radial function between an atom and atoms with some chemical
symbol. It is defined in cite:khorshidi-2016-amp, eq. 6. This version is
scaled a little differently than the one Behler uses.
Parameters
----------
species_index : integer
species index for this function. Elements that do not have this index will
be masked out
eta : float
The gaussian width
Rs : float
The gaussian center or shift
Returns
-------
The g2 function with the cosine_cutoff function integrated into it.
"""
def g2(config, distances, atom_mask, species_masks):
distances = np.array(distances)
atom_mask = np.array(atom_mask)
species_masks = np.array(species_masks)
# Mask out non-species contributions
smask = species_masks[:, species_index][:, None]
distances *= smask
distances *= atom_mask
distances *= atom_mask[:, None]
Rc = config.get('cutoff_radius', 6.5)
result = np.where(distances > 0,
np.exp(-eta * ((distances - Rs)**2 / Rc**2)), 0.0)
result *= cosine_cutoff(config, distances, atom_mask)
gsum = np.sum(result, (1, 2))
return gsum[:, None]
g2.__desc__ = 'g2({species_index}, eta={eta}, Rs={Rs})'.format(**locals())
return g2
| 5,347,264 |
def get_portfolio_output(id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPortfolioResult]:
"""
Resource Type definition for AWS::ServiceCatalog::Portfolio
"""
...
| 5,347,265 |
async def async_setup_platform(hass, config, async_add_entities, _discovery_info=None):
"""Set up an Unifi Protect Switch."""
data = hass.data[UPV_DATA]
if not data:
return
ir_on = config.get(CONF_IR_ON)
if ir_on == "always_on":
ir_on = "on"
ir_off = config.get(CONF_IR_OFF)
if ir_off == "led_off":
ir_off = "autoFilterOnly"
elif ir_off == "always_off":
ir_off = "off"
switches = []
for switch_type in config.get(CONF_MONITORED_CONDITIONS):
for camera in data.devices:
switches.append(UnifiProtectSwitch(data, camera, switch_type, ir_on, ir_off))
async_add_entities(switches, True)
| 5,347,266 |
def convert_leg_pose_to_motor_angles(robot_class, leg_poses):
"""Convert swing-extend coordinate space to motor angles for a robot type.
Args:
robot_class: This returns the class (not the instance) for the robot.
Currently it supports minitaur, laikago and mini-cheetah.
leg_poses: A list of leg poses in [swing,extend] or [abduction, swing,
extend] space for all 4 legs. The order is [abd_0, swing_0, extend_0,
abd_1, swing_1, extend_1, ...] or [swing_0, extend_0, swing_1, extend_1,
...]. Zero swing and zero extend gives a neutral standing pose for all the
robots. For minitaur, the conversion is fully accurate, for laikago and
mini-cheetah the conversion is approximate where swing is reflected to hip
and extend is reflected to both knee and the hip.
Returns:
List of motor positions for the selected robot. The list include 8 or 12
motor angles depending on the given robot type as an argument. Currently
laikago and mini-cheetah has motors for abduction which does not exist for
minitaur robot.
Raises:
ValueError: Conversion fails due to wrong inputs.
"""
if len(leg_poses) not in [8, 12]:
raise ValueError("Dimension of the leg pose provided is not 8 or 12.")
neutral_motor_angles = get_neutral_motor_angles(robot_class)
motor_angles = leg_poses
# If it is a robot with 12 motors but the provided leg pose does not contain
# abduction, extend the pose to include abduction.
if len(neutral_motor_angles) == 12 and len(leg_poses) == 8:
for i in _ABDUCTION_ACTION_INDEXES:
motor_angles.insert(i, 0)
# If the robot does not have abduction (minitaur) but the input contains them,
# ignore the abduction angles for the conversion.
elif len(neutral_motor_angles) == 8 and len(leg_poses) == 12:
del leg_poses[::3]
# Minitaur specific conversion calculations using minitaur-specific safety
# limits.
if str(robot_class) == str(laikago.Laikago):
swing_scale = 1.0
extension_scale = 1.0
# Laikago specific conversion multipliers.
swing_scale = _LAIKAGO_SWING_CONVERSION_MULTIPLIER
extension_scale = _LAIKAGO_EXTENSION_CONVERSION_MULTIPLIER
else:
motor_angles = robot_class.convert_leg_pose_to_motor_angles(leg_poses)
return motor_angles
| 5,347,267 |
def download_from_s3(s3_url: str, cache_dir: str = None, access_key: str = None,
secret_access_key: str = None, region_name: str = None):
"""
Download a "folder" from s3 to local. Skip already existing files. Useful for downloading all files of one model
    The default and recommended authentication follows boto3's standard credential chain: ENV variables,
.aws/credentials etc. (see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html).
However, there's also the option to pass `access_key`, `secret_access_key` and `region_name` directly
    as this is needed in some enterprise environments with local s3 deployments.
:param s3_url: Url of the "folder" in s3 (e.g. s3://mybucket/my_modelname)
:param cache_dir: Optional local directory where the files shall be stored.
If not supplied, we'll use a subfolder in torch's cache dir (~/.cache/torch/farm)
:param access_key: Optional S3 Access Key
:param secret_access_key: Optional S3 Secret Access Key
:param region_name: Optional Region Name
:return: local path of the folder
"""
if cache_dir is None:
cache_dir = FARM_CACHE
logger.info(f"Downloading from {s3_url} to {cache_dir}")
if access_key or secret_access_key:
assert secret_access_key and access_key, "You only supplied one of secret_access_key and access_key. We need both."
session = boto3.Session(
aws_access_key_id=access_key,
aws_secret_access_key=secret_access_key,
region_name=region_name
)
s3_resource = session.resource('s3')
else:
s3_resource = boto3.resource('s3')
bucket_name, s3_path = split_s3_path(s3_url)
bucket = s3_resource.Bucket(bucket_name)
    # Materialize the listing so the emptiness check below actually evaluates the result
    objects = list(bucket.objects.filter(Prefix=s3_path))
    if not objects:
        raise ValueError(f"Could not find s3_url: {s3_url}")
for obj in objects:
path, filename = os.path.split(obj.key)
path = os.path.join(cache_dir, path)
# Create local folder
if not os.path.exists(path):
os.makedirs(path)
# Download file if not present locally
if filename:
filepath = os.path.join(path, filename)
if os.path.exists(filepath):
logger.info(f"Skipping {obj.key} (exists locally)")
else:
logger.info(f"Downloading {obj.key} to {filepath} (size: {obj.size/1000000} MB)")
bucket.download_file(obj.key, filepath)
return path
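# A minimal usage sketch; the bucket name, prefix and cache_dir are placeholders, and
# credentials are assumed to come from the standard boto3 chain.
if __name__ == "__main__":
    local_path = download_from_s3("s3://my-model-bucket/bert-base-german",
                                  cache_dir="/tmp/farm_models")
    print(local_path)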
| 5,347,268 |
def get_all_records(session):
"""
return all records
"""
result = session.query(Skeleton).all()
skeletons = convert_results(result)
return skeletons
| 5,347,269 |
def pwm_to_boltzmann_weights(prob_weight_matrix, temp):
"""Convert pwm to boltzmann weights for categorical distribution sampling."""
weights = np.array(prob_weight_matrix)
cols_logsumexp = []
for i in range(weights.shape[1]):
cols_logsumexp.append(scipy.special.logsumexp(weights.T[i] / temp))
for i in range(weights.shape[0]):
for j in range(weights.shape[1]):
weights[i, j] = np.exp(weights[i, j] / temp - cols_logsumexp[j])
return weights
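# A minimal usage sketch (rows are symbols, columns are positions); assumes numpy and
# scipy are importable, as the function body requires.
pwm = [[1.0, 0.2, 0.1],
       [0.5, 1.5, 0.1],
       [0.1, 0.2, 2.0],
       [0.1, 0.2, 0.1]]
boltz = pwm_to_boltzmann_weights(pwm, temp=1.0)
print(boltz.sum(axis=0))  # each column sums to ~1.0, i.e. a valid categorical distribution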
| 5,347,270 |
def mutation_delete_music_composition(identifier: str):
"""Returns a mutation for deleting a MusicComposition.
Args:
identifier: The identifier of the MusicComposition.
Returns:
The string for the mutation for deleting the music composition object based on the identifier.
"""
return format_mutation("DeleteMusicComposition", {"identifier": identifier})
| 5,347,271 |
def coords_from_gaia(gaia_id):
"""Returns table of Gaia DR2 data given a source_id."""
from astroquery.gaia import Gaia
import warnings
warnings.filterwarnings('ignore', module='astropy.io.votable.tree')
adql = 'SELECT gaia.source_id, ra, dec FROM gaiadr2.gaia_source AS gaia WHERE gaia.source_id={0}'.format(gaia_id)
job = Gaia.launch_job(adql)
table = job.get_results()
coords = (table['ra'].data[0], table['dec'].data[0])
return coords
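# A minimal usage sketch; the source_id below is a placeholder and network access to the
# Gaia archive is required.
if __name__ == "__main__":
    ra, dec = coords_from_gaia(gaia_id=2106235115559840896)
    print(ra, dec)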
| 5,347,272 |
def edgeplot(P,T,E,sz = 1):
""" Plots mesh with edges outlined.
Parameters
----------
P : (n,3) float array
A point cloud.
T : (m,3) int array
List of vertex indices for each triangle in the mesh.
E : (k,1) int array
List of edge point indices.
sz : float, default is 1.0
Scaling factor for final plot.
Returns
-------
None
"""
#seeking alternative to points3d.
mlab.triangular_mesh(P[:,0],P[:,1],P[:,2],T,color =(1,0,0))
mlab.points3d(P[E,0],P[E,1],P[E,2],color = (0,0,1), scale_mode = 'none',scale_factor = sz)
return
| 5,347,273 |
def partition(smilist,ratio=0.7):
"""
    A function to create a train/test split of a SMILES list
    :param smilist: smiles (list); note the list is shuffled in place
    :param ratio: training set fraction (float); the remaining 1 - ratio goes to the test split
Return type: traininglist, testlist (list)
"""
    from random import shuffle
    import numpy as np
    shuffle(smilist)
trainlen = int(np.floor( len(smilist)*ratio ) )
return smilist[0:trainlen],smilist[trainlen:]
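# A minimal usage sketch with made-up SMILES strings; exact split membership varies per run
# because the list is shuffled first.
smiles = ["CCO", "c1ccccc1", "CC(=O)O", "CCN", "C=C", "CCCC", "O=C=O", "N#N", "CO", "C"]
train, test = partition(smiles, ratio=0.7)
print(len(train), len(test))  # -> 7 3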
| 5,347,274 |
def align(reference, query):
"""
do a pairwise alignment of the query to the reference, outputting up to 10000 of the highest-scoring alignments.
:param reference: a STRING of the reference sequence
:param query: a STRING of the query sequence
:return: a list of up to 10000 Alignment objects
"""
alns = pairwise2.align.localms(reference, query, 1, -1, -2, -1) # match, mismatch, gap-open, gap-extension
alignments = []
for aln in alns:
al1, al2, score, begin, end = aln
alignments.append(Alignment(gappy_r=al1, gappy_q=al2))
return alignments
| 5,347,275 |
def cleanup_drained_instance(key):
"""Deletes the given drained Instance.
Args:
key: ndb.Key for a models.Instance entity.
"""
instance = key.get()
if not instance:
return
if instance.deleted:
return
if not instance.url:
logging.warning('Instance URL unspecified: %s', key)
return
instance_group_manager = instance.instance_group_manager.get()
if not instance_group_manager:
logging.warning(
'InstanceGroupManager does not exist: %s',
instance.instance_group_manager,
)
return
instance_template_revision = instance_group_manager.key.parent().get()
if not instance_template_revision:
logging.warning(
'InstanceTemplateRevision does not exist: %s',
instance_group_manager.key.parent(),
)
return
instance_template = instance_template_revision.key.parent().get()
if not instance_template:
logging.warning(
'InstanceTemplate does not exist: %s',
instance_template_revision.key.parent(),
)
return
if instance_group_manager.key not in instance_template_revision.drained:
if instance_template_revision.key not in instance_template.drained:
logging.warning('Instance is not drained: %s', key)
return
now = utils.utcnow()
if not exists(instance.url):
# When the instance isn't found, assume it's deleted.
if instance.deletion_ts:
metrics.instance_deletion_time.add(
(now - instance.deletion_ts).total_seconds(),
fields={
'zone': instance.instance_group_manager.id(),
},
)
set_instance_deleted(key, True)
metrics.send_machine_event('DELETION_SUCCEEDED', instance.hostname)
| 5,347,276 |
def output_name(ncfile):
"""output_name.
Args:
ncfile:
"""
ncfile_has_datetime = re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}', ncfile)
if ncfile_has_datetime:
forecast_time = ncfile_has_datetime.group()
else:
raise Exception("ncfile doesn't have datetime data.")
outname = (forecast_time + "apcp")
return outname
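# A minimal usage sketch; the filename is illustrative, it only needs a YYYY-MM-DD_HH stamp.
print(output_name("wrfout_d01_2021-07-15_12:00:00.nc"))  # -> '2021-07-15_12apcp'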
| 5,347,277 |
def _replacement_func_decorator(
fn=None,
name=None,
help="",
args=None):
"""
Replaces xlo.func in jupyter but removes arguments which do not make sense
when called from jupyter
"""
def decorate(fn):
spec = _FuncDescription(fn, name or fn.__name__, help, args)
publish_display_data(
{ "xloil/data": _serialise(spec) },
{ 'type': "FuncRegister" }
)
return fn
return decorate if fn is None else decorate(fn)
| 5,347,278 |
def stress_x_component(coordinates, prisms, pressure, poisson, young):
"""
x-component of the stress field.
Parameters
----------
coordinates : 2d-array
2d numpy array containing ``y``, ``x`` and ``z`` Cartesian cordinates
of the computation points. All coordinates should be in meters.
prisms : 2d-array
2d array containing the Cartesian coordinates of the prism(s). Each
line contains the coordinates of a prism in following order: y1, y2,
x1, x2, z2 and z1. All coordinates should be in meters.
pressure : 1d array
1d array containing the pressure of each prism in MPa.
poisson : float
Poisson’s ratio.
young : float
Young’s modulus in MPa.
Returns
-------
result : array
x-component of the stress field generated by the prisms at the
computation points.
"""
s_xz1 = field_component(
coordinates, prisms, pressure, poisson, young, kernel='s_xz1'
)
s_xz2 = field_component(
coordinates, prisms, pressure, poisson, young, kernel='s_xz2'
)
s_xzz2 = field_component(
coordinates, prisms, pressure, poisson, young, kernel='s_xzz2'
)
result = s_xz1 + s_xzz2 + s_xz2
result *= young/(1 + poisson)
return result
| 5,347,279 |
def process_time_data(flag, last_time, model_params_dict_raw, time_data_raw):
"""
This is a helper function that takes the raw time data from the model
file and replaces it with the correct value in the params file.
:param flag:
:param last_time:
:param model_params_dict_raw:
:param time_data_raw:
:return:
"""
low_time_used = False
if "_" in flag and int(flag.split("_")[1]) == 1:
# There is no time constraint
low_time_used = True
if "inst" in time_data_raw:
        # keep the candidate time numeric so the increment below works, then store it as a string
        temp_time = float(last_time) + 1
        while str(temp_time) in times:
            temp_time += 10
        time_data = str(temp_time)
else:
if low_time_used:
time_data = get_param_value_bounded(time_data_raw, last_time)
else:
if time_data_raw in model_params_dict_raw.keys():
time_data = get_param_value_un_bounded(model_params_dict_raw, time_data_raw)
else:
time_data = time_data_raw
return time_data
| 5,347,280 |
def i_obtain_a_group1_http_error(step, error_code):
""" Assertions to check if HTTP response status has got the expected error code """
assert_equals(str(world.response.status_code), error_code, 'RESPONSE BODY: {}'.format(world.response.content))
| 5,347,281 |
def test_profile_mixed_error(recwarn):
"""Warn if both affine and transform are passed"""
warnings.simplefilter('always')
profile = Profile(affine='foo', transform='bar')
assert len(recwarn) == 1
assert recwarn.pop(DeprecationWarning)
assert 'affine' not in profile
assert profile['transform'] == 'bar'
| 5,347,282 |
def create_pos_data(data, parser):
"""
    Create the positive fh numeric dataset, performing another cleaning pass.
:param data: suspected fh examples
:param parser: parser used for the word tokenization
:return: all positive examples (after the cleaning), will be used
for creating the negative dataset
"""
pos_data = []
pos_examples = []
for entry in tqdm(data):
try:
a = map(unicode, parser.word_tokenize(entry[4].encode('utf-8')))
s, e = num_clean(a, entry[-1][1])
if s is not None and (s != entry[-1][1][0] or e != entry[-1][1][1]):
s, e = num_clean(a, [s, e])
if s is not None:
s_nlp = nlp_split(unicode(SEP.join(a)))
s, e = find_boundaries(s_nlp, s_nlp[s])
if s >= e:
continue
if s > 0 and (e - s) == 1 and s_nlp[s - 1].pos_ in ['NOUN', 'PROPN'] and s_nlp[s].head == s_nlp[s - 1]:
continue
# time like examples - removing
if ':' in s_nlp[s:e].text:
continue
# the one token in uppercase is often classified as NOUN
if s_nlp[s].text.lower() != 'one' and s_nlp[s].pos_ != 'NUM':
continue
pos_data.append((a, (s, e)))
new_entry = entry[:-1]
target = (' '.join(a[s:e]), (s, e))
new_entry = new_entry + (target,)
pos_examples.append(new_entry)
except:
print entry[4]
pos_data, pos_examples = remove_dups(pos_data, pos_examples)
return pos_examples, pos_data
| 5,347,283 |
def call(subcommand, args): # pylint: disable=unused-argument
"""Call a subcommand passing the args."""
KytosConfig.check_versions()
func = getattr(WebAPI, subcommand)
func(args)
| 5,347,284 |
async def absent(hub, ctx, name, resource_uri, connection_auth=None, **kwargs):
"""
.. versionadded:: 2.0.0
Ensure a diagnostic setting does not exist for the specified resource uri.
:param name: The name of the diagnostic setting.
:param resource_uri: The identifier of the resource.
:param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure diagnostic setting is absent:
azurerm.monitor.diagnostic_setting.absent:
- name: my_setting
- resource_uri: my_resource
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if not isinstance(connection_auth, dict):
if ctx["acct"]:
connection_auth = ctx["acct"]
else:
ret[
"comment"
] = "Connection information must be specified via acct or connection_auth dictionary!"
return ret
setting = await hub.exec.azurerm.monitor.diagnostic_setting.get(
ctx, name, resource_uri, azurerm_log_level="info", **connection_auth
)
if "error" in setting:
ret["result"] = True
ret["comment"] = "Diagnostic setting {0} was not found.".format(name)
return ret
if ctx["test"]:
ret["comment"] = "Diagnostic setting {0} would be deleted.".format(name)
ret["result"] = None
ret["changes"] = {
"old": setting,
"new": {},
}
return ret
deleted = await hub.exec.azurerm.monitor.diagnostic_setting.delete(
ctx, name, resource_uri, **connection_auth
)
if deleted:
ret["result"] = True
ret["comment"] = "Diagnostic setting {0} has been deleted.".format(name)
ret["changes"] = {"old": setting, "new": {}}
return ret
ret["comment"] = "Failed to delete diagnostic setting {0}!".format(name)
return ret
| 5,347,285 |
def test_synonym_mapping(model, threshold):
"""Test that synonym mapping is working. Could use some more tests for this function."""
set1 = set(['A', 'B', 'C'])
set2 = set(['A', 'D', 'E'])
mapping = ehnfer.commands.synonym_mapping(set1, set2, model, threshold)
# See function doc for why this is suboptimal
# Same behavior as before
assert frozenset(['A', 'B']) in mapping
assert frozenset(['A', 'D']) in mapping
assert frozenset(['A', 'C']) in mapping
assert frozenset(['A', 'B']) in mapping
| 5,347,286 |
def print_mission_breakdown(results,filename='mission_breakdown.dat', units="imperial"):
"""This creates a file showing mission information.
Assumptions:
None
Source:
N/A
Inputs:
results.segments.*.conditions.
frames.
inertial.position_vector [m]
inertial.time [s]
aerodynamics.lift_coefficient [-]
weights.total [kg]
freestream.
mach_number [-]
pressure [Pa]
filename (optional) <string> Determines the name of the saved file
units (option) <string> Determines the type of units used in the output, options are imperial and si
Outputs:
filename Saved file with name as above
Properties Used:
N/A
"""
imperial = False
SI = False
if units.lower()=="imperial":
imperial = True
elif units.lower()=="si":
SI = True
else:
print("Incorrect system of units selected - choose 'imperial' or 'SI'")
return
fid = open(filename,'w') # Open output file
fid.write('Output file with mission profile breakdown\n\n') #Start output printing
k1 = 1.727133242E-06 # constant to airspeed conversion
k2 = 0.2857142857 # constant to airspeed conversion
TotalRange = 0
i = 0
for key in results.segments.keys(): #loop for all segments
segment = results.segments[key]
if imperial:
HPf = -segment.conditions.frames.inertial.position_vector[-1,2] / Units.ft #Final segment Altitude [ft]
HPi = -segment.conditions.frames.inertial.position_vector[0,2] / Units.ft #Initial segment Altitude [ft]
elif SI:
HPf = -segment.conditions.frames.inertial.position_vector[-1, 2] / Units.m # Final segment Altitude [m]
HPi = -segment.conditions.frames.inertial.position_vector[0, 2] / Units.m # Initial segment Altitude [m]
CLf = segment.conditions.aerodynamics.lift_coefficient[-1] #Final Segment CL [-]
CLi = segment.conditions.aerodynamics.lift_coefficient[0] #Initial Segment CL [-]
Tf = segment.conditions.frames.inertial.time[-1]/ Units.min #Final Segment Time [min]
Ti = segment.conditions.frames.inertial.time[0] / Units.min #Initial Segment Time [min]
Wf = segment.conditions.weights.total_mass[-1] #Final Segment weight [kg]
Wi = segment.conditions.weights.total_mass[0] #Initial Segment weight [kg]
if imperial:
Dist = (segment.conditions.frames.inertial.position_vector[-1,0] - segment.conditions.frames.inertial.position_vector[0,0] ) / Units.nautical_miles #Distance [nm]
elif SI:
Dist = (segment.conditions.frames.inertial.position_vector[-1, 0] -
segment.conditions.frames.inertial.position_vector[0, 0]) / Units.km # Distance [km]
TotalRange = TotalRange + Dist
Mf = segment.conditions.freestream.mach_number[-1] # Final segment mach number
Mi = segment.conditions.freestream.mach_number[0] # Initial segment mach number
        # Airspeed conversion: KTAS to KCAS
atmosphere = SUAVE.Analyses.Atmospheric.US_Standard_1976()
atmo_data = atmosphere.compute_values(0)
p0 = atmo_data.pressure
deltai = segment.conditions.freestream.pressure[0] / p0
deltaf = segment.conditions.freestream.pressure[-1]/ p0
VEi = Mi*(340.294*np.sqrt(deltai)) #Equivalent airspeed [m/s]
QCPOi = deltai*((1.+ k1*VEi**2/deltai)**3.5-1.) #
VCi = np.sqrt(((QCPOi+1.)**k2-1.)/k1) #Calibrated airspeed [m/s]
if imperial:
KCASi = VCi / Units.knots #Calibrated airspeed [knots]
elif SI:
KCASi = VCi #Calibrated airspeed [m/s]
VEf = Mf*(340.294*np.sqrt(deltaf)) #Equivalent airspeed [m/s]
QCPOf = deltaf*((1.+ k1*VEf**2/deltaf)**3.5-1.)
VCf = np.sqrt(((QCPOf+1.)**k2-1.)/k1) #m/s #Calibrated airspeed [m/s]
if imperial:
KCASf = VCf / Units.knots #Calibrated airspeed [knots]
elif SI:
KCASf = VCf
# String formatting
CLf_str = str('%15.3f' % CLf) + '|'
CLi_str = str('%15.3f' % CLi) + '|'
HPf_str = str('%7.0f' % HPf) + '|'
HPi_str = str('%7.0f' % HPi) + '|'
Dist_str = str('%9.0f' % Dist) + '|'
Wf_str = str('%8.0f' % Wf) + '|'
Wi_str = str('%8.0f' % Wi) + '|'
T_str = str('%7.1f' % (Tf-Ti)) + '|'
Fuel_str= str('%8.0f' % (Wi-Wf)) + '|'
Mi_str = str('%7.3f' % Mi) + '|'
Mf_str = str('%7.3f' % Mf) + '|'
KCASi_str = str('%7.1f' % KCASi) + '|'
KCASf_str = str('%7.1f' % KCASf) + '|'
Segment_str = '%- 31s |' % key
if i == 0: #Write header
if imperial:
fid.write( ' FLIGHT PHASE | ALTITUDE | WEIGHT | DIST. | TIME | FUEL | SPEED |\n')
fid.write( ' | From | To |Initial | Final | | | |Inicial| Final |Inicial| Final |\n')
fid.write( ' | ft | ft | kg | kg | nm | min | kg | KCAS | KCAS | Mach | Mach |\n')
fid.write( ' | | | | | | | | | | | |\n')
elif SI:
fid.write(' FLIGHT PHASE | ALTITUDE | WEIGHT | DIST. | TIME | FUEL | SPEED |\n')
fid.write(' | From | To |Initial | Final | | | |Initial| Final |Initial| Final |\n')
fid.write(' | m | m | kg | kg | km | min | kg | m/s | m/s | Mach | Mach |\n')
fid.write(' | | | | | | | | | | | |\n')
# Print segment data
fid.write( Segment_str+HPi_str+HPf_str+Wi_str+Wf_str+Dist_str+T_str+Fuel_str+KCASi_str+KCASf_str+Mi_str+Mf_str+'\n')
i = i+1
#Summary of results [nm]
TotalFuel = results.segments[0].conditions.weights.total_mass[0] - results.segments[-1].conditions.weights.total_mass[-1] #[kg]
    TotalTime = (results.segments[-1].conditions.frames.inertial.time[-1][0] - results.segments[0].conditions.frames.inertial.time[0][0]) #[s]
fid.write(2*'\n')
if imperial:
fid.write(' Total Range (nm) ........... '+ str('%9.0f' % TotalRange)+'\n')
elif SI:
fid.write(' Total Range (km) ........... ' + str('%9.0f' % TotalRange) + '\n')
fid.write(' Total Fuel (kg) ........... '+ str('%9.0f' % TotalFuel)+'\n')
fid.write(' Total Time (hh:mm) ........ '+ time.strftime(' %H:%M', time.gmtime(TotalTime))+'\n')
# Print timestamp
fid.write(2*'\n'+ 43*'-'+ '\n' + datetime.datetime.now().strftime(" %A, %d. %B %Y %I:%M:%S %p"))
    fid.close()
#done!
return
| 5,347,287 |
def save_sentence_json(framenet_path, save_root, num_samples=100_000):
"""Save sentence data as individual files as json
Every sample has two documents, a sentence from
Framenet as well as the frame definition. This
can be used for text summarization Seq2Seq models.
Note:
As a proof of concept, this only saves the first
`num_samples`.
Args:
framenet_path: path to the `fndata-1.7` dataset
save_root: root path to save individual sentence csvs
num_samples: number of samples to save
"""
sentences = load_sentences(framenet_path)
root = Path(save_root)
root.mkdir(parents=True, exist_ok=True)
for idx in tqdm(range(num_samples)):
sent = sentences[idx]
# Name the file based on frame type
path = root.joinpath(f"{sent.frame.name}_{idx}.json")
save_json(sent, path)
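# A minimal usage sketch; the paths are placeholders, and the fndata-1.7 download plus the
# load_sentences/save_json helpers from this module are assumed to be available.
if __name__ == "__main__":
    save_sentence_json("data/fndata-1.7", "data/framenet_sentence_json", num_samples=1_000)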
| 5,347,288 |
def create_account(input_params:dict):
"""
Creates account in ldap db.
"""
ldap_connection = None
try:
ldap_connection = connect_to_ldap_server(input_params['--ldapuser'],
input_params['--ldappasswd'])
add_keys_to_dictionary(input_params)
if not is_account_present(input_params['--account_name'], ldap_connection):
for item in g_create_func_table:
dn, attrs = item['func'](item['key'],input_params)
ldif = modlist.addModlist(attrs)
ldap_connection.add_s(dn,ldif)
disconnect_from_ldap(ldap_connection)
except Exception as e:
if ldap_connection:
disconnect_from_ldap(ldap_connection)
raise e
| 5,347,289 |
def get_ngrok() -> str or None:
"""Sends a `GET` request to api/tunnels to get the `ngrok` public url.
See Also:
Checks for output from get_port function. If nothing, then `ngrok` isn't running.
However as a sanity check, the script uses port number stored in env var to make a `GET` request.
Returns:
str or None:
- On success, returns the `ngrok` public URL.
- On failure, returns None to exit function.
"""
if validate := get_port():
port = validate.split('.')[-1]
else:
if not (port := environ.get('PORT')):
return
try:
response = get(f'http://{ip}:{port}/api/tunnels')
except InvalidURL:
return
except ConnectionError:
return
tunnel = load(response.content.decode(), Loader=FullLoader)['tunnels']
return tunnel[0].get('public_url')
| 5,347,290 |
def contacts_per_person_symptomatic_60x80():
"""
Real Name: b'contacts per person symptomatic 60x80'
Original Eqn: b'contacts per person normal 60x80*(symptomatic contact fraction 80+symptomatic contact fraction 60\\\\ )/2'
Units: b'contact/Day'
Limits: (None, None)
Type: component
b''
"""
return contacts_per_person_normal_60x80() * (symptomatic_contact_fraction_80() +
symptomatic_contact_fraction_60()) / 2
| 5,347,291 |
def get_pull_through_cache_rule_output(ecr_repository_prefix: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPullThroughCacheRuleResult]:
"""
The AWS::ECR::PullThroughCacheRule resource configures the upstream registry configuration details for an Amazon Elastic Container Registry (Amazon Private ECR) pull-through cache.
:param str ecr_repository_prefix: The ECRRepositoryPrefix is a custom alias for upstream registry url.
"""
...
| 5,347,292 |
def get_input_fn_common(pattern, batch_size, mode, hparams: SmartComposeArg):
""" Returns the common input function used in Smart Compose training and evaluation"""
return _get_input_fn_common(pattern, batch_size, mode,
**_get_func_param_from_hparams(_get_input_fn_common, hparams, ('pattern', 'batch_size', 'mode')))
| 5,347,293 |
def text_to_int(sentence, map_dict, max_length=20, is_target=False):
"""
    Encode a text sentence as a sequence of integer ids.
    @param sentence: a complete sentence, str
    @param map_dict: mapping from words to integer ids, dict
    @param max_length: maximum sentence length
    @param is_target: whether this is a target sentence. Target sentences (i.e. the translated
        sentences) are handled differently from source sentences because an <EOS> token must be
        appended at the end.
    """
    # the whole sequence is padded with <PAD> up to max_length
    text_to_idx = []
    # special token indices
    unk_idx = map_dict.get("<UNK>")
    pad_idx = map_dict.get("<PAD>")
    eos_idx = map_dict.get("<EOS>")
    # source sentence: just map each word to its id
    if not is_target:
        for word in sentence.lower().split():
            text_to_idx.append(map_dict.get(word, unk_idx))
    # target sentence: map each word, then append <EOS> at the end
    else:
        for word in sentence.lower().split():
            text_to_idx.append(map_dict.get(word, unk_idx))
        text_to_idx.append(eos_idx)
    # truncate if the sequence is too long
    if len(text_to_idx) > max_length:
        return text_to_idx[:max_length]
    # otherwise pad with <PAD> up to max_length
else:
text_to_idx = text_to_idx + [pad_idx] * (max_length - len(text_to_idx))
return text_to_idx
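# A minimal usage sketch with a tiny, made-up vocabulary.
vocab = {"<PAD>": 0, "<UNK>": 1, "<EOS>": 2, "hello": 3, "world": 4}
print(text_to_int("hello world", vocab, max_length=5))                         # -> [3, 4, 0, 0, 0]
print(text_to_int("hello there world", vocab, max_length=5, is_target=True))   # -> [3, 1, 4, 2, 0]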
| 5,347,294 |
def load_model_data(m, d, data_portal, scenario_directory, subproblem, stage):
"""
:param m:
:param d:
:param data_portal:
:param scenario_directory:
:param subproblem:
:param stage:
:return:
"""
generic_load_model_data(
m=m,
d=d,
data_portal=data_portal,
scenario_directory=scenario_directory,
subproblem=subproblem,
stage=stage,
ramp_rate_limit_column_name=RESERVE_PROVISION_RAMP_RATE_LIMIT_COLUMN_NAME_IN_INPUT_FILE,
reserve_provision_ramp_rate_limit_param=RESERVE_PROVISION_RAMP_RATE_LIMIT_PARAM_NAME,
)
| 5,347,295 |
def map_to_udm_users(users_df: DataFrame) -> DataFrame:
"""
Maps a DataFrame containing Canvas users into the Ed-Fi LMS Unified Data
Model (UDM) format.
Parameters
----------
users_df: DataFrame
Pandas DataFrame containing all Canvas users
Returns
-------
DataFrame
A LMSUsers-formatted DataFrame
DataFrame columns are:
EmailAddress: The primary e-mail address for the user
LocalUserIdentifier: The user identifier assigned by a school or district
Name: The full name of the user
SISUserIdentifier: The user identifier defined in the Student Information System (SIS)
SourceSystem: The system code or name providing the user data
SourceSystemIdentifier: A unique number or alphanumeric code assigned to a user by the source system
CreateDate: datetime at which the record was first retrieved
LastModifiedDate: datetime when the record was modified, or when first retrieved
SourceCreateDate: Date this record was created in the LMS
SourceLastModifiedDate: Date this record was last updated in the LMS
"""
if users_df.empty:
return users_df
df: DataFrame = users_df[
[
"id",
"sis_user_id",
"created_at",
"name",
"email",
"login_id",
"CreateDate",
"LastModifiedDate",
]
].copy()
df["SourceSystem"] = constants.SOURCE_SYSTEM
df.rename(
columns={
"id": "SourceSystemIdentifier",
"sis_user_id": "SISUserIdentifier",
"login_id": "LocalUserIdentifier",
"email": "EmailAddress",
"name": "Name",
"created_at": "SourceCreateDate",
},
inplace=True,
)
df["SourceCreateDate"] = df["SourceCreateDate"].apply(
lambda x: datetime.strftime(
datetime.strptime(x, "%Y-%m-%dT%H:%M:%S%z"), "%Y/%m/%d %H:%M:%S"
)
)
df["UserRole"] = constants.ROLES.STUDENT
df["SourceLastModifiedDate"] = ""
return df
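# A minimal usage sketch with one invented user record; `constants` is the package module
# referenced in the function body.
import pandas as pd

raw = pd.DataFrame([{
    "id": 101, "sis_user_id": "S-0001", "created_at": "2021-01-15T08:30:00+00:00",
    "name": "Jane Doe", "email": "jane.doe@example.edu", "login_id": "jdoe",
    "CreateDate": "2021-02-01 00:00:00", "LastModifiedDate": "2021-02-01 00:00:00",
}])
udm = map_to_udm_users(raw)
print(list(udm.columns))  # includes SourceSystemIdentifier, Name, EmailAddress, UserRole, ...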
| 5,347,296 |
def custom_shibboleth_institution_login(
selenium, config, user_handle, user_pwd, user_name
):
"""Custom Login on Shibboleth institution page."""
wait = WebDriverWait(selenium, config.MAX_WAIT_TIME)
input_user_id = wait.until(
EC.element_to_be_clickable((By.XPATH, "//input[@id='userid']"))
)
input_user_id.send_keys(user_handle)
input_user_pwd = wait.until(
EC.element_to_be_clickable((By.XPATH, "//input[@id='password']"))
)
input_user_pwd.send_keys(user_pwd)
btn_login = wait.until(
EC.element_to_be_clickable((By.XPATH, "//button[@name='_eventId_proceed']"))
)
btn_login.click()
sleep(3)
if selenium.title == config.SHIBBOLETH_LOGIN_PAGE_TITLE:
btn_tou = wait.until(
EC.element_to_be_clickable(
(By.XPATH, "//button[@id='_shib_idp_accept_TOU']")
)
)
btn_tou.click()
btn_next = wait.until(
EC.element_to_be_clickable((By.XPATH, "//button[@id='_eventId_proceed']"))
)
btn_next.click()
navbar_user = wait.until(
EC.element_to_be_clickable((By.XPATH, "//span[@id='userDisplayInfoTitle']"))
)
assert navbar_user.text == user_name
return selenium
| 5,347,297 |
def beamcenter_mask():
"""Returns beamcenter mask as an array. Given the PSF and the dimensions of
the beamstop, the minimum intensity around beamcenter occurs at a radius of
3 pixels, hence a 7x7 mask."""
from numpy import array
return array([[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0],
[0,0,1,1,1,0,0],
[0,0,1,1,1,0,0],
[0,0,1,1,1,0,0],
[0,0,0,0,0,0,0],
[0,0,0,0,0,0,0]])
| 5,347,298 |
def physical_conversion_actionAngle(quantity,pop=False):
"""Decorator to convert to physical coordinates for the actionAngle methods:
quantity= call, actionsFreqs, or actionsFreqsAngles (or EccZmaxRperiRap for actionAngleStaeckel)"""
def wrapper(method):
@wraps(method)
def wrapped(*args,**kwargs):
use_physical= kwargs.get('use_physical',True)
ro= kwargs.get('ro',None)
if ro is None and hasattr(args[0],'_roSet') and args[0]._roSet:
ro= args[0]._ro
if _APY_LOADED and isinstance(ro,units.Quantity):
ro= ro.to(units.kpc).value
vo= kwargs.get('vo',None)
if vo is None and hasattr(args[0],'_voSet') and args[0]._voSet:
vo= args[0]._vo
if _APY_LOADED and isinstance(vo,units.Quantity):
vo= vo.to(units.km/units.s).value
#Remove ro and vo kwargs if necessary
if pop and 'use_physical' in kwargs: kwargs.pop('use_physical')
if pop and 'ro' in kwargs: kwargs.pop('ro')
if pop and 'vo' in kwargs: kwargs.pop('vo')
if use_physical and not vo is None and not ro is None:
out= method(*args,**kwargs)
if 'call' in quantity or 'actions' in quantity:
if 'actions' in quantity and len(out) < 4: # 1D system
fac= [ro*vo]
if _APY_UNITS:
u= [units.kpc*units.km/units.s]
else:
fac= [ro*vo,ro*vo,ro*vo]
if _APY_UNITS:
u= [units.kpc*units.km/units.s,
units.kpc*units.km/units.s,
units.kpc*units.km/units.s]
if 'Freqs' in quantity:
FreqsFac= freq_in_Gyr(vo,ro)
if len(out) < 4: # 1D system
fac.append(FreqsFac)
if _APY_UNITS:
Freqsu= units.Gyr**-1.
u.append(Freqsu)
else:
fac.extend([FreqsFac,FreqsFac,FreqsFac])
if _APY_UNITS:
Freqsu= units.Gyr**-1.
u.extend([Freqsu,Freqsu,Freqsu])
if 'Angles' in quantity:
if len(out) < 4: # 1D system
fac.append(1.)
if _APY_UNITS:
Freqsu= units.Gyr**-1.
u.append(units.rad)
else:
fac.extend([1.,1.,1.])
if _APY_UNITS:
Freqsu= units.Gyr**-1.
u.extend([units.rad,units.rad,units.rad])
if 'EccZmaxRperiRap' in quantity:
fac= [1.,ro,ro,ro]
if _APY_UNITS:
u= [1.,
units.kpc,
units.kpc,
units.kpc]
if _APY_UNITS:
newOut= ()
try:
for ii in range(len(out)):
newOut= newOut+(units.Quantity(out[ii]*fac[ii],
unit=u[ii]),)
except TypeError: # happens if out = scalar
newOut= units.Quantity(out*fac[0],unit=u[0])
else:
newOut= ()
try:
for ii in range(len(out)):
newOut= newOut+(out[ii]*fac[ii],)
except TypeError: # happens if out = scalar
newOut= out*fac[0]
return newOut
else:
return method(*args,**kwargs)
return wrapped
return wrapper
| 5,347,299 |