content (stringlengths 22–815k) | id (int64, 0–4.91M)
---|---
def gen_certs():
""" Generate self-signed TLS certtificate
"""
os.mkdir("./remote/certs")
cmd = 'cd ./remote/certs && openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=workspace.com" -keyout cert.key -out cert.crt'
subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read()
return
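# A minimal usage sketch for gen_certs() above; it assumes `os` and `subprocess`
# are imported in the defining module and that the openssl CLI is on PATH.
gen_certs()  # writes ./remote/certs/cert.key and ./remote/certs/cert.crt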
| 5,344,900 |
def prune_deg_one_nodes(sampled_graph):
""" prune out degree one nodes from graph """
deg_one_nodes = []
for v in sampled_graph.nodes():
if sampled_graph.degree(v) == 1:
deg_one_nodes.append(v)
for v in deg_one_nodes:
sampled_graph.remove_node(v)
return sampled_graph
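# A minimal usage sketch for prune_deg_one_nodes() above, assuming networkx is
# available; a path graph loses its two endpoints in a single pass.
import networkx as nx

g = nx.path_graph(5)            # 0 - 1 - 2 - 3 - 4
pruned = prune_deg_one_nodes(g)
print(sorted(pruned.nodes()))   # [1, 2, 3]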
| 5,344,901 |
def process_all_bpch_files_in_dir(folder=None, ext_str=None):
"""
Process all bpch files in a given directory
(Wrapper of process_bpch_files_in_dir2NetCDF for *ts*bpch* and *ctm*bpch*
files)
folder (str): directory address for folder containing files
ext_str (str): extra str to inc. in monthly filenames
Returns
-------
(None)
"""
# - Process *ctm*bpch* files
# Temporary variables
bpch_file_type = '*ctm.bpch.*'
filename = 'ctm.nc'
file_prefix = 'ctm_'
# process *ctm*bpch* to NetCDF
process_bpch_files_in_dir2NetCDF(folder=folder, filename=filename,
ext_str=ext_str, file_prefix=file_prefix,
bpch_file_type=bpch_file_type,
split_by_month=True)
# - Process *ts*bpch* files
# Temporary variables
bpch_file_type = 'ts*bpch*'
filename = 'ts_ctm.nc'
file_prefix = 'ts_ctm_'
# Process *ts*bpch* to netCDF4
process_bpch_files_in_dir2NetCDF(folder=folder, filename=filename,
ext_str=ext_str, file_prefix=file_prefix,
bpch_file_type=bpch_file_type,
split_by_month=True)
| 5,344,902 |
def reduce_clauses(clauses):
"""
Reduce a clause set by eliminating redundant clauses
"""
used = []
unexplored = clauses
while unexplored:
cl, unexplored = unexplored[0], unexplored[1:]
if not subsume(used, cl) and not subsume(unexplored,cl):
used.append(cl)
return used
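# The subsume() helper used above is not shown. A minimal sketch under the usual
# assumption that clauses are collections of literals and one clause subsumes
# another when its literals form a subset of the other's:
def subsume(clause_list, cl):
    """Return True if any clause in clause_list subsumes cl."""
    return any(set(other) <= set(cl) for other in clause_list)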
| 5,344,903 |
def get_pretrained_t2v(name, model_dir=MODEL_DIR):
"""
A convenient helper when you want to convert a token list to a vector easily.
Parameters
----------
name:str
select the pretrained model
e.g.:
d2v_all_256,
d2v_sci_256,
d2v_eng_256,
d2v_lit_256,
w2v_eng_300,
w2v_lit_300.
model_dir:str
the path of model, default: MODEL_DIR = '~/.EduNLP/model'
Returns
-------
t2v model: T2V
Examples
--------
>>> item = [{'ques_content':'有公式$\\FormFigureID{wrong1?}$和公式$\\FormFigureBase64{wrong2?}$,\
... 如图$\\FigureID{088f15ea-8b7c-11eb-897e-b46bfc50aa29}$,若$x,y$满足约束条件$\\SIFSep$,则$z=x+7 y$的最大值为$\\SIFBlank$'}]
>>> i2v = get_pretrained_t2v("test_d2v", "examples/test_model/data/d2v") # doctest: +ELLIPSIS
>>> print(i2v(item)) # doctest: +ELLIPSIS
[array([...dtype=float32)]
"""
if name not in PRETRAINED_MODELS:
raise KeyError(
"Unknown pretrained model %s, use one of the provided pretrained models: %s" % (
name, ", ".join(PRETRAINED_MODELS.keys()))
)
url, model_name, *args = PRETRAINED_MODELS[name]
model_path = get_data(url, model_dir)
if model_name in ["d2v", "w2v"]:
postfix = ".bin" if model_name == "d2v" else ".kv"
model_path = path_append(model_path, os.path.basename(model_path) + postfix, to_str=True)
return T2V(model_name, model_path, *args)
| 5,344,904 |
def get_np_io(arr, **kwargs) -> BytesIO:
"""Get the numpy object as bytes.
:param arr: Array-like
:param kwargs: Additional kwargs to pass to :func:`numpy.save`.
:return: A bytes object that can be used as a file.
"""
import numpy as np
bio = BytesIO()
np.save(bio, arr, **kwargs)
bio.seek(0)
return bio
| 5,344,905 |
def get_deep_attr(obj, keys):
""" Helper for DeepKey"""
cur = obj
for k in keys:
if isinstance(cur, Mapping) and k in cur:
cur = cur[k]
continue
else:
try:
cur = getattr(cur, k)
continue
except AttributeError:
pass
raise DataError(error='Unexistent key')
return cur
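# A small usage sketch for get_deep_attr() above, assuming Mapping is
# collections.abc.Mapping and DataError is the trafaret exception; it mixes
# dict lookups and attribute access along the key path.
from types import SimpleNamespace

obj = {"config": SimpleNamespace(timeout=30)}
print(get_deep_attr(obj, ["config", "timeout"]))  # 30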
| 5,344,906 |
def fork_node_item_inline_editor(item, view, pos=None) -> bool:
"""Text edit support for Named items."""
@transactional
def update_text(text):
item.subject.joinSpec = text
return True
def escape():
item.subject.joinSpec = join_spec
subject = item.subject
if not subject:
return False
join_spec = subject.joinSpec or ""
box = view.get_item_bounding_box(view.hovered_item)
entry = popup_entry(join_spec, update_text)
show_popover(entry, view, box, escape)
return True
| 5,344,907 |
def get_selfies_alphabet(smiles_list):
"""Returns a sorted list of all SELFIES tokens required to build a
SELFIES string for each molecule."""
selfies_list = list(map(sf.encoder, smiles_list))
all_selfies_symbols = sf.get_alphabet_from_selfies(selfies_list)
all_selfies_symbols.add('[nop]')
selfies_alphabet = list(all_selfies_symbols)
selfies_alphabet.sort()
return selfies_alphabet
| 5,344,908 |
def update_dependencies_simple():
"""
Updates all parameter dependencies
"""
# Number of input neurons
par['n_input'] = par['num_motion_tuned'] + par['num_fix_tuned'] + par['num_rule_tuned']
# General network shape
par['shape'] = (par['n_input'], par['n_hidden'], par['n_output'])
# If num_inh_units is set > 0, then neurons can be either excitatory or
# inihibitory; is num_inh_units = 0, then the weights projecting from
# a single neuron can be a mixture of excitatory or inhibitory
if par['exc_inh_prop'] < 1:
par['EI'] = True
else:
par['EI'] = False
par['num_exc_units'] = int(np.round(par['n_hidden']*par['exc_inh_prop']))
par['num_inh_units'] = par['n_hidden'] - par['num_exc_units']
par['EI_list'] = np.ones(par['n_hidden'], dtype=np.float32)
par['EI_list'][-par['num_inh_units']:] = -1.
par['EI_matrix'] = np.diag(par['EI_list'])
par['ind_inh'] = np.where(par['EI_list'] == -1)[0]
# Membrane time constant of RNN neurons
par['alpha_neuron'] = np.float32(par['dt'])/par['membrane_time_constant']
# The standard deviation of the Gaussian noise added to each RNN neuron
# at each time step
par['noise_rnn'] = np.sqrt(2*par['alpha_neuron'])*par['noise_rnn_sd']
par['noise_in'] = np.sqrt(2/par['alpha_neuron'])*par['noise_in_sd'] # since term will be multiplied by par['alpha_neuron']
par['num_exc'] = int(par['n_hidden']*par['exc_inh_prop'])
par['num_inh'] = int(par['n_hidden'] - par['num_exc'])
# General event profile info
#par['name_of_stimulus'], par['date_stimulus_created'], par['author_of_stimulus_profile'] = get_profile(par['profile_path'])
# List of events that occur for the network
#par['events'] = get_events(par['profile_path'])
# The time step in seconds
par['dt_sec'] = par['dt']/1000
# Length of each trial in ms
if par['trial_type'] == 'dualDMS' and not par['dualDMS_single_test']:
par['trial_length'] = par['dead_time']+par['fix_time']+par['sample_time']+2*par['delay_time']+2*par['test_time']
else:
par['trial_length'] = par['dead_time']+par['fix_time']+par['sample_time']+par['delay_time']+par['test_time']
# Length of each trial in time steps
par['num_time_steps'] = par['trial_length']//par['dt']
####################################################################
### Setting up assorted intial weights, biases, and other values ###
####################################################################
par['h_init'] = 0.1*np.ones((par['n_hidden'], par['batch_train_size']), dtype=np.float32)
par['h_init'][par['ind_inh']] = 0.4
par['input_to_hidden_dims'] = [par['n_hidden'], par['num_motion_tuned']]
par['hidden_to_hidden_dims'] = [par['n_hidden'], par['n_hidden']]
par['w_out_mask'] = np.ones((par['n_output'], par['n_hidden']), dtype=np.float32)
if par['EI']:
par['w_out_mask'][:, par['ind_inh']] = 0
#par['w_out_mask'][:,:25] = 0 # neurons receiving input from input layer cannot project to output layer
par['w_rule_mask'] = np.ones((par['n_hidden'], par['num_rule_tuned']), dtype=np.float32)
#par['w_rule_mask'][:25, :] = 0.
print('Generating random initial weights...')
par['w_in0'] = initialize([par['n_hidden'], par['n_input']], par['connection_prob'])
par['w_out0'] = initialize([par['n_output'], par['n_hidden']], par['connection_prob'])
if par['EI']:
par['w_rnn0'] = initialize(par['hidden_to_hidden_dims'], par['connection_prob'])/4
par['w_rnn0'][:, par['ind_inh']] *= 4
par['w_out0'][:, par['ind_inh']] = 0
for i in range(par['n_hidden']):
par['w_rnn0'][i,i] = 0
par['w_rnn_mask'] = np.ones((par['hidden_to_hidden_dims']), dtype=np.float32) - np.eye(par['n_hidden'])
else:
par['w_rnn0'] = 0.54*np.eye(par['n_hidden'])
par['w_rnn_mask'] = np.ones((par['hidden_to_hidden_dims']), dtype=np.float32)
"""
Setting up synaptic parameters
0 = static
1 = facilitating
2 = depressing
"""
par['synapse_type'] = np.zeros(par['n_hidden'], dtype=np.int8)
# only facilitating synapses
if par['synapse_config'] == 'stf':
par['synapse_type'] = np.ones(par['n_hidden'], dtype=np.int8)
# only depressing synapses
elif par['synapse_config'] == 'std':
par['synapse_type'] = 2*np.ones(par['n_hidden'], dtype=np.int8)
# even numbers facilitating, odd numbers depressing
elif par['synapse_config'] == 'std_stf':
par['synapse_type'] = np.ones(par['n_hidden'], dtype=np.int8)
par['ind'] = range(1,par['n_hidden'],2)
par['synapse_type'][par['ind']] = 2
par['alpha_stf'] = np.ones((par['n_hidden'], 1), dtype=np.float32)
par['alpha_std'] = np.ones((par['n_hidden'], 1), dtype=np.float32)
par['U'] = np.ones((par['n_hidden'], 1), dtype=np.float32)
# initial synaptic values
par['syn_x_init'] = np.zeros((par['n_hidden'], par['batch_train_size']), dtype=np.float32)
par['syn_u_init'] = np.zeros((par['n_hidden'], par['batch_train_size']), dtype=np.float32)
for i in range(par['n_hidden']):
if par['synapse_type'][i] == 1:
par['alpha_stf'][i,0] = par['dt']/par['tau_slow']
par['alpha_std'][i,0] = par['dt']/par['tau_fast']
par['U'][i,0] = 0.15
par['syn_x_init'][i,:] = 1
par['syn_u_init'][i,:] = par['U'][i,0]
elif par['synapse_type'][i] == 2:
par['alpha_stf'][i,0] = par['dt']/par['tau_fast']
par['alpha_std'][i,0] = par['dt']/par['tau_slow']
par['U'][i,0] = 0.45
par['syn_x_init'][i,:] = 1
par['syn_u_init'][i,:] = par['U'][i,0]
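# A quick numeric check of the time-constant arithmetic above, assuming
# dt = 10 ms, a 100 ms membrane time constant, and noise SDs of 0.5 / 0.1
# (all illustrative values, not taken from the actual parameter file):
import numpy as np

dt, tau_m = 10.0, 100.0
alpha_neuron = np.float32(dt) / tau_m            # 0.1
noise_rnn = np.sqrt(2 * alpha_neuron) * 0.5      # ~0.224
noise_in = np.sqrt(2 / alpha_neuron) * 0.1       # ~0.447
print(alpha_neuron, noise_rnn, noise_in)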
| 5,344,909 |
def get_face_angular_dataloader(dataset_path, input_size, batch_size, num_workers, train_portion=1):
""" Prepare dataset for training and evaluating pipeline
Args:
dataset_path (str)
input_size (int)
batch_size (int)
num_workers (int)
train_portion (float)
Return:
train_loader (torch.utils.data.DataLoader)
val_loader (torch.utils.data.DataLoader)
test_loader (torch.utils.data.DataLoader)
"""
train_transform = transforms.Compose([
transforms.Resize(input_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(FACE_MEAN, FACE_STD)
])
test_transform = transforms.Compose([
transforms.ToTensor(),
])
train_dataset = datasets.ImageFolder(root=osp.join(dataset_path, "face", "train", "CASIA-WebFace"),
transform=train_transform)
test_dataset = PairFaceDataset(root=osp.join(dataset_path, "face", "test", "LFW"),
transform=test_transform)
if train_portion != 1:
train_len = len(train_dataset)
indices = list(range(train_len))
random.shuffle(indices)
split = int(np.floor(train_portion * train_len))
train_idx, val_idx = indices[:split], indices[split:]
train_sampler = SubsetRandomSampler(train_idx)
val_sampler = SubsetRandomSampler(val_idx)
train_loader = DataLoader(
train_dataset,
num_workers=num_workers,
batch_size=batch_size,
sampler=train_sampler,
pin_memory=True)
val_loader = DataLoader(
train_dataset,
num_workers=num_workers,
batch_size=batch_size,
sampler=val_sampler,
pin_memory=True)
else:
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
num_workers=num_workers,
shuffle=True)
val_loader = None
test_loader = DataLoader(
dataset=test_dataset,
shuffle=False,
batch_size=batch_size,
num_workers=num_workers)
return train_loader, val_loader, test_loader
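# Hypothetical usage of get_face_angular_dataloader() above; it assumes the
# CASIA-WebFace / LFW folder layout referenced in the body exists under ./data.
train_loader, val_loader, test_loader = get_face_angular_dataloader(
    dataset_path="./data", input_size=112, batch_size=64,
    num_workers=4, train_portion=0.9)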
| 5,344,910 |
def compile_sql_numericize(element, compiler, **kw):
"""
Turn common number formatting into a number. use metric abbreviations, remove stuff like $, etc.
"""
arg, = list(element.clauses)
def sql_only_numeric(text):
# Returns substring of numeric values only (-, ., numbers, scientific notation)
# return func.nullif(func.substring(text, r'([+\-]?(\d\.?\d*[Ee][+\-]?\d+|(\d+\.\d*|\d*\.\d+)|\d+))'), '')
return func.coalesce(
func.substring(text, r'([+\-]?(\d+\.?\d*[Ee][+\-]?\d+))'), # check for valid scientific notation
func.nullif(
func.regexp_replace(text, r'[^0-9\.\+\-]+', '', 'g'), # remove all the non-numeric characters
''
)
)
return compiler.process(sql_only_numeric(arg), **kw)
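# The custom SQL element this compiler handles is not shown. A minimal sketch of
# how it might be declared and registered via SQLAlchemy's compiler extension
# (the class name and the query usage are assumptions, not the original code):
from sqlalchemy.sql.expression import FunctionElement
from sqlalchemy.ext.compiler import compiles

class numericize(FunctionElement):
    name = "numericize"

compiles(numericize)(compile_sql_numericize)  # register the compiler above
# e.g. session.query(numericize(MyTable.price_text))  -- MyTable is hypothetical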
| 5,344,911 |
def run_sgd(model, epochs):
"""
Runs SGD for a predefined number of epochs and saves the resulting model.
"""
print("Training full network")
weights_rand_init = model.optimize(epochs=epochs)
# weights_rand_init = model.optimize(epochs=epochs, batch_size=55000, learning_rate=0.1)
print("Model optimized!!!")
return [model.get_model_weights(), weights_rand_init]
| 5,344,912 |
def get_all_outcome_links_for_context_courses(request_ctx, course_id, outcome_style=None, outcome_group_style=None, per_page=None, **request_kwargs):
"""
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param outcome_style: (optional) The detail level of the outcomes. Defaults to "abbrev". Specify "full" for more information.
:type outcome_style: string or None
:param outcome_group_style: (optional) The detail level of the outcome groups. Defaults to "abbrev". Specify "full" for more information.
:type outcome_group_style: string or None
:param per_page: (optional) Set how many results canvas should return, defaults to config.LIMIT_PER_PAGE
:type per_page: integer or None
:return: Get all outcome links for context
:rtype: requests.Response (with array data)
"""
if per_page is None:
per_page = request_ctx.per_page
path = '/v1/courses/{course_id}/outcome_group_links'
payload = {
'outcome_style' : outcome_style,
'outcome_group_style' : outcome_group_style,
'per_page' : per_page,
}
url = request_ctx.base_api_url + path.format(course_id=course_id)
response = client.get(request_ctx, url, payload=payload, **request_kwargs)
return response
| 5,344,913 |
def GetContentResourceSpec():
"""Gets Content resource spec."""
return concepts.ResourceSpec(
'dataplex.projects.locations.lakes.content',
resource_name='content',
projectsId=concepts.DEFAULT_PROJECT_ATTRIBUTE_CONFIG,
locationsId=LocationAttributeConfig(),
lakesId=LakeAttributeConfig(),
contentId=ContentAttributeConfig())
| 5,344,914 |
def softplus(
x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
"""This operator computes the softplus value of Blob.
The equation is:
.. math::
out = log(e^x+1)
Args:
x (oneflow._oneflow_internal.BlobDesc): A Blob
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: The result Blob
For example:
.. code-block:: python
import oneflow.compatible.single_client as flow
import numpy as np
import oneflow.compatible.single_client.typing as tp
@flow.global_function()
def softplus_Job(x: tp.Numpy.Placeholder((3,))
) -> tp.Numpy:
return flow.math.softplus(x)
x = np.array([-1, 0, 1]).astype(np.float32)
out = softplus_Job(x)
# out [0.31326166 0.6931472 1.3132616 ]
"""
return build_unary_elemwise_math_op("softplus", x, name)
| 5,344,915 |
def test_basic_knn(alg_list, datasets, textkey="cv-basic", testname="BASIC", seed=28):
"""
Evaluates the algorithms specified in the datasets provided.
Parameters
----------
alg_list : list
The list of algorithms. Each item must be a 5-tuple (alg, name, key, ks, cons), where 'alg' is the algorithm, 'name'
is the string name, 'key' is a key-name for the alg, 'ks' is the list of neighbors to consider in k-NN, and cons
is the initialization code of the algorithm.
datasets : list
The list of datasets to use. Each item must be a pair (str, frac), where 'str' is the name of the dataset
and 'frac' is the fraction of the dataset to take (for big datasets).
"""
print("* " + testname + " TEST STARTED")
mms = MinMaxScaler()
rownames = ["FOLD " + str(i + 1) for i in range(10)]
results = {}
for dset, f in datasets:
print("** DATASET ", dset)
folds, [n, d, c] = ds.reduced_dobscv10(dset, f)
print("** SIZE ", n, " x ", d, " [", c, " classes]")
results[dset] = {}
norm_folds = []
for i, (xtr, ytr, xtst, ytst) in enumerate(folds):
print("*** NORMALIZING FOLD ", i + 1)
# Normalizing
xtr = mms.fit_transform(xtr)
xtst = mms.transform(xtst)
norm_folds.append((xtr, ytr, xtst, ytst))
for j, (dml, dml_name, dml_key, ks, cons) in enumerate(alg_list):
print("*** EVALUATING DML ", dml_name)
results[dset][dml_key] = defaultdict(lambda: np.zeros([12, 3]))
for i, (xtr, ytr, xtst, ytst) in enumerate(norm_folds):
print("**** FOLD ", i + 1)
np.random.seed(seed)
try:
print("***** TRAINING")
start = time.time() # Start timer
dml.fit(xtr, ytr) # Fitting distance
end = time.time() # Stop timer
elapsed = end - start # Timer measurement
for k in ks:
print("****** TEST K = ", k)
knn = kNN(k, dml)
knn.fit(xtr, ytr)
results[dset][dml_key][k][i, 0] = knn.score() # Train score
results[dset][dml_key][k][i, 1] = knn.score(xtst, ytst) # Test score
results[dset][dml_key][k][i, 2] = elapsed # Time score
except Exception:
print("--- ERROR IN DML ", dml_name)
for k in ks:
results[dset][dml_key][k][i, 0] = np.nan # Train score
results[dset][dml_key][k][i, 1] = np.nan # Test score
results[dset][dml_key][k][i, 2] = np.nan # Time score
traceback.print_exc()
for k in ks:
results[dset][dml_key][k][10, :] = np.mean(results[dset][dml_key][k][:10, :], axis=0)
results[dset][dml_key][k][11, :] = np.std(results[dset][dml_key][k][:10, :], axis=0)
# Saving results
r = pd.DataFrame(results[dset][dml_key][k], columns=['TRAIN', 'TEST', 'TIME'], index=rownames + ["MEAN", "STD"])
r.to_csv("../results/" + textkey + "-" + dml_key + "-" + str(k) + "nn-" + dset + ".csv")
r.to_html("../results/" + textkey + "-" + dml_key + "-" + str(k) + "nn-" + dset + ".html", classes=[table_css(), "kfoldtable meanstd"])
print("RESULTS: ", dset, ", dml = ", dml_name, ", k = ", k)
print(r)
| 5,344,916 |
def diff_gcs_directories(
base_directory_url: str, target_directory_url: str
) -> Tuple[List[str], List[str], List[str]]:
"""
Compare objects under different GCS prefixes.
:param base_directory_url: URL for base directory
:param target_directory_url: URL for target directory
:returns: Tuple with 3 elements:
List of objects in base directory that are not present in target directory
List of objects in target directory that are not present in base directory
List of objects with different content in base and target directory
"""
base = urlparse(base_directory_url)
target = urlparse(target_directory_url)
if base.scheme != "gs":
raise ValueError("base_directory_url must be a gs:// URL")
if target.scheme != "gs":
raise ValueError("target_directory_url must be a gs:// URL")
client = Client(project=None)
base_blobs = client.list_blobs(base.hostname, prefix=base.path.strip("/") + "/")
base_blobs = {
_remove_prefix(blob.name, base.path.strip("/")): blob for blob in base_blobs
}
missing_objects = set(base_blobs.keys())
extra_objects = []
changed_objects = []
target_blobs = client.list_blobs(
target.hostname, prefix=target.path.strip("/") + "/"
)
for blob in target_blobs:
key = _remove_prefix(blob.name, target.path.strip("/"))
missing_objects.discard(key)
try:
if blob.md5_hash != base_blobs[key].md5_hash:
changed_objects.append(key)
except KeyError:
extra_objects.append(key)
return GCSDiffResult(list(missing_objects), extra_objects, changed_objects)
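# Hypothetical usage of diff_gcs_directories() above; the bucket and prefixes
# are placeholders, and Google Cloud credentials must already be configured.
missing, extra, changed = diff_gcs_directories(
    "gs://my-bucket/releases/v1", "gs://my-bucket/releases/v2")
print(len(missing), len(extra), len(changed))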
| 5,344,917 |
def main():
"""Run devappserver and the user's application in separate containers.
The application must be started with the proper environment variables,
port bindings, and volume bindings. The devappserver image runs a
standalone api server.
"""
logging.getLogger('appstart').setLevel(logging.INFO)
# args should include all the args to the sandbox, as well as a
# parser_type arg, which indicates which subparser was used.
args = vars(parsing.make_appstart_parser().parse_args())
# Find out what parser was used (and remove the entry from the args).
parser_type = args.pop('parser_type')
# In response to 'appstart init', create a new devappserver base image.
if parser_type == 'init':
utils.build_from_directory(os.path.dirname(devappserver_init.__file__),
constants.DEVAPPSERVER_IMAGE,
**args)
utils.build_from_directory(os.path.dirname(pinger.__file__),
constants.PINGER_IMAGE,
**args)
# In response to 'appstart run', create a container sandbox and run it.
elif parser_type == 'run':
try:
with warnings.catch_warnings():
# Suppress the InsecurePlatformWarning generated by urllib3
# see: http://stackoverflow.com/questions/29134512/
warnings.simplefilter('ignore')
with container_sandbox.ContainerSandbox(**args):
while True:
# Sleeping like this is hacky, but it works. Note
# that signal.pause is not compatible with Windows...
time.sleep(10000)
except KeyboardInterrupt:
utils.get_logger().info('Exiting')
sys.exit(0)
except utils.AppstartAbort as err:
if err.message:
utils.get_logger().warning(str(err.message))
sys.exit(1)
# In response to 'appstart validate', attempt to perform validation.
elif parser_type == 'validate':
logfile = args.pop('log_file')
threshold = args.pop('threshold')
tags = args.pop('tags')
verbose = args.pop('verbose')
list_clauses = args.pop('list_clauses')
success = False
utils.get_logger().setLevel(logging.INFO)
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
validator = contract.ContractValidator(runtime_contract, **args)
if list_clauses:
validator.list_clauses()
sys.exit(0)
success = validator.validate(tags, threshold, logfile, verbose)
except KeyboardInterrupt:
utils.get_logger().info('Exiting')
except utils.AppstartAbort as err:
if err.message:
utils.get_logger().warning(err.message)
if success:
sys.exit(0)
sys.exit('Validation failed')
else:
# This should not be reached
sys.exit(1)
| 5,344,918 |
def load_compatible_apps(file_name: str) -> List[Product]:
"""Loads from file and from github and merges results"""
local_list = load_installable_apps_from_file(file_name)
try:
github_list = load_compatible_apps_from_github()
except (URLError, IOError):
github_list = []
return list(set(local_list) | set(github_list))
| 5,344,919 |
def delete(page_id):
"""Delete a page."""
page = _get_page(page_id)
page_name = page.name
site_id = page.site_id
success, event = page_service.delete_page(page.id, initiator_id=g.user.id)
if not success:
flash_error(
gettext('Page "%(name)s" could not be deleted.', name=page_name)
)
return url_for('.view_current_version', page_id=page.id)
flash_success(gettext('Page "%(name)s" has been deleted.', name=page_name))
page_signals.page_deleted.send(None, event=event)
return url_for('.index_for_site', site_id=site_id)
| 5,344,920 |
async def test_config_schema(hass):
"""Test that config schema is imported properly."""
config = {
konnected.DOMAIN: {
konnected.CONF_API_HOST: "http://1.1.1.1:8888",
konnected.CONF_ACCESS_TOKEN: "abcdefgh",
konnected.CONF_DEVICES: [{konnected.CONF_ID: "aabbccddeeff"}],
}
}
assert konnected.CONFIG_SCHEMA(config) == {
"konnected": {
"access_token": "abcdefgh",
"api_host": "http://1.1.1.1:8888",
"devices": [
{
"default_options": {
"blink": True,
"api_host": "http://1.1.1.1:8888",
"discovery": True,
"io": {
"1": "Disabled",
"10": "Disabled",
"11": "Disabled",
"12": "Disabled",
"2": "Disabled",
"3": "Disabled",
"4": "Disabled",
"5": "Disabled",
"6": "Disabled",
"7": "Disabled",
"8": "Disabled",
"9": "Disabled",
"alarm1": "Disabled",
"alarm2_out2": "Disabled",
"out": "Disabled",
"out1": "Disabled",
},
},
"id": "aabbccddeeff",
}
],
}
}
# check with host info
config = {
konnected.DOMAIN: {
konnected.CONF_ACCESS_TOKEN: "abcdefgh",
konnected.CONF_DEVICES: [
{konnected.CONF_ID: "aabbccddeeff", "host": "192.168.1.1", "port": 1234}
],
}
}
assert konnected.CONFIG_SCHEMA(config) == {
"konnected": {
"access_token": "abcdefgh",
"devices": [
{
"default_options": {
"blink": True,
"api_host": "",
"discovery": True,
"io": {
"1": "Disabled",
"10": "Disabled",
"11": "Disabled",
"12": "Disabled",
"2": "Disabled",
"3": "Disabled",
"4": "Disabled",
"5": "Disabled",
"6": "Disabled",
"7": "Disabled",
"8": "Disabled",
"9": "Disabled",
"alarm1": "Disabled",
"alarm2_out2": "Disabled",
"out": "Disabled",
"out1": "Disabled",
},
},
"id": "aabbccddeeff",
"host": "192.168.1.1",
"port": 1234,
}
],
}
}
# check pin to zone and multiple output
config = {
konnected.DOMAIN: {
konnected.CONF_ACCESS_TOKEN: "abcdefgh",
konnected.CONF_DEVICES: [
{
konnected.CONF_ID: "aabbccddeeff",
"binary_sensors": [
{"pin": 2, "type": "door"},
{"zone": 1, "type": "door"},
],
"switches": [
{
"zone": 3,
"name": "Beep Beep",
"momentary": 65,
"pause": 55,
"repeat": 4,
},
{
"zone": 3,
"name": "Warning",
"momentary": 100,
"pause": 100,
"repeat": -1,
},
],
}
],
}
}
assert konnected.CONFIG_SCHEMA(config) == {
"konnected": {
"access_token": "abcdefgh",
"devices": [
{
"default_options": {
"blink": True,
"api_host": "",
"discovery": True,
"io": {
"1": "Binary Sensor",
"10": "Disabled",
"11": "Disabled",
"12": "Disabled",
"2": "Binary Sensor",
"3": "Switchable Output",
"4": "Disabled",
"5": "Disabled",
"6": "Disabled",
"7": "Disabled",
"8": "Disabled",
"9": "Disabled",
"alarm1": "Disabled",
"alarm2_out2": "Disabled",
"out": "Disabled",
"out1": "Disabled",
},
"binary_sensors": [
{"inverse": False, "type": "door", "zone": "2"},
{"inverse": False, "type": "door", "zone": "1"},
],
"switches": [
{
"zone": "3",
"activation": "high",
"name": "Beep Beep",
"momentary": 65,
"pause": 55,
"repeat": 4,
},
{
"zone": "3",
"activation": "high",
"name": "Warning",
"momentary": 100,
"pause": 100,
"repeat": -1,
},
],
},
"id": "aabbccddeeff",
}
],
}
}
| 5,344,921 |
def save_mission(aFileName):
"""
Save a mission in the Waypoint file format
(http://qgroundcontrol.org/mavlink/waypoint_protocol#waypoint_file_format).
"""
print "\nSave mission from Vehicle to file: %s" % export_mission_filename
#Download mission from vehicle
missionlist = download_mission()
#Add file-format information
output='QGC WPL 110\n'
#Add home location as 0th waypoint
home = vehicle.home_location
output+="%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (0,1,0,16,0,0,0,0,home.lat,home.lon,home.alt,1)
#Add commands
for cmd in missionlist:
commandline="%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (cmd.seq,cmd.current,cmd.frame,cmd.command,cmd.param1,cmd.param2,cmd.param3,cmd.param4,cmd.x,cmd.y,cmd.z,cmd.autocontinue)
output+=commandline
with open(aFileName, 'w') as file_:
print " Write mission to file"
file_.write(output)
| 5,344,922 |
def drift_var():
"""
Concept drift:
1. n_drifts
2. concept_sigmoid_spacing (None for sudden)
3. incremental [True] or gradual [False]
4. recurring [True] or non-recurring [False]
"""
return [(10, None, False, False), (10, 5, False, False), (10, 5, True, False)]
| 5,344,923 |
def get_generators(matrix):
"""
Given a matrix in H-rep, gets the v-rep
Turns out, the code is the same as get_inequalities,
since lrs determines the directions based on the input.
Left like this for readability.
"""
return get_inequalities(matrix)
| 5,344,924 |
def num_instances(diff, flag=False):
"""returns the number of times the mother and daughter have
palindromic ages in their lives, given the difference in age.
If flag==True, prints the details."""
daughter = 0
count = 0
while True:
mother = daughter + diff
if are_reversed(daughter, mother) or are_reversed(daughter, mother+1):
count = count + 1
if flag:
print(daughter, mother)
if mother > 120:
break
daughter = daughter + 1
return count
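# The are_reversed() helper used above is not shown. A minimal sketch, assuming
# it treats both ages as two-digit strings and checks that one is the digit
# reversal of the other:
def are_reversed(a, b):
    return str(a).zfill(2) == str(b).zfill(2)[::-1]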
| 5,344,925 |
def bench(args):
"""
Run game of life benchmarks.
"""
raise ConsoleError("not implemented yet")
| 5,344,926 |
def get_dipy_workflows(module):
"""Search for DIPY workflow class.
Parameters
----------
module : object
module object
Returns
-------
l_wkflw : list of tuple
This is a list of tuples, each containing 2 elements:
Workflow name, Workflow class obj
Examples
--------
>>> from dipy.workflows import align # doctest: +SKIP
>>> get_dipy_workflows(align) # doctest: +SKIP
"""
return [
(m, obj)
for m, obj in inspect.getmembers(module)
if inspect.isclass(obj)
and issubclass(obj, module.Workflow)
and m not in SKIP_WORKFLOWS_LIST
]
| 5,344,927 |
def test_service_to_rdf_without_identifier_should_raise_error(
minimal_spec: str,
) -> None:
"""It raises a RequiredFieldMissingError."""
with pytest.raises(RequiredFieldMissingError):
catalog = Catalog()
catalog.identifier = "http://example.com/catalogs/1"
url = "http://example.com/specifications/1"
oas = yaml.safe_load(minimal_spec)
oas_spec = OASDataService(url, oas, "")
for dataservice in oas_spec.dataservices:
catalog.services.append(dataservice)
catalog.to_rdf()
| 5,344,928 |
def azel_fit(coo_ref, coo_meas, nsamp=2000, ntune=2000, target_accept=0.95, random_seed=8675309):
"""
Fit full az/el pointing model using PyMC3. The terms are analogous to those used by TPOINT(tm). This fit includes
the eight normal terms used in `~pytelpoint.transform.azel` with additional terms, az_sigma and el_sigma, that
describe the intrinsic scatter.
Parameters
----------
coo_ref : `~astropy.coordinates.SkyCoord` instance
Reference coordinates
coo_meas : `~astropy.coordinates.SkyCoord` instance
Measured coordinates
nsamp : int (default: 2000)
Number of inference samples
ntune : int (default: 2000)
Number of burn-in samples
target_accept : float (default: 0.95)
Sets acceptance probability target for determining step size
random_seed : int (default: 8675309)
Seed number for random number generator
Returns
-------
idata : `~arviz.InferenceData`
Inference data from the pointing model
"""
pointing_model = pm.Model()
deg2rad = np.pi / 180
with pointing_model:
# az/el are the astrometric reference values. az_raw/el_raw are the observed encoder values.
az = pm.Data('az', coo_ref.az)
el = pm.Data('el', coo_ref.alt)
az_raw = pm.Data('az_raw', coo_meas.az)
el_raw = pm.Data('el_raw', coo_meas.alt)
ia = pm.Normal('ia', 1200., 100)
ie = pm.Normal('ie', 0., 50.)
an = pm.Normal('an', 0., 20.)
aw = pm.Normal('aw', 0., 20.)
ca = pm.Normal('ca', 0., 30.)
npae = pm.Normal('npae', 0., 30.)
tf = pm.Normal('tf', 0., 50.)
tx = pm.Normal('tx', 0., 20.)
az_sigma = pm.HalfNormal('az_sigma', sigma=1.)
el_sigma = pm.HalfNormal('el_sigma', sigma=1.)
daz = -ia
daz -= an * pm.math.sin(deg2rad * az) * pm.math.tan(deg2rad * el)
daz -= aw * pm.math.cos(deg2rad * az) * pm.math.tan(deg2rad * el)
daz -= ca / pm.math.cos(deg2rad * el)
daz -= npae * pm.math.tan(deg2rad * el)
dalt = ie
dalt -= an * pm.math.cos(deg2rad * az)
dalt += aw * pm.math.sin(deg2rad * az)
dalt -= tf * pm.math.cos(deg2rad * el)
dalt -= tx / pm.math.tan(deg2rad * el)
_ = pm.Normal('azerr', mu=0., sigma=az_sigma/3600, observed=pm.math.cos(deg2rad * el) * (az - (az_raw + daz/3600.)))
_ = pm.Normal('elerr', mu=0., sigma=el_sigma/3600, observed=el - (el_raw + dalt/3600.))
idata = pm.sample(
nsamp,
tune=ntune,
target_accept=target_accept,
return_inferencedata=True,
random_seed=random_seed
)
return idata
| 5,344,929 |
def repository_path(relative_path: str) -> Path:
"""
Resolve `relative_path` relative to the root of the repository.
"""
return Path(os.path.join(REPOSITORY_ROOT, relative_path)).resolve()
| 5,344,930 |
def geojson_to_labels(geojson_dict, crs_transformer, extent=None):
"""Convert GeoJSON to ObjectDetectionLabels object.
If extent is provided, filter out the boxes that lie "more than a little
bit" outside the extent.
Args:
geojson_dict: dict in GeoJSON format
crs_transformer: used to convert map coords in geojson to pixel coords
in labels object
extent: Box in pixel coords
Returns:
ObjectDetectionLabels
"""
features = geojson_dict['features']
boxes = []
class_ids = []
scores = []
def polygon_to_label(polygon, crs_transformer):
polygon = [crs_transformer.map_to_pixel(p) for p in polygon]
xmin, ymin = np.min(polygon, axis=0)
xmax, ymax = np.max(polygon, axis=0)
boxes.append(Box(ymin, xmin, ymax, xmax))
properties = feature['properties']
class_ids.append(properties['class_id'])
scores.append(properties.get('score', 1.0))
for feature in features:
geom_type = feature['geometry']['type']
coordinates = feature['geometry']['coordinates']
if geom_type == 'MultiPolygon':
for polygon in coordinates:
polygon_to_label(polygon[0], crs_transformer)
elif geom_type == 'Polygon':
polygon_to_label(coordinates[0], crs_transformer)
else:
raise Exception(
"Geometries of type {} are not supported in object detection \
labels.".format(geom_type))
if len(boxes):
boxes = np.array([box.npbox_format() for box in boxes], dtype=float)
class_ids = np.array(class_ids)
scores = np.array(scores)
labels = ObjectDetectionLabels(boxes, class_ids, scores=scores)
else:
labels = ObjectDetectionLabels.make_empty()
if extent is not None:
labels = ObjectDetectionLabels.get_overlapping(
labels, extent, ioa_thresh=0.8, clip=True)
return labels
| 5,344,931 |
def processed_transcript(df):
"""
Cleans the transcript table by splitting the value field, replacing NaN values, and dropping extra columns
PARAMETERS:
transcript dataframe
RETURNS:
Cleaned transcript dataframe
"""
#expand the dictionary to columns (reward, amount, offer id) from value field
df['offer_id'] = df['value'].apply(lambda x: x.get('offer_id'))
df['offer id'] = df['value'].apply(lambda x: x.get('offer id'))
df['reward'] = df['value'].apply(lambda x: x.get('reward'))
df['amount'] = df['value'].apply(lambda x: x.get('amount'))
#move 'offer id' values into 'offer_id'
df['offer_id'] = df.apply(lambda x : x['offer id'] if x['offer_id'] == None else x['offer_id'], axis=1)
#drop 'offer id' column
df.drop(['offer id' , 'value'] , axis=1, inplace=True)
#replace nan
df.fillna(0 , inplace=True)
return df
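# A small usage sketch for processed_transcript() above, assuming each row's
# 'value' field is a dict in the Starbucks-style transcript format:
import pandas as pd

df = pd.DataFrame({"value": [{"offer id": "abc", "reward": 5},
                             {"amount": 9.99}]})
print(processed_transcript(df)[["offer_id", "reward", "amount"]])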
| 5,344,932 |
def _TestSuiteName(dash_json_dict):
"""Extracts a test suite name from Dashboard JSON.
The dashboard JSON may contain a field "test_suite_name". If this is not
present or it is None, the dashboard will fall back to using "benchmark_name"
in the "chart_data" dict.
"""
name = None
if dash_json_dict.get('test_suite_name'):
name = dash_json_dict['test_suite_name']
else:
try:
name = dash_json_dict['chart_data']['benchmark_name']
except KeyError as e:
six.raise_from(
BadRequestError('Could not find test suite name. ' + str(e)), e)
_ValidateNameString(name, 'test_suite_name')
return name
| 5,344,933 |
def create_default_prior(name, default_priors_file=None):
"""Make a default prior for a parameter with a known name.
Parameters
----------
name: str
Parameter name
default_priors_file: str, optional
If given, a file containing the default priors.
Return
------
prior: Prior
Default prior distribution for that parameter, if unknown None is
returned.
"""
if default_priors_file is None:
logger.debug(
"No prior file given.")
prior = None
else:
default_priors = PriorDict(filename=default_priors_file)
if name in default_priors.keys():
prior = default_priors[name]
else:
logger.debug(
"No default prior found for variable {}.".format(name))
prior = None
return prior
| 5,344,934 |
def set_polling_interval(duthosts, enum_rand_one_per_hwsku_frontend_hostname):
""" Set CRM polling interval to 1 second """
duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
wait_time = 2
duthost.command("crm config polling interval {}".format(CRM_POLLING_INTERVAL))["stdout"]
logger.info("Waiting {} sec for CRM counters to become updated".format(wait_time))
time.sleep(wait_time)
| 5,344,935 |
def client():
"""Provide the client session used by tests."""
with app.app.test_client() as client:
yield client
| 5,344,936 |
def clean_data(list_in):
"""
Inputs:
list_in - filtered list of ticket orders
Outputs:
Return list of tuples, each tuple contains
(last name, first name, note,[tickets])
"""
notes_list = []
data_out = []
for row in list_in:
trimmed_row = row[row.index('Purchaser Name: ')+16:]
name = trimmed_row[:trimmed_row.index('<br/>')].strip().title()
first_name = name[:name.rindex(' ')] #get first name
last_name = name[name.rindex(' '):] #get last name
trimmed_row = trimmed_row[len(name+'<br/>')+1:]
if 'Special Instructions:' in row: #get notes
note = trimmed_row[22:trimmed_row.index('<br/>')]
trimmed_row = trimmed_row[trimmed_row.index('<br/>')+5:]
notes_list.append((last_name,first_name,note))
else:
note = ''
orders = trimmed_row.split('<br/>')
tickets = []
for order in orders: #get ticket orders
if ('Membership Dues' in order) or ('Donation' in order):
continue
else:
tickets.append(order)
data_out.append([last_name, first_name, note, tickets])
# print(last_name, first_name,note,tickets)
# print()
data_out.sort(key=lambda item: item[1]) #sort by first name (to break last name ties)
data_out.sort(key=lambda item: item[0]) #sort by last name
# for idx, note in enumerate(notes_list): #optional print of all notes
# print(idx,note)
return data_out
| 5,344,937 |
def adfuller(
vdf,
column: str,
ts: str,
by: list = [],
p: int = 1,
with_trend: bool = False,
regresults: bool = False,
):
"""
---------------------------------------------------------------------------
Augmented Dickey Fuller test (Time Series stationarity).
Parameters
----------
vdf: vDataFrame
input vDataFrame.
column: str
Input vcolumn to test.
ts: str
vcolumn used as timeline. It will be used to order the data. It can be
a numerical or type date like (date, datetime, timestamp...) vcolumn.
by: list, optional
vcolumns used in the partition.
p: int, optional
Number of lags to consider in the test.
with_trend: bool, optional
Add a trend in the Regression.
regresults: bool, optional
If True, the full regression results are returned.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
"""
def critical_value(alpha, N, with_trend):
if not (with_trend):
if N <= 25:
if alpha == 0.01:
return -3.75
elif alpha == 0.10:
return -2.62
elif alpha == 0.025:
return -3.33
else:
return -3.00
elif N <= 50:
if alpha == 0.01:
return -3.58
elif alpha == 0.10:
return -2.60
elif alpha == 0.025:
return -3.22
else:
return -2.93
elif N <= 100:
if alpha == 0.01:
return -3.51
elif alpha == 0.10:
return -2.58
elif alpha == 0.025:
return -3.17
else:
return -2.89
elif N <= 250:
if alpha == 0.01:
return -3.46
elif alpha == 0.10:
return -2.57
elif alpha == 0.025:
return -3.14
else:
return -2.88
elif N <= 500:
if alpha == 0.01:
return -3.44
elif alpha == 0.10:
return -2.57
elif alpha == 0.025:
return -3.13
else:
return -2.87
else:
if alpha == 0.01:
return -3.43
elif alpha == 0.10:
return -2.57
elif alpha == 0.025:
return -3.12
else:
return -2.86
else:
if N <= 25:
if alpha == 0.01:
return -4.38
elif alpha == 0.10:
return -3.24
elif alpha == 0.025:
return -3.95
else:
return -3.60
elif N <= 50:
if alpha == 0.01:
return -4.15
elif alpha == 0.10:
return -3.18
elif alpha == 0.025:
return -3.80
else:
return -3.50
elif N <= 100:
if alpha == 0.01:
return -4.04
elif alpha == 0.10:
return -3.15
elif alpha == 0.025:
return -3.73
else:
return -3.45
elif N <= 250:
if alpha == 0.01:
return -3.99
elif alpha == 0.10:
return -3.13
elif alpha == 0.025:
return -3.69
else:
return -3.43
elif N <= 500:
if alpha == 0.01:
return -3.98
elif alpha == 0.10:
return -3.13
elif alpha == 0.025:
return -3.68
else:
return -3.42
else:
if alpha == 0.01:
return -3.96
elif alpha == 0.10:
return -3.12
elif alpha == 0.025:
return -3.66
else:
return -3.41
check_types(
[
("ts", ts, [str],),
("column", column, [str],),
("p", p, [int, float],),
("by", by, [list],),
("with_trend", with_trend, [bool],),
("regresults", regresults, [bool],),
],
vdf=["vdf", vdf],
)
columns_check([ts, column] + by, vdf)
ts = vdf_columns_names([ts], vdf)[0]
column = vdf_columns_names([column], vdf)[0]
by = vdf_columns_names(by, vdf)
schema = vdf._VERTICAPY_VARIABLES_["schema_writing"]
if not (schema):
schema = "public"
name = "{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_{}".format(
schema, gen_name([column]).upper()
)
relation_name = "{}.VERTICAPY_TEMP_MODEL_LINEAR_REGRESSION_VIEW_{}".format(
schema, gen_name([column]).upper()
)
try:
vdf._VERTICAPY_VARIABLES_["cursor"].execute(
"DROP MODEL IF EXISTS {}".format(name)
)
vdf._VERTICAPY_VARIABLES_["cursor"].execute(
"DROP VIEW IF EXISTS {}".format(relation_name)
)
except:
pass
lag = [
"LAG({}, 1) OVER ({}ORDER BY {}) AS lag1".format(
column, "PARTITION BY {}".format(", ".join(by)) if (by) else "", ts
)
]
lag += [
"LAG({}, {}) OVER ({}ORDER BY {}) - LAG({}, {}) OVER ({}ORDER BY {}) AS delta{}".format(
column,
i,
"PARTITION BY {}".format(", ".join(by)) if (by) else "",
ts,
column,
i + 1,
"PARTITION BY {}".format(", ".join(by)) if (by) else "",
ts,
i,
)
for i in range(1, p + 1)
]
lag += [
"{} - LAG({}, 1) OVER ({}ORDER BY {}) AS delta".format(
column, column, "PARTITION BY {}".format(", ".join(by)) if (by) else "", ts
)
]
query = "CREATE VIEW {} AS SELECT {}, {} AS ts FROM {}".format(
relation_name,
", ".join(lag),
"TIMESTAMPDIFF(SECOND, {}, MIN({}) OVER ())".format(ts, ts)
if vdf[ts].isdate()
else ts,
vdf.__genSQL__(),
)
vdf._VERTICAPY_VARIABLES_["cursor"].execute(query)
model = LinearRegression(name, vdf._VERTICAPY_VARIABLES_["cursor"])
model.fit(
relation_name,
["ts"] + ["lag1"] + ["delta{}".format(i) for i in range(1, p + 1)],
"delta",
)
coef = model.coef_
vdf._VERTICAPY_VARIABLES_["cursor"].execute("DROP MODEL IF EXISTS {}".format(name))
vdf._VERTICAPY_VARIABLES_["cursor"].execute(
"DROP VIEW IF EXISTS {}".format(relation_name)
)
if regresults:
return coef
coef = coef.transpose()
DF = coef.values["lag1"][0] / (max(coef.values["lag1"][1], 1e-99))
p_value = coef.values["lag1"][3]
count = vdf.shape()[0]
result = tablesample(
{
"index": [
"ADF Test Statistic",
"p_value",
"# Lags used",
"# Observations Used",
"Critical Value (1%)",
"Critical Value (2.5%)",
"Critical Value (5%)",
"Critical Value (10%)",
"Stationarity (alpha = 1%)",
],
"value": [
DF,
p_value,
p,
count,
critical_value(0.01, count, with_trend),
critical_value(0.025, count, with_trend),
critical_value(0.05, count, with_trend),
critical_value(0.10, count, with_trend),
DF < critical_value(0.01, count, with_trend) and p_value < 0.01,
],
}
)
return result
| 5,344,938 |
def get_routes_bend180(
ports: Union[List[Port], Dict[str, Port]],
bend: ComponentOrFactory = bend_euler,
cross_section: CrossSectionFactory = strip,
bend_port1: Optional[str] = None,
bend_port2: Optional[str] = None,
**kwargs,
) -> Routes:
"""Returns routes made by 180 degree bends.
Args:
ports: List or dict of ports
bend: function for bend
cross_section:
**kwargs: bend settings
"""
ports = list(ports.values()) if isinstance(ports, dict) else ports
bend = bend(angle=180, cross_section=cross_section, **kwargs)
bend_ports = bend.get_ports_list()
bend_port1 = bend_port1 or bend_ports[0].name
bend_port2 = bend_port2 or bend_ports[1].name
references = [bend.ref() for port in ports]
references = [ref.connect(bend_port1, port) for port, ref in zip(ports, references)]
ports = [ref.ports[bend_port2] for i, ref in enumerate(references)]
lengths = [bend.info.length] * len(ports)
return Routes(references=references, ports=ports, lengths=lengths)
| 5,344,939 |
def list_datasets(github_repo="Ouranosinc/xclim-testdata", branch="main"):
"""Return a DataFrame listing all xclim test datasets available on the GitHub repo for the given branch.
The result includes the filepath, as passed to `open_dataset`, the file size (in KB) and the html url to the file.
This uses an unauthenticated call to GitHub's REST API, so it is limited to 60 requests per hour (per IP).
A single call of this function triggers one request per subdirectory, so use it sparingly.
"""
res = urlopen( # nosec
f"https://api.github.com/repos/{github_repo}/contents?ref={branch}"
)
base = json.loads(res.read().decode())
records = []
for folder in base:
if folder["path"].startswith(".") or folder["size"] > 0:
# drop hidden folders and other files.
continue
res = urlopen(folder["url"]) # nosec
listing = json.loads(res.read().decode())
for file in listing:
if file["path"].endswith(".nc"):
records.append(
{
"name": file["path"],
"size": file["size"] / 2**10,
"url": file["html_url"],
}
)
df = pd.DataFrame.from_records(records).set_index("name")
print(f"Found {len(df)} datasets.")
return df
| 5,344,940 |
def make_layerwise_projection_unshrink(*, server_state_type,
client_update_output_type,
server_update_fn, server_model_fn,
client_model_fn, shrink_unshrink_info):
"""Creates an unshrink function which unshrinks by unprojecting weight matrices corresponding to make_layerwise_projection_shrink.
Args:
server_state_type: the type of server_state.
client_update_output_type: the type of client_outputs.
server_update_fn: a function which evolves the server_state.
server_model_fn: a `tf.keras.Model' which specifies the server-side model.
client_model_fn: a `tf.keras.Model' which specifies the client-side model.
shrink_unshrink_info: an object specifying how the shrink and unshrink
operations are performed.
Returns:
A corresponding shrink and unshrink functions.
"""
left_mask = shrink_unshrink_info.left_mask
right_mask = shrink_unshrink_info.right_mask
tf.debugging.assert_equal(len(left_mask), len(right_mask))
tf.debugging.assert_equal(
len(left_mask), len(get_model_weights(server_model_fn()).trainable))
tf.debugging.assert_equal(
len(left_mask), len(get_model_weights(client_model_fn()).trainable))
build_projection_matrix = shrink_unshrink_info.build_projection_matrix
federated_server_state_type = tff.type_at_server(server_state_type)
federated_client_outputs_type = tff.type_at_clients(client_update_output_type)
@tff.tf_computation(client_update_output_type)
def unproject_client_weights_fn(client_output):
whimsy_server_weights = get_model_weights(server_model_fn()).trainable
whimsy_client_weights = get_model_weights(client_model_fn()).trainable
left_maskval_to_projmat_dict = create_left_maskval_to_projmat_dict(
client_output.round_num //
shrink_unshrink_info.new_projection_dict_decimate,
whimsy_server_weights,
whimsy_client_weights,
left_mask,
right_mask,
build_projection_matrix=build_projection_matrix)
return unproject_client_weights(client_output, left_maskval_to_projmat_dict,
left_mask, right_mask)
@tff.tf_computation
def reshape_a(client_ouput_weight_delta):
whimsy_server_weights = get_model_weights(server_model_fn()).trainable
return tf.nest.map_structure(lambda a, b: tf.reshape(a, tf.shape(b)),
client_ouput_weight_delta,
whimsy_server_weights)
@tff.federated_computation(federated_server_state_type,
federated_client_outputs_type)
def unshrink(server_state, client_outputs):
client_outputs = tff.federated_map(unproject_client_weights_fn,
client_outputs)
my_weights_delta = tff.federated_map(reshape_a,
client_outputs.weights_delta)
round_model_delta = tff.federated_mean(
my_weights_delta, weight=client_outputs.client_weight)
logging.info("finished computing mean")
return tff.federated_map(server_update_fn,
(server_state, round_model_delta))
return unshrink
| 5,344,941 |
def main(fasta_in, fasta_out, report):
"""Insert repetitive sequences into a fasta file."""
tbls = []
recs = [rec for rec in parse(fasta_in, 'fasta') if len(rec.seq) >= TENMIL]
with click.progressbar(recs) as seq_recs:
for rec in seq_recs:
tbls.append(insert_repetitive_regions(rec))
tbl = pd.concat(tbls)
tbl.to_csv(report)
write(recs, fasta_out, 'fasta')
| 5,344,942 |
def embed_terms(args, classes, dest, use_cache=True, path_to_json='ebd_cache.json'):
"""
Embeds class strings into word representations.
:param args
:param classes: (list of str) topic classes
:param dest: (str) path to destination file
:param path_to_json: (str) path to json file containing word embeddings
:return: dict {newsgroup class (int id) : embedded vector (nparray of float)}
"""
if use_cache:
with open(dest) as json_file:
return classes, json.load(json_file)
# Not using cache: extract vectors from global set
with open(path_to_json) as json_file:
mappings = json.load(json_file)
# Cache topic reps
cache = dict(zip(classes, [mappings[topic] for topic in classes]))
with open(dest, 'w') as fp:
json.dump(cache, fp)
| 5,344,943 |
def wf_paths(reachable):
"""
Construct all well-formed paths satisfying a given condition.
The condition is as follows: all the paths have height equal to
the ceiling of log_2(`reachable` + 1). `reachable` is interpreted
as a bitfield, with 1 meaning that the corresponding leaf on the
floor of the path should be reachable from the root, 0 meaning the
opposite.
This function has been used to count well-formed paths and guess
the link with Gelfand-Zetlin polytopes.
"""
if reachable <= 0:
raise ValueError
elif reachable == 1:
return [Path([])]
else:
floors = [reachable & 1]
reachable >>= 1
left = 2; right = 4
while reachable > 1:
if reachable & 1:
floors = [f | left for f in floors] + [f | right for f in floors]
left <<= 2; right <<= 2
reachable >>= 1
floors = [f | left for f in floors]
paths = []
for f in floors:
paths.extend([p.cat(f) for p in wf_paths(_h4(f))])
return paths
| 5,344,944 |
def get_imagemodel_in_rar(rar_path, mode):
""" 압축파일(rar_path)의 이미지파일의 name, width, height를 모아서 반환한다."""
image_models = []
with rarfile.RarFile(rar_path) as rf:
for name in rf.namelist():
if is_hidden_or_trash(name):
continue
if is_extensions_allow_image(name):
model = BaseImageModel()
model._name = name
app.logger.info("fileName: " + name)
if mode == "1":
try:
data = BytesIO(rf.read(name))
size = get_image_size_from_bytes(data)
model._width = size[0]
model._height = size[1]
except Exception:
app.logger.error("Can not getting width, height >> " + name)
image_models.append(model)
return image_models
| 5,344,945 |
async def test_create_new_credential(provider: SynologyAuthProvider):
"""Test that we create a new credential."""
credentials = await provider.async_get_or_create_credentials(
{
"account": "test-user",
}
)
assert credentials.is_new is True
assert credentials.data["account"] == "test-user"
| 5,344,946 |
def run_single(i,threshold_area_fraction,death_to_birth_rate_ratio,domain_size_multiplier,return_history=False):
"""run a single voronoi tessellation model simulation"""
rates = (DEATH_RATE,DEATH_RATE/death_to_birth_rate_ratio)
rand = np.random.RandomState()
history = lib.run_simulation(simulation,L,TIMESTEP,TIMEND,rand,progress_on=False,
init_time=INIT_TIME,til_fix=False,save_areas=True,
return_events=False,save_cell_histories=False,N_limit=MAX_POP_SIZE,
domain_size_multiplier=domain_size_multiplier,rates=rates,threshold_area_fraction=threshold_area_fraction)
return number_proliferating_neighbours_distribution(history,threshold_area_fraction,death_to_birth_rate_ratio)
| 5,344,947 |
def matrixmult (A, B):
"""Matrix multiplication function
This function returns the product of a matrix multiplication given two matrices.
Let the dimension of the matrix A be: m by n,
let the dimension of the matrix B be: p by q,
multiplication will only possible if n = p,
thus creating a matrix of m by q size.
Parameters
----------
A : list
First matrix, in a 2D array format.
B : list
Second matrix, in a 2D array format.
Returns
-------
C : list
The product of the matrix multiplication.
Examples
--------
>>> from .pycgmStatic import matrixmult
>>> A = [[11,12,13],[14,15,16]]
>>> B = [[1,2],[3,4],[5,6]]
>>> matrixmult(A, B)
[[112, 148], [139, 184]]
"""
C = [[0 for col in range(len(B[0]))] for row in range(len(A))]
for i in range(len(A)):
for j in range(len(B[0])):
for k in range(len(B)):
C[i][j] += A[i][k]*B[k][j]
return C
| 5,344,948 |
def TorsLattice(data = None, *args, **kwargs):
"""
Construct a lattice of torsion classes from various forms of input data
This raises an error if the constructed lattice is not semidistributive,
since the lattice of torsion classes is semidistributive.
INPUT:
- ``data``, ``*args``, ``**kwargs`` -- data and options that will
be passed down to :func:`LatticePoset` to construct a poset that is
also a lattice.
OUTPUT:
An instance of :class:`FiniteTorsLattice`
"""
if isinstance(data, FiniteTorsLattice) and not args and not kwargs:
return data
L = LatticePoset(data, *args, **kwargs)
if not L.is_semidistributive():
raise ValueError("This lattice is not semidistributive.")
return FiniteTorsLattice(L)
| 5,344,949 |
def test_dupes_no_cache(mock_DirInfo):
"""
dupes command does populate if necessary.
"""
mock_DirInfo.cached.return_value.file_count = 0
with pytest.raises(SystemExit, match='0'):
main()
mock_DirInfo.cached.assert_called_once_with(os.path.abspath('.'))
mock_DirInfo.cached.return_value.populate.assert_called_once_with(
**dupes_options()
)
mock_DirInfo.cached.return_value.dupe_groups.assert_called()
| 5,344,950 |
def get_accumulated_report(trigger_id, mission='fermi'):
"""
Return the last value for each keyword on the summary page for a given trigger_id
:param trigger_id:
:param mission: 'fermi' or 'swift'
:return:
"""
if 'fermi' in mission:
site = fermi_grb_site
elif 'swift' in mission:
site = swift_grb_site
else:
print "Uknown mission {0}".format(mission)
sys.exit()
html = urllib.urlopen(site.format(trigger_id))
rs = page_to_reports(html)
fs = None
for r in rs:
fs = report_to_fields(r, fs)
return fs
| 5,344,951 |
def sample(model, x, steps, temperature=1.0, sample=False, top_k=None):
"""
take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in
the sequence, feeding the predictions back into the model each time. Clearly the sampling
has quadratic complexity unlike an RNN that is only linear, and has a finite context window
of block_size, unlike an RNN that has an infinite context window.
"""
block_size = model.get_block_size()
model.eval()
for k in range(steps):
x_cond = x if x.size(1) <= block_size else x[:, -block_size:] # crop context if needed
logits, _ = model(x_cond)
# pluck the logits at the final step and scale by temperature
logits = logits[:, -1, :] / temperature
# optionally crop probabilities to only the top k options
if top_k is not None:
logits = top_k_logits(logits, top_k)
# apply softmax to convert to probabilities
probs = F.softmax(logits, dim=-1)
# sample from the distribution or take the most likely
if sample:
ix = torch.multinomial(probs, num_samples=1)
else:
_, ix = torch.topk(probs, k=1, dim=-1)
# append to the sequence and continue
x = torch.cat((x, ix), dim=1)
return x
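# Hypothetical usage of sample() above with an already trained minGPT-style
# `model`; the token ids and sampling settings are illustrative only.
import torch

context = torch.tensor([[0, 1, 2]], dtype=torch.long)   # (b, t) token ids
generated = sample(model, context, steps=20, temperature=0.9,
                   sample=True, top_k=10)
print(generated.shape)  # torch.Size([1, 23])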
| 5,344,952 |
def get_numbers(number, size, *, fg=DEFAULT_FGCHARACTER, bg=DEFAULT_BGCHARACTER):
"""Creates a shape of numbers.
Positional arguments:
number - number to print.
size - size of the shape.
Keyword arguments:
fg - foreground character.
bg - background character.
"""
_validate_positive_params(number+1,size)
width = int(size+1)
height = int(size*2+1)
x = range(width)
y = range(height)
# https://en.wikipedia.org/wiki/Seven-segment_display
l = [
f"y == {size*2} and x<={size}", # A
f"x == {size} and y>{size} and y<={size*2}", # B
f"x == {size} and y<={size}", # C
f"y == 0 and x<={size}", # D
f"x == 0 and y<={size}", # E
f"x == 0 and y>{size} and y<={size*2}", # F
f"y == {size} and x<={size}", # G
]
numbers = [
{l[0],l[1],l[2],l[3],l[4],l[5] }, # 0
{ l[1],l[2] }, # 1
{l[0],l[1], l[3],l[4], l[6]}, # 2
{l[0],l[1],l[2],l[3], l[6]}, # 3
{ l[1],l[2], l[5],l[6]}, # 4
{l[0], l[2],l[3], l[5],l[6]}, # 5
{l[0], l[2],l[3],l[4],l[5],l[6]}, # 6
{l[0],l[1],l[2] }, # 7
{l[0],l[1],l[2],l[3],l[4],l[5],l[6]}, # 8
{l[0],l[1],l[2],l[3], l[5],l[6]}, # 9
]
res = ""
for digit in str(number):
feqs = numbers[int(digit)]
s_digit = _make_shape(x, y, feqs, [], fg=fg, bg=bg)
if res:
new_res = ""
for i,j in zip(res.split("\n"),s_digit.split("\n")):
if i and j:
new_res += i+" "+j+'\n'
res=new_res
else:
res = s_digit
return res
| 5,344,953 |
def load_external_data_for_model(model, base_dir): # type: (ModelProto, Text) -> None
"""
Loads external tensors into model
@params
model: ModelProto to load external data to
base_dir: directory that contains external data
"""
for tensor in _get_all_tensors(model):
if uses_external_data(tensor):
load_external_data_for_tensor(tensor, base_dir)
# After loading raw_data from external_data, change the state of tensors
tensor.data_location = TensorProto.DEFAULT
# and remove external data
del tensor.external_data[:]
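# Hedged usage sketch for load_external_data_for_model() above: load an ONNX
# model without its external tensors, then pull the raw data in from base_dir
# (file names are placeholders).
import onnx

model = onnx.load("model.onnx", load_external_data=False)
load_external_data_for_model(model, base_dir="./weights")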
| 5,344,954 |
def config_from_env(key, config_schema=None):
"""Read config from a file path in os.env.
Args:
key (str) : Name of the environment variable that holds the config file path
config_schema (trafaret): Trafaret object that defines the schema of the config.
If None, then trafaret validation is not used.
Example:
```
import trafaret as tr
config_schema = tr.Dict({
tr.Key('project_name'):
tr.Dict({
'db_path': tr.String(),
'username': tr.String(),
'password': tr.String(),
}),
})
```
Trafaret docs: http://trafaret.readthedocs.io/en/latest/
Return:
config json
"""
filepath = os.getenv(key, default=None)
if not filepath:
sys.stderr.write("Passed key does not exist: {0}".format(key))
raise AttributeError('Key {} does not exist in environment.'.format(key))
return config_from_path(filepath, config_schema)
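# Hypothetical usage of config_from_env() above; the environment variable name
# and config path are placeholders.
import os

os.environ["MYAPP_CONFIG"] = "/etc/myapp/config.json"
config = config_from_env("MYAPP_CONFIG")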
| 5,344,955 |
def to_cmyk(r: int, g: int, b: int) -> _cmyk:
"""
Takes RGB values 0->255 and returns their values
in the CMYK namespace.
https://www.rapidtables.com/convert/color/rgb-to-cmyk.html
"""
r, g, b = to_float(r, g, b)
k = 1 - max(r, g, b)
c = (1 - r - k) / (1 - k)
m = (1 - g - k) / (1 - k)
y = (1 - b - k) / (1 - k)
return (c, m, y, k)
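# A quick check of to_cmyk() above, assuming to_float() scales RGB into 0..1:
print(to_cmyk(255, 0, 0))  # (0.0, 1.0, 1.0, 0.0) -- pure red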
| 5,344,956 |
def GetHostsInClusters(datacenter, clusterNames=[], connectionState=None):
"""
Return list of host objects from given cluster names.
@param datacenter: datacenter object
@type datacenter: Vim.Datacenter
@param clusterNames: cluster name list
@type clusterNames: string[]
@param connectionState: host connection state ("connected", "disconnected", "notResponding"), None means all states.
    @type connectionState: string
"""
if len(clusterNames) == 0:
clusterObjs = GetAllClusters(datacenter)
else:
clusterObjs = GetClusters(datacenter, clusterNames)
hostObjs = []
if connectionState == None:
hostObjs = [h for cl in clusterObjs for h in cl.host]
else:
hostObjs = [h for cl in clusterObjs for h in cl.host if h.runtime.connectionState == connectionState]
return hostObjs
| 5,344,957 |
def get_gfa_targets(tiles, gfafile, faintlim=99, gaiadr="dr2"):
"""Returns a list of tables of GFA targets on each tile
Args:
tiles: table with columns TILEID, RA, DEC; or Tiles object
        gfafile: path to a FITS file of GFA targets with columns RA, DEC
        faintlim: float, faint magnitude limit on GAIA_PHOT_G_MEAN_MAG (defaults to 99)
        gaiadr: string, must be either "dr2" or "edr3" (defaults to "dr2")
MAY NOT BE FULLY IMPLEMENTED
Returns:
        list of tables (one per input tile) with the subset of targets
        that are covered by GFAs on each tile. Each table has an additional
        `GFA_LOC` column indicating which GFA (0-9) covered the target.
Note that a given target could be covered by GFAs on more than one tile.
Output is a list of astropy Tables; inputs can be numpy structured arrays
or astropy Tables
"""
log = Logger.get()
tm = Timer()
tm.start()
# Convert tiles to vanilla numpy array if needed
if isinstance(tiles, Tiles):
tx = np.zeros(len(tiles.ra),
dtype=[("RA", "f8"), ("DEC", "f8"), ("TILEID", "i4")])
tx["RA"] = tiles.ra
tx["DEC"] = tiles.dec
tx["TILEID"] = tiles.id
tiles = tx
# Load potential GFA targets and GFA locations
targets = fitsio.read(gfafile)
gfa = desimodel.focalplane.gfa.GFALocations(scale=2)
# Pre-filter what GFA targets cover what tiles with some buffer.
# find_points_in_tiles returns a list of lists;
# convert to dictionary of lists keyed by tileid
log.info("Finding overlap of {} GFA targets on {} tiles".format(
len(targets), len(tiles)))
gfa_tile_indices = dict()
ii = desimodel.footprint.find_points_in_tiles(
tiles, targets["RA"], targets["DEC"], radius=1.8)
for i, tileid in enumerate(tiles["TILEID"]):
gfa_tile_indices[tileid] = ii[i]
gfa_targets = list()
log.info("Generating GFA targets tables")
for telra, teldec, tileid in zip(tiles["RA"], tiles["DEC"],
tiles["TILEID"]):
tmp = gfa.targets_on_gfa(telra, teldec,
targets[gfa_tile_indices[tileid]])
t = Table(tmp)
# Rename some columns for downstream clarity and consistency
for oldname, newname in [
("TYPE", "MORPHTYPE"),
("RA", "TARGET_RA"),
("DEC", "TARGET_DEC"),
("RA_IVAR", "TARGET_RA_IVAR"),
("DEC_IVAR", "TARGET_DEC_IVAR")]:
if oldname in t.colnames:
t.rename_column(oldname, newname)
# Select which targets are good for ETC / GUIDE / FOCUS
# 0 == good
flag = np.zeros(len(t), dtype="i2")
#- Not PSF-like
isPSF = (t["MORPHTYPE"] == "PSF ") | (t["MORPHTYPE"] == "GPSF") | (t["MORPHTYPE"] == "PSF")
flag[~isPSF] |= 2**0
#- Not Isolated
if len(tmp) > 1:
notIsolated = ~isolated(tmp['RA'], tmp['DEC'])
flag[notIsolated] |= 2**1
#- Questionable astrometry / proper motion
tych = (0 < t['REF_ID'])
tych &= ( t['REF_ID'] < 1e10)
flag[tych] |= 2**2
#- Too faint
faint = t['GAIA_PHOT_G_MEAN_MAG'] > faintlim
flag[faint] |= 2**3
# AR not passing the Gaia AEN criterion (PM correction done for AEN targets only)
g = t["GAIA_PHOT_G_MEAN_MAG"]
aen = t["GAIA_ASTROMETRIC_EXCESS_NOISE"]
isaen = np.logical_or(
(g <= 19.0) * (aen < 10.0 ** 0.5),
(g >= 19.0) * (aen < 10.0 ** (0.5 + 0.2 * (g - 19.0))),
)
flag[~isaen] |= 2**4
if len(flag)-np.count_nonzero(flag) == 0:
log.error("ERROR: no good GFA targets for "
"ETC/GUIDE/FOCUS on tile {}".format(tileid))
t["ETC_FLAG"] = flag
t["GUIDE_FLAG"] = flag
t["FOCUS_FLAG"] = flag
# patch in Gaia-based synthetic r flux for use by ETC
t["FLUX_R"] = gaia_synth_r_flux(t, gaiadr=gaiadr)
gfa_targets.append(t)
tm.stop()
tm.report(" Identifying GFA targets")
return gfa_targets
| 5,344,958 |
def test_set_str_value():
"""Test that a string option can have its value set"""
config = Configuration()
config.add_option('test', option_type=str)
with pytest.raises(ConfigurationError):
config.test
config.test = "1"
assert config.test == "1"
config.test = 1
assert config.test == "1"
| 5,344,959 |
def main():
"""Entrypoint of application"""
version = sb.utils.get_version()
parser = argparse.ArgumentParser(description=f"Shape Bruteforce CLI v{version}")
parser.add_argument("image", type=str, nargs='?', default="", help="Image to be processed")
parser.add_argument("-s", "--size", default=64, type=int,
help="Max size of image during training, smaller = faster")
parser.add_argument("-a", "--shapes", default=256, type=int, help="Number of shapes")
parser.add_argument("-g", "--generations", default=2000, type=int, help="Generations per shape")
parser.add_argument("--egg", action='store_true', help=argparse.SUPPRESS)
args = parser.parse_args()
if args.egg:
egg()
return
if args.image == "":
parser.print_help()
return
target = sb.utils.load_image(args.image)
target = sb.utils.resize_image(target, args.size)
target = sb.utils.normalize_image(target)
trainer = sb.training.Training(target)
trainer.train(shape_count=args.shapes, gens_per_shape=args.generations)
result = trainer.parent.to_array()
sb.utils.show_image(result)
| 5,344,960 |
def meijerint_indefinite(f, x):
"""
Compute an indefinite integral of ``f`` by rewriting it as a G function.
Examples
========
>>> from sympy.integrals.meijerint import meijerint_indefinite
>>> from sympy import sin
>>> from sympy.abc import x
>>> meijerint_indefinite(sin(x), x)
-cos(x)
"""
from sympy import hyper, meijerg
results = []
for a in sorted(_find_splitting_points(f, x) | {S(0)}, key=default_sort_key):
res = _meijerint_indefinite_1(f.subs(x, x + a), x)
if not res:
continue
res = res.subs(x, x - a)
if _has(res, hyper, meijerg):
results.append(res)
else:
return res
if f.has(HyperbolicFunction):
_debug('Try rewriting hyperbolics in terms of exp.')
rv = meijerint_indefinite(
_rewrite_hyperbolics_as_exp(f), x)
if rv:
if not type(rv) is list:
return collect(factor_terms(rv), rv.atoms(exp))
results.extend(rv)
if results:
return next(ordered(results))
| 5,344,961 |
def fit_size(
img: IMG,
size: Tuple[int, int],
mode: FitSizeMode = FitSizeMode.INCLUDE,
direction: FitSizeDir = FitSizeDir.CENTER,
bg_color: Union[str, float, Tuple[float, ...]] = (255, 255, 255, 0),
) -> IMG:
"""
    Resize the image to the given size, cropping any overflow and filling any
    shortfall with the given background colour.
    :params
        * ``img``: the image to adjust
        * ``size``: the desired image size
        * ``mode``: FitSizeMode.INSIDE means the image must fit inside the given size, with the missing area filled with the background colour; FitSizeMode.INCLUDE means the image must cover the given size, with the excess cropped away
        * ``direction``: where the image is anchored while resizing; defaults to FitSizeDir.CENTER (centred)
        * ``bg_color``: background colour used with FitSizeMode.INSIDE
"""
return cut_size(limit_size(img, size, mode), size, direction, bg_color)
| 5,344,962 |
def load_iot_config(ini: dict):
"""
"""
if not verify_params(ini, 'iot', ['file', 'filedir']):
return {}
# Set file directory
iot_params = ini['iot']
filedir = iot_params['filedir']
filedir = filedir.strip(" ")
if filedir == "":
# HOME directory set
filedir = os.environ['HOME']
logger.info("IoT configuration dir: '{}'".format(filedir))
iot_config = iot_params['file']
filepath = os.path.join(filedir, iot_config)
logger.info("IoT config filepath: '{}'".format(filepath))
if not os.path.exists(filepath):
logger.error("IoT config file '{}' not found".format(filepath))
return {}
# Load configuration data
topics = dict()
fieldnames = ["topic", "where", "h", "x", "y", "unit", "notes"]
try:
csvfd = open(filepath, "r", newline='')
reader = csv.DictReader(csvfd, fieldnames=fieldnames,
delimiter=";")
for row in reader:
topic = row['topic'].strip(" ")
if topic == 'topic':
logger.debug("Skipping header")
continue
if topic[0] == "#":
logger.warning("Line {} commented out".format(reader.line_num))
continue
if topic in topics.keys():
logger.warning("Topic '{}' at line {} skipped beacause duplicated ".format(topic, reader.line_num))
continue
# Remove 'topic' key and assign remaining to topics dict
row.pop('topic')
topics[topic] = row
logger.debug("Topic '{}' added: {}".format(topic, topics[topic]))
except:
logger.error("Loading IoT topics failed")
logger.error("Reason: {}".format(sys.exc_info()))
return {}
return topics
| 5,344,963 |
def test_submit_except8(mock_ssh):
"""
Check that jobsubmit exception is raised on generic SSH failure.
"""
job = {
"destdir": "/path/to/destdir",
"subfile": "submit.file"
}
mock_ssh.side_effect = exceptions.SSHError("Error", ("out", "err", 0))
mock_ssh.return_value = ("success", "error", 0)
with pytest.raises(exceptions.JobsubmitError):
submit(job)
| 5,344,964 |
def recordDevice(*args, **kwargs):
"""
Starts and stops server side device recording.
Returns: None
"""
pass
| 5,344,965 |
def test_guitab_loadall():
"""Confirm that the custom shell program can correctly load tab data"""
test_file = Path(__file__).parent / "test_guitab_file.txt"
guitab_shell = GuitabShell()
guitab_shell.do_loadall(str(test_file))
assert str(guitab_shell.user_tab) == global_test_data.str_tab_file_load
assert guitab_shell.user_tab.info == global_test_data.file_info
| 5,344,966 |
def supports_colour():
"""
Return True if the running system's terminal supports colour,
and False otherwise.
Adapted from https://github.com/django/django/blob/master/django/core/management/color.py
"""
def vt_codes_enabled_in_windows_registry():
"""
Check the Windows Registry to see if VT code handling has been enabled
by default, see https://superuser.com/a/1300251/447564.
"""
try:
# winreg is only available on Windows.
import winreg
except ImportError:
return False
else:
reg_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, 'Console')
try:
reg_key_value, _ = winreg.QueryValueEx(
reg_key, 'VirtualTerminalLevel')
except FileNotFoundError:
return False
else:
return reg_key_value == 1
# isatty is not always implemented, #6223.
is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
return is_a_tty and (
sys.platform != 'win32' or
HAS_COLORAMA or
'ANSICON' in os.environ or
# Windows Terminal supports VT codes.
'WT_SESSION' in os.environ or
# Microsoft Visual Studio Code's built-in terminal supports colors.
os.environ.get('TERM_PROGRAM') == 'vscode' or
vt_codes_enabled_in_windows_registry()
)
| 5,344,967 |
def next_code(value: int, mul: int = 252533, div: int = 33554393) -> int:
"""
Returns the value of the next code given the value of the current code
The first code is `20151125`.
After that, each code is generated by taking the previous one, multiplying it by `252533`,
and then keeping the remainder from dividing that value by `33554393`
"""
return (value * mul) % div
| 5,344,968 |
def main() -> int:
"""Runs a program specified by command-line arguments."""
args = argument_parser().parse_args()
if not args.command or args.command[0] != '--':
return 1
env = os.environ.copy()
# Command starts after the "--".
command = args.command[1:]
if args.args_file is not None:
empty = True
for line in args.args_file:
empty = False
command.append(line.strip())
if args.skip_empty_args and empty:
return 0
if args.env_file is not None:
for line in args.env_file:
apply_env_var(line, env)
# Apply command-line overrides at a higher priority than the env file.
for string in args.env:
apply_env_var(string, env)
if args.capture_output:
output_args = {'stdout': subprocess.PIPE, 'stderr': subprocess.STDOUT}
else:
output_args = {}
process = subprocess.run(command, env=env, **output_args) # type: ignore
if process.returncode != 0 and args.capture_output:
_LOG.error('')
_LOG.error('Command failed with exit code %d in GN build.',
process.returncode)
_LOG.error('')
_LOG.error('Build target:')
_LOG.error('')
_LOG.error(' %s', args.target)
_LOG.error('')
_LOG.error('Full command:')
_LOG.error('')
_LOG.error(' %s', ' '.join(shlex.quote(arg) for arg in command))
_LOG.error('')
_LOG.error('Process output:')
print(flush=True)
sys.stdout.buffer.write(process.stdout)
print(flush=True)
_LOG.error('')
return process.returncode
| 5,344,969 |
def read_csv_to_lol(full_path, sep=";"):
"""
    Read a CSV file into a list of lists.
    Make sure the file has an empty line at the bottom.
"""
with open(full_path, 'r') as ff:
# read from CSV
data = ff.readlines()
# New line at the end of each line is removed
data = [i.replace("\n", "") for i in data]
# Creating lists of list
data = [i.split(sep) for i in data]
return data
| 5,344,970 |
def read_experiment(path):
"""
    Discovers CSV files an experiment produced and constructs columns
for the experiment's conditions from the sub-directory structure.
Args:
path: path to the experiment's results.
Returns:
pd.DataFrame
"""
objects = list(path.rglob('*.csv'))
data = []
path_split = _recursive_split(path)
for obj in objects:
obj_path_split = _recursive_split(obj)
if len(obj_path_split) - len(path_split) > 7:
raise Exception("Path depth too long! Provide path to actual experiment or one of its sub-directories.")
data.append(obj_path_split)
df = pd.DataFrame(data=data)
columns = ["experiment", "imputer", "task", "missing_type", "missing_fraction", "strategy", "file_or_dir", "detail_file"]
auto_columns = []
for i in range(df.shape[1] - len(columns)):
auto_columns.append(f"col{i}")
df.columns = auto_columns + columns
df.drop(auto_columns, axis=1, inplace=True)
df["path"] = objects
df["detail_file"] = df["detail_file"].fillna("")
return df.reset_index(drop=True)
| 5,344,971 |
def single_keyword_search(keyword):
"""
    Fetches the Google search results page for the given keyword as HTML and
    returns only the parts belonging to ordinary result entries (class='r').
    Args:
        Keyword (String) : the keyword to search for on Google
    Returns:
        title_list (bs4.element.ResultSet) : collection of ordinary result entries (class='r') found in the Google search results
"""
URL = 'https://www.google.com/search?q=' +keyword_preprocessing(keyword)
driver = webdriver.Chrome("C:/Users/ksg/py_tutorial/chromedriver.exe")
driver.implicitly_wait(1)
driver.get(URL)
driver.implicitly_wait(2)
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
title_list = soup.find_all(name='div',attrs={'class':'r'})
return title_list
| 5,344,972 |
def test_cluttered_table_place_get_gt_nsrts():
"""Tests for get_gt_nsrts in ClutteredTablePlaceEnv."""
test_cluttered_table_get_gt_nsrts(place_version=True)
| 5,344,973 |
def partial_list(ys, xs, specified_shapes=None):
"""
Args:
        ys: A list of tensors. Each tensor will be differentiated with partial_nd.
        xs: A tensor to be used for differentiation, or a list of tensors with the same length as ys.
        specified_shapes: A list of specified dynamic shapes of ys. The first element of each shape is a dummy (None or -1).
"""
    assert (len(ys) > 0) and (len(xs) > 0), "ys and xs must be non-empty"
if specified_shapes is None:
if len(xs) == 1:
return [partial_nd(y, xs) for y in ys]
else:
return [partial_nd(y, x) for (y,x) in zip(ys,xs)]
else:
if len(xs) == 1:
            return [partial_nd(y, xs, specified_shape) for (y, specified_shape) in zip(ys, specified_shapes)]
else:
return [partial_nd(y, x, specified_shape) for (y,x,specified_shape) in zip(ys,xs,specified_shapes)]
| 5,344,974 |
def _GuessBrowserName(bisect_bot):
"""Returns a browser name string for Telemetry to use."""
default = 'release'
browser_map = namespaced_stored_object.Get(_BOT_BROWSER_MAP_KEY)
if not browser_map:
return default
for bot_name_prefix, browser_name in browser_map:
if bisect_bot.startswith(bot_name_prefix):
return browser_name
return default
| 5,344,975 |
def compile_train_function(network, batch_size, learning_rate):
"""Compiles the training function.
Args:
network: The network instance.
batch_size: The training batch size.
learning_rate: The learning rate.
Returns:
The update function that takes a batch of images and targets and updates the
network weights.
"""
learning_rate = np.float32(learning_rate)
input_var = network.input_layers[0].input_var
target_var = T.ftensor4()
# Loss function
loss_fn = functools.partial(
losses.bootstrapped_xentropy,
targets=target_var,
batch_size=batch_size,
multiplier=BOOTSTRAP_MULTIPLIER
)
# Update function
lr = theano.shared(learning_rate)
update_fn = functools.partial(lasagne.updates.adam, learning_rate=lr)
pylogging.info("Compile SGD updates")
gd_step = hybrid_training.compile_gd_step(
network, loss_fn, [input_var, target_var], update_fn)
reduce_lr = theano.function(
inputs=[],
updates=collections.OrderedDict([
(lr, T.maximum(np.float32(5e-5), lr / np.float32(1.25)))
])
)
def _compute_update(imgs, targets, update_counter):
if (update_counter + 1) % REDUCE_LR_INTERVAL == 0:
reduce_lr()
loss = gd_step(imgs, targets)
return loss
return _compute_update
| 5,344,976 |
def json_loads(data):
"""Load json data, allowing - to represent stdin."""
if data is None:
return ""
if data == "-":
return json.load(sys.stdin)
elif os.path.exists(data):
with open(data, 'r') as handle:
return json.load(handle)
else:
return json.loads(data)
| 5,344,977 |
def filter_values(freq, values, nthOct: int = 3):
"""
Filters the given values into nthOct bands.
Parameters
----------
freq : ndarray
Array containing the frequency axis.
values : ndarray
Array containing the magnitude values to be filtered.
nthOct : int, optional
Fractional octave bands that the absorption will be filtered to.
Returns
-------
bands : ndarray
An array containing the center frequencies of the available bands.
result : ndarray
An array containing the filtered values in the available bands.
"""
bands = fractional_octave_frequencies(nthOct=nthOct) # [band_min, band_center, band_max]
bands = bands[np.argwhere((bands[:, 1] >= min(freq)) & (bands[:, 1] <= max(freq)))[:, 0]]
idx = np.array([np.argwhere((freq >= bands[a, 0]) & (freq <= bands[a, 2])) for a in
np.arange(0, len(bands))], dtype=object)
result = np.array([np.sum(values[idx[a]]) / len(idx[a]) for a in np.arange(0, len(bands))], dtype=object)
result = np.nan_to_num(result)
return bands[:, 1], result.astype(values.dtype)
| 5,344,978 |
def create_project(request):
"""View to create new project"""
user = request.user
if user.is_annotator:
error = ErrorMessage(header="Access denied", message="Only admin and managers can create projects")
return render(request, 'error.html', {'error':error})
if request.method == "POST":
form = ProjectCreateForm(request.POST)
if form.is_valid():
project = form.save(commit=False)
project.manager = user
project.save()
return redirect('projects_list')
else:
form = ProjectCreateForm()
return render(request, 'projects/create.html', {'form': form})
| 5,344,979 |
def limit(
observed_CLs: np.ndarray,
expected_CLs: np.ndarray,
poi_values: np.ndarray,
figure_path: Optional[pathlib.Path] = None,
close_figure: bool = False,
) -> mpl.figure.Figure:
"""Draws observed and expected CLs values as function of the parameter of interest.
Args:
observed_CLs (np.ndarray): observed CLs values
expected_CLs (np.ndarray): expected CLs values, including 1 and 2 sigma bands
poi_values (np.ndarray): parameter of interest values used in scan
figure_path (Optional[pathlib.Path], optional): path where figure should be
saved, or None to not save it, defaults to None
close_figure (bool, optional): whether to close each figure immediately after
saving it, defaults to False (enable when producing many figures to avoid
memory issues, prevents rendering in notebooks)
Returns:
matplotlib.figure.Figure: the CLs figure
"""
fig, ax = plt.subplots()
xmin = min(poi_values)
xmax = max(poi_values)
# line through CLs = 0.05
ax.hlines(
0.05,
xmin=xmin,
xmax=xmax,
linestyle="dashdot",
color="red",
label=r"CL$_S$ = 5%",
)
# 1 and 2 sigma bands
ax.fill_between(
poi_values,
expected_CLs[:, 0],
expected_CLs[:, 4],
color="yellow",
label=r"expected CL$_S$ $\pm 2\sigma$",
)
ax.fill_between(
poi_values,
expected_CLs[:, 1],
expected_CLs[:, 3],
color="limegreen",
label=r"expected CL$_S$ $\pm 1\sigma$",
)
# expected CLs
ax.plot(
poi_values,
expected_CLs[:, 2],
"--",
color="black",
label=r"expected CL$_S$",
)
# observed CLs values
ax.plot(poi_values, observed_CLs, "o-", color="black", label=r"observed CL$_S$")
# increase font sizes
for item in (
[ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()
):
item.set_fontsize("large")
# minor ticks
for axis in [ax.xaxis, ax.yaxis]:
axis.set_minor_locator(mpl.ticker.AutoMinorLocator())
ax.legend(frameon=False, fontsize="large")
ax.set_xlabel(r"$\mu$")
ax.set_ylabel(r"$\mathrm{CL}_{s}$")
ax.set_xlim([xmin, xmax])
ax.set_ylim([0, 1])
ax.tick_params(axis="both", which="major", pad=8)
ax.tick_params(direction="in", top=True, right=True, which="both")
fig.tight_layout()
utils._save_and_close(fig, figure_path, close_figure)
return fig
| 5,344,980 |
def get_genlu_code(cursor, label):
"""Find or create the code for this label."""
if label not in GENLU_CODES:
cursor.execute("SELECT max(id) from general_landuse")
row = cursor.fetchone()
newval = 0 if row[0] is None else row[0] + 1
LOG.debug("Inserting new general landuse code: %s [%s]", newval, label)
cursor.execute(
"INSERT into general_landuse(id, label) values (%s, %s)",
(newval, label),
)
GENLU_CODES[label] = newval
return GENLU_CODES[label]
| 5,344,981 |
def main():
"""
Combines all subprograms.
:return:
"""
# Tests ftp connection.
ftp,success = connectToFTP(FTP_HOST, FTP_USER, FTP_PASS,1)
if not success:
return
# Creates a queue that will end all other threads when necessary.
killThread = queue.Queue()
# Initializes the local path
initPath(localPath)
# Starts the updating thread that will constantly poll the config file on the server
checker = threading.Thread(target=configUpdateThreaed, args=(publicPath, localPath, configName, killThread,))
checker.daemon = True
checker.start()
# Creates the list which is going to be used to store recording data
dat = []
initData(dat, channelList)
# Initializes mcc hat and respective channels.
mcc, channel_mask, options, c = initHat(channelList)
# This time.sleep allows the updating thread to download a config file in case there is not one already.
time.sleep(1)
# Updates the config object from file.
updateConfig(localPath, configName)
print("\n\nWaiting...")
start = False
errors =0
while not start:
updateConfig(localPath, configName)
try:
if config['RAS']['Start'] == 'TRUE':
start = True
except:
pass
print("Config file received!")
# Sets the start recording time and adjusts according to labview formatting.
timeM = datetime.datetime.fromtimestamp(float(config['RAS']['Start Time']) / (10 ** 9))
year1 = relativedelta(years=10, days=4, months=0)
final = timeM + year1
print(f"\nWaiting until: {final}")
# Waits until it is time to record
pause.until(final)
start = datetime.datetime.now()
print("STARTING RECORDING NOW")
print(f"Time of Starting: {start}")
# Initialize scan rate and start recording.
scanRate = (10 ** 6) / (int(config['RAS']['Sample Period']))
print(f"Frequency : {scanRate}")
sampPerChan = int(scanRate / 10)
mcc.a_in_scan_start(channel_mask, sampPerChan, scanRate, options)
# Wait until stop recording command is sent.
while config['RAS']['Start'] == 'TRUE':
updateConfig(localPath, configName)
dat = collectData(mcc, sampPerChan, dat, channelList)
mcc.a_in_scan_stop()
# Store stop recording time.
end = datetime.datetime.now()
print(f"Time of stopping: {end}")
print(f"Diff: {end - start}")
# Write out data to file.
success = writeFile(dat, str(start), len(channelList), 'Data0.csv', localPath)
if not success:
return
# Print number of samples
print(f"Number of samples: {len(dat[0])}")
# Send kill command
killThread.put("end")
checker.join()
try:
ftp, success = connectToFTP(FTP_HOST, FTP_USER, FTP_PASS,1)
if success:
uploadFile(ftp, publicPath, localPath, 'Data0.csv')
ftp.close()
except Exception as error:
print("ERROR while uploading")
print(error)
print("\n\n")
| 5,344,982 |
def make_predictions(clf_object, predictors_str, data_source):
    """make_predictions produces class predictions
    from the given input data
Input:
clf_object
object
constructed classification model
predictors_str
nd str array
string array containing names
of predictors
data_source
ndarray
source of data
either from valid
or test
Output:
preds
ndarray
prediction classes based on
given input data
"""
preds = clf_object.predict(data_source[predictors_str])
return preds
| 5,344,983 |
def ticket_queue_asnauto_skipvq(request, org, net, rir_data):
"""
    queue deskpro ticket creation for asn automation action: skip vq
"""
if isinstance(net, dict):
net_name = net.get("name")
else:
net_name = net.name
if isinstance(org, dict):
org_name = org.get("name")
else:
org_name = org.name
user = get_user_from_request(request)
if user:
ticket_queue(
f"[ASNAUTO] Network '{net_name}' approved for existing Org '{org_name}'",
loader.get_template("email/notify-pdb-admin-asnauto-skipvq.txt").render(
{"user": user, "org": org, "net": net, "rir_data": rir_data}
),
user,
)
return
org_key = get_org_key_from_request(request)
if org_key:
ticket_queue_email_only(
f"[ASNAUTO] Network '{net_name}' approved for existing Org '{org_name}'",
loader.get_template(
"email/notify-pdb-admin-asnauto-skipvq-org-key.txt"
).render(
{"org_key": org_key, "org": org, "net": net, "rir_data": rir_data}
),
org_key.email,
)
| 5,344,984 |
def no_span_nodes(tree, debug=False, root_id=None):
"""Return True, iff there is no span node in the given ParentedTree."""
assert isinstance(tree, ParentedTree)
if root_id is None:
root_id = tree.root_id
span_label = debug_root_label('span', debug=debug, root_id=root_id)
if tree.label() == span_label:
return False
for node in tree:
if isinstance(node, ParentedTree) :
if node.label() == span_label:
return False
subtree_is_okay = no_span_nodes(node, debug=debug, root_id=root_id)
if not subtree_is_okay:
return False
return True
| 5,344,985 |
def get_token_annualized(address, days):
"""Return annualized returns for a specific token.
Args:
days [int]: Days ago for which to display annualized returns.
address [str]: Ethereum token address.
Return:
dict: Annualized returns for a specified token.
key [str]: Days annualized.
value [str]: Annualized returns.
"""
url = f"{config.URLS['annualized_returns']}/{address}"
response = api_call(url, params={'daysBack': days, 'key': POOLS_KEY})
return response
| 5,344,986 |
def generate_random_string(N):
"""
Generate a random string
Parameters
-------------
N
length of the string
Returns
-------------
random_string
Random string
"""
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(N))
| 5,344,987 |
def test_cmat2aset():
"""Test cmat2aset."""
assert True
| 5,344,988 |
def _save_owner_edge(obj_ver_key, info_tup):
"""Save the ws_owner_of edge."""
username = info_tup[5]
from_id = 'ws_user/' + sanitize_arangodb_key(username)
to_id = 'ws_object_version/' + obj_ver_key
logger.debug(f'Saving ws_owner_of edge from {from_id} to {to_id}')
save('ws_owner_of', [{
'_from': from_id,
'_to': to_id
}])
| 5,344,989 |
def derive_related_properties():
"""Derive the list of related properties from property statistics."""
logger.info('Deriving related properties ...')
data = statistics.get_json_data('properties')
related = {}
for pid in data:
if 'r' in data[pid] and data[pid]['r']:
related[pid] = data[pid]['r']
statistics.update_json_data('properties/related', related)
statistics.update_split_json_data('properties/related', related, 10)
| 5,344,990 |
def extract_charm_name_from_url(charm_url):
"""Extract the charm name from the charm url.
E.g. Extract 'heat' from local:bionic/heat-12
:param charm_url: Name of model to query.
:type charm_url: str
:returns: Charm name
:rtype: str
"""
charm_name = re.sub(r'-[0-9]+$', '', charm_url.split('/')[-1])
return charm_name.split(':')[-1]
| 5,344,991 |
def class_name(service_name: str) -> str:
"""Map service name to .pyi class name."""
return f"Service_{service_name}"
| 5,344,992 |
def interesting_columns(df):
"""Returns non-constant column names of a dataframe."""
return sorted(set(df.columns) - set(constant_columns(df)))
| 5,344,993 |
def liberty_str(s):
"""
>>> liberty_str("hello")
'"hello"'
>>> liberty_str('he"llo')
Traceback (most recent call last):
...
ValueError: '"' is not allow in the string: 'he"llo'
>>> liberty_str(1.0)
'"1.0000000000"'
>>> liberty_str(1)
'"1.0000000000"'
>>> liberty_str([])
Traceback (most recent call last):
...
ValueError: [] is not a string
>>> liberty_str(True)
Traceback (most recent call last):
...
ValueError: True is not a string
"""
try:
if isinstance(s, (int, float)):
s = liberty_float(s)
except ValueError:
pass
if not isinstance(s, str):
raise ValueError("%r is not a string" % s)
if '"' in s:
raise ValueError("'\"' is not allow in the string: %r" % s)
return '"'+s+'"'
| 5,344,994 |
def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False):
"""
Return True if input array is a valid distance matrix.
Distance matrices must be 2-dimensional numpy arrays.
They must have a zero-diagonal, and they must be symmetric.
Parameters
----------
D : ndarray
The candidate object to test for validity.
tol : float, optional
The distance matrix should be symmetric. `tol` is the maximum
difference between entries ``ij`` and ``ji`` for the distance
metric to be considered symmetric.
throw : bool, optional
An exception is thrown if the distance matrix passed is not valid.
name : str, optional
The name of the variable to checked. This is useful if
throw is set to True so the offending variable can be identified
in the exception message when an exception is thrown.
warning : bool, optional
Instead of throwing an exception, a warning message is
raised.
Returns
-------
valid : bool
True if the variable `D` passed is a valid distance matrix.
Notes
-----
Small numerical differences in `D` and `D.T` and non-zeroness of
the diagonal are ignored if they are within the tolerance specified
by `tol`.
"""
D = np.asarray(D, order='c')
valid = True
try:
s = D.shape
if len(D.shape) != 2:
if name:
raise ValueError(('Distance matrix \'%s\' must have shape=2 '
'(i.e. be two-dimensional).') % name)
else:
raise ValueError('Distance matrix must have shape=2 (i.e. '
'be two-dimensional).')
if tol == 0.0:
if not (D == D.T).all():
if name:
raise ValueError(('Distance matrix \'%s\' must be '
'symmetric.') % name)
else:
raise ValueError('Distance matrix must be symmetric.')
if not (D[range(0, s[0]), range(0, s[0])] == 0).all():
if name:
raise ValueError(('Distance matrix \'%s\' diagonal must '
'be zero.') % name)
else:
raise ValueError('Distance matrix diagonal must be zero.')
else:
if not (D - D.T <= tol).all():
if name:
raise ValueError(('Distance matrix \'%s\' must be '
'symmetric within tolerance %5.5f.')
% (name, tol))
else:
raise ValueError('Distance matrix must be symmetric within'
' tolerance %5.5f.' % tol)
if not (D[range(0, s[0]), range(0, s[0])] <= tol).all():
if name:
raise ValueError(('Distance matrix \'%s\' diagonal must be'
' close to zero within tolerance %5.5f.')
% (name, tol))
else:
                    raise ValueError('Distance matrix diagonal must be '
                                     'close to zero within tolerance %5.5f.'
                                     % tol)
except Exception as e:
if throw:
raise
if warning:
warnings.warn(str(e))
valid = False
return valid
| 5,344,995 |
def solve_gradwavefront(data, excludeself=False, predict_at=None,
fix_covar=False, **kw):
"""Find turbulent contributions to measured fiber positions.
Assumes that the turbulent contributions can be modeled as the
gradient of a wavefront error. i.e., they are curl free.
Args:
data : ndarray containing measured positions and residuals
from expected locations
excludeself : bool
do not use this fiber when computing the turbulence
affecting this fiber.
**kw : additional keywords passed to solve_covar
Returns:
xturb : turbulent contributions in x direction
yturb : turbulent contributions in y direction
res : output from scipy.optimize.minimize describing best fit
covariance matrix
"""
if predict_at is not None and excludeself:
raise ValueError('predict_at does not make sense in combination with '
'excludeself')
if not fix_covar:
covar, res = solve_covar(data, lossfun=loss_gradwavefront,
covarfun=make_covar_gradwavefront, **kw)
else:
from types import SimpleNamespace
res = SimpleNamespace()
res.x = [5e-3, 5e-3, 100]
if kw.get('rq', False):
res.x = res.x + [2]
covar = make_covar_gradwavefront(data, res.x, **kw)
dvec = np.concatenate([data['dx'], data['dy']])
if not excludeself:
if predict_at:
# K(X*, X)(K(X, X) + C_n)^-1 y
# Rasmussen & Williams algorithm 2.1
chol, low = scipy.linalg.cho_factor(covar, check_finite=False,
overwrite_a=True)
covarpred = make_covar_gradwavefront_nonoise(
data['x'], data['y'], predict_at[0], predict_at[1],
res.x, **kw)
alpha = scipy.linalg.cho_solve((chol, low), dvec)
turb = np.dot(covarpred, alpha)
xturb, yturb = turb[:len(predict_at[0])], turb[len(predict_at[0]):]
else:
# remove measurement noise contribution to covar
cninv = np.eye(len(dvec))*res.x[0]**(-2)
covar -= np.eye(len(dvec))*res.x[0]**2
cpcninv = np.dot(covar, cninv)
aa = cpcninv+np.eye(len(dvec))
turb = np.linalg.solve(aa, np.dot(cpcninv, dvec))
xturb, yturb = turb[:len(data)], turb[len(data):]
else:
# Rasmussen & Williams 5.12
kinv = np.linalg.inv(covar)
turb = dvec - kinv.dot(dvec)/np.diag(kinv)
xturb, yturb = turb[:len(data)], turb[len(data):]
return xturb, yturb, res
| 5,344,996 |
def str_to_py(value: str):
"""Convert an string value to a native python type."""
rv: Any
if is_boolean_state(value):
rv = get_boolean(value)
elif is_integer(value):
rv = get_integer(value)
elif is_float(value):
rv = get_float(value)
else:
rv = value
return rv
| 5,344,997 |
def action(request: Dict[str, Any]) -> Tuple[str, int]:
"""Triggered from Slack action via an HTTPS endpoint.
Args:
request (dict): Request payload.
"""
if request.method != 'POST':
return 'Only POST requests are accepted', 405
print('Triggered Slack action.')
form = json.loads(request.form.get('payload', ''))
_verify_web_hook(form)
response_url = form.get('response_url')
if not response_url:
return 'No response URL!', 405
action_to_perform = form.get('actions')[0].get('value')
in_office = action_to_perform == 'response_yes'
_set_information(in_office)
today = _now().strftime('%Y-%m-%d')
status_to_response = {
True: f'{DOG_NAME} will be in the office today ({today}). :dog:',
False: f'{DOG_NAME} will not be in the office today ({today}). :no_entry_sign:',
}
response_text = f'Thanks for the response! I noted that {status_to_response[in_office]}'
response = _format_slack_message(response_text)
print(f'Replying with "{response_text}".')
response_obj = requests.post(
response_url,
data=json.dumps(response),
headers={'Content-Type': 'application/json'}
)
print(f'Slack POST request status code: "{response_obj.status_code}".')
return '', 200
| 5,344,998 |
def inmemory():
"""Returns an xodb database backed by an in-memory xapian
database. Does not support spelling correction.
"""
return open(xapian.inmemory_open(), spelling=False, inmem=True)
| 5,344,999 |