content (string, length 22–815k) | id (int64, 0–4.91M)
---|---
def diff(source: list):
"""
Compute the first-order discrete differences for a 1-dimensional list.
TODO: Support higher orders and dimensions as required.
"""
result = []
for index in range(1, len(source)):
result.append(source[index] - source[index - 1])
return result
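# A minimal usage sketch (values are illustrative): first-order differences.
assert diff([1, 4, 9, 16]) == [3, 5, 7]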
| 5,348,800 |
def is_gzip(name):
"""Return True if the name indicates that the file is compressed with
gzip."""
return name.endswith(".gz")
| 5,348,801 |
def _search_on_path(filenames):
"""Find file on system path."""
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52224
from os.path import exists, abspath, join
from os import pathsep, environ
search_path = environ["PATH"]
paths = search_path.split(pathsep)
for path in paths:
for filename in filenames:
if exists(join(path, filename)):
return abspath(join(path, filename))
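# Hedged usage sketch: returns the absolute path of the first candidate found
# on PATH, or None when nothing matches (the result is machine-dependent).
found = _search_on_path(["python3", "python", "python.exe"])
print(found)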
| 5,348,802 |
def test_two_waypoints() -> None:
"""Fresh world with two waypoints."""
world = World()
world.add_waypoints(
[("station", 48.99420, 8.4003), ("castle", 49.0134, 8.4044)]
)
assert len(world.waypoints) == 2
assert world.graph is None
| 5,348,803 |
def test_boto3_authentication_returns_exception() -> None:
"""
Tests that authentication returns an exception
"""
s3_client = Mock(assume_role=Mock(side_effect=get_client_error("AssumeRole", "AccessDenied", "S3")))
mfa = "273941"
username = "test.user"
role = "testRole"
account = "testAccount"
aws_sts_endpoint = "https://sts.eu-west-2.amazonaws.com"
with pytest.raises(AwsBotoAuthException):
boto3_auth(mfa=mfa, username=username, role=role, account=account,
aws_sts_endpoint=aws_sts_endpoint)
| 5,348,804 |
def test_partial_load__bad_column():
"""Crash if an invalid column name is given."""
stream = io.BytesIO(b"zyxwvut\0\xba\xdc\x0f\xfe\xe1\x5b\xad\x01")
with pytest.raises(ValueError) as errinfo:
BasicStruct.partial_load(stream, "lol")
assert str(errinfo.value) == "BasicStruct doesn't have a field named 'lol'."
| 5,348,805 |
def possible_sums_of(numbers: list) -> list:
"""Compute all possible sums of numbers excluding self."""
possible_sums = []
for idx, nr_0 in enumerate(numbers[:-1]):
for nr_1 in numbers[idx + 1:]:
possible_sums.append(nr_0 + nr_1)
return possible_sums
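# Usage sketch (illustrative input): all pairwise sums of distinct elements.
assert possible_sums_of([1, 2, 3]) == [3, 4, 5]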
| 5,348,806 |
def normalize_norms(X, scale_factor=1, axis=0, by='sum'):
""" wrapper of `normalize_colsum` and `normalize_rowsum`
Parameters
----------
X:
a (sparse) matrix
scale_factor: numeric, None
if None, use the median of sum level as the scaling factor.
axis: int, {0, 1}
if axis = 0, apply to each column;
if axis = 1, apply to each row.
by: str, {'sum', 'max'}
normalization method
"""
foo = normalize_col if axis == 0 else normalize_row
return foo(X, scale_factor=scale_factor, by=by)
| 5,348,807 |
def full_solution(combined, prob_dists):
"""
combined: (w, n-1->n-w, 3, 3)
prob_dists: (n, 3, total_reads)
    p[v,g,k] = prob of observing k of total_reads on ref if genotype is g on variant v
"""
N = len(combined[0])+1
best_idx, best_score = np.empty(N), -np.inf*np.ones(N)
for j, counts in enumerate(combined, 1):
scores = get_scores(counts, prob_dists[:-j])
do_update = scores>best_score[j:]
best_score[j:][do_update] = scores[do_update]
best_idx[j:][do_update] = np.flatnonzero(do_update)
rev_scores = get_scores(counts.swapaxes(-2, -1), prob_dists[j:])
do_update = rev_scores>best_score[:-j]
best_score[:-j][do_update] = rev_scores[do_update]
best_idx[:-j][do_update] = np.flatnonzero(do_update)+j
return best_idx
| 5,348,808 |
def _generate_code_for_client(service: ProtoService, root: ProtoNode,
output: OutputFile) -> None:
"""Outputs client code for an RPC service."""
output.write_line('namespace nanopb {')
class_name = f'{service.cpp_namespace(root)}Client'
output.write_line(f'\nclass {class_name} {{')
output.write_line(' public:')
with output.indent():
output.write_line('template <typename T>')
output.write_line(
f'using NanopbClientCall = {RPC_NAMESPACE}::NanopbClientCall<T>;')
output.write_line('')
output.write_line(f'{class_name}() = delete;')
for method in service.methods():
_generate_code_for_client_method(method, output)
service_name_hash = pw_rpc.ids.calculate(service.proto_path())
output.write_line('\n private:')
with output.indent():
output.write_line(f'// Hash of "{service.proto_path()}".')
output.write_line(
f'static constexpr uint32_t kServiceId = 0x{service_name_hash:08x};'
)
output.write_line('};')
output.write_line('\n} // namespace nanopb\n')
| 5,348,809 |
def download_url(url, filename):
"""Download a file."""
if not os.path.exists(filename):
print('Downloading {} ...'.format(filename))
download_webfile(url, filename)
print('Downloading complete.')
| 5,348,810 |
def ycbcr2bgr(img):
"""Convert a YCbCr image to BGR image.
The bgr version of ycbcr2rgb.
It implements the ITU-R BT.601 conversion for standard-definition
television. See more details in
https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
It differs from a similar function in cv2.cvtColor: `YCrCb <-> BGR`.
In OpenCV, it implements a JPEG conversion. See more details in
https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
Returns:
ndarray: The converted BGR image. The output image has the same type
and range as input image.
"""
img_type = img.dtype
img = _convert_input_type_range(img) * 255
out_img = np.matmul(
img,
[
[0.00456621, 0.00456621, 0.00456621],
[0.00791071, -0.00153632, 0],
[0, -0.00318811, 0.00625893],
],
) * 255.0 + [-276.836, 135.576, -222.921]
out_img = _convert_output_type_range(out_img, img_type)
return out_img
| 5,348,811 |
def main(config):
"""Read shapes, plot map
"""
data_path = config['paths']['data']
# data
output_file = os.path.join(config['paths']['figures'], 'network-air-map.png')
air_edge_file = os.path.join(data_path, 'network', 'air_edges.shp')
air_node_file = os.path.join(data_path, 'network', 'air_nodes.shp')
# air_usage_file = os.path.join(data_path, 'usage', 'air_passenger.csv')
# basemap
proj_lat_lon = ccrs.PlateCarree()
ax = get_axes()
plot_basemap(ax, data_path)
scale_bar(ax, location=(0.8, 0.05))
plot_basemap_labels(ax, data_path, include_regions=False)
colors = {
'Air route': '#252525',
'Airport': '#d95f0e'
}
# edges
edges = geopandas.read_file(air_edge_file)
ax.add_geometries(
list(edges.geometry),
crs=proj_lat_lon,
linewidth=1.5,
edgecolor=colors['Air route'],
facecolor='none',
zorder=4
)
# edges merged with usage
# usage = pandas.read_csv(air_usage_file)
# edges_with_usage = edges.merge(usage[['id', 'passengers_2016']], on='id')
# nodes
nodes = geopandas.read_file(air_node_file)
ax.scatter(
list(nodes.geometry.x),
list(nodes.geometry.y),
transform=proj_lat_lon,
facecolor=colors['Airport'],
s=12,
zorder=5
)
# legend
legend_handles = [
mpatches.Patch(color=color, label=label)
for label, color in colors.items()
]
plt.legend(handles=legend_handles, loc='lower left')
# save
save_fig(output_file)
| 5,348,812 |
def tensorize_fg_coeffs(
data,
wgts,
fg_model_comps,
notebook_progressbar=False,
verbose=False,
):
"""Initialize foreground coefficient tensors from uvdata and modeling component dictionaries.
Parameters
----------
data: list
list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
representing data
wgts: list
list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
representing weights.
fg_model_comps: list
list of fg modeling tf.Tensor objects
representing foreground modeling vectors.
Each tensor is (nvecs, ngrps, nbls, nfreqs)
see description in tensorize_fg_model_comps_dict
docstring.
notebook_progressbar: bool, optional
use progress bar optimized for notebook output.
default is False.
verbose: bool, optional
lots of text output
default is False.
Returns
-------
fg_coeffs_re: tf.Tensor object
1d tensor containing real parts of coeffs for each modeling vector.
ordering is over foreground modeling vector per redundant group and then
redundant group in the order of groups appearing in red_grps
fg_coeffs_im: tf.Tensor object
1d tensor containing imag parts of coeffs for each modeling vector.
ordering is over foreground modeling vector per redundant group and then
redundant group in the order of groups appearing in red_grps
"""
echo(
f"{datetime.datetime.now()} Computing initial foreground coefficient guesses using linear-leastsq...\n",
verbose=verbose,
)
fg_coeffs = []
nchunks = len(data)
binary_wgts = [
tf.convert_to_tensor(~np.isclose(wgts[cnum].numpy(), 0.0), dtype=wgts[cnum].dtype) for cnum in range(nchunks)
]
for cnum in PBARS[notebook_progressbar](range(nchunks)):
# set up linear leastsq
fg_coeff_chunk = []
ngrps = data[cnum].shape[0]
ndata = data[cnum].shape[1] * data[cnum].shape[2]
nvecs = fg_model_comps[cnum].shape[0]
# pad with zeros
for gnum in range(ngrps):
nonzero_rows = np.where(
np.all(np.isclose(fg_model_comps[cnum][:, gnum].numpy().reshape(nvecs, ndata), 0.0), axis=1)
)[0]
if len(nonzero_rows) > 0:
nvecs_nonzero = np.min(nonzero_rows)
else:
nvecs_nonzero = nvecs
# solve linear leastsq
fg_coeff_chunk.append(
tf.reshape(
tf.linalg.lstsq(
tf.transpose(tf.reshape(fg_model_comps[cnum][:, gnum], (nvecs, ndata)))[:, :nvecs_nonzero],
tf.reshape(data[cnum][gnum] * binary_wgts[cnum][gnum], (ndata, 1)),
),
(nvecs_nonzero,),
)
)
# pad zeros at the end back up to nvecs.
fg_coeff_chunk[-1] = tf.pad(fg_coeff_chunk[-1], [(0, nvecs - nvecs_nonzero)])
        # add two additional dummy indices to satisfy broadcasting rules.
fg_coeff_chunk = tf.reshape(tf.transpose(tf.stack(fg_coeff_chunk)), (nvecs, ngrps, 1, 1))
fg_coeffs.append(fg_coeff_chunk)
echo(
f"{datetime.datetime.now()} Finished initial foreground coefficient guesses...\n",
verbose=verbose,
)
return fg_coeffs
| 5,348,813 |
def main() -> None:
"""Execute the main routine."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--operation_dir",
help="path to the directory where temporary test files are stored; "
"if not specified, uses mkdtemp()")
args = parser.parse_args()
params = Params(args=args)
cases_dir = tests.path.REPO_DIR / 'test_cases'
##
# Collect test cases
##
schema_pths = sorted(
list(cases_dir.glob("general/**/schema.json")) +
list(cases_dir.glob("py/**/schema.json")) +
list(cases_dir.glob("docs/**/schema.json")))
# yapf: disable
cases = [
Case(
schema_path=schema_pth,
example_paths=sorted(schema_pth.parent.glob("example_*.json")),
rel_path=schema_pth.parent.relative_to(cases_dir))
for schema_pth in schema_pths
]
# yapf: enable
with temppathlib.TmpDirIfNecessary(
path=params.operation_dir) as base_operation_dir:
##
# Execute the test cases
##
src_dir = base_operation_dir.path / "src"
src_dir.mkdir(exist_ok=True)
for case in cases:
execute_case(case=case, case_src_dir=src_dir / case.rel_path)
| 5,348,814 |
def test_get_serializer_class():
"""
The view should use the serializer defined in the app's settings.
"""
view = views.UserCreateView()
expected = app_settings.USER_SERIALIZER
assert view.get_serializer_class() == expected
| 5,348,815 |
def receive_incoming_bets():
"""
Sends fixtures to the front-end
"""
return fixtures.fixtures_information
| 5,348,816 |
def fibonacci(n):
"""
object: fibonacci(n) returns the first n Fibonacci numbers in a list
input: n- the number used to calculate the fibonacci list
return: retList- the fibonacci list
"""
    if type(n) != int:
        print(n)
        print("input is not an integer")
        return False
    if n <= 0:
        print(str(n) + " is not a positive integer")
        return False
f1=1
f2=1
retList=[]
for i in range (0,n):
retList.append(f1)
fn=f1+f2
f1=f2
f2=fn
return retList
| 5,348,817 |
def psnr_batch(_mse_batch_val):
"""
    :param _mse_batch_val: ndarray
:return: ndarray
Usage:
    1) The batch is dealt with per channel.
       Thus, it is recommended to call the mse_batch function before the psnr_batch function.
2) cumsum_psnr_rgb += (metric_.psnr_batch(_mse_batch_val=(metric_.mse_batch(_ndarr_input=imgcv_.batch2channel(_ndarr=ndarr_input),
_ndarr_ref=imgcv_.batch2channel(_ndarr=ndarr_ref), _num_colr_channel=3)))).sum()
"""
return (10 * np.log10((255.0 ** 2) / _mse_batch_val))
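# Worked example with hypothetical MSE values, assuming numpy is imported as np
# (the function itself requires it): PSNR = 10 * log10(255**2 / MSE), so an MSE
# of 255**2 gives 0 dB and an MSE of 100 gives roughly 28.13 dB.
print(psnr_batch(np.array([255.0 ** 2, 100.0])))  # -> approximately [0.0, 28.13]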
| 5,348,818 |
def get_u0(u0_type, num_features):
"""return a polyhedral definition for U^0, B_mat and b_vec"""
assert u0_type in ["box", "positive_normed"]
if u0_type == "box":
B_mat, b_vec = U0_box(num_features)
if u0_type == "positive_normed":
B_mat, b_vec = U0_positive_normed(num_features)
return B_mat, b_vec
| 5,348,819 |
def funcparser_callable_search_list(*args, caller=None, access="control", **kwargs):
"""
Usage: $objlist(#123)
Legacy alias for search with a return_list=True kwarg preset.
"""
return funcparser_callable_search(*args, caller=caller, access=access,
return_list=True, **kwargs)
| 5,348,820 |
def upcoming_movie_name(soup):
"""
Extracts the list of movies from BeautifulSoup object.
:param soup: BeautifulSoup object containing the html content.
:return: list of movie names
"""
movie_names = []
movie_name_tag = soup.find_all('h4')
for _movie in movie_name_tag:
_movie_result = _movie.find_all('a')
try:
_movie_name = _movie_result[0]['title']
movie_names.append(_movie_name)
except KeyError as e:
continue
return movie_names
| 5,348,821 |
def options(*args, **kw):
"""Mark the decorated function as a handler for OPTIONS requests."""
return _make_handler_decorator('OPTIONS', *args, **kw)
| 5,348,822 |
def lookup_vendor_name(mac_address):
"""
Translates the returned mac-address to a vendor
"""
url = "http://macvendors.co/api/%s" % mac_address
request = urllib2.Request(url, headers={'User-Agent': "API Browser"})
try:
response = urllib2.urlopen(request)
reader = codecs.getreader("utf-8")
obj = json.load(reader(response))
response.close()
return obj['result']['company']
except urllib2.URLError:
return "Unable to lookup MAC address"
except KeyError:
return "MAC lookup API changed"
| 5,348,823 |
def train_on(text):
""" Return a dictionary whose keys are alle the tuple of len PREFIX
of consecutive words inside text, and whose value is the list of
every single word which follows that tuple inside the text. For ex:
{('Happy', 'birthday'): ['to', 'dear'] ...} """
words = text.split()
assert len(words) > PREFIX
training = defaultdict(list)
for i in range(0, len(words) - PREFIX):
duo = tuple(words[i:i + PREFIX])
following = words[i + PREFIX]
training[duo].append(following)
return training
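# Hedged usage sketch, assuming PREFIX == 2 and that defaultdict is imported at
# module level (both are relied on by the function above).
training = train_on("Happy birthday to you Happy birthday dear friend")
assert training[("Happy", "birthday")] == ["to", "dear"]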
| 5,348,824 |
def learning_rate_decay(alpha, decay_rate, global_step, decay_step):
"""learning_rate_decay: updates the learning rate using
inverse time decay in numpy
Args:
alpha : is the original learning rate
decay_rate : is the weight used to determine the
rate at which alpha will decay
global_step : is the number of passes of gradient
descent that have elapsed
decay_step : is the number of passes of gradient descent
that should occur before alpha is decayed further
Returns:
the updated value for alpha
"""
alpha = alpha / (1 + decay_rate * int(global_step / decay_step))
return alpha
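# Worked example with hypothetical hyperparameters: after 20 steps with
# decay_rate=1 and decay_step=10, alpha = 0.1 / (1 + 1 * 2) = 0.1 / 3.
assert abs(learning_rate_decay(0.1, 1, 20, 10) - 0.1 / 3) < 1e-12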
| 5,348,825 |
def update_studio(request):
"""updates the studio
"""
studio_id = request.params.get('studio_id')
studio = Studio.query.filter_by(id=studio_id).first()
name = request.params.get('name', None)
dwh = request.params.get('dwh', None)
wh_mon_start = get_time(request, 'mon_start')
wh_mon_end = get_time(request, 'mon_end')
wh_tue_start = get_time(request, 'tue_start')
wh_tue_end = get_time(request, 'tue_end')
wh_wed_start = get_time(request, 'wed_start')
wh_wed_end = get_time(request, 'wed_end')
wh_thu_start = get_time(request, 'thu_start')
wh_thu_end = get_time(request, 'thu_end')
wh_fri_start = get_time(request, 'fri_start')
wh_fri_end = get_time(request, 'fri_end')
wh_sat_start = get_time(request, 'sat_start')
wh_sat_end = get_time(request, 'sat_end')
wh_sun_start = get_time(request, 'sun_start')
wh_sun_end = get_time(request, 'sun_end')
if studio and name and dwh:
# update new studio
studio.name = name
studio.daily_working_hours = int(dwh)
wh = WorkingHours()
def set_wh_for_day(day, start, end):
if start != end:
wh[day] = [[start.seconds/60, end.seconds/60]]
else:
wh[day] = []
set_wh_for_day('mon', wh_mon_start, wh_mon_end)
set_wh_for_day('tue', wh_tue_start, wh_tue_end)
set_wh_for_day('wed', wh_wed_start, wh_wed_end)
set_wh_for_day('thu', wh_thu_start, wh_thu_end)
set_wh_for_day('fri', wh_fri_start, wh_fri_end)
set_wh_for_day('sat', wh_sat_start, wh_sat_end)
set_wh_for_day('sun', wh_sun_start, wh_sun_end)
studio.working_hours = wh
DBSession.add(studio)
# Commit will be handled by the zope transaction extension
return HTTPOk()
| 5,348,826 |
def to_json(data, fpath):
"""
Save a dict as JSON.
"""
with open(fpath, 'w') as f:
json.dump(data, f, indent=2, sort_keys=True)
f.write('\n')
| 5,348,827 |
def flip_coin(num_of_experiments = 1000, num_of_flips = 30):
"""
    Flip the coin `num_of_flips` times, repeat this experiment `num_of_experiments` times, and
    return a histogram counting how many experiments produced each possible number of heads.
"""
all_heads = []
for i in range(num_of_experiments):
heads = tails = 0
for counter in range(num_of_flips):
num = np.random.randint(0,2)
if num == 0:
heads += 1
else:
tails += 1
all_heads.append(heads)
# group the number of heads in all the experiments
flip_heads = []
for flip in range(num_of_flips + 1):
num_of_heads = 0
for h in all_heads:
if h == flip:
num_of_heads += 1
flip_heads.append(num_of_heads)
return flip_heads
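# Usage sketch: the returned histogram has num_of_flips + 1 bins (0..30 heads)
# and its counts sum to the number of experiments.
heads_histogram = flip_coin(num_of_experiments=1000, num_of_flips=30)
assert len(heads_histogram) == 31 and sum(heads_histogram) == 1000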
| 5,348,828 |
def get_questions(
path: str,
uid2idx: dict = None,
path_data: Path = None,
) -> po.DataFrame:
"""
Identify correct answer text and filter out wrong distractors from question string
Get tokens and lemmas
Get explanation sentence ids and roles
"""
# Dropping questions without explanations hurts score
df = po.read_csv(path, sep="\t")
df = add_q_reformat(df)
# Preprocess texts
tokens, lemmas = preprocess_texts(df.q_reformat.tolist(), path_data)
df["tokens"], df["lemmas"], df["embedding"] = tokens, lemmas, None
# Get explanation uids and roles
exp_uids = []
exp_roles = []
exp_idxs = []
for exp_string in df.explanation.values:
_uids, _roles = extract_explanation(exp_string)
uids = []
roles = []
idxs = []
assert len(_uids) == len(_roles)
for i in range(len(_uids)):
if _uids[i] not in uid2idx:
continue
uids.append(_uids[i])
roles.append(_roles[i])
idxs.append(uid2idx[_uids[i]])
exp_uids.append(uids)
exp_roles.append(roles)
exp_idxs.append(idxs)
df["exp_uids"], df["exp_roles"], df[
"exp_idxs"] = exp_uids, exp_roles, exp_idxs
print(df.shape)
return df
| 5,348,829 |
def simple_command(device, cmd_id, data=None, receive=True):
"""
Raises:
HIDException -> if reading/writing to the USB device failed:
KBProtocolException -> if the packet is too large
"""
cmd_packet = bytearray(EP_VENDOR_SIZE)
cmd_packet[0] = cmd_id
# Optional data component
if data != None:
data = bytearray(data)
if len(data) > (EP_VENDOR_SIZE-1):
raise KBProtocolException("Data can't fit in one packet. Got {} "
"bytes, max is {}".format(len(data), EP_VENDOR_SIZE))
for i, byte in enumerate(data):
cmd_packet[i+1] = byte
device.write(cmd_packet)
if receive:
response = device.read()
packet_type = response[0]
while packet_type != cmd_id and packet_type != CMD_ERROR_CODE: # ignore other packets
response = device.read(timeout=2)
if response == None:
device.write(cmd_packet)
else:
packet_type = response[0]
if response[0] == CMD_ERROR_CODE:
raise_error_code(response[1])
elif response[0] != cmd_id:
raise KBProtocolException("Unexpected packet with packet_id: {}"
.format(response[0]))
return response[1:]
else:
return None
| 5,348,830 |
def dbopen(dbname, perm = 'r'):
"""Open a Datascope database"""
return Dbptr(dbname, perm)
| 5,348,831 |
def create(haproxy_lua_svc: HaproxyLuaFacade, **kwargs):
"""
Create a new lua
"""
json_payload = {
'lua': {
"enabled": kwargs['enabled'],
"name": kwargs['name'],
"description": kwargs['description'],
"content": kwargs['content'],
}
}
result = haproxy_lua_svc.create_lua(json_payload)
CliOutputFormatter(result, kwargs['output'], kwargs['cols'].split(",")).echo()
| 5,348,832 |
def to_nx(dsk):
"""
Code mainly identical to dask.dot.to_graphviz and kept compatible.
"""
collapse_outputs = False
verbose = False
data_attributes = {}
function_attributes = {}
g = nx.DiGraph()
seen = set()
connected = set()
for k, v in dsk.items():
k_name = name(k)
if istask(v):
func_name = name((k, "function")) if not collapse_outputs else k_name
if collapse_outputs or func_name not in seen:
seen.add(func_name)
attrs = function_attributes.get(k, {}).copy()
attrs.setdefault("label", key_split(k))
attrs.setdefault("shape", "circle")
g.add_node(func_name, **attrs)
if not collapse_outputs:
g.add_edge(func_name, k_name)
connected.add(func_name)
connected.add(k_name)
for dep in get_dependencies(dsk, k):
dep_name = name(dep)
if dep_name not in seen:
seen.add(dep_name)
attrs = data_attributes.get(dep, {}).copy()
attrs.setdefault("label", box_label(dep, verbose))
attrs.setdefault("shape", "box")
g.add_node(dep_name, **attrs)
g.add_edge(dep_name, func_name)
connected.add(dep_name)
connected.add(func_name)
elif ishashable(v) and v in dsk:
v_name = name(v)
g.add_edge(v_name, k_name)
connected.add(v_name)
connected.add(k_name)
if (not collapse_outputs or k_name in connected) and k_name not in seen:
seen.add(k_name)
attrs = data_attributes.get(k, {}).copy()
attrs.setdefault("label", box_label(k, verbose))
attrs.setdefault("shape", "box")
g.add_node(k_name, **attrs)
assert nx.dag.is_directed_acyclic_graph(g)
return g
| 5,348,833 |
def predict(model, img_base64):
"""
Returns the prediction for a given image.
Params:
        model: the neural network (classifier).
        img_base64: the base64-encoded input image.
"""
return model.predict_disease(img_base64)
| 5,348,834 |
def get_hash(x: str):
"""Generate a hash from a string."""
h = hashlib.md5(x.encode())
return h.hexdigest()
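# Usage sketch: the MD5 hex digest of a string is stable across runs.
assert get_hash("hello") == "5d41402abc4b2a76b9719d911017c592"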
| 5,348,835 |
def pair_force(r1, r2, par1, par2, sigma_c, box, r_cut, lj=True, coulomb=True):
"""Compute the sum of the Lennard Jones force and the short ranged part
of the Coulomb force between two particles.
Arguments:
r1 (ndarray): A one dimensional numpy-array with d elements (position of the first particle)
r2 (ndarray): A one dimensional numpy-array with d elements (position of the second particle)
        par1 (ndarray): A one dimensional numpy-array with 4 elements (charge, epsilon, sigma, mass) for the first particle
        par2 (ndarray): A one dimensional numpy-array with 4 elements (charge, epsilon, sigma, mass) for the second particle
        sigma_c (float): A positive float (width of the gaussian distribution used to shield the particle)
        box (ndarray): A one dimensional numpy-array with d elements (size of the periodic box)
        r_cut (float): A positive float (cutoff radius)
        lj (boolean): If True the Lennard-Jones force is calculated
coulomb (boolean): If True the Coulomb force is calculated
Returns:
force * direction (ndarray): A one dimensional numpy-array with d elements (force acting on the first particle)
"""
dist = pbc(r1 - r2, box)
r12 = np.linalg.norm(dist)
force = 0
if r12 <= r_cut:
if lj:
epsilon = calc_eps(par1[1], par2[1])
            sigma_lj = calc_sig(par1[2], par2[2])
rs = sigma_lj / r12
force += 24 * epsilon / r12 * (2 * rs**12 - rs**6)
if coulomb:
q1 = par1[0]
q2 = par2[0]
f1 = erfc(r12 / (np.sqrt(2) * sigma_c)) / r12
f2 = np.sqrt(2 / np.pi) / sigma_c * np.exp(- r12**2 / (2 * sigma_c**2))
force += q1 * q2 / (4 * np.pi * eps * r12) * (f1 + f2)
direction = dist / r12
return force * direction
| 5,348,836 |
def delete_network_acl_entry(client, network_acl_id, num=100, egress=False, dry=True):
"""
Delete a network acl entry
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.delete_network_acl_entry
"""
try:
response = client.delete_network_acl_entry( Egress=egress, NetworkAclId=network_acl_id, RuleNumber=num, DryRun=dry)
print('Deleted %s %s' % (network_acl_id, ('(dry)' if dry else '')))
return response
except Exception as err:
handle(err)
| 5,348,837 |
def mp_run(data, process_num, func, *args):
""" run func with multi process
"""
level_start = time.time()
    partn = max(len(data) // process_num, 1)  # integer chunk size per worker
start = 0
p_idx = 0
ps = []
while start < len(data):
local_data = data[start:start + partn]
start += partn
p = mp.Process(target=func, args=(local_data, p_idx) + args)
ps.append(p)
p.start()
p_idx += 1
for p in ps:
p.join()
for p in ps:
p.terminate()
return p_idx
| 5,348,838 |
def _write_heat_cool_to_b18(list_dict, old_new_names, zone, b18_lines, string):
"""
Args:
list_dict:
old_new_names:
zone:
b18_lines:
string:
"""
for key in list_dict.keys():
if old_new_names[zone.Name.upper()][0] in key:
f_count = checkStr(b18_lines, "Z o n e " + zone.Name)
regimeNum = checkStr(b18_lines, "REGIME", f_count)
# Write
if not isinstance(list_dict[key], list):
value = list_dict[key]
else:
value = list_dict[key][0]
b18_lines.insert(regimeNum, string + " = " + value + "\n")
| 5,348,839 |
def test_list_token_min_length_3_nistxml_sv_iv_list_token_min_length_4_4(mode, save_output, output_format):
"""
Type list/token is restricted by facet minLength with value 8.
"""
assert_bindings(
schema="nistData/list/token/Schema+Instance/NISTSchema-SV-IV-list-token-minLength-4.xsd",
instance="nistData/list/token/Schema+Instance/NISTXML-SV-IV-list-token-minLength-4-4.xml",
class_name="NistschemaSvIvListTokenMinLength4",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 5,348,840 |
def spell(corpus):
"""
Train a Spelling Normalizer
Parameters
----------
corpus : list of strings. Prefer to feed with malaya.load_malay_dictionary().
Returns
-------
SPELL_NORMALIZE: Trained malaya.normalizer._SPELL_NORMALIZE class
"""
if not isinstance(corpus, list):
raise ValueError('corpus must be a list')
if not isinstance(corpus[0], str):
raise ValueError('corpus must be list of strings')
return _SPELL_NORMALIZE([unidecode(w) for w in corpus])
| 5,348,841 |
def check_PA_vector(angle_list, unit='deg'):
""" Checks if the angle list has the right format to avoid any bug in the
pca-adi algorithm. The right format complies to 3 criteria:
1) angles are expressed in degree
2) the angles are positive
3) there is no jump of more than 180 deg between consecutive values (e.g.
no jump like [350deg,355deg,0deg,5deg] => replaced by
[350deg,355deg,360deg,365deg])
Parameters
----------
angle_list: 1D-array_like
Vector containing the derotation angles
unit: string, {'deg','rad'}, optional
The unit type of the input angle list
Returns
-------
angle_list: 1-D array_like
Vector containing the derotation angles (after correction to comply with
the 3 criteria, if needed)
"""
angle_list = angle_list.copy()
if unit != 'rad' and unit != 'deg':
raise ValueError("The input unit should either be 'deg' or 'rad'")
npa = angle_list.shape[0]
for ii in range(npa):
if unit == 'rad':
angle_list[ii] = np.rad2deg(angle_list[ii])
if angle_list[ii] < 0:
angle_list[ii] = 360+angle_list[ii]
correct = False
sorted_rot = np.sort(angle_list)
# Check if there is a jump > 180deg within the angle list
for ii in range(npa-1):
if abs(sorted_rot[ii+1]-sorted_rot[ii]) > 180:
correct = True
break
# In the previous case, correct for it by adding 360deg to angles < 180deg
if correct:
for ii in range(npa):
if angle_list[ii] < 180:
angle_list[ii] = 360+angle_list[ii]
return angle_list
| 5,348,842 |
def client_id_to_org_type_id(client_id):
"""
Client ID should be a string: "g:" + self._options['org'] + ":" +
self._options['type'] + ":" + self._options['id'],
"""
split = client_id.split(':')
if len(split) != 4:
raise InvalidClientId()
org = split[1]
device_type = split[2]
device_id = split[3]
return (org, device_type, device_id)
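# Usage sketch with a hypothetical client id; malformed ids (anything without
# exactly four ':'-separated fields) raise InvalidClientId instead.
assert client_id_to_org_type_id("g:myorg:sensor:dev001") == ("myorg", "sensor", "dev001")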
| 5,348,843 |
def get_total_implements():
"""Obtiene los implementos totales solicitados en prestamos."""
total_implements = 0
for i in Loans.objects.all():
total_implements += i.ammount_implements
return total_implements
| 5,348,844 |
def add_menu(installdir: Path, name: Text) -> None:
"""Add a submenu to 'VNM' menu.
Parameters
----------
name : Text
The name of the submenu.
"""
logging.info(f"Adding submenu for '{name}'")
icon_path = installdir/f"icons/{name.split()[0]}.png"
icon_src = (Path(__file__).parent/'icons'/icon_path.name)
try:
shutil.copy2(icon_src, icon_path)
except FileNotFoundError:
logging.warning(f'{icon_src} not found')
icon_src = (Path(__file__).parent/'icons/vnm.png')
shutil.copy2(icon_src, icon_path)
# Generate `.directory` file
entry = configparser.ConfigParser()
entry.optionxform = str
entry["Desktop Entry"] = {
"Name": name,
"Comment": name,
"Icon": icon_path,
"Type": "Directory",
}
directories_path = installdir/"desktop-directories"
if not os.path.exists(directories_path):
os.makedirs(directories_path)
directory_name = f"vnm-{name.lower().replace(' ', '-')}.directory"
with open(Path(f"{directories_path}/{directory_name}"), "w",) as directory_file:
entry.write(directory_file, space_around_delimiters=False)
# Add entry to `.menu` file
menu_path = installdir/"vnm-applications.menu"
with open(menu_path, "r") as xml_file:
s = xml_file.read()
s = re.sub(r"\s+(?=<)", "", s)
root = et.fromstring(s)
menu_el = root.findall("./Menu")[0]
sub_el = et.SubElement(menu_el, "Menu")
name_el = et.SubElement(sub_el, "Name")
name_el.text = name.capitalize()
dir_el = et.SubElement(sub_el, "Directory")
dir_el.text = f'vnm/{directory_name}'
include_el = et.SubElement(sub_el, "Include")
and_el = et.SubElement(include_el, "And")
cat_el = et.SubElement(and_el, "Category")
cat_el.text = name.replace(" ", "-")
cat_el.text = f"vnm-{cat_el.text}"
xmlstr = minidom.parseString(et.tostring(root)).toprettyxml(indent="\t")
with open(menu_path, "w") as f:
f.write('<!DOCTYPE Menu PUBLIC "-//freedesktop//DTD Menu 1.0//EN"\n ')
f.write('"http://www.freedesktop.org/standards/menu-spec/1.0/menu.dtd">\n\n')
f.write(xmlstr[xmlstr.find("?>") + 3 :])
os.chmod(menu_path, 0o644)
| 5,348,845 |
def _parse_maybe_array(
type_name: str, innermost_type: Optional[Union[ast_nodes.ValueType,
ast_nodes.PointerType]]
) -> Union[ast_nodes.ValueType, ast_nodes.PointerType, ast_nodes.ArrayType]:
"""Internal-only helper that parses a type that may be an array type."""
array_match = ARRAY_EXTENTS_PATTERN.search(type_name)
if array_match:
extents = tuple(
int(s.strip()) for s in ARRAY_N_PATTERN.findall(array_match.group(0)))
inner_type_str = type_name[:array_match.start()]
return ast_nodes.ArrayType(
inner_type=_parse_maybe_pointer(inner_type_str.strip(), innermost_type),
extents=extents)
else:
return _parse_maybe_pointer(type_name, innermost_type)
| 5,348,846 |
def ema_decay_schedule(
base_rate: jnp.ndarray,
step: jnp.ndarray,
total_steps: jnp.ndarray,
use_schedule: bool,
) -> jnp.ndarray:
"""Anneals decay rate to 1 with cosine schedule."""
if not use_schedule:
return base_rate
multiplier = _cosine_decay(step, total_steps, 1.)
return 1. - (1. - base_rate) * multiplier
| 5,348,847 |
def _kill_filter(mm: MergedMiningCoordinator, filter_fn: Callable[[MergedMiningStratumProtocol], bool]) -> int:
""" Kill all workers that the filter `fltr` returns true for.
"""
count = 0
for protocol in filter(filter_fn, mm.miner_protocols.values()):
count += 1
protocol.transport.abort()
return count
| 5,348,848 |
def get_node_layer_sort_preference(device_role):
"""Layer priority selection function
Layer sort preference is designed as numeric value.
This function identifies it by LAYERS_SORT_ORDER
object position by default. With numeric values,
the logic may be improved without changes on NeXt app side.
0(null) results undefined layer position in NeXt UI.
Valid indexes start with 1.
"""
for i, role in enumerate(LAYERS_SORT_ORDER, start=1):
if device_role == role:
return i
return 1
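# Hedged sketch (hypothetical order): with LAYERS_SORT_ORDER defined at module
# level as, say, ("undefined", "core", "distribution", "access"), the function
# returns the 1-based position of the role, falling back to 1 for unknown roles.
# get_node_layer_sort_preference("distribution") -> 3
# get_node_layer_sort_preference("unknown-role") -> 1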
| 5,348,849 |
def test_predictive_evaluation_0(model, test_interactions_ds):
"""Evaluation without counting None predictions."""
assert predictive_evaluation(model, test_interactions_ds, count_none_predictions=False,
n_test_predictions=None, skip_errors=True) == {'MSE': 0.6667, 'RMSE': 0.8165}
| 5,348,850 |
def is_anagram(s,t):
"""True if strings s and t are anagrams.
"""
# We can use sorted() on a string, which will give a list of characters
# == will then compare two lists of characters, now sorted.
return sorted(s)==sorted(t)
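# Usage sketch: the comparison is case- and whitespace-sensitive.
assert is_anagram("listen", "silent") is True
assert is_anagram("Listen", "silent") is False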
| 5,348,851 |
def read_dictionary(set_permutations):
"""
This function reads file "dictionary.txt" stored in FILE
and appends words in each line into a Python list
"""
global word_dict
with open('dictionary.txt') as f:
for line in f:
line = line.strip()
word_dict += line.split(',')
count = 0
for ans in set_permutations:
if ans.lower() in word_dict:
count += 1
print(f'Found "{ans}"')
print(f'There are {count} words in total.')
| 5,348,852 |
def f1_score(labels, predict, name=None):
"""
Streaming f1 score.
"""
predictions = tf.floor(predict + 0.5)
with tf.variable_scope(name, 'f1', (labels, predictions)):
epsilon = 1e-7
_, tp = tf.metrics.true_positives(labels, predictions)
_, fn = tf.metrics.false_negatives(labels, predictions)
_, fp = tf.metrics.false_positives(labels, predictions)
precision = tf.div(tp, epsilon + tp + fp, name='precision')
recall = tf.div(tp, epsilon + tp + fn, name='recall')
f1 = 2.0 * precision * recall / (precision + recall + epsilon)
return f1
| 5,348,853 |
async def test_turn_on_with_brightness(mock_send, hass):
"""Test the light turns on to the specified brightness."""
await hass.async_block_till_done()
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_1_LIGHT, ATTR_BRIGHTNESS: 60},
blocking=True,
)
state = hass.states.get(ENTITY_1_LIGHT)
mock_send.assert_called_with(
b"SZ00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x01\x03\x01\xcf\x17"
)
assert state.state == STATE_ON
assert state.attributes.get(ATTR_BRIGHTNESS) == 60
| 5,348,854 |
def test_notify_matrix_plugin_fetch(mock_post, mock_get):
"""
API: NotifyMatrix() Server Fetch/API Tests
"""
# Disable Throttling to speed testing
plugins.NotifyBase.request_rate_per_sec = 0
response_obj = {
'room_id': '!abc123:localhost',
'room_alias': '#abc123:localhost',
'joined_rooms': ['!abc123:localhost', '!def456:localhost'],
# Login details
'access_token': 'abcd1234',
'user_id': '@apprise:localhost',
'home_server': 'localhost',
}
def fetch_failed(url, *args, **kwargs):
# Default configuration
request = mock.Mock()
request.status_code = requests.codes.ok
request.content = dumps(response_obj)
if url.find('/rooms/') > -1:
# over-ride on room query
request.status_code = 403
request.content = dumps({
u'errcode': u'M_UNKNOWN',
u'error': u'Internal server error',
})
return request
mock_get.side_effect = fetch_failed
mock_post.side_effect = fetch_failed
obj = plugins.NotifyMatrix(
user='user', password='passwd', include_image=True)
assert isinstance(obj, plugins.NotifyMatrix) is True
    # We would have failed to send our image notification
assert obj.send(user='test', password='passwd', body="test") is False
# Do the same query with no images to fetch
asset = AppriseAsset(image_path_mask=False, image_url_mask=False)
obj = plugins.NotifyMatrix(user='user', password='passwd', asset=asset)
assert isinstance(obj, plugins.NotifyMatrix) is True
    # We would have failed to send our notification
assert obj.send(user='test', password='passwd', body="test") is False
# Disable Throttling to speed testing
plugins.NotifyBase.request_rate_per_sec = 0
response_obj = {
# Registration
'access_token': 'abcd1234',
'user_id': '@apprise:localhost',
'home_server': 'localhost',
# For room joining
'room_id': '!abc123:localhost',
}
# Default configuration
mock_get.side_effect = None
mock_post.side_effect = None
request = mock.Mock()
request.status_code = requests.codes.ok
request.content = dumps(response_obj)
mock_post.return_value = request
mock_get.return_value = request
obj = plugins.NotifyMatrix(include_image=True)
assert isinstance(obj, plugins.NotifyMatrix) is True
assert obj.access_token is None
assert obj._register() is True
assert obj.access_token is not None
# Cause retries
request.status_code = 429
request.content = dumps({
'retry_after_ms': 1,
})
code, response = obj._fetch('/retry/apprise/unit/test')
assert code is False
request.content = dumps({
'error': {
'retry_after_ms': 1,
}
})
code, response = obj._fetch('/retry/apprise/unit/test')
assert code is False
request.content = dumps({
'error': {}
})
code, response = obj._fetch('/retry/apprise/unit/test')
assert code is False
| 5,348,855 |
def remove_quotes(fin, fout):
"""
    Remove all double quotes from every line of fin and write the result to fout.
"""
fin = open(fin)
fout = open(fout, "w")
for line in fin:
fout.write(line.replace("\"", ""))
fin.close()
fout.close()
| 5,348,856 |
def site_summary_data(query, notime=True, extra="(1=1)"):
"""
Summary of jobs in different states for errors page to indicate if the errors caused by massive site failures or not
"""
summary = []
summaryResources = []
# remove jobstatus from the query
if 'jobstatus__in' in query:
del query['jobstatus__in']
# remove the time window limit for active jobs table
querynotime = copy.deepcopy(query)
if notime:
if 'modificationtime__castdate__range' in querynotime:
del querynotime['modificationtime__castdate__range']
ejquery = {'jobstatus__in': ['failed', 'finished', 'closed', 'cancelled']}
jvalues = ('cloud', 'computingsite', 'jobstatus', 'resourcetype', 'corecount')
orderby = ('cloud', 'computingsite', 'jobstatus')
summaryResources.extend(
Jobsactive4.objects.filter(**querynotime).exclude(**ejquery).values(*jvalues).extra(where=[extra]).annotate(Count('jobstatus')).order_by(*orderby))
summaryResources.extend(
Jobsactive4.objects.filter(**query).filter(**ejquery).values(*jvalues).extra(where=[extra]).annotate(Count('jobstatus')).order_by(*orderby))
summaryResources.extend(
Jobsdefined4.objects.filter(**querynotime).values(*jvalues).extra(where=[extra]).annotate(Count('jobstatus')).order_by(*orderby))
summaryResources.extend(
Jobswaiting4.objects.filter(**querynotime).values(*jvalues).extra(where=[extra]).annotate(Count('jobstatus')).order_by(*orderby))
summaryResources.extend(
Jobsarchived4.objects.filter(**query).values(*jvalues).extra(where=[extra]).annotate(Count('jobstatus')).order_by(*orderby))
summaryResourcesDict = {}
actualcorecount = 0
for sumS in summaryResources:
if sumS['corecount'] is None:
actualcorecount = 1
else:
actualcorecount = sumS['corecount']
if sumS['cloud'] not in summaryResourcesDict:
summaryResourcesDict[sumS['cloud']] = {}
if sumS['computingsite'] not in summaryResourcesDict[sumS['cloud']]:
summaryResourcesDict[sumS['cloud']][sumS['computingsite']] = {}
if sumS['jobstatus'] not in summaryResourcesDict[sumS['cloud']][sumS['computingsite']]:
summaryResourcesDict[sumS['cloud']][sumS['computingsite']][sumS['jobstatus']] = {}
if sumS['resourcetype'] not in summaryResourcesDict[sumS['cloud']][sumS['computingsite']][sumS['jobstatus']]:
summaryResourcesDict[sumS['cloud']][sumS['computingsite']][sumS['jobstatus']][sumS['resourcetype']] = {
'jobstatus__count': 0,
'corecount': actualcorecount
}
summaryResourcesDict[sumS['cloud']][sumS['computingsite']][sumS['jobstatus']][sumS['resourcetype']]['jobstatus__count'] += sumS['jobstatus__count']
summaryList = []
obj = {}
for cloud in summaryResourcesDict.keys():
for site in summaryResourcesDict[cloud].keys():
for jobstatus in summaryResourcesDict[cloud][site].keys():
jobscount =0
obj['resource'] = {}
for i, resource in enumerate(summaryResourcesDict[cloud][site][jobstatus]):
if resource not in obj['resource']:
obj['resource'][resource] = {}
obj['resource'][resource]['jobstatus__count'] = {}
if resource not in obj['resource']:
obj['resource'][resource] = {}
obj['resource'][resource]['corecount'] = {}
obj['resource'][resource]['jobstatus__count'] = summaryResourcesDict[cloud][site][jobstatus][resource]['jobstatus__count']
obj['resource'][resource]['corecount'] = summaryResourcesDict[cloud][site][jobstatus][resource]['corecount']
jobscount += summaryResourcesDict[cloud][site][jobstatus][resource]['jobstatus__count']
if i == len(summaryResourcesDict[cloud][site][jobstatus]) - 1:
obj['cloud'] = cloud
obj['computingsite'] = site
obj['jobstatus'] = jobstatus
obj['jobstatus__count'] = jobscount
summaryList.append(obj)
obj = {}
return summaryList
| 5,348,857 |
def test_multiout_st(tmpdir):
""" testing a simple function that returns a numpy array, adding splitter"""
wf = Workflow("wf", input_spec=["val"], val=[0, 1, 2])
wf.add(arrayout(name="mo", val=wf.lzin.val))
wf.mo.split("val").combine("val")
wf.set_output([("array", wf.mo.lzout.b)])
wf.cache_dir = tmpdir
with Submitter(plugin="cf", n_procs=2) as sub:
sub(runnable=wf)
results = wf.result(return_inputs=True)
assert results[0] == {"wf.val": [0, 1, 2]}
for el in range(3):
assert np.array_equal(results[1].output.array[el], np.array([el, el]))
| 5,348,858 |
def load_solr(csv_file, solr_url):
"""
Load CSV file into Solr. solr_params are a dictionary of parameters
sent to solr on the index request.
"""
file_path = os.path.abspath(csv_file)
solr_params = {}
for fieldname in MULTIVALUE_FIELDNAMES:
tag_split = "f.%s.split" % fieldname
solr_params[tag_split] = 'true'
tag_separator = "f.%s.separator" % fieldname
solr_params[tag_separator] = '|'
solr_params['stream.file'] = file_path
solr_params['commit'] = 'true'
params = urllib.urlencode(solr_params)
print "Loading records into Solr ..."
try:
output = urllib.urlopen(solr_url % params)
except IOError:
raise IOError, 'Unable to connect to the Solr instance.'
print "Solr response:\n"
print output.read()
| 5,348,859 |
def Epsilon(u):
"""Vector symmetric gradient."""
return Sym(Grad(u.transpose()))
| 5,348,860 |
def create_section_and_PCA(data: ML_prepare, labled: bool = False):
"""
Creates PCA for every section (organism group) of the data:
"all", "filaments", "total_counts", "various".
Using helper function "pca_plot".
Plots by the "y", results, whether labeled or not.
Parameters
----------
data: ML_prepare
labled: bool
"""
section_lst = ["all", "filaments", "total_counts", "various"]
fig, ax = plt.subplots(4, 2)
for i in range(len(section_lst)):
table_xy = data.get_partial_table(x_section=section_lst[i], y_labels=labled)
y_cols = table_xy.loc[:, "y"].columns.tolist()
for j in range(2):
### model on y = y_cols[j]
pca_plot(
table_xy, color_col=y_cols[j], section=section_lst[i], ax_i=ax[i, j]
)
fig.set_figheight(15)
fig.set_figwidth(15)
fig.suptitle(
f"PCA of groups, colored by output, delay = {data.delay} days",
fontsize=20,
y=1.02,
)
plt.tight_layout()
fig_name = "PCA_by_groups"
if labled:
fig_name = fig_name + "_labled"
plt.tight_layout()
fig.savefig("figures/" + fig_name + ".png", dpi=150, bbox_inches="tight")
plt.show()
| 5,348,861 |
def at(seq, msg, cmd=None, *args, **kwargs):
"""Output the comwdg"""
return translator(seq)(*COMWDG_CMD)()
| 5,348,862 |
def get_directory(f):
"""Get a directory in the form of a list of entries."""
entries = []
while 1:
line = f.readline()
if not line:
print '(Unexpected EOF from server)'
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] in CRLF:
line = line[:-1]
if line == '.':
break
if not line:
print '(Empty line from server)'
continue
gtype = line[0]
parts = line[1:].split(TAB)
if len(parts) < 4:
print '(Bad line from server: %r)' % (line,)
continue
if len(parts) > 4:
if parts[4:] != ['+']:
print '(Extra info from server:',
print parts[4:], ')'
else:
parts.append('')
parts.insert(0, gtype)
entries.append(parts)
return entries
| 5,348,863 |
def pytest_configure(config):
"""pytest hook: Perform custom initialization at startup time.
Args:
config: The pytest configuration.
Returns:
Nothing.
"""
global log
global console
global ubconfig
test_py_dir = os.path.dirname(os.path.abspath(__file__))
source_dir = os.path.dirname(os.path.dirname(test_py_dir))
board_type = config.getoption('board_type')
board_type_filename = board_type.replace('-', '_')
board_identity = config.getoption('board_identity')
board_identity_filename = board_identity.replace('-', '_')
build_dir = config.getoption('build_dir')
if not build_dir:
build_dir = source_dir + '/build-' + board_type
mkdir_p(build_dir)
result_dir = config.getoption('result_dir')
if not result_dir:
result_dir = build_dir
mkdir_p(result_dir)
persistent_data_dir = config.getoption('persistent_data_dir')
if not persistent_data_dir:
persistent_data_dir = build_dir + '/persistent-data'
mkdir_p(persistent_data_dir)
gdbserver = config.getoption('gdbserver')
if gdbserver and board_type != 'sandbox':
raise Exception('--gdbserver only supported with sandbox')
import multiplexed_log
log = multiplexed_log.Logfile(result_dir + '/test-log.html')
if config.getoption('build'):
if build_dir != source_dir:
o_opt = 'O=%s' % build_dir
else:
o_opt = ''
cmds = (
['make', o_opt, '-s', board_type + '_defconfig'],
['make', o_opt, '-s', '-j8'],
)
with log.section('make'):
runner = log.get_runner('make', sys.stdout)
for cmd in cmds:
runner.run(cmd, cwd=source_dir)
runner.close()
log.status_pass('OK')
class ArbitraryAttributeContainer(object):
pass
ubconfig = ArbitraryAttributeContainer()
ubconfig.brd = dict()
ubconfig.env = dict()
modules = [
(ubconfig.brd, 'u_boot_board_' + board_type_filename),
(ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
(ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
board_identity_filename),
]
for (dict_to_fill, module_name) in modules:
try:
module = __import__(module_name)
except ImportError:
continue
dict_to_fill.update(module.__dict__)
ubconfig.buildconfig = dict()
for conf_file in ('.config', 'include/autoconf.mk'):
dot_config = build_dir + '/' + conf_file
if not os.path.exists(dot_config):
raise Exception(conf_file + ' does not exist; ' +
'try passing --build option?')
with open(dot_config, 'rt') as f:
ini_str = '[root]\n' + f.read()
ini_sio = StringIO.StringIO(ini_str)
parser = ConfigParser.RawConfigParser()
parser.readfp(ini_sio)
ubconfig.buildconfig.update(parser.items('root'))
ubconfig.test_py_dir = test_py_dir
ubconfig.source_dir = source_dir
ubconfig.build_dir = build_dir
ubconfig.result_dir = result_dir
ubconfig.persistent_data_dir = persistent_data_dir
ubconfig.board_type = board_type
ubconfig.board_identity = board_identity
ubconfig.gdbserver = gdbserver
ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'
env_vars = (
'board_type',
'board_identity',
'source_dir',
'test_py_dir',
'build_dir',
'result_dir',
'persistent_data_dir',
)
for v in env_vars:
os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)
if board_type.startswith('sandbox'):
import u_boot_console_sandbox
console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
else:
import u_boot_console_exec_attach
console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)
| 5,348,864 |
def init(provider=None):
"""
Runs through a questionnaire to set up your project's deploy settings
"""
if os.path.exists(DEPLOY_YAML):
_yellow("\nIt looks like you've already gone through the questionnaire.")
cont = prompt("Do you want to go through it again and overwrite the current one?", default="No")
if cont.strip().lower() == "no":
return None
_green("\nWelcome to the django-deployer!")
_green("\nWe need to ask a few questions in order to set up your project to be deployed to a PaaS provider.")
# TODO: identify the project dir based on where we find the settings.py or urls.py
django_settings = prompt(
"* What is your Django settings module?",
default="settings",
validate=_validate_django_settings
)
managepy = prompt(
"* Where is your manage.py file?",
default="./manage.py",
validate=_validate_managepy
)
requirements = prompt(
"* Where is your requirements.txt file?",
default="requirements.txt",
validate=_validate_requirements
)
# TODO: confirm that the file exists
# parse the requirements file and warn the user about best practices:
# Django==1.4.1
# psycopg2 if they selected PostgreSQL
# MySQL-python if they selected MySQL
# South for database migrations
# dj-database-url
pyversion = prompt("* What version of Python does your app need?", default="Python2.7")
# TODO: get these values by reading the settings.py file
static_url = prompt("* What is your STATIC_URL?", default="/static/")
media_url = prompt("* What is your MEDIA_URL?", default="/media/")
if not provider:
provider = prompt("* Which provider would you like to deploy to (dotcloud, appengine, stackato, openshift)?",
validate=_validate_providers)
# Where to place the provider specific questions
site = {}
additional_site = {}
if provider == "appengine":
applicationid = prompt("* What's your Google App Engine application ID (see https://appengine.google.com/)?", validate=r'.+')
instancename = prompt("* What's the full instance ID of your Cloud SQL instance\n"
"(should be in format \"projectid:instanceid\" found at https://code.google.com/apis/console/)?", validate=r'.+:.+')
databasename = prompt("* What's your database name?", validate=r'.+')
sdk_location = prompt("* Where is your Google App Engine SDK location?",
default="/usr/local/google_appengine",
validate=r'.+' # TODO: validate that this path exists
)
additional_site.update({
# quotes for the yaml issue
'application_id': applicationid,
'instancename': instancename,
'databasename': databasename,
'sdk_location': sdk_location,
})
# only option with Google App Engine is MySQL, so we'll just hardcode it
site = {
'database': 'MySQL'
}
elif provider == "openshift":
application_name = prompt("* What is your openshift application name?")
site = {
'application_name': application_name
}
else:
database = prompt("* What database does your app use?", default="PostgreSQL")
site = {
'database': database,
}
# TODO: add some validation that the admin password is valid
# TODO: let the user choose the admin username instead of hardcoding it to 'admin'
admin_password = prompt("* What do you want to set as the admin password?",
validate=_validate_admin_password
)
import random
SECRET_KEY = ''.join([random.SystemRandom().choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)])
SECRET_KEY = "'" + SECRET_KEY + "'"
site.update({
'pyversion': pyversion,
'django_settings': django_settings,
'managepy': managepy,
'requirements': requirements,
'static_url': static_url,
'media_url': media_url,
'provider': provider,
'admin_password': admin_password,
'secret_key': SECRET_KEY,
})
site.update(additional_site)
_create_deploy_yaml(site)
return site
| 5,348,865 |
def plot_data(options, data):
"""Plotting data en returning figure."""
color = 'cornflowerblue'
wip_col = options['column']
ax = plt.axes()
ax.xaxis.set_major_locator(mdates.DayLocator(interval=1))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%y-%m-%d'))
ax.xaxis.grid(options['grid'])
ax.yaxis.grid(options['grid'])
ystart, yend = ax.get_ylim()
try:
plt.plot_date(mdates.datestr2num(data['time']),
data[wip_col],
xdate=True,
marker='8',
markersize='1',
color=color)
except:
usage("Unexpected error in plot function", 1)
plt.xticks(rotation=90)
plt.tick_params(axis='both', which='major', labelsize=4)
try:
plt.savefig(options['output'] + '.png',
figsize=(options['width'], options['height']),
dpi=options['dpi'])
except:
usage("Unexpected error in savefig function", 1)
print "%s saved from %s datafile" % (options['output'], options['file'])
return
| 5,348,866 |
async def test_get_job_failure_reason_success(mock_client):
"""Assert get_job_failure_reason return failure reason as string"""
mock_client.return_value.__aenter__.return_value.describe_job_run.return_value = {
"jobRun": {
"failureReason": "Unknown",
"stateDetails": "TERMINATED",
}
}
hook = EmrContainerHookAsync(virtual_cluster_id=VIRTUAL_CLUSTER_ID, aws_conn_id=AWS_CONN_ID)
reason = await hook.get_job_failure_reason(JOB_ID)
expected = "Unknown - TERMINATED"
assert reason == expected
| 5,348,867 |
def get_worker_status(worker):
"""Retrieve worker status by worker ID from redis."""
set_redis_worker_status_pool()
global WORKER_STATUS_POOL
# retrieve worker status
r = StrictRedis(connection_pool=WORKER_STATUS_POOL)
res = r.get(WORKER_STATUS_KEY_TMPL % worker)
return res.decode() if hasattr(res, "decode") else res
| 5,348,868 |
def lif_r_psc_aibs_converter(config, syn_tau=[5.5, 8.5, 2.8, 5.8]):
"""Creates a nest glif_lif_r_psc object"""
coeffs = config['coeffs']
threshold_params = config['threshold_dynamics_method']['params']
reset_params = config['voltage_reset_method']['params']
params = {'V_th': coeffs['th_inf'] * config['th_inf'] * 1.0e03 + config['El_reference'] * 1.0e03,
'g': coeffs['G'] / config['R_input'] * 1.0e09,
'E_L': config['El'] * 1.0e03 + config['El_reference'] * 1.0e03,
'C_m': coeffs['C'] * config['C'] * 1.0e12,
't_ref': config['spike_cut_length'] * config['dt'] * 1.0e03,
'a_spike': threshold_params['a_spike'] * 1.0e03,
'b_spike': threshold_params['b_spike'] * 1.0e-03,
'a_reset': reset_params['a'],
'b_reset': reset_params['b'] * 1.0e03,
'tau_syn': syn_tau, # in ms
'V_dynamics_method': 'linear_exact'}
return params
| 5,348,869 |
def test_best_estimator():
"""Ensure that the best estimator is the one giving the best score (by
re-running it)"""
train_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_train')
test_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
data = Dataset.load_from_folds([(train_file, test_file)],
Reader('ml-100k'))
param_grid = {'n_epochs': [5], 'lr_all': [0.002, 0.005],
'reg_all': [0.4, 0.6], 'n_factors': [1], 'init_std_dev': [0]}
gs = GridSearchCV(SVD, param_grid, measures=['mae'],
cv=PredefinedKFold(), joblib_verbose=100)
gs.fit(data)
best_estimator = gs.best_estimator['mae']
# recompute MAE of best_estimator
mae = cross_validate(best_estimator, data, measures=['MAE'],
cv=PredefinedKFold())['test_mae']
assert mae == gs.best_score['mae']
| 5,348,870 |
def pytest_collection_modifyitems(items, config):
"""
This function is called upon during the pytest test collection phase and allows for modification
of the test items within the list
"""
collect_only = config.getoption("--collect-only")
cassandra_dir = config.getoption("--cassandra-dir")
cassandra_version = config.getoption("--cassandra-version")
if not collect_only and cassandra_dir is None:
if cassandra_version is None:
raise Exception("Required dtest arguments were missing! You must provide either --cassandra-dir "
"or --cassandra-version. Refer to the documentation or invoke the help with --help.")
# Either cassandra_version or cassandra_dir is defined, so figure out the version
CASSANDRA_VERSION = cassandra_version or get_version_from_build(cassandra_dir)
# Check that use_off_heap_memtables is supported in this c* version
if config.getoption("--use-off-heap-memtables") and ("3.0" <= CASSANDRA_VERSION < "3.4"):
raise Exception("The selected Cassandra version %s doesn't support the provided option "
"--use-off-heap-memtables, see https://issues.apache.org/jira/browse/CASSANDRA-9472 "
"for details" % CASSANDRA_VERSION)
selected_items = []
deselected_items = []
sufficient_system_resources_resource_intensive = sufficient_system_resources_for_resource_intensive_tests()
logger.debug("has sufficient resources? %s" % sufficient_system_resources_resource_intensive)
for item in items:
deselect_test = False
if item.get_closest_marker("resource_intensive") and not collect_only:
force_resource_intensive = config.getoption("--force-resource-intensive-tests")
skip_resource_intensive = config.getoption("--skip-resource-intensive-tests")
if not force_resource_intensive:
if skip_resource_intensive:
deselect_test = True
logger.info("SKIP: Deselecting test %s as test marked resource_intensive. To force execution of "
"this test re-run with the --force-resource-intensive-tests command line argument" % item.name)
if not sufficient_system_resources_resource_intensive:
deselect_test = True
logger.info("SKIP: Deselecting resource_intensive test %s due to insufficient system resources" % item.name)
if item.get_closest_marker("no_vnodes"):
if config.getoption("--use-vnodes"):
deselect_test = True
logger.info("SKIP: Deselecting test %s as the test requires vnodes to be disabled. To run this test, "
"re-run without the --use-vnodes command line argument" % item.name)
if item.get_closest_marker("vnodes"):
if not config.getoption("--use-vnodes"):
deselect_test = True
logger.info("SKIP: Deselecting test %s as the test requires vnodes to be enabled. To run this test, "
"re-run with the --use-vnodes command line argument" % item.name)
for test_item_class in inspect.getmembers(item.module, inspect.isclass):
if not hasattr(test_item_class[1], "pytestmark"):
continue
for module_pytest_mark in test_item_class[1].pytestmark:
if module_pytest_mark.name == "upgrade_test":
if not config.getoption("--execute-upgrade-tests"):
deselect_test = True
if item.get_closest_marker("upgrade_test"):
if not config.getoption("--execute-upgrade-tests"):
deselect_test = True
if item.get_closest_marker("no_offheap_memtables"):
if config.getoption("use_off_heap_memtables"):
deselect_test = True
# temporarily deselect tests in cqlsh_copy_tests that depend on cqlshlib,
        # until cqlshlib is Python 3 compatible
if item.get_marker("depends_cqlshlib"):
deselect_test = True
if deselect_test:
deselected_items.append(item)
else:
selected_items.append(item)
config.hook.pytest_deselected(items=deselected_items)
items[:] = selected_items
| 5,348,871 |
def get_bigwig_values(bigwig_path, chrom_name, chrom_end, chrom_start=0):
"""
Get the values for a genomic region of interest from a bigwig file.
:param bigwig_path: Path to the bigwig file
:param chrom_name: Chromosome name
:param chrom_end: chromosome end
:param chrom_start: chromosome start
:return: Bigwig values from the region given
"""
with pyBigWig.open(bigwig_path) as input_bw:
return np.nan_to_num(input_bw.values(chrom_name, chrom_start, chrom_end, numpy=True))
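
# Hypothetical usage sketch: the bigwig path, chromosome name and end position
# below are assumptions for illustration, not values from the original source.
signal = get_bigwig_values("sample.bw", chrom_name="chr1", chrom_end=1_000_000)
print(signal.shape)  # one value per base over [chrom_start, chrom_end)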
| 5,348,872 |
def dtool_config(files):
"""Provide default dtool config."""
logger = logging.getLogger(__name__)
# use environment variables instead of custom config file, see
# https://github.com/jic-dtool/dtoolcore/pull/17
# _original_environ = os.environ.copy()
# inject configuration into environment:
dtool_config = _read_json(files['dtool_config_path'])
logger.debug("dtool config overrides:")
_log_nested_dict(logger.debug, dtool_config)
return dtool_config
| 5,348,873 |
def get_successors(state, maxwords):
"""Traverses state graph to find valid anagrams."""
terminal = len(state['chars']) == 0
# Check whether the state is invalid and should be pruned
if not is_valid(state['anagram'], terminal, maxwords):
return []
# If valid terminal state, stop search and return
if terminal:
return [state['anagram']]
# Continue to recursively explore subsequent states
next_states = []
for c in state['chars']:
chars = state['chars'].copy()
chars.subtract({c: 1})
if chars[c] == 0:
del chars[c]
next_states.append({
'anagram': state['anagram'] + c,
'chars': chars,
})
# Add an additional next state for word breaks
if state['anagram'] != '' and state['anagram'][-1] != ' ':
next_states.append({
'anagram': state['anagram'] + ' ',
'chars': state['chars'],
})
anagrams = []
for next_state in next_states:
anagrams += get_successors(next_state, maxwords=maxwords)
return anagrams
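
# Hypothetical usage sketch: the seed phrase is an assumption, and the module's
# is_valid helper (with whatever word list it consults) must be available. The
# state dict mirrors what get_successors expects: a partial anagram string plus
# a collections.Counter of the remaining characters (Counter is required
# because the function calls chars.subtract and chars.copy).
from collections import Counter

initial_state = {"anagram": "", "chars": Counter("listen".replace(" ", ""))}
anagrams = get_successors(initial_state, maxwords=1)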
| 5,348,874 |
def read_and_parse_cdl_file(file_name):
"""
Reads relevant information from a "cdl" file
"""
if file_name is None:
return None
wl_map = {}
bl_map = {}
colclk_wl_map = {}
# Parse line-by-line
with open(file_name, "r") as fp:
for line in fp:
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
fields = split_cdl_line(line)
if not fields:
continue
# Row definition
if fields[0] == "define_row":
wl_idx = fields.index("-WL_range")
row = 0
for pair in fields[wl_idx+1]:
if isinstance(pair, list) and len(pair) == 2:
wl_map[row] = (int(pair[0]), int(pair[1]),)
row += 1
# Clock column definition
elif fields[0] == "define_colclk_instances":
wl_idx = fields.index("-WL_Port")
row_idx = fields.index("-row")
wl = int(fields[wl_idx+1])
row = int(fields[row_idx+1])
colclk_wl_map[row] = (wl, wl,)
# Column definition
elif fields[0] == "define_column":
bl_idx = fields.index("-BL_range")
col = 0
for pair in fields[bl_idx+1]:
if isinstance(pair, list) and len(pair) == 2:
bl_map[col] = (int(pair[0]), int(pair[1]),)
col += 1
data = {
"colclk_wl_map": colclk_wl_map,
"wl_map": wl_map,
"bl_map": bl_map,
}
return data
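
# Hypothetical usage sketch: the file name is an assumption. When parsing
# succeeds, the returned dict always carries these three keys, each mapping a
# row/column index to a (start, end) range taken from the cdl definitions.
cdl_data = read_and_parse_cdl_file("sram_array.cdl")
if cdl_data is not None:
    print(cdl_data["wl_map"], cdl_data["bl_map"], cdl_data["colclk_wl_map"])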
| 5,348,875 |
def test_install_noneditable_git(script, tmpdir):
"""
Test installing from a non-editable git URL with a given tag.
"""
result = script.pip(
'install',
'git+https://github.com/pypa/pip-test-package.git'
'@0.1.1#egg=pip-test-package'
)
egg_info_folder = (
script.site_packages /
'pip_test_package-0.1.1-py%s.egg-info' % pyversion
)
result.assert_installed('piptestpackage',
without_egg_link=True,
editable=False)
assert egg_info_folder in result.files_created, str(result)
| 5,348,876 |
def test_4_arguments() -> None:
"""four arguments supplied when 3 expected"""
for extra in BAD_EXTRA_ARGUMENTS:
with pytest.raises(TypeError, match=re.compile(
"__init__[(][)] takes 4 positional arguments but {} were given".format(
len(extra) + 4))):
AutomataUniverse(NEIGHBOURHOOD_2D, [2,3], [3], *extra)
| 5,348,877 |
def detect_outlier_at_index(
srs: pd.Series,
idx: int,
n_samples: int,
z_score_threshold: float,
) -> bool:
"""
Check if a value at index `idx` in a series is an outlier.
The passed series is supposed to be ordered by increasing timestamps.
This function
    - detects z-score window index boundaries with respect to index order and number of samples
- computes the z-score of the current element with respect to the z-score window values
- compares the z-score to the threshold to declare the current element an outlier
:param srs: input series
:param idx: numerical index of a value to check
:param n_samples: number of samples in z-score window
:param z_score_threshold: threshold to mark a value as an outlier based on
its z-score in the window
:return: whether the element at index idx is an outlier
"""
# Set z-score window boundaries.
window_first_index = max(0, idx - n_samples)
# Get a series window to compute z-score for.
window_srs = srs.iloc[window_first_index : idx + 1]
# Compute z-score of a value at index.
z_score = (srs.iloc[idx] - window_srs.mean()) / window_srs.std()
# Return if a value at index is an outlier.
# Done via `<=` since a series can contain None values that should be detected
    # as well but will result in NaN if compared to the threshold directly.
is_outlier = not (abs(z_score) <= z_score_threshold)
return is_outlier
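
# Hypothetical usage sketch: the series values and threshold are assumptions.
# Note the z-score window includes the checked value itself, which caps how
# large its z-score can get for short windows.
import pandas as pd

srs = pd.Series([1.0, 1.1, 0.9, 1.0, 1.05, 25.0])
is_out = detect_outlier_at_index(srs, idx=5, n_samples=5, z_score_threshold=2.0)
# expected: True (the last value's z-score is just above 2.0 within its window)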
| 5,348,878 |
def _organize_parameter(parameter):
"""
Convert operation parameter message to its dict format.
Args:
parameter (OperationParameter): Operation parameter message.
Returns:
dict, operation parameter.
"""
parameter_result = dict()
parameter_keys = [
'mapStr',
'mapBool',
'mapInt',
'mapDouble',
]
for parameter_key in parameter_keys:
base_attr = getattr(parameter, parameter_key)
parameter_value = dict(base_attr)
# convert str 'None' to None
for key, value in parameter_value.items():
if value == 'None':
parameter_value[key] = None
parameter_result.update(parameter_value)
    # unwrap `mapStrList`/`strValue` wrappers into plain lists in the result
str_list_para = dict(getattr(parameter, 'mapStrList'))
result_str_list_para = dict()
for key, value in str_list_para.items():
str_list_para_list = list()
for str_ele in getattr(value, 'strValue'):
str_list_para_list.append(str_ele)
str_list_para_list = list(map(lambda x: None if x == '' else x, str_list_para_list))
result_str_list_para[key] = str_list_para_list
parameter_result.update(result_str_list_para)
return parameter_result
| 5,348,879 |
def colorpicker(request):
"""
Controller for the app home page.
"""
my_param = MyParamColor()
context = get_context(request, my_param)
return render(request, 'tethys_django_form_tutorial/colorpicker.html', context)
| 5,348,880 |
def _copy_part_from_other_model(model,
other_model,
part,
realization = None,
consolidate = True,
force = False,
cut_refs_to_uuids = None,
cut_node_types = None,
self_h5_file_name = None,
h5_uuid = None,
other_h5_file_name = None):
"""Fully copies part in from another model, with referenced parts, hdf5 data and relationships."""
# todo: double check behaviour around equivalent CRSes, especially any default crs in model
assert other_model is not None
if other_model is model:
return
assert part is not None
if realization is not None:
assert isinstance(realization, int) and realization >= 0
if force:
assert consolidate
if not other_h5_file_name:
other_h5_file_name = other_model.h5_file_name()
if not self_h5_file_name:
self_h5_file_name = model.h5_file_name(file_must_exist = False)
hdf5_copy_needed = not os.path.samefile(self_h5_file_name, other_h5_file_name)
# check whether already existing in this model
if part in model.parts_forest.keys():
return
if other_model.type_of_part(part) == 'obj_EpcExternalPartReference':
log.debug('refusing to copy hdf5 ext part from other model')
return
log.debug('copying part: ' + str(part))
uuid = rqet.uuid_in_part_name(part)
if not force:
assert model.part_for_uuid(uuid) is None, 'part copying failure: uuid exists for different part!'
# duplicate xml tree and add as a part
other_root = other_model.root_for_part(part, is_rels = False)
if other_root is None:
log.error('failed to copy part (missing in source model?): ' + str(part))
return
resident_uuid = _unforced_consolidation(model, other_model, consolidate, force, part)
if resident_uuid is None:
root_node = _duplicate_node(model, other_root) # adds duplicated node as part
assert root_node is not None
_set_realization_index_node_if_required(realization, root_node)
if hdf5_copy_needed:
# copy hdf5 data
hdf5_internal_paths = [node.text for node in rqet.list_of_descendant_tag(other_root, 'PathInHdfFile')]
hdf5_count = whdf5.copy_h5_path_list(other_h5_file_name, self_h5_file_name, hdf5_internal_paths, mode = 'a')
# create relationship with hdf5 if needed and modify h5 file uuid in xml references
_copy_part_hdf5_setup(model, hdf5_count, h5_uuid, root_node)
# NB. assumes ext part is already established when sharing a common hdf5 file
# cut references to objects to be excluded
if cut_refs_to_uuids:
rqet.cut_obj_references(root_node, cut_refs_to_uuids)
if cut_node_types:
rqet.cut_nodes_of_types(root_node, cut_node_types)
# recursively copy in referenced parts where they don't already exist in this model
_copy_referenced_parts(model, other_model, realization, consolidate, force, cut_refs_to_uuids, cut_node_types,
self_h5_file_name, h5_uuid, other_h5_file_name, root_node)
resident_uuid = uuid
else:
root_node = model.root_for_uuid(resident_uuid)
# copy relationships where target part is present in this model – this part is source, then destination
_copy_relationships_for_present_targets(model, other_model, consolidate, force, resident_uuid, root_node)
| 5,348,881 |
def flatten_in(iterable, pred=None):
"""Like flatten, but recurse also into tuples/lists not matching pred.
This makes also those items get the same flattening applied inside them.
Example::
is_nested = lambda e: all(isinstance(x, (list, tuple)) for x in e)
data = (((1, 2), ((3, 4), (5, 6)), 7), ((8, 9), (10, 11)))
assert (tuple(flatten(data, is_nested)) ==
(((1, 2), ((3, 4), (5, 6)), 7), (8, 9), (10, 11)))
assert (tuple(flatten_in(data, is_nested)) ==
(((1, 2), (3, 4), (5, 6), 7), (8, 9), (10, 11)))
"""
pred = pred or (lambda x: True)
it = iter(iterable)
for e in it:
if isinstance(e, (list, tuple)):
if pred(e):
for f in flatten_in(e, pred):
yield f
else:
t = type(e)
yield t(flatten_in(e, pred))
else:
yield e
| 5,348,882 |
def test_non_callable_methods(monkeypatch, subtests):
"""
Tests that an exception is raised when any of the integration base methods are non-callable.
"""
from django_guid.config import Settings
from django_guid.integrations import SentryIntegration
mock_integration = SentryIntegration()
to_test = [
{
'function_name': 'cleanup',
'error': 'Integration method `cleanup` needs to be made callable for `SentryIntegration`',
},
{
'function_name': 'run',
'error': 'Integration method `run` needs to be made callable for `SentryIntegration`',
},
{
'function_name': 'setup',
'error': 'Integration method `setup` needs to be made callable for `SentryIntegration`',
},
]
for test in to_test:
setattr(mock_integration, test.get('function_name'), 'test')
mocked_settings = deepcopy(django_settings.DJANGO_GUID)
mocked_settings['INTEGRATIONS'] = [mock_integration]
with override_settings(DJANGO_GUID=mocked_settings):
with subtests.test(msg=f'Testing function {test.get("function_name")}'):
with pytest.raises(ImproperlyConfigured, match=test.get('error')):
Settings().validate()
| 5,348,883 |
def translate_date(default=defaults.get('language')):
"""Parse/translate a date."""
d = request.args.get('date')
if not d:
raise RuntimeError(_('Date is mandatory.'))
dest_lang = request.args.get('dest') if request.args.get('dest') else default
variation = request.args.get('variation') if request.args.get('variation') else 'short'
d_list = d.split('/')
if request.args.get('src') == 'es':
d = date(year=int(d_list[2]), month=int(d_list[1]), day=int(d_list[0]))
else:
        d = date(*map(int, d_list))
return render_template_string(source=get_date(d=d, f=variation, l=dest_lang))
| 5,348,884 |
def for_4():
""" *'s printed in the shape of number 4 """
for row in range(9):
for col in range(7):
            if row + col == 6 or row == 6 or (col == 5 and row > 3):
                print('*', end=' ')
            else:
                print(' ', end=' ')
print()
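
# Usage sketch: calling for_4() prints an asterisk rendering of the digit 4,
# roughly as below (each grid cell is two characters wide, trailing spaces
# omitted):
#
#             *
#           *
#         *
#       *
#     *     *
#   *       *
# * * * * * * *
#           *
#           *
for_4()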
| 5,348,885 |
def ocaml_configure(
opam = None,
build = None,
switch = None,
# hermetic = False,
# verify = False,
# install = False,
# pin = False,
# force = False,
debug = False,
verbose = False
):
# is_rules_ocaml = False,
# opam = None):
"""Declares workspaces (repositories) the Ocaml rules depend on.
Args:
opam: an [OpamConfig](#provider-opamconfig) provider
debug: enable debugging
"""
# print("ocaml.configure")
if switch and (build or opam):
fail("ocaml_configure: param 'switch' cannot be combined with 'build' or 'opam'.")
if build and not opam:
fail("configure param 'build' must be used with param 'opam'.")
if build and switch:
fail("configure params 'build' and 'switch' incompatible, pass one or the other.")
ppx_repo(name="ppx")
# obazl_repo(name="obazl")
default_build = None
if opam:
config_opam(
opam,
build,
# hermetic,
# verify,
# install,
# pin,
# force,
verbose = verbose,
debug = debug)
else:
# print("no opam")
_ocaml_repo(name="ocaml",
# hermetic = hermetic,
# verify = verify,
# install = install,
# force = force,
# pin = pin,
case = 1 if switch else 0,
switch_name = switch if switch else "",
build_name = build,
# build_compiler = None,
# opam_pkgs = None,
# findlib_pkgs = None,
# pin_specs = None,
verbose = verbose,
debug = debug)
ocaml_register_toolchains(installation="host")
# print("ocaml_configure done")
| 5,348,886 |
def check_call(
*command: Any,
working_directory: Union[PathLike, str] = ".",
verbose: bool = False,
quoted: bool = False,
**kwargs: Any,
) -> Optional[str]:
"""Proxy for subprocess.check_call"""
return check_run(
*command, working_directory=working_directory, verbose=verbose, quoted=quoted, **kwargs
)
| 5,348,887 |
def fit_client(client: Client, weights: Weights) -> Tuple[Weights, int]:
"""Refine weights on a single client."""
return client.fit(weights)
| 5,348,888 |
def _find_rpms_in_packages(koji_api, name_list, major_minor):
"""
Given a list of package names, look up the RPMs that are built in them.
Of course, this is an inexact science to do generically; contents can
vary from build to build, and multiple packages could build the same RPM name.
We will first look for the latest build in the tags for the given
major_minor version. If not there, we will look in brew for the package
name and choose the latest build.
:koji_api: existing brew connection
:name_list: list of package names to search for
:major_minor: minor version of OCP to search for builds in
Returns: a map of package_name: set(rpm_names)
"""
rpms_for_package = {}
tags = _tags_for_version(major_minor)
for package in name_list:
for tag in tags:
for build in koji_api.getLatestBuilds(tag=tag, package=package):
rpm_list = set(rpm["name"] for rpm in koji_api.listBuildRPMs(build["build_id"]))
rpms_for_package.setdefault(package, set()).update(rpm_list)
if package not in rpms_for_package:
# it wasn't in our tags; look for it by name
pkg_info = koji_api.getPackage(package)
if not pkg_info:
continue
latest_builds = koji_api.listBuilds(packageID=pkg_info["id"], state=1, queryOpts=dict(limit=1))
if not latest_builds:
continue
rpm_list = set(rpm["name"] for rpm in koji_api.listBuildRPMs(latest_builds[0]["build_id"]))
rpms_for_package[package] = set(rpm_list)
return rpms_for_package
| 5,348,889 |
def calculate_algorithm_tags(analyses):
"""
Calculate the algorithm tags (eg. "ip", True) that should be applied to a sample document based on a list of its
associated analyses.
:param analyses: the analyses to calculate tags for
:type analyses: list
:return: algorithm tags to apply to the sample document
:rtype: dict
"""
pathoscope = False
nuvs = False
for analysis in analyses:
if pathoscope is not True and analysis["algorithm"] in PATHOSCOPE_TASK_NAMES:
pathoscope = analysis["ready"] or "ip" or pathoscope
if nuvs is not True and analysis["algorithm"] == "nuvs":
nuvs = analysis["ready"] or "ip" or nuvs
if pathoscope is True and nuvs is True:
break
return {
"pathoscope": pathoscope,
"nuvs": nuvs
}
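
# Hypothetical usage sketch: the analysis dicts are assumptions, and
# "pathoscope_bowtie" is assumed to be one of the names in
# PATHOSCOPE_TASK_NAMES. A finished pathoscope analysis yields True, while an
# unfinished nuvs analysis yields the in-progress marker "ip".
tags = calculate_algorithm_tags([
    {"algorithm": "pathoscope_bowtie", "ready": True},
    {"algorithm": "nuvs", "ready": False},
])
# tags == {"pathoscope": True, "nuvs": "ip"}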
| 5,348,890 |
def open_json(filepath):
"""
Returns open .json file in python as a list.
:param: .json file path
:returns: list
:rvalue: str
"""
with open(filepath) as f:
notes = json.load(f)
return notes
| 5,348,891 |
def test_account_registration_view_form_ok(client):
"""test valid form data on registration view"""
payload = {
"email": "[email protected]",
"password1": "UnMotDePasse12",
"password2": "UnMotDePasse12",
"username": "testuser",
}
response = client.post("/register/", payload)
user = Account.objects.filter(email="[email protected]").first()
assert user.email == "[email protected]"
assert response.status_code == 302
assertTemplateUsed("account/register.html")
| 5,348,892 |
def remove_user_list():
"""
Endpoint to remove a specific list or a complete user
---
tags:
- User Methods
parameters:
- name: user
type: string
in: query
required: true
description: user you want to query
- name: list
type: string
in: query
required: false
description: specific list that belong to a user
responses:
400:
description: Incorrect dbs used
200:
description: Your list was deleted
"""
to_remove_user = request.args.get('user')
if to_remove_user is not None:
validation = data_validator.validate_json_for_user(to_remove_user)
to_remove_list = request.args.get('list', default=None)
if to_remove_list is not None:
data_validator.validate_json_for_list(to_remove_list)
return_object = logic_for_users_controller_delete.worker_for_delete(to_remove_user, to_remove_list)
        if return_object['status_code'] == 200:
res = json.dumps(return_object['body'], indent=4)
return res, 200
else:
res = json.dumps(return_object['body'].__dict__, indent=4)
return res, return_object['status_code']
| 5,348,893 |
def upload(f, content_type, token, api_key):
"""Upload a file with the given content type to Climate
This example supports files up to 5 MiB (5,242,880 bytes).
Returns The upload id if the upload is successful, False otherwise.
"""
uri = '{}/v4/uploads'.format(api_uri)
headers = {
'authorization': bearer_token(token),
'x-api-key': api_key
}
md5 = file.md5(f)
length = file.length(f)
data = {
'md5': md5,
'length': length,
'contentType': content_type
}
# initiate upload
res = requests.post(uri, headers=headers, json=data)
Logger().info(to_curl(res.request))
if res.status_code == 201:
upload_id = res.json()
Logger().info("Upload Id: %s" % upload_id)
put_uri = '{}/{}'.format(uri, upload_id)
        # for this example, total size is limited to 5 MiB; the content is
        # sent below in CHUNK_SIZE pieces, one content-range header per chunk
headers['content-range'] = 'bytes {}-{}/{}'.format(0,
(length - 1),
length)
headers['content-type'] = binary_content_type
f.seek(0)
# send image
for position in range(0, length, CHUNK_SIZE):
buf = f.read(CHUNK_SIZE)
headers['content-range'] = 'bytes {}-{}/{}'.format(
position, position + len(buf) - 1, length)
try:
res = requests.put(put_uri, headers=headers, data=buf)
Logger().info(headers)
except Exception as e:
Logger().error("Exception: %s" % e)
if res.status_code == 204:
return upload_id
return False
| 5,348,894 |
def log_info_level(message):
"""
Add info level log to the app's logger
======================================
Parameters
----------
message : str
Message to log.
"""
app.logger.info('{}:{}'.format(asctime(), message))
| 5,348,895 |
def get_node_name_centres(nodeset: Nodeset, coordinates_field: Field, name_field: Field):
"""
Find mean locations of node coordinate with the same names.
:param nodeset: Zinc Nodeset or NodesetGroup to search.
:param coordinates_field: The coordinate field to evaluate.
:param name_field: The name field to match.
:return: Dict of names -> coordinates.
"""
components_count = coordinates_field.getNumberOfComponents()
fieldmodule = nodeset.getFieldmodule()
fieldcache = fieldmodule.createFieldcache()
name_records = {} # name -> (coordinates, count)
nodeiter = nodeset.createNodeiterator()
node = nodeiter.next()
while node.isValid():
fieldcache.setNode(node)
name = name_field.evaluateString(fieldcache)
coordinates_result, coordinates = coordinates_field.evaluateReal(fieldcache, components_count)
if name and (coordinates_result == RESULT_OK):
name_record = name_records.get(name)
if name_record:
name_centre = name_record[0]
for c in range(components_count):
name_centre[c] += coordinates[c]
name_record[1] += 1
else:
name_records[name] = [ coordinates, 1 ]
node = nodeiter.next()
# divide centre coordinates by count
name_centres = {}
for name in name_records:
name_record = name_records[name]
name_count = name_record[1]
name_centre = name_record[0]
if name_count > 1:
scale = 1.0/name_count
for c in range(components_count):
name_centre[c] *= scale
name_centres[name] = name_centre
return name_centres
| 5,348,896 |
def pred(a):
"""
pred :: a -> a
the predecessor of a value. For numeric types, pred subtracts 1.
"""
return Enum[a].pred(a)
| 5,348,897 |
def main():
"""
Entry point of the app.
"""
if len(sys.argv) != 2:
print(f"{sys.argv[0]} [SERVER_LIST_FILE]")
return 1
return process(server_list_file=sys.argv[1])
| 5,348,898 |
def pascal_to_snake(pascal_string):
"""Return a snake_string for a given PascalString."""
camel_string = _pascal_to_camel(pascal_string)
snake_string = _camel_to_snake(camel_string)
return "".join(snake_string)
| 5,348,899 |