def new(request, pk=""):
""" New CodeStand Entry
    When the user presses 'Associate new project' there is a ProjectContainer
    already associated, so this information must be reused in the form.
    :param request: HttpRequest
    :param pk: int - Indicates which project must be loaded
"""
if request.path != request.session[constants.ACTUAL_TEMPLATE]:
clear_session(request)
request.session[constants.REM_LINKS] = []
request.session[constants.REM_TAGS] = []
request.session[constants.REM_DOCS] = []
request.session[constants.REM_CONTACTS] = []
request.session[constants.ADD_CONTACTS] = []
request.session[constants.ADD_LINKS] = []
request.session[constants.ADD_TAGS] = []
request.session[constants.ADD_DOCS] = []
request.session[constants.MAINTAIN_STATE] = True
if pk != "":
request.session[constants.ACTUAL_PROJECT] = get_object_or_404(ProjectContainer, id=pk)
# User must have permission to add new CodeStand
if not is_user_allowed(request.user, "canaddmatch"):
raise Http404
return save_code(request, False, pk)
| 5,347,100 |
def sideral(
date, longitude=0.0, model="mean", eop_correction=True, terms=106
): # pragma: no cover
"""Sideral time as a rotation matrix
"""
theta = _sideral(date, longitude, model, eop_correction, terms)
return rot3(np.deg2rad(-theta))
| 5,347,101 |
def _make_mount_handler_command(
handler_name: str, handler: Type[ForeignDataWrapperDataSource]
) -> Command:
"""Turn the mount handler function into a Click subcommand
with help text and kwarg/connection string passing"""
help_text, handler_options_help = _generate_handler_help(handler)
params = [
click.Argument(["schema"]),
click.Option(
["--connection", "-c"],
help="Connection string in the form username:password@server:port",
),
click.Option(
["--handler-options", "-o"], help=handler_options_help, default="{}", type=JsonType()
),
]
from splitgraph.core.output import conn_string_to_dict
def _callback(schema, connection, handler_options):
handler_options.update(conn_string_to_dict(connection))
mount(schema, mount_handler=handler_name, handler_kwargs=handler_options)
cmd = click.Command(handler_name, params=params, callback=_callback, help=help_text)
return cmd
| 5,347,102 |
def test_load_settings_onto_instrument(tmp_test_data_dir):
"""
Test that we can successfully load the settings of a dummy instrument
"""
# Always set datadir before instruments
set_datadir(tmp_test_data_dir)
def get_func():
return 20
tuid = "20210319-094728-327-69b211"
instr = Instrument("DummyInstrument")
# A parameter that is both settable and gettable
instr.add_parameter(
"settable_param", initial_value=10, parameter_class=ManualParameter
)
# A parameter that is only gettable
instr.add_parameter("gettable_param", set_cmd=False, get_cmd=get_func)
    # A boolean parameter that is True by default
instr.add_parameter(
"boolean_param", initial_value=True, parameter_class=ManualParameter
)
# A parameter which is already set to None
instr.add_parameter(
"none_param",
initial_value=None,
parameter_class=ManualParameter,
vals=validators.Numbers(),
)
# A parameter which our function will try to set to None, giving a warning
instr.add_parameter(
"none_param_warning",
initial_value=1,
parameter_class=ManualParameter,
vals=validators.Numbers(),
)
# The snapshot also contains an 'obsolete_param', that is not included here.
# This represents a parameter which is no longer in the qcodes driver.
with pytest.warns(
UserWarning,
match="Parameter none_param_warning of instrument DummyInstrument could not be "
"set to None due to error",
):
load_settings_onto_instrument(instr, tuid)
with pytest.warns(
UserWarning,
match="Could not set parameter obsolete_param in DummyInstrument. "
"DummyInstrument does not possess a parameter named obsolete_param.",
):
load_settings_onto_instrument(instr, tuid)
assert instr.get("IDN") == {
"vendor": None,
"model": "DummyInstrument",
"serial": None,
"firmware": None,
}
assert instr.get("settable_param") == 5
assert instr.get("gettable_param") == 20
assert instr.get("none_param") is None
assert instr.get("none_param_warning") == 1
assert not instr.get("boolean_param")
instr.close()
| 5,347,103 |
def dashboard_3_update_graphs(n_intervals):
"""Update all the graphs."""
figures = load_data_make_graphs()
main_page_layout = html.Div(children=[
html.Div(className='row', children=[
make_sub_plot(figures, LAYOUT_COLUMNS[0]),
make_sub_plot(figures, LAYOUT_COLUMNS[1]),
]),
html.Div(className='row', children=[
make_sub_plot(figures, LAYOUT_COLUMNS[2]),
make_sub_plot(figures, LAYOUT_COLUMNS[3]),
]),
])
return main_page_layout
| 5,347,104 |
def connect_base_layer(graph, membership2node_translator):
"""
    Currently builds the full list of permutations; may switch to a generator.
    For larger graphs, loop through the combinations with add_edges for a more memory-friendly use of the generator.
"""
# Fully connect lowest layer
for nodes in membership2node_translator[0].values():
graph.add_edges_from(list(itertools.permutations(nodes, 2)))
| 5,347,105 |
def fx(sl, dataset, roi_ids, results):
"""this requires the searchlight conditional attribute 'roi_feature_ids'
to be enabled"""
import numpy as np
from mvpa2.datasets import Dataset
resmap = None
probmap = None
for resblock in results:
for res in resblock:
if resmap is None:
# prepare the result container
resmap = np.zeros((len(res), dataset.nfeatures), dtype=res.samples.dtype)
observ_counter = np.zeros(dataset.nfeatures, dtype=int)
#project the result onto all features -- love broadcasting!
resmap[:, res.a.roi_feature_ids] += res.samples
# increment observation counter for all relevant features
observ_counter[res.a.roi_feature_ids] += 1
# when all results have been added up average them according to the number
# of observations
observ_mask = observ_counter > 0
resmap[:, observ_mask] /= observ_counter[observ_mask]
# transpose to make broadcasting work -- creates a view, so in-place
# modification still does the job
result_ds = Dataset(resmap,
fa={'observations': observ_counter})
if 'mapper' in dataset.a:
import copy
result_ds.a['mapper'] = copy.copy(dataset.a.mapper)
return result_ds
| 5,347,106 |
def check_file(file_name):
"""
test if file: exists and is writable or can be created
Args:
file_name (str): the file name
Returns:
(pathlib.Path): the path or None if problems
"""
if not file_name:
return None
path = pathlib.Path(file_name)
# if file exists test if writable
if path.exists() and path.is_file():
handle = None
try:
handle = open(path, 'w')
except PermissionError:
return None
finally:
if handle:
handle.close()
    # create the file with write permissions
try:
path.touch(stat.S_IWUSR)
except PermissionError:
return None
return path
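# A minimal usage sketch for check_file() above; the path is illustrative only and
# pathlib/stat are assumed to be imported, as the function itself requires.
result_path = check_file("/tmp/output.log")
if result_path is None:
    print("file is not writable and could not be created")
else:
    print(f"writing results to {result_path}")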
| 5,347,107 |
def insert_article(cur, article_id, article):
"""Inserts article into articles table."""
sql = 'INSERT IGNORE INTO articles VALUES(%s,%s,%s,%s,%s,%s,%s,%s)'
title = truncate_value(article['title'], CONSTANTS.max_title_length)
journal = truncate_value(article['journal'], CONSTANTS.max_journal_length)
license = truncate_value(article['license'], CONSTANTS.max_license_length)
data = [article_id, title, article['description'], article['doi'],
article['comments'], license, journal, article['datestamp']]
cur.execute(sql, data)
| 5,347,108 |
async def push(request):
"""Push handler. Authenticate, then return generator."""
if request.method != "POST":
return 405, {}, "Invalid request"
fingerprint = authenticate(request)
if not fingerprint:
return 403, {}, "Access denied"
# Get given file
payload = await request.get_body(100 * 2 ** 20) # 100 MiB limit
# Also validate it
if not validate_payload(request, payload):
return 403, {}, "Payload could not be verified."
# Return generator -> do a deploy while streaming feedback on status
gen = push_generator(fingerprint, payload)
return 200, {"content-type": "text/plain"}, gen
| 5,347,109 |
def getfile(url, outdir=None):
"""Function to fetch files using urllib
Works with ftp
"""
fn = os.path.split(url)[-1]
if outdir is not None:
fn = os.path.join(outdir, fn)
if not os.path.exists(fn):
#Find appropriate urlretrieve for Python 2 and 3
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
print("Retrieving: %s" % url)
#Add progress bar
urlretrieve(url, fn)
return fn
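# Hedged usage sketch for getfile(): the URL and output directory are placeholders.
# The file is downloaded only if it is not already present in outdir.
local_fn = getfile("ftp://example.com/pub/dem.tif", outdir="/tmp")
print(local_fn)  # e.g. /tmp/dem.tif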
| 5,347,110 |
def duplicatesby(iteratee, seq, *seqs):
"""
Like :func:`duplicates` except that an `iteratee` is used to modify each element in the
sequences. The modified values are then used for comparison.
Examples:
>>> list(duplicatesby('a', [{'a':1}, {'a':3}, {'a':2}, {'a':3}, {'a':1}]))
[{'a': 3}, {'a': 1}]
Args:
iteratee (object): Iteratee applied per iteration.
seq (Iterable): Iterable to check for duplicates
*seqs (Iterable): Other iterables to compare with.
    Yields:
        Each element from the combined sequences that appears more than once.
"""
if iteratee is not None:
iteratee = fnc.iteratee(iteratee)
seen = Container()
yielded = Container()
for item in itertools.chain(seq, *seqs):
if iteratee is not None:
value = iteratee(item)
else:
value = item
if value not in seen:
seen.add(value)
continue
if value not in yielded:
yield item
yielded.add(value)
| 5,347,111 |
def isroutine(object):
"""Return true if the object is any kind of function or method."""
return (isbuiltin(object)
or isfunction(object)
or ismethod(object)
or ismethoddescriptor(object))
| 5,347,112 |
def account_upload_avatar():
"""Admin Account Upload Avatar Action.
*for Ajax only.
Methods:
POST
Args:
files: [name: 'userfile']
Returns:
status: {success: true/false}
"""
if request.method == 'POST':
re_helper = ReHelper()
data = request.files['userfile']
filename = re_helper.r_slash(data.filename.encode('utf-8'))
helper = UpYunHelper()
url = helper.up_to_upyun('account', data, filename)
if url:
return json.dumps({'success': 'true', 'url': url})
else:
return json.dumps({'success': 'false'})
| 5,347,113 |
def get_front_end_url_expression(model_name, pk_expression, url_suffix=''):
"""
Gets an SQL expression that returns a front-end URL for an object.
:param model_name: key in settings.DATAHUB_FRONTEND_URL_PREFIXES
:param pk_expression: expression that resolves to the pk for the model
:param url_suffix: Optional: string appended to the end of the url
"""
return Concat(
Value(f'{settings.DATAHUB_FRONTEND_URL_PREFIXES[model_name]}/'),
pk_expression,
Value(url_suffix),
)
| 5,347,114 |
def mock_config_entry() -> MockConfigEntry:
"""Return the default mocked config entry."""
return MockConfigEntry(
title="KNX",
domain=KNX_DOMAIN,
data={
CONF_KNX_INDIVIDUAL_ADDRESS: XKNX.DEFAULT_ADDRESS,
ConnectionSchema.CONF_KNX_MCAST_GRP: DEFAULT_MCAST_GRP,
ConnectionSchema.CONF_KNX_MCAST_PORT: DEFAULT_MCAST_PORT,
CONF_KNX_CONNECTION_TYPE: CONF_KNX_AUTOMATIC,
},
)
| 5,347,115 |
def load_escores(name_dataset, classifier, folds):
"""Excluir xxxxxxx Return escore in fold. """
escores=[]
escores.append(load_dict_file("escores/"+name_dataset +"_"+classifier +"_escore_grid_train"+str(folds)))
return escores
for index in range(folds):
escores.append(load_dict_file("escores/"+name_dataset +"_"+classifier +"_escore_grid_train"+str(index)))
return escores
| 5,347,116 |
def get_devices():
""" will also get devices ready
    :return: a list of available device names, e.g., emulator-5556
"""
ret = []
p = sub.Popen(settings.ADB + ' devices', stdout=sub.PIPE, stderr=sub.PIPE, shell=True)
    output, errors = p.communicate()
    output = output.decode()
    print(output)
    segs = output.split("\n")
for seg in segs:
device = seg.split("\t")[0].strip()
if seg.startswith("emulator-"):
p = sub.Popen(settings.ADB + ' -s ' + device + ' shell getprop init.svc.bootanim', stdout=sub.PIPE, stderr=sub.PIPE, shell=True)
            output, errors = p.communicate()
            if output.decode().strip() != "stopped":
                time.sleep(10)
                print("waiting for the emulator:", device)
return get_devices()
else:
ret.append(device)
assert len(ret) > 0
return ret
| 5,347,117 |
def _proc_event_full(st, **kwargs):
"""
    Processing steps: instrument response removal, spectral whitening, temporal
    normalization, autocorrelation and filtering; results are then written out.
    :param st: input stream
    :param kwargs: processing options (must include "njobs")
    :return:
"""
# instrument response removal, spectral whitening, temporal normalization
# autocorrelation and filter, then output results.
def iter3c(stream):
# for an event, there is always "onset"
return IterMultipleComponents(stream, key="onset", number_components=(2, 3))
# resp removal, rotation, spectral whitening, temporal normalization
tasks = iter3c(st)
# loop over streams in each stream containing the 3-component traces
do_work = partial(_proc_event_rst, **kwargs)
njobs = kwargs["njobs"]
numbers = []
logging.info("deep processing for full event correlogram.")
print("deep processing for full event correlogram.")
if njobs == 1:
logging.info('do work sequential (%d cores)', njobs)
for task in tqdm(tasks, total=len(tasks)):
num = do_work(task)
numbers.append(num)
else:
logging.info('do work parallel (%d cores)', njobs)
pool = multiprocessing.Pool(njobs)
for num in tqdm(pool.imap_unordered(do_work, tasks), total=len(tasks)):
numbers.append(num)
pool.close()
pool.join()
logging.info("%d/%d files processed.", sum(numbers), len(tasks))
| 5,347,118 |
def optional_is_none(context, builder, sig, args):
"""Check if an Optional value is invalid
"""
[lty, rty] = sig.args
[lval, rval] = args
# Make sure None is on the right
if lty == types.none:
lty, rty = rty, lty
lval, rval = rval, lval
opt_type = lty
opt_val = lval
del lty, rty, lval, rval
opt = context.make_optional(opt_type)(context, builder, opt_val)
res = builder.not_(cgutils.as_bool_bit(builder, opt.valid))
return impl_ret_untracked(context, builder, sig.return_type, res)
| 5,347,119 |
def get_volume_disk_capacity(pod_name, namespace, volume_name):
"""
Find the container in the specified pod that has a volume named
    `volume_name` and run `df -Pk` or `du -sb` in that container to determine
the available space in the volume.
"""
api = get_api("v1", "Pod")
res = api.get(name=pod_name, namespace=namespace)
if res.kind == "PodList":
# make sure there is only one pod with the requested name
if len(res.items) == 1:
pod = res.items[0]
else:
return {}
else:
pod = res
containers = pod.spec.get("initContainers", []) + pod.spec.get("containers", [])
for container in containers:
for volume_mount in container.get("volumeMounts", []):
if volume_mount.get("name") == volume_name:
mount_path = volume_mount.get("mountPath")
volume = list(filter(lambda x: x.name == volume_name, pod.spec.volumes))
volume = volume[0] if len(volume) == 1 else {}
if (
"emptyDir" in volume.keys()
and volume["emptyDir"].get("sizeLimit") is not None
):
# empty dir is used for the session
command = ["sh", "-c", f"du -sb {mount_path}"]
used_bytes = parse_du_command(
pod_exec(
pod_name,
namespace,
container.name,
command,
)
)
total_bytes = convert_to_bytes(volume["emptyDir"]["sizeLimit"])
available_bytes = (
0 if total_bytes - used_bytes < 0 else total_bytes - used_bytes
)
return {
"total_bytes": total_bytes,
"used_bytes": used_bytes,
"available_bytes": available_bytes,
}
else:
# PVC is used for the session
command = ["sh", "-c", f"df -Pk {mount_path}"]
try:
disk_cap_raw = pod_exec(
pod_name,
namespace,
container.name,
command,
)
except ApiException:
disk_cap_raw = ""
logging.warning(
f"Checking disk capacity failed with {pod_name}, "
f"{namespace}, {container.name}, {command}."
)
else:
logging.info(
f"Checking disk capacity succeeded with {pod_name}, "
f"{namespace}, {container.name}, {command}."
)
disk_cap = parse_df_command(disk_cap_raw)
                    # make sure `df -Pk` returned the results from only one mount point
if len(disk_cap) == 1:
return disk_cap[0]
return {}
| 5,347,120 |
def is_valid_battlefy_id(battlefy_id: str) -> bool:
"""
    Verify a str is a Battlefy Id (20 <= length < 30) and is hexadecimal.
:param battlefy_id:
:return: Validity true/false
"""
    return 20 <= len(battlefy_id) < 30 and bool(re.match("^[A-Fa-f0-9]*$", battlefy_id))
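# Usage sketch for is_valid_battlefy_id(); the first id is a made-up 24-character
# hexadecimal string, the typical shape of a Battlefy/MongoDB-style object id.
assert is_valid_battlefy_id("5e8f1a2b3c4d5e6f7a8b9c0d")
assert not is_valid_battlefy_id("not-a-battlefy-id")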
| 5,347,121 |
def update_embedded_documents_using_multiple_field_matches():
"""当query有关多个field时, 使用$elemMatch
"""
_id = 5
col.insert({
"_id": _id,
"grades": [
{"grade": 80, "mean": 75, "std": 8},
{"grade": 85, "mean": 90, "std": 5},
{"grade": 90, "mean": 85, "std": 3},
{"grade": 85, "mean": 81, "std": 9},
]
})
col.update(
{
"_id": _id,
"grades": {"$elemMatch": {"grade": {"$lte": 90}, "mean": {"$gt": 80}}},
},
{"$set": {"grades.$.std": 6}},
)
doc = col.find_one({"_id": _id})
assert doc["grades"][1]["std"] == 6
# The fourth doc (second match) is not updated
assert doc["grades"][3]["std"] == 9
| 5,347,122 |
def echo(word:str, n:int, toupper:bool=False) -> str:
"""
Repeat a given word some number of times.
:param word: word to repeat
:type word: str
:param n: number of repeats
:type n: int
:param toupper: return in all caps?
:type toupper: bool
:return: result
    :rtype: str
"""
res=word*n
if (toupper):
res=res.upper()
return res
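# Small usage sketch for echo().
assert echo("ab", 3) == "ababab"
assert echo("ab", 2, toupper=True) == "ABAB"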
| 5,347,123 |
def verify_package_info(package_info):
"""Check if package_info points to a valid package dir (i.e. contains
at least an osg/ dir or an upstream/ dir).
"""
url = package_info['canon_url']
rev = package_info['revision']
command = ["svn", "ls", url, "-r", rev]
out, err = utils.sbacktick(command, clocale=True, err2out=True)
if err:
raise SVNError("Exit code %d getting SVN listing of %s (rev %s). Output:\n%s" % (err, url, rev, out))
for line in out.split("\n"):
if line.startswith('osg/') or line.startswith('upstream/'):
return True
return False
| 5,347,124 |
def test_custom_taper():
"""Test setting custom taper."""
test_win = windows.blackman
dspec = DelaySpectrum(taper=test_win)
assert test_win == dspec.taper
| 5,347,125 |
def add(a,b):
""" This function adds two numbers together """
return a+b
| 5,347,126 |
def decode_ADCP(data):
"""
Decodes ADCP data read in over UDP. Returns two lists: header and current.
input: Raw data string from ADCP UDP stream
Output:
header: [timestamp, nCells, nBeams, pressure]
- timestamp in unix format
- nBeams x nCells gives dimensions of current data
- pressure is hydrostatic pressure in dBar
current: nBeams x nCells current values in m/s
"""
data = data.decode("utf-8")
if data.endswith('ZZZZ') and data.startswith('AAAA'):
data = data.split(' ')
timestamp = float(data[1]) + float(data[2])/1000
nCells = int(data[3])
nBeams = int(data[4])
pressure = int(data[5])
current = np.array(list(map(float, list(data[6:-2]))))/1000
current = np.resize(current, (nBeams, nCells)).round(3)
header = [timestamp, nCells, nBeams, pressure]
else:
header = []
current = []
return current, header
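# Hedged usage sketch for decode_ADCP(). The exact datagram layout (in particular the
# single trailing field before the 'ZZZZ' terminator, implied by the data[6:-2] slice)
# is an assumption; current values arrive in mm/s, so 100 decodes to 0.1 m/s.
sample_datagram = "AAAA 1617000000 250 3 2 12 100 200 300 400 500 600 0 ZZZZ"
current, header = decode_ADCP(sample_datagram.encode("utf-8"))
# header == [1617000000.25, 3, 2, 12] and current has shape (nBeams, nCells) == (2, 3)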
| 5,347,127 |
def RowToModelInput(row, kind):
"""
This converts a patient row into inputs for the SVR.
In this model we use RNAseq values as inputs.
"""
SampleID = row[TissueSampleRow(kind)]
TrueSampleIDs = [r for r in TissueSamples.columns
if r.startswith(SampleID)]
if not TrueSampleIDs:
return None
TrueSampleID = TrueSampleIDs[0]
assert len(TrueSampleIDs) <= 1
try:
sample = TissueSamples[[TrueSampleID]]
Masked = sample[GeneMask[kind]]
return Masked.values.reshape(-1,)
except KeyError as e:
print("Key error: %s" % SampleID)
return None
| 5,347,128 |
def foldlM(f, b, ta):
"""``foldrM :: (Foldable t, Monad m) => (a -> b -> m b) -> b -> t a -> m b``
Monadic fold over the elements of a structure, associating to the right,
i.e. from right to left.
"""
raise NotImplementedError()
| 5,347,129 |
def update_video(secret, site_id, media_id, body):
"""
Function which allows you to update a video
:param secret: <string> Secret value for your JWPlatform API key
:param site_id: <string> ID of a JWPlatform site
:param media_id: <string> Video's object ID. Can be found within JWPlayer Dashboard.
    :param body: <dict> Fields to update, conforming to standards found @ https://developer.jwplayer.com/jwplayer/reference#patch_v2-sites-site-id-media-media-id-
:return:
"""
# Setup API client
jwplatform_client = jwplatform.client.JWPlatformClient(secret)
logging.info("Updating Video")
try:
response = jwplatform_client.Media.update(site_id=site_id, media_id=media_id, body=body)
except jwplatform.errors.APIError as e:
logging.error("Encountered an error updating the video\n{}".format(e))
sys.exit(str(e))
logging.info(response.json_body)
| 5,347,130 |
def test_should_setup_method_returns_true_if_container_does_not_exist(mock_docker, setup_course_environ):
"""
Does the should_setup method return True when the container not was found?
"""
course = Course(org='org1', course_id='example', domain='example.com')
def _container_not_exists(name):
raise NotFound(f'container: {name} not exists')
mock_docker.get.side_effect = lambda name: _container_not_exists(name)
assert course.should_setup() is True
| 5,347,131 |
def str_to_timedelta(td_str):
"""Parses a human-readable time delta string to a timedelta"""
if "d" in td_str:
day_str, time_str = td_str.split("d", 1)
d = int(day_str.strip())
else:
time_str = td_str
d = 0
time_str = time_str.strip()
if not time_str:
return datetime.timedelta(days=d)
colon_count = time_str.count(":")
if (not colon_count) or colon_count > 2:
raise ValueError("Time format [dd d] hh:mm[:ss] or dd d")
elif colon_count == 1:
h_str, m_str = time_str.split(":", 1)
h, m, s = int(h_str.strip()), int(m_str.strip()), 0
elif colon_count == 2:
h_str, m_str, s_str = time_str.split(":", 2)
h, m, s = int(h_str.strip()), int(m_str.strip()), int(s_str.strip())
return tuple_to_timedelta((d, h, m, s))
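# Usage sketch for str_to_timedelta(); tuple_to_timedelta is an assumed helper that
# turns a (days, hours, minutes, seconds) tuple into a datetime.timedelta.
print(str_to_timedelta("2 d 03:15"))   # 2 days, 3:15:00
print(str_to_timedelta("10:30:45"))    # 10:30:45
print(str_to_timedelta("5 d"))         # 5 days, 0:00:00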
| 5,347,132 |
def check_deletion_key_pair(key_filter: List[Dict], aws_region: str) -> None:
"""Verify Deletion of a key pair by filter."""
ec2_resource = boto3.resource("ec2", region_name=aws_region)
key_pairs_list = ec2_resource.key_pairs.filter(**key_filter)
assert not list(key_pairs_list)
| 5,347,133 |
def filter_safe_enter(s):
"""正文 换行替换"""
return '<p>' + cgi.escape(s).replace("\n", "</p><p>") + '</p>'
| 5,347,134 |
def fig2png(filename, title, rat, begin, end):
"""
Args:
filename:
title:
rat:
begin:
end:
"""
    raise NotImplementedError
matfile = loadmat(filename, squeeze_me=True, struct_as_record=False)
print(matfile)
data = np.array(matfile['LMG_muscle']).T
for i, k in enumerate(data):
plt.plot(np.arange(len(k)) * 0.25, k + i)
plt.show()
raise Exception
ax1 = d['hgS_070000'].children
if np.size(ax1) > 1:
ax1 = ax1[0]
plt.figure(figsize=(16, 9))
yticks = []
plt.suptitle(f"{title} [{begin} : {end}] \n {rat}")
y_data = []
proper_index = 0
for i, line in enumerate(ax1.children, 1):
if line.type == 'graph2d.lineseries':
if begin <= i <= end:
x = line.properties.XData
y = line.properties.YData #- 3 * proper_index
y_data += list(y)
proper_index += 1
plt.plot(x, y)
if line.type == 'text':
break
plt.plot()
Y = np.array(y_data)
centered = center_data_by_line(Y)
normalized_data = normalization(centered, save_centering=True)
slice_length = int(1000 / 40 / 0.1)
slice_number = len(normalized_data) // slice_length
sliced = np.split(normalized_data, slice_number)
with open(f"{fold}/{new_filename}", 'w') as file:
for d in sliced:
file.write(" ".join(map(str, d)) + "\n")
return
raise Exception
# plt.xlim(ax1.properties.XLim)
plt.yticks(yticks, range(1, len(yticks) + 1))
folder = "/home/alex/bio_data_png"
# title_for_file = '_'.join(title.split())
plt.tight_layout()
plt.show()
#plt.savefig(f"{folder}/{title_for_file}_{rat.replace('.fig', '')}.png", format="png", dpi=200)
plt.close()
| 5,347,135 |
def _full_rank(X, cmax=1e15):
"""
This function possibly adds a scalar matrix to X
to guarantee that the condition number is smaller than a given threshold.
Parameters
----------
    X: array of shape (nrows, ncols)
    cmax: float, optional (default 1e15), tolerance for the condition number
    Returns
    -------
    X: array of shape (nrows, ncols) after regularization
    c: float, the resulting condition number (at most cmax)
"""
U, s, V = np.linalg.svd(X, 0)
smax, smin = s.max(), s.min()
c = smax / smin
if c < cmax:
return X, c
warn('Matrix is singular at working precision, regularizing...')
lda = (smax - cmax * smin) / (cmax - 1)
s = s + lda
X = np.dot(U, np.dot(np.diag(s), V))
return X, cmax
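# A minimal sketch of _full_rank() on an ill-conditioned matrix, assuming numpy as np
# and warnings.warn are imported as the function itself requires. With a small cmax the
# condition number exceeds the threshold, so the singular values get shifted.
example_X = np.array([[1.0, 1.0],
                      [1.0, 1.0 + 1e-8]])
X_reg, c = _full_rank(example_X, cmax=1e6)
print(c)  # 1e6 here, because regularization was applied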
| 5,347,136 |
def create(
session: Session,
instance: Instance,
name: str,
description: Optional[str] = None,
external_id: Optional[str] = None,
unified_dataset_name: Optional[str] = None,
) -> Project:
"""Create a Mastering project in Tamr.
Args:
instance: Tamr instance
name: Project name
description: Project description
external_id: External ID of the project
        unified_dataset_name: Unified dataset name. If None, will be set to project name + '_unified_dataset'
Returns:
Project created in Tamr
Raises:
project.AlreadyExists: If a project with these specifications already exists.
requests.HTTPError: If any other HTTP error is encountered.
"""
return project._create(
session=session,
instance=instance,
name=name,
project_type="DEDUP",
description=description,
external_id=external_id,
unified_dataset_name=unified_dataset_name,
)
| 5,347,137 |
def main(argv):
"""
Solr Core loader
:param list argv: the list elements should be:
        [1]: stats results parquet path
        [2]: ontology parquet path
        [3]: output path
"""
stats_results_parquet_path = argv[1]
ontology_parquet_path = argv[2]
output_path = argv[3]
spark = SparkSession.builder.getOrCreate()
stats_results_df = spark.read.parquet(stats_results_parquet_path)
ontology_df = spark.read.parquet(ontology_parquet_path)
genotype_phenotype_df = stats_results_df.where(col("significant") == True).select(
GENOTYPE_PHENOTYPE_COLUMNS + STATS_RESULTS_COLUMNS
)
genotype_phenotype_df = genotype_phenotype_df.withColumn(
"mp_term", explode_outer("full_mp_term")
)
genotype_phenotype_df = genotype_phenotype_df.withColumn("sex", col("mp_term.sex"))
genotype_phenotype_df = genotype_phenotype_df.withColumn(
"mp_term_id", col("mp_term.term_id")
)
genotype_phenotype_df = genotype_phenotype_df.withColumn(
"mp_term_id", regexp_replace("mp_term_id", " ", "")
)
for bad_mp in BAD_MP_MAP.keys():
genotype_phenotype_df = genotype_phenotype_df.withColumn(
"mp_term_id",
when(col("mp_term_id") == bad_mp, lit(BAD_MP_MAP[bad_mp])).otherwise(
col("mp_term_id")
),
)
genotype_phenotype_df = genotype_phenotype_df.join(
ontology_df, col("mp_term_id") == col("id"), "left_outer"
)
for column_name, ontology_column in ONTOLOGY_STATS_MAP.items():
genotype_phenotype_df = genotype_phenotype_df.withColumn(
f"{column_name}", col(ontology_column)
)
genotype_phenotype_df = genotype_phenotype_df.withColumn(
"p_value",
when(
col("statistical_method").isin(["Manual", "Supplied as data"]),
col("p_value"),
)
.when(
col("statistical_method").contains("Reference Range Plus"),
when(
col("sex") == "male",
least(
col("male_pvalue_low_vs_normal_high"),
col("male_pvalue_low_normal_vs_high"),
),
)
.when(
col("sex") == "female",
least(
col("female_pvalue_low_vs_normal_high"),
col("female_pvalue_low_normal_vs_high"),
),
)
.otherwise(
least(
"genotype_pvalue_low_normal_vs_high",
"genotype_pvalue_low_vs_normal_high",
)
),
)
.otherwise(
when(col("sex") == "male", col("male_ko_effect_p_value"))
.when(col("sex") == "female", col("female_ko_effect_p_value"))
.otherwise(
when(
col("statistical_method").contains("Fisher Exact Test framework"),
col("p_value"),
).otherwise(col("genotype_effect_p_value"))
)
),
)
genotype_phenotype_df = genotype_phenotype_df.withColumn(
"effect_size",
when(col("statistical_method").isin(["Manual", "Supplied as data"]), lit(1.0))
.when(
~col("statistical_method").contains("Reference Range Plus"),
when(col("sex") == "male", col("male_effect_size"))
.when(col("sex") == "female", col("female_effect_size"))
.otherwise(col("effect_size")),
)
.otherwise(
when(
col("sex") == "male",
when(
col("male_effect_size_low_vs_normal_high")
<= col("male_effect_size_low_normal_vs_high"),
col("genotype_effect_size_low_vs_normal_high"),
).otherwise(col("genotype_effect_size_low_normal_vs_high")),
)
.when(
col("sex") == "female",
when(
col("female_effect_size_low_vs_normal_high")
<= col("female_effect_size_low_normal_vs_high"),
col("genotype_effect_size_low_vs_normal_high"),
).otherwise(col("genotype_effect_size_low_normal_vs_high")),
)
.otherwise(col("effect_size"))
),
)
genotype_phenotype_df = genotype_phenotype_df.withColumn(
"percentage_change",
when(col("sex") == "male", col("male_percentage_change"))
.when(col("sex") == "female", col("female_percentage_change"))
.otherwise(col("percentage_change")),
)
genotype_phenotype_df = genotype_phenotype_df.withColumn(
"assertion_type_id",
when(
col("statistical_method").isin(["Manual", "Supplied as data"]),
lit("ECO:0000218"),
).otherwise(lit("ECO:0000203")),
)
genotype_phenotype_df = genotype_phenotype_df.withColumn(
"assertion_type",
when(
col("statistical_method").isin(["Manual", "Supplied as data"]),
lit("manual"),
).otherwise(lit("automatic")),
)
genotype_phenotype_df = genotype_phenotype_df.select(
GENOTYPE_PHENOTYPE_COLUMNS
+ list(ONTOLOGY_STATS_MAP.keys())
+ ["assertion_type_id", "assertion_type"]
)
genotype_phenotype_df = genotype_phenotype_df.withColumn(
"doc_id", monotonically_increasing_id().astype(StringType())
)
ontology_field_prefixes = ["mpath_", "anatomy_"]
for prefix in ontology_field_prefixes:
for col_name in genotype_phenotype_df.columns:
if prefix in col_name:
genotype_phenotype_df = genotype_phenotype_df.withColumn(
col_name,
when(
col(col_name).isNotNull(), col(col_name.replace(prefix, "mp_"))
).otherwise(col(col_name)),
)
genotype_phenotype_df.distinct().write.parquet(output_path)
| 5,347,138 |
def users_with_pending_lab(connection, **kwargs):
"""Define comma seperated emails in scope
if you want to work on a subset of all the results"""
check = CheckResult(connection, 'users_with_pending_lab')
# add random wait
wait = round(random.uniform(0.1, random_wait), 1)
time.sleep(wait)
check.action = 'finalize_user_pending_labs'
check.full_output = []
check.status = 'PASS'
cached_items = {} # store labs/PIs for performance
mismatch_users = []
# do not look for deleted/replaced users
scope = kwargs.get('scope')
search_q = '/search/?type=User&pending_lab!=No+value&frame=object'
# want to see all results or a subset defined by the scope
if scope == 'all':
pass
else:
emails = [mail.strip() for mail in scope.split(',')]
for an_email in emails:
search_q += '&email=' + an_email
search_res = ff_utils.search_metadata(search_q, key=connection.ff_keys)
for res in search_res:
user_fields = ['uuid', 'email', 'pending_lab', 'lab', 'title', 'job_title']
user_append = {k: res.get(k) for k in user_fields}
check.full_output.append(user_append)
# Fail if we have a pending lab and lab that do not match
if user_append['lab'] and user_append['pending_lab'] != user_append['lab']:
check.status = 'FAIL'
mismatch_users.append(user_append['uuid'])
continue
# cache the lab and PI contact info
if user_append['pending_lab'] not in cached_items:
to_cache = {}
pending_meta = ff_utils.get_metadata(user_append['pending_lab'], key=connection.ff_keys,
add_on='frame=object')
to_cache['lab_title'] = pending_meta['display_title']
if 'pi' in pending_meta:
pi_meta = ff_utils.get_metadata(pending_meta['pi'], key=connection.ff_keys,
add_on='frame=object')
to_cache['lab_PI_email'] = pi_meta['email']
to_cache['lab_PI_title'] = pi_meta['title']
to_cache['lab_PI_viewing_groups'] = pi_meta['viewing_groups']
cached_items[user_append['pending_lab']] = to_cache
# now use the cache to fill fields
for lab_field in ['lab_title', 'lab_PI_email', 'lab_PI_title', 'lab_PI_viewing_groups']:
user_append[lab_field] = cached_items[user_append['pending_lab']].get(lab_field)
if check.full_output:
check.summary = 'Users found with pending_lab.'
if check.status == 'PASS':
check.status = 'WARN'
check.description = check.summary + ' Run the action to add lab and remove pending_lab'
check.allow_action = True
check.action_message = 'Will attempt to patch lab and remove pending_lab for %s users' % len(check.full_output)
if check.status == 'FAIL':
check.summary += '. Mismatches found for pending_lab and existing lab'
check.description = check.summary + '. Resolve conflicts for mismatching users before running action. See brief_output'
check.brief_output = mismatch_users
else:
check.summary = 'No users found with pending_lab'
return check
| 5,347,139 |
def test_check_family_naming_recommendations():
""" Font follows the family naming recommendations ? """
from fontbakery.profiles.name import com_google_fonts_check_family_naming_recommendations as check
# Our reference Mada Medium is known to be good
ttFont = TTFont(TEST_FILE("mada/Mada-Medium.ttf"))
# So it must PASS the check:
assert_PASS(check(ttFont),
'with a good font...')
# We'll test rule violations in all entries one-by-one
for index, name in enumerate(ttFont["name"].names):
# and we'll test all INFO/PASS code-paths for each of the rules:
def name_test(value, expected, keyword=None):
assert_name_table_check_result(ttFont, index, name, check, value, expected, keyword) #pylint: disable=cell-var-from-loop
if name.nameID == NameID.POSTSCRIPT_NAME:
print ("== NameID.POST_SCRIPT_NAME ==")
print ("Test INFO: May contain only a-zA-Z0-9 characters and an hyphen...")
# The '@' and '!' chars here are the expected rule violations:
name_test("B@zinga!", INFO, "bad-entries")
print ("Test PASS: A name with a single hyphen is OK...")
            # A single hyphen in the name is OK:
name_test("Big-Bang", PASS)
print ("Test INFO: May not contain more than a single hyphen...")
# The second hyphen char here is the expected rule violation:
name_test("Big-Bang-Theory", INFO, "bad-entries")
print ("Test INFO: Exceeds max length (63)...")
name_test("A"*64, INFO, "bad-entries")
print ("Test PASS: Does not exceed max length...")
name_test("A"*63, PASS)
elif name.nameID == NameID.FULL_FONT_NAME:
print ("== NameID.FULL_FONT_NAME ==")
print ("Test INFO: Exceeds max length (63)...")
name_test("A"*64, INFO, "bad-entries")
print ("Test PASS: Does not exceeds max length...")
name_test("A"*63, PASS)
elif name.nameID == NameID.FONT_FAMILY_NAME:
print ("== NameID.FONT_FAMILY_NAME ==")
print ("Test INFO: Exceeds max length (31)...")
name_test("A"*32, INFO, "bad-entries")
print ("Test PASS: Does not exceeds max length...")
name_test("A"*31, PASS)
elif name.nameID == NameID.FONT_SUBFAMILY_NAME:
print ("== NameID.FONT_SUBFAMILY_NAME ==")
print ("Test INFO: Exceeds max length (31)...")
name_test("A"*32, INFO, "bad-entries")
print ("Test PASS: Does not exceeds max length...")
name_test("A"*31, PASS)
elif name.nameID == NameID.TYPOGRAPHIC_FAMILY_NAME:
print ("== NameID.TYPOGRAPHIC_FAMILY_NAME ==")
print ("Test INFO: Exceeds max length (31)...")
name_test("A"*32, INFO, "bad-entries")
print ("Test PASS: Does not exceeds max length...")
name_test("A"*31, PASS)
elif name.nameID == NameID.TYPOGRAPHIC_SUBFAMILY_NAME:
print ("== NameID.FONT_TYPOGRAPHIC_SUBFAMILY_NAME ==")
print ("Test INFO: Exceeds max length (31)...")
name_test("A"*32, INFO, "bad-entries")
print ("Test PASS: Does not exceeds max length...")
name_test("A"*31, PASS)
| 5,347,140 |
def hpat_pandas_series_lt(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.lt` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method lt().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if level is not None or fill_value is not None or axis != 0:
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_lt_impl(self, other):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data < other._data)
return hpat_pandas_series_lt_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_lt_impl(self, other):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data < other)
return hpat_pandas_series_lt_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
| 5,347,141 |
def convert_kv(
key: str,
val: str | numbers.Number,
attr_type: bool,
attr: dict[str, Any] = {},
cdata: bool = False,
) -> str:
"""Converts a number or string into an XML element"""
if DEBUGMODE: # pragma: no cover
LOG.info(
f'Inside convert_kv(): key="{str(key)}", val="{str(val)}", type(val) is: "{type(val).__name__}"'
)
key, attr = make_valid_xml_name(key, attr)
if attr_type:
attr["type"] = get_xml_type(val)
attrstring = make_attrstring(attr)
return f"<{key}{attrstring}>{wrap_cdata(val) if cdata else escape_xml(val)}</{key}>"
| 5,347,142 |
def getBanner(host, port):
"""
Connects to host:port and returns the banner.
"""
try:
s = socket.socket()
s.connect((host, port))
        banner = s.recv(1024)
        return banner.decode(errors='replace').strip()
    except Exception as e:
        error(str(host) + ':' + str(port) + ' ' + str(e))
| 5,347,143 |
def create_cmd_table(table_data: List[List[str]], width: int = 15) -> BorderedTable:
"""Create a bordered table for cmd2 output.
Args:
table_data: list of lists with the string data to display
width: integer width of the columns. Default is 15 which generally works for ~4 columns
Returns:
BorderedTable: generated table for printing
"""
columns = table_data[0]
auto_column = partial(Column, width=width)
bt = BorderedTable([*map(auto_column, columns)])
rows = table_data[1:]
return bt.generate_table(rows)
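# Usage sketch for create_cmd_table(); the rows are invented and cmd2's table classes
# (BorderedTable, Column) plus functools.partial are assumed to be imported as above.
sample_data = [
    ["host", "port", "status"],     # header row
    ["wally113", "22", "open"],
    ["wally117", "80", "closed"],
]
print(create_cmd_table(sample_data, width=12))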
| 5,347,144 |
def main():
"""Apply different types of convolutions to an image.
"""
# construct the argument parse and parse the arguments
args = argparse.ArgumentParser()
args.add_argument("-i", "--image", required=True, help="path to the input image")
args = vars(args.parse_args())
# construct average blurring kernels used to smooth an image
small_blur = np.ones((7, 7), dtype="float") * (1.0 / (7 * 7))
large_blur = np.ones((21, 21), dtype="float") * (1.0 / (21 * 21))
# construct a sharpening filter
sharpen = np.array(([0, -1, 0], [-1, 5, -1], [0, -1, 0]), dtype="int")
# construct the Laplacian kernel used to detect edge-like regions of an image
laplacian = np.array(([0, 1, 0], [1, -4, 1], [0, 1, 0]), dtype="int")
# construct the Sobel x-axis kernel
sobel_x = np.array(([-1, 0, 1], [-2, 0, 2], [-1, 0, 1]), dtype="int")
# construct the Sobel y-axis kernel
sobel_y = np.array(([-1, -2, -1], [0, 0, 0], [1, 2, 1]), dtype="int")
# construct an emboss kernel
emboss = np.array(([-2, -1, 0], [-1, 1, 1], [0, 1, 2]), dtype="int")
# construct the kernel bank, a list of kernels we're going to apply using
# both our custom `convolve` function and OpenCV's `filter2D` function
kernel_bank = (
("small_blur", small_blur),
("large_blur", large_blur),
("sharpen", sharpen),
("laplacian", laplacian),
("sobel_x", sobel_x),
("sobel_y", sobel_y),
("emboss", emboss),
)
# load the input image and convert it to grayscale
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# loop over the kernels
for (kernel_name, kernel) in kernel_bank:
# apply the kernel to the grayscale image using both our custom
# `convolve` function and OpenCV's `filter2D` function
print("[INFO] applying {} kernel".format(kernel_name))
convolve_output = convolve(gray, kernel)
opencv_output = cv2.filter2D(gray, -1, kernel)
# show the output images
cv2.imshow("Original", gray)
cv2.imshow("{} - convolve".format(kernel_name), convolve_output)
cv2.imshow("{} - opencv".format(kernel_name), opencv_output)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 5,347,145 |
def gen_log_files():
"""Generate log files"""
for root, _, files in os.walk(DEFAULT_LOG_DIR):
for name in files:
if is_pref_file(name):
yield os.path.join(root, name)
| 5,347,146 |
def get_parser(dataset_name):
"""Returns a csv line parser function for the given dataset."""
def inat_parser(line, is_train=True):
if is_train:
user_id, image_id, class_id, _ = line
return user_id, image_id, class_id
else:
image_id, class_id, _ = line
return image_id, class_id
def landmarks_parser(line, is_train=True):
if is_train:
user_id, image_id, class_id = line
return user_id, image_id, class_id
else:
image_id, class_id = line
return image_id, class_id
parsers = {
'inat': inat_parser,
'landmarks': landmarks_parser,
'cifar': landmarks_parser # landmarks and cifar uses the same parser.
}
return parsers[dataset_name]
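# Usage sketch for get_parser(): pick a dataset-specific parser and apply it to one
# CSV row; the row values below are made up for illustration.
parse = get_parser("landmarks")
user_id, image_id, class_id = parse(["user_42", "img_001", "7"], is_train=True)
image_id, class_id = parse(["img_001", "7"], is_train=False)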
| 5,347,147 |
def read_all_bdds(project_path):
"""
Read all feature files from project
:param project_path: base path of the project
:return: Array of Feature objects
"""
features = []
for root, _, files in os.walk(project_path + '/features/'):
for file in files:
if file.endswith(".feature"):
file_path = os.path.join(root, file)
feature = read_feature(file_path)
features.append(feature)
return features
| 5,347,148 |
def one_sentence_to_ids(sentence, sentence_length=SENTENCE_LENGTH):
"""Convert one sentence to a list of word IDs."
Crop or pad to 0 the sentences to ensure equal length if necessary.
Words without ID are assigned ID 1.
>>> one_sentence_to_ids(['my','first','sentence'], 2)
([11095, 121], 2)
>>> one_sentence_to_ids(['my','ssecond','sentence'], 2)
([11095, 1], 2)
>>> one_sentence_to_ids(['yes'], 2)
([21402, 0], 1)
"""
vectordb = sqlite3.connect(DB)
c = vectordb.cursor()
word_ids = []
for w in sentence:
if len(word_ids) >= sentence_length:
break
c.execute("""SELECT word_index, word
FROM vectors
INDEXED BY word_idx
WHERE word=?""", (w, ))
r = c.fetchall()
if len(r) > 0:
word_ids.append(r[0][0])
else:
word_ids.append(1)
# Pad with zeros if necessary
num_words = len(word_ids)
if num_words < sentence_length:
word_ids += [0]*(sentence_length-num_words)
vectordb.close()
return word_ids, num_words
| 5,347,149 |
def main() -> NoReturn:
"""
Command-line processor. See ``--help`` for details.
"""
logging.basicConfig()
log.setLevel(logging.DEBUG)
parser = argparse.ArgumentParser(
description="Send an e-mail from the command line.")
parser.add_argument("sender", action="store",
help="Sender's e-mail address")
parser.add_argument("host", action="store",
help="SMTP server hostname")
parser.add_argument("user", action="store",
help="SMTP username")
parser.add_argument("password", action="store",
help="SMTP password")
parser.add_argument("recipient", action="append",
help="Recipient e-mail address(es)")
parser.add_argument("subject", action="store",
help="Message subject")
parser.add_argument("body", action="store",
help="Message body")
parser.add_argument("--attach", nargs="*",
help="Filename(s) to attach")
parser.add_argument("--tls", action="store_false",
help="Use TLS connection security")
parser.add_argument("--verbose", action="store_true",
help="Be verbose")
parser.add_argument("-h --help", action="help",
help="Prints this help")
args = parser.parse_args()
(result, msg) = send_email(
from_addr=args.sender,
to=args.recipient,
subject=args.subject,
body=args.body,
host=args.host,
user=args.user,
password=args.password,
use_tls=args.tls,
attachment_filenames=args.attach,
verbose=args.verbose,
)
if result:
log.info("Success")
else:
log.info("Failure")
# log.error(msg)
sys.exit(0 if result else 1)
| 5,347,150 |
def full_class_name(class_):
"""
Returns the absolute name of a class, with all nesting namespaces included
"""
return '::'.join(get_scope(class_) + [class_.name])
| 5,347,151 |
def mul_pdf(mean1, var1, mean2, var2):
"""
Multiply Gaussian (mean1, var1) with (mean2, var2) and return the
results as a tuple (mean, var, scale_factor).
Strictly speaking the product of two Gaussian PDFs is a Gaussian
function, not Gaussian PDF. It is, however, proportional to a Gaussian
PDF. `scale_factor` provides this proportionality constant
Parameters
----------
mean1 : scalar
mean of first Gaussian
var1 : scalar
variance of first Gaussian
mean2 : scalar
mean of second Gaussian
var2 : scalar
variance of second Gaussian
Returns
-------
mean : scalar
mean of product
var : scalar
variance of product
scale_factor : scalar
proportionality constant
Examples
--------
    >>> mean, var, scale = mul_pdf(1, 2, 3, 4)
    >>> (mean, var)
    (1.6666666666666667, 1.3333333333333333)
References
----------
Bromily. "Products and Convolutions of Gaussian Probability Functions",
Tina Memo No. 2003-003.
http://www.tina-vision.net/docs/memos/2003-003.pdf
"""
mean = (var1*mean2 + var2*mean1) / (var1 + var2)
var = 1. / (1./var1 + 1./var2)
S = math.exp(-(mean1 - mean2)**2 / (2*(var1 + var2))) / \
math.sqrt(2 * math.pi * (var1 + var2))
return mean, var, S
| 5,347,152 |
def mean_squared_error(y_true, y_pred):
"""
Mean squared error loss.
:param y_true: groundtruth.
:param y_pred: prediction.
:return: loss symbolic value.
"""
P = norm_saliency(y_pred) # Normalized to sum = 1
Q = norm_saliency(y_true) # Normalized to sum = 1
return K.mean(K.square(P - Q))
| 5,347,153 |
def format_epilog():
"""Program entry point.
:param argv: command-line arguments
:type argv: :class:`list`
"""
author_strings = []
for name, email in zip(metadata.authors, metadata.emails):
author_strings.append('Author: {0} <{1}>'.format(name, email))
epilog = '''
{project} {version}
{authors}
URL: <{url}>
'''.format(
project=metadata.project,
version=metadata.version,
authors='\n'.join(author_strings),
url=metadata.url)
return epilog
| 5,347,154 |
def _get_process_environment(process, proxy):
"""Get env to be used by the script process.
This env must at the very least contain the proxy url, and a PATH
allowing bash scripts to use `ctx`, which is expected to live next to
the current executable.
"""
env = os.environ.copy()
env.setdefault('TMPDIR', get_exec_tempdir())
process_env = process.get('env', {})
env.update(process_env)
env[CTX_SOCKET_URL] = proxy.socket_url
env_path = env.get('PATH')
bin_dir = os.path.dirname(sys.executable)
if env_path:
if bin_dir not in env_path.split(os.pathsep):
env['PATH'] = os.pathsep.join([env_path, bin_dir])
else:
env['PATH'] = bin_dir
return env
| 5,347,155 |
def text_differences(original_text: List[str], new_text_version: List[str]) -> TextDifferences:
"""
Builds text differences from input texts.
Parameters
----------
original_text: List[str]
original text (as a list of lines)
new_text_version: List[str]
new text version (as a list of lines)
Returns
-------
text_differences: TextDifferences
TextDifferences object built on top of diffline output
"""
diffs = list(difflib.Differ().compare(_cleanup_text(original_text), _cleanup_text(new_text_version)))
return TextDifferences(_build_difflines(diffs))
| 5,347,156 |
def find_install_requires():
"""Return a list of dependencies and non-pypi dependency links.
A supported version of tensorflow and/or tensorflow-gpu is required. If not
found, then tensorflow is added to the install_requires list.
Depending on the version of tensorflow found or installed, either
keras-contrib or tensorflow-addons needs to be installed as well.
"""
install_requires = [
'sktime==0.7.0',
'h5py>=2.8.0',
'matplotlib',
'seaborn',
'keras-self-attention'
]
# tensorflow version requirements
# by default, make sure anything already installed is above 1.8.0,
# or if installing from new get the most recent stable (i.e. not
# nightly) version
MINIMUM_TF_VERSION = '2.0.0'
tf_requires = 'tensorflow>=' + MINIMUM_TF_VERSION
has_tf_gpu = False
has_tf = False
tf = working_set.find(Requirement.parse('tensorflow'))
tf_gpu = working_set.find(Requirement.parse('tensorflow-gpu'))
if tf is not None:
has_tf = True
tf_version = tf._version
if tf_gpu is not None:
has_tf_gpu = True
tf_gpu_version = tf_gpu._version
if has_tf_gpu and not has_tf:
# have -gpu only (1.x), make sure it's above 1.9.0
# Specify tensorflow-gpu version if it is already installed.
tf_requires = 'tensorflow-gpu>=' + MINIMUM_TF_VERSION
install_requires.append(tf_requires)
# tensorflow itself handled, now find out what add-on package to use
if (not has_tf and not has_tf_gpu) or (has_tf and tf_version >= '2.1.0'):
# tensorflow will be up-to-date enough to use most recent
# tensorflow-addons, the replacement for keras-contrib
install_requires.append('tensorflow-addons')
else:
        # fall back to keras-contrib; it is not on pypi, so it needs to be
        # installed separately. TODO
print(
'Existing version of tensorflow older than version 2.1.0 '
'detected. You shall need to install keras-contrib (for tf.keras) '
'in order to use all the features of sktime-dl. '
'See https://github.com/keras-team/keras-contrib#install-keras_contrib-for-tensorflowkeras')
return install_requires
| 5,347,157 |
def test_get_valid_coupon_versions_over_redeemed(basket_and_coupons, order_status):
"""
Verify that CouponPaymentVersions that have exceeded redemption limits are not returned
"""
with unprotect_version_tables():
civ_worst = basket_and_coupons.coupongroup_worst.coupon_version.payment_version
civ_worst.max_redemptions = 1
civ_worst.save()
CouponRedemptionFactory(
coupon_version=basket_and_coupons.coupongroup_worst.coupon_version,
order=OrderFactory(status=order_status),
)
civ_best = basket_and_coupons.coupongroup_best.coupon_version.payment_version
civ_best.max_redemptions_per_user = 1
civ_best.save()
CouponRedemptionFactory(
coupon_version=basket_and_coupons.coupongroup_best.coupon_version,
order=OrderFactory(
purchaser=basket_and_coupons.basket_item.basket.user,
status=order_status,
),
)
best_versions = list(
get_valid_coupon_versions(
basket_and_coupons.basket_item.product,
basket_and_coupons.basket_item.basket.user,
)
)
if order_status in (Order.FULFILLED, Order.REFUNDED):
assert best_versions == []
else:
assert best_versions == [
basket_and_coupons.coupongroup_best.coupon_version,
basket_and_coupons.coupongroup_worst.coupon_version,
]
| 5,347,158 |
def plot_rms(data, POL='xx'):
"""Plots rms values at input to ADC's by looking at the autos.
Args:
data (dict): Dictionary of auto's.
POL (str): String of polarization."""
fig = plt.figure(figsize=(20, 12))
CHUNK = 256
BINS = 24
rmscolors = 'bgrcmy'
ants = sorted([i for (i, pp) in data if pp == POL])
Nrow = np.ceil(np.sqrt(len(ants)))
Mcol = np.ceil(len(ants) / float(Nrow))
bins = np.logspace(-2, 4, BINS, base=2.)
for cnt, ant in enumerate(ants):
ax = plt.subplot(Nrow, Mcol, cnt + 1)
        for j, ch in enumerate(range(0, 1024, CHUNK)):
            dchunk = data[ant, POL][ch:ch + CHUNK].flatten()
            hist, hbins = np.histogram(np.sqrt(dchunk / 2), bins)
            hist = 10**np.log10(hist + .1)
            hbins = 0.5 * (hbins[1:] + hbins[:-1])
            # plot against the bin centers so x and y have matching lengths
            ax.fill_between(np.log2(hbins), hist, .1, where=hist > .1, color=rmscolors[j % len(rmscolors)], alpha=.5)
            bounds = np.where(bins < 2**0, dchunk.size, np.where(bins > 2**2, dchunk.size, 0))
            ax.fill_between(np.log2(bins), bounds, .1, where=bounds > .1, color='black', alpha=.6)
        ax.set_yscale('log')
        plt.xlim(-2, 3)
        plt.ylim(dchunk.size / 1e2, dchunk.size)
if ant in ants_connected:
lcolor = 'r'
else:
lcolor = 'b'
plt.text(0.5, 0.8, str(ant), fontsize=12, transform=ax.transAxes, color=lcolor)
ax.get_yaxis().set_visible(False)
if cnt < (Nrow - 1) * Mcol:
ax.get_xaxis().set_ticklabels([])
else:
plt.xlabel(r'$V_{\rm rms}$ [bits]')
plt.grid()
fig.suptitle(str(latest.datetime) + ' UTC' + ' {0}'.format(POL) + '; JD=' + str(latest.jd))
plt.tight_layout(rect=(0, 0, 1, .95))
| 5,347,159 |
def remote_fixture():
"""Patch the samsungctl Remote."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote"
) as remote_class, patch(
"homeassistant.components.samsungtv.config_flow.socket"
) as socket_class:
remote = mock.Mock()
remote.__enter__ = mock.Mock()
remote.__exit__ = mock.Mock()
remote_class.return_value = remote
socket = mock.Mock()
socket_class.return_value = socket
yield remote
| 5,347,160 |
async def delete_port(request : VueRequest):
"""
    Endpoint for deleting a port.
    :param request: VueRequest carrying the encrypted request data
    :return: str response: the data to return
"""
try:
response = {'code': '', 'message': '', 'data': ''}
request = rsa_crypto.decrypt(request.data)
request = json.loads(request)
target = request['target']
scan_ip = request['scan_ip']
port = request['port']
token = request['token']
query_str = {
'type': 'token',
'data': token
}
username_result = mysqldb.username(query_str)
if username_result == 'L1001':
response['code'] = 'L1001'
response['message'] = '系统异常'
return response
elif username_result == None:
response['code'] = 'L1003'
response['message'] = '认证失败'
return response
else:
delete_result = mysqldb.delete_port(username_result['username'], target, scan_ip, port)
if delete_result == 'L1000':
response['code'] = 'L1000'
response['message'] = '请求成功'
elif delete_result == 'L1001':
response['code'] = 'L1001'
response['message'] = '系统异常'
return response
except Exception as e:
print(e)
response['code'] = 'L1001'
response['message'] = '系统异常'
return response
| 5,347,161 |
def get_start_indices_labesl(waveform_length, start_times, end_times):
"""
    Returns: a waveform_length sized boolean array where the ith entry says whether or not a frame starting from the ith
sample is covered by an event
"""
label = np.zeros(waveform_length)
for start, end in zip(start_times, end_times):
event_first_start_index = int(start * cfg.working_sample_rate - cfg.frame_size * (1 - cfg.min_event_percentage_in_positive_frame))
event_last_start_index = int(end * cfg.working_sample_rate - cfg.frame_size * cfg.min_event_percentage_in_positive_frame)
label[event_first_start_index: event_last_start_index] = 1
return label
| 5,347,162 |
def get_function_euler_and_module():
"""
    Return a namedtuple containing the Euler totient value and the modulus.
    Because it is a namedtuple, the values can be accessed by name:
    euler = result.euler
    module = result.module
    so the two values cannot be mixed up.
"""
first_number_prime, second_number_prime = get_two_numbers_prime()
results = euler_and_module(
euler=(first_number_prime - 1) * (second_number_prime - 1),
module=first_number_prime * second_number_prime
)
return results
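# Hedged illustration of the namedtuple built above. `euler_and_module` and
# `get_two_numbers_prime` are assumed helpers; primes 5 and 11 are hard-coded here
# to show the arithmetic: euler = (5 - 1) * (11 - 1) = 40, module = 5 * 11 = 55.
from collections import namedtuple

euler_and_module_example = namedtuple("euler_and_module", ["euler", "module"])
example = euler_and_module_example(euler=(5 - 1) * (11 - 1), module=5 * 11)
assert example.euler == 40 and example.module == 55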
| 5,347,163 |
def _read_host_df(host, seq=True):
"""Reads the metrics data for the host and returns a DataFrame.
Args:
host (str): Hostname, one of wally113, wally117, wally122, wally123,
wally124
seq (bool): If sequential or concurrent metrics should be read
Returns:
DataFrame: Containing all the metrics as columns
"""
filepath = ''
if seq:
filepath = '%s/interim/sequential_data/metrics/%s_metrics.csv' % (DATA_DIR, host)
else:
filepath = '%s/interim/concurrent_data/metrics/%s_metrics_concurrent.csv' % (DATA_DIR, host)
metrics_df = pd.read_csv(
filepath,
dtype={'now': str, 'load.cpucore': np.float64, 'load.min1': np.float64,
'load.min5': np.float64, 'load.min15': np.float64,
'mem.used': np.float64})
metrics_df['now'] = pd.to_datetime(metrics_df['now'])
metrics_df = metrics_df.set_index('now')
metrics_df = metrics_df.add_prefix('%s.' % host)
return metrics_df.pivot_table(metrics_df, index=['now'], aggfunc='mean')
| 5,347,164 |
def compacify(train_seq, test_seq, dev_seq, theano=False):
"""
Create a map for indices that is be compact (do not have unused indices)
"""
# REDO DICTS
new_x_dict = LabelDictionary()
new_y_dict = LabelDictionary(['noun'])
for corpus_seq in [train_seq, test_seq, dev_seq]:
for seq in corpus_seq:
for index in seq.x:
word = corpus_seq.x_dict.get_label_name(index)
if word not in new_x_dict:
new_x_dict.add(word)
for index in seq.y:
tag = corpus_seq.y_dict.get_label_name(index)
if tag not in new_y_dict:
new_y_dict.add(tag)
# REDO INDICES
# for corpus_seq in [train_seq2, test_seq2, dev_seq2]:
for corpus_seq in [train_seq, test_seq, dev_seq]:
for seq in corpus_seq:
for i in seq.x:
if corpus_seq.x_dict.get_label_name(i) not in new_x_dict:
pass
for i in seq.y:
if corpus_seq.y_dict.get_label_name(i) not in new_y_dict:
pass
seq.x = [new_x_dict[corpus_seq.x_dict.get_label_name(i)] for i in seq.x]
seq.y = [new_y_dict[corpus_seq.y_dict.get_label_name(i)] for i in seq.y]
# For compatibility with GPUs store as numpy arrays and cats to int
# 32
if theano:
seq.x = np.array(seq.x, dtype='int32')
seq.y = np.array(seq.y, dtype='int32')
# Reinstate new dicts
corpus_seq.x_dict = new_x_dict
corpus_seq.y_dict = new_y_dict
# Add reverse indices
corpus_seq.word_dict = {v: k for k, v in new_x_dict.items()}
corpus_seq.tag_dict = {v: k for k, v in new_y_dict.items()}
# SANITY CHECK:
# These must be the same
# tmap = {v: k for k, v in train_seq.x_dict.items()}
# tmap2 = {v: k for k, v in train_seq2.x_dict.items()}
# [tmap[i] for i in train_seq[0].x]
# [tmap2[i] for i in train_seq2[0].x]
return train_seq, test_seq, dev_seq
| 5,347,165 |
def obfuscate_email(email):
"""Takes an email address and returns an obfuscated version of it.
    For example: test@example.com would turn into t**t@e*********m
"""
if email is None:
return None
splitmail = email.split("@")
# If the prefix is 1 character, then we can't obfuscate it
if len(splitmail[0]) <= 1:
prefix = splitmail[0]
else:
prefix = f'{splitmail[0][0]}{"*"*(len(splitmail[0])-2)}{splitmail[0][-1]}'
# If the domain is missing or 1 character, then we can't obfuscate it
if len(splitmail) <= 1 or len(splitmail[1]) <= 1:
return f"{prefix}"
else:
domain = f'{splitmail[1][0]}{"*"*(len(splitmail[1])-2)}{splitmail[1][-1]}'
return f"{prefix}@{domain}"
| 5,347,166 |
def list_small_kernels():
"""Return list of small kernels to generate."""
kernels1d = [
NS(length= 1, threads_per_block= 64, threads_per_transform= 1, factors=(1,)),
NS(length= 2, threads_per_block= 64, threads_per_transform= 1, factors=(2,)),
NS(length= 3, threads_per_block= 64, threads_per_transform= 1, factors=(3,)),
NS(length= 4, threads_per_block=128, threads_per_transform= 1, factors=(4,)),
NS(length= 5, threads_per_block=128, threads_per_transform= 1, factors=(5,)),
NS(length= 6, threads_per_block=128, threads_per_transform= 1, factors=(6,)),
NS(length= 7, threads_per_block= 64, threads_per_transform= 1, factors=(7,)),
NS(length= 8, threads_per_block= 64, threads_per_transform= 4, factors=(4, 2)),
NS(length= 9, threads_per_block= 64, threads_per_transform= 3, factors=(3, 3)),
NS(length= 10, threads_per_block= 64, threads_per_transform= 1, factors=(10,)),
NS(length= 11, threads_per_block=128, threads_per_transform= 1, factors=(11,)),
NS(length= 12, threads_per_block=128, threads_per_transform= 6, factors=(6, 2)),
NS(length= 13, threads_per_block= 64, threads_per_transform= 1, factors=(13,)),
NS(length= 14, threads_per_block=128, threads_per_transform= 7, factors=(7, 2)),
NS(length= 15, threads_per_block=128, threads_per_transform= 5, factors=(3, 5)),
NS(length= 16, threads_per_block= 64, threads_per_transform= 4, factors=(4, 4)),
NS(length= 17, threads_per_block=256, threads_per_transform= 1, factors=(17,)),
NS(length= 18, threads_per_block= 64, threads_per_transform= 6, factors=(3, 6)),
NS(length= 20, threads_per_block=256, threads_per_transform= 10, factors=(5, 4)),
NS(length= 21, threads_per_block=128, threads_per_transform= 7, factors=(3, 7)),
NS(length= 22, threads_per_block= 64, threads_per_transform= 2, factors=(11, 2)),
NS(length= 24, threads_per_block=256, threads_per_transform= 8, factors=(8, 3)),
NS(length= 25, threads_per_block=256, threads_per_transform= 5, factors=(5, 5)),
NS(length= 26, threads_per_block= 64, threads_per_transform= 2, factors=(13, 2)),
NS(length= 27, threads_per_block=256, threads_per_transform= 9, factors=(3, 3, 3)),
NS(length= 28, threads_per_block= 64, threads_per_transform= 4, factors=(7, 4)),
NS(length= 30, threads_per_block=128, threads_per_transform= 10, factors=(10, 3)),
NS(length= 32, threads_per_block= 64, threads_per_transform= 16, factors=(16, 2)),
NS(length= 36, threads_per_block= 64, threads_per_transform= 6, factors=(6, 6)),
NS(length= 40, threads_per_block=128, threads_per_transform= 10, factors=(10, 4)),
NS(length= 42, threads_per_block=256, threads_per_transform= 7, factors=(7, 6)),
NS(length= 44, threads_per_block= 64, threads_per_transform= 4, factors=(11, 4)),
NS(length= 45, threads_per_block=128, threads_per_transform= 15, factors=(5, 3, 3)),
NS(length= 48, threads_per_block= 64, threads_per_transform= 16, factors=(4, 3, 4)),
NS(length= 49, threads_per_block= 64, threads_per_transform= 7, factors=(7, 7)),
NS(length= 50, threads_per_block=256, threads_per_transform= 10, factors=(10, 5)),
NS(length= 52, threads_per_block= 64, threads_per_transform= 4, factors=(13, 4)),
NS(length= 54, threads_per_block=256, threads_per_transform= 18, factors=(6, 3, 3)),
NS(length= 56, threads_per_block=128, threads_per_transform= 8, factors=(7, 8)),
NS(length= 60, threads_per_block= 64, threads_per_transform= 10, factors=(6, 10)),
NS(length= 64, threads_per_block= 64, threads_per_transform= 16, factors=(4, 4, 4)),
NS(length= 72, threads_per_block= 64, threads_per_transform= 9, factors=(8, 3, 3)),
NS(length= 75, threads_per_block=256, threads_per_transform= 25, factors=(5, 5, 3)),
NS(length= 80, threads_per_block= 64, threads_per_transform= 10, factors=(5, 2, 8)),
NS(length= 81, threads_per_block=128, threads_per_transform= 27, factors=(3, 3, 3, 3)),
NS(length= 84, threads_per_block=128, threads_per_transform= 12, factors=(7, 2, 6)),
NS(length= 88, threads_per_block=128, threads_per_transform= 11, factors=(11, 8)),
NS(length= 90, threads_per_block= 64, threads_per_transform= 9, factors=(3, 3, 10)),
NS(length= 96, threads_per_block=128, threads_per_transform= 16, factors=(6, 16)),
NS(length= 100, threads_per_block= 64, threads_per_transform= 10, factors=(10, 10)),
NS(length= 104, threads_per_block= 64, threads_per_transform= 8, factors=(13, 8)),
NS(length= 108, threads_per_block=256, threads_per_transform= 36, factors=(6, 6, 3)),
NS(length= 112, threads_per_block=256, threads_per_transform= 16, factors=(4, 7, 4), half_lds=False),
NS(length= 120, threads_per_block= 64, threads_per_transform= 12, factors=(6, 10, 2)),
NS(length= 121, threads_per_block=128, threads_per_transform= 11, factors=(11, 11)),
NS(length= 125, threads_per_block=256, threads_per_transform= 25, factors=(5, 5, 5), half_lds=False),
NS(length= 128, threads_per_block=256, threads_per_transform= 16, factors=(16, 8)),
NS(length= 135, threads_per_block=128, threads_per_transform= 9, factors=(5, 3, 3, 3)),
NS(length= 144, threads_per_block=128, threads_per_transform= 12, factors=(6, 6, 4)),
NS(length= 150, threads_per_block= 64, threads_per_transform= 5, factors=(10, 5, 3)),
NS(length= 160, threads_per_block=256, threads_per_transform= 16, factors=(16, 10)),
NS(length= 162, threads_per_block=256, threads_per_transform= 27, factors=(6, 3, 3, 3)),
NS(length= 168, threads_per_block=256, threads_per_transform= 56, factors=(8, 7, 3), half_lds=False),
NS(length= 169, threads_per_block=256, threads_per_transform= 13, factors=(13, 13)),
NS(length= 176, threads_per_block= 64, threads_per_transform= 16, factors=(11, 16)),
NS(length= 180, threads_per_block=256, threads_per_transform= 60, factors=(10, 6, 3), half_lds=False),
NS(length= 192, threads_per_block=128, threads_per_transform= 16, factors=(6, 4, 4, 2)),
NS(length= 200, threads_per_block= 64, threads_per_transform= 20, factors=(10, 10, 2)),
NS(length= 208, threads_per_block= 64, threads_per_transform= 16, factors=(13, 16)),
NS(length= 216, threads_per_block=256, threads_per_transform= 36, factors=(6, 6, 6)),
NS(length= 224, threads_per_block= 64, threads_per_transform= 16, factors=(7, 2, 2, 2, 2, 2)),
NS(length= 225, threads_per_block=256, threads_per_transform= 75, factors=(5, 5, 3, 3)),
NS(length= 240, threads_per_block=128, threads_per_transform= 48, factors=(8, 5, 6)),
NS(length= 243, threads_per_block=256, threads_per_transform= 81, factors=(3, 3, 3, 3, 3)),
NS(length= 250, threads_per_block=128, threads_per_transform= 25, factors=(10, 5, 5)),
NS(length= 256, threads_per_block= 64, threads_per_transform= 64, factors=(4, 4, 4, 4)),
NS(length= 270, threads_per_block=128, threads_per_transform= 27, factors=(10, 3, 3, 3)),
NS(length= 272, threads_per_block=128, threads_per_transform= 17, factors=(16, 17)),
NS(length= 288, threads_per_block=128, threads_per_transform= 24, factors=(6, 6, 4, 2)),
NS(length= 300, threads_per_block= 64, threads_per_transform= 30, factors=(10, 10, 3)),
NS(length= 320, threads_per_block= 64, threads_per_transform= 16, factors=(10, 4, 4, 2)),
NS(length= 324, threads_per_block= 64, threads_per_transform= 54, factors=(3, 6, 6, 3)),
NS(length= 336, threads_per_block=128, threads_per_transform= 56, factors=(8, 7, 6)),
NS(length= 343, threads_per_block=256, threads_per_transform= 49, factors=(7, 7, 7)),
NS(length= 360, threads_per_block=256, threads_per_transform= 60, factors=(10, 6, 6)),
NS(length= 375, threads_per_block=128, threads_per_transform= 25, factors=(5, 5, 5, 3)),
NS(length= 384, threads_per_block=128, threads_per_transform= 32, factors=(6, 4, 4, 4)),
NS(length= 400, threads_per_block=128, threads_per_transform= 40, factors=(4, 10, 10)),
NS(length= 405, threads_per_block=128, threads_per_transform= 27, factors=(5, 3, 3, 3, 3)),
NS(length= 432, threads_per_block= 64, threads_per_transform= 27, factors=(3, 16, 3, 3)),
NS(length= 450, threads_per_block=128, threads_per_transform= 30, factors=(10, 5, 3, 3)),
NS(length= 480, threads_per_block= 64, threads_per_transform= 16, factors=(10, 8, 6)),
NS(length= 486, threads_per_block=256, threads_per_transform=162, factors=(6, 3, 3, 3, 3)),
NS(length= 500, threads_per_block=128, threads_per_transform=100, factors=(10, 5, 10)),
NS(length= 512, threads_per_block= 64, threads_per_transform= 64, factors=(8, 8, 8)),
NS(length= 528, threads_per_block= 64, threads_per_transform= 48, factors=(4, 4, 3, 11)),
NS(length= 540, threads_per_block=256, threads_per_transform= 54, factors=(3, 10, 6, 3)),
NS(length= 576, threads_per_block=128, threads_per_transform= 96, factors=(16, 6, 6)),
NS(length= 600, threads_per_block= 64, threads_per_transform= 60, factors=(10, 6, 10)),
NS(length= 625, threads_per_block=128, threads_per_transform=125, factors=(5, 5, 5, 5)),
NS(length= 640, threads_per_block=128, threads_per_transform= 64, factors=(8, 10, 8)),
NS(length= 648, threads_per_block=256, threads_per_transform=216, factors=(8, 3, 3, 3, 3)),
NS(length= 675, threads_per_block=256, threads_per_transform=225, factors=(5, 5, 3, 3, 3)),
NS(length= 720, threads_per_block=256, threads_per_transform=120, factors=(10, 3, 8, 3)),
NS(length= 729, threads_per_block=256, threads_per_transform=243, factors=(3, 3, 3, 3, 3, 3)),
NS(length= 750, threads_per_block=256, threads_per_transform=250, factors=(10, 5, 3, 5)),
NS(length= 768, threads_per_block= 64, threads_per_transform= 48, factors=(16, 3, 16)),
NS(length= 800, threads_per_block=256, threads_per_transform=160, factors=(16, 5, 10)),
NS(length= 810, threads_per_block=128, threads_per_transform= 81, factors=(3, 10, 3, 3, 3)),
NS(length= 864, threads_per_block= 64, threads_per_transform= 54, factors=(3, 6, 16, 3)),
NS(length= 900, threads_per_block=256, threads_per_transform= 90, factors=(10, 10, 3, 3)),
NS(length= 960, threads_per_block=256, threads_per_transform=160, factors=(16, 10, 6), half_lds=False),
NS(length= 972, threads_per_block=256, threads_per_transform=162, factors=(3, 6, 3, 6, 3)),
NS(length=1000, threads_per_block=128, threads_per_transform=100, factors=(10, 10, 10)),
NS(length=1024, threads_per_block=128, threads_per_transform=128, factors=(8, 8, 4, 4)),
NS(length=1040, threads_per_block=256, threads_per_transform=208, factors=(13, 16, 5)),
NS(length=1080, threads_per_block=256, threads_per_transform=108, factors=(6, 10, 6, 3)),
NS(length=1125, threads_per_block=256, threads_per_transform=225, factors=(5, 5, 3, 3, 5)),
NS(length=1152, threads_per_block=256, threads_per_transform=144, factors=(4, 3, 8, 3, 4)),
NS(length=1200, threads_per_block=256, threads_per_transform= 75, factors=(5, 5, 16, 3)),
NS(length=1215, threads_per_block=256, threads_per_transform=243, factors=(5, 3, 3, 3, 3, 3)),
NS(length=1250, threads_per_block=256, threads_per_transform=250, factors=(5, 10, 5, 5)),
NS(length=1280, threads_per_block=128, threads_per_transform= 80, factors=(16, 5, 16)),
NS(length=1296, threads_per_block=128, threads_per_transform=108, factors=(6, 6, 6, 6)),
NS(length=1350, threads_per_block=256, threads_per_transform=135, factors=(5, 10, 3, 3, 3)),
NS(length=1440, threads_per_block=128, threads_per_transform= 90, factors=(10, 16, 3, 3)),
NS(length=1458, threads_per_block=256, threads_per_transform=243, factors=(6, 3, 3, 3, 3, 3)),
NS(length=1500, threads_per_block=256, threads_per_transform=150, factors=(5, 10, 10, 3)),
NS(length=1536, threads_per_block=256, threads_per_transform=256, factors=(16, 16, 6)),
NS(length=1600, threads_per_block=256, threads_per_transform=100, factors=(10, 16, 10)),
NS(length=1620, threads_per_block=256, threads_per_transform=162, factors=(10, 3, 3, 6, 3)),
NS(length=1728, threads_per_block=128, threads_per_transform=108, factors=(3, 6, 6, 16)),
NS(length=1800, threads_per_block=256, threads_per_transform=180, factors=(10, 6, 10, 3)),
NS(length=1875, threads_per_block=256, threads_per_transform=125, factors=(5, 5, 5, 5, 3)),
NS(length=1920, threads_per_block=256, threads_per_transform=120, factors=(10, 6, 16, 2)),
NS(length=1944, threads_per_block=256, threads_per_transform=243, factors=(3, 3, 3, 3, 8, 3)),
NS(length=2000, threads_per_block=128, threads_per_transform=125, factors=(5, 5, 5, 16)),
NS(length=2025, threads_per_block=256, threads_per_transform=135, factors=(3, 3, 5, 5, 3, 3)),
NS(length=2048, threads_per_block=256, threads_per_transform=256, factors=(16, 16, 8)),
NS(length=2160, threads_per_block=256, threads_per_transform= 60, factors=(10, 6, 6, 6)),
NS(length=2187, threads_per_block=256, threads_per_transform=243, factors=(3, 3, 3, 3, 3, 3, 3)),
NS(length=2250, threads_per_block=256, threads_per_transform= 90, factors=(10, 3, 5, 3, 5)),
NS(length=2304, threads_per_block=256, threads_per_transform=192, factors=(6, 6, 4, 4, 4), runtime_compile=True),
NS(length=2400, threads_per_block=256, threads_per_transform=240, factors=(4, 10, 10, 6)),
NS(length=2430, threads_per_block=256, threads_per_transform= 81, factors=(10, 3, 3, 3, 3, 3)),
NS(length=2500, threads_per_block=256, threads_per_transform=250, factors=(10, 5, 10, 5)),
NS(length=2560, threads_per_block=128, threads_per_transform=128, factors=(4, 4, 4, 10, 4)),
NS(length=2592, threads_per_block=256, threads_per_transform=216, factors=(6, 6, 6, 6, 2)),
NS(length=2700, threads_per_block=128, threads_per_transform= 90, factors=(3, 10, 10, 3, 3)),
NS(length=2880, threads_per_block=256, threads_per_transform= 96, factors=(10, 6, 6, 2, 2, 2)),
NS(length=2916, threads_per_block=256, threads_per_transform=243, factors=(6, 6, 3, 3, 3, 3)),
NS(length=3000, threads_per_block=128, threads_per_transform=100, factors=(10, 3, 10, 10)),
NS(length=3072, threads_per_block=256, threads_per_transform=256, factors=(6, 4, 4, 4, 4, 2)),
NS(length=3125, threads_per_block=128, threads_per_transform=125, factors=(5, 5, 5, 5, 5)),
NS(length=3200, threads_per_block=256, threads_per_transform=160, factors=(10, 10, 4, 4, 2)),
NS(length=3240, threads_per_block=128, threads_per_transform=108, factors=(3, 3, 10, 6, 6)),
NS(length=3375, threads_per_block=256, threads_per_transform=225, factors=(5, 5, 5, 3, 3, 3)),
NS(length=3456, threads_per_block=256, threads_per_transform=144, factors=(6, 6, 6, 4, 4)),
NS(length=3600, threads_per_block=256, threads_per_transform=120, factors=(10, 10, 6, 6)),
NS(length=3645, threads_per_block=256, threads_per_transform=243, factors=(5, 3, 3, 3, 3, 3, 3)),
NS(length=3750, threads_per_block=256, threads_per_transform=125, factors=(3, 5, 5, 10, 5)),
NS(length=3840, threads_per_block=256, threads_per_transform=128, factors=(10, 6, 2, 2, 2, 2, 2, 2)),
NS(length=3888, threads_per_block=512, threads_per_transform=324, factors=(16, 3, 3, 3, 3, 3)),
NS(length=4000, threads_per_block=256, threads_per_transform=200, factors=(10, 10, 10, 4)),
NS(length=4050, threads_per_block=256, threads_per_transform=135, factors=(10, 5, 3, 3, 3, 3)),
NS(length=4096, threads_per_block=256, threads_per_transform=256, factors=(16, 16, 16)),
]
kernels = [NS(**kernel.__dict__,
scheme='CS_KERNEL_STOCKHAM',
precision=['sp', 'dp']) for kernel in kernels1d]
return kernels
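
# Hedged usage sketch (assumes NS exposes its keyword arguments as attributes, which the
# kernel.__dict__ unpacking above implies): look up the generated spec for a given length.
spec = next(k for k in list_small_kernels() if k.length == 64)
print(spec.scheme, spec.threads_per_block, spec.threads_per_transform, spec.factors)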
| 5,347,167 |
def init_weights_kaiming_uniform(m):
""" Weight initialization for linear layers """
if type(m) == nn.Linear:
        nn.init.kaiming_uniform_(m.weight)
m.bias.data.fill_(0.01)
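
# Hedged usage sketch, assuming `torch.nn as nn` is imported as the function above expects:
# nn.Module.apply visits every submodule, so each nn.Linear layer is re-initialised in place.
model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 1))
model.apply(init_weights_kaiming_uniform)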
| 5,347,168 |
def parse_boolean(arg: str):
"""Returns boolean representation of argument."""
arg = str(arg).lower()
if 'true'.startswith(arg):
return True
return False
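
# Hedged usage sketch (not from the original source): any case-insensitive prefix of 'true'
# is accepted, everything else is False.
assert parse_boolean("True") is True
assert parse_boolean("t") is True
assert parse_boolean("false") is False
assert parse_boolean("yes") is False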
| 5,347,169 |
def test_multiple_repos_bad_default() -> None:
"""Check error for prepare_repository_configurations with a missing default_repo."""
from valiant.config import Config
from valiant.repositories import RepositoryConfiguration
with pytest.raises(ValueError):
Config.prepare_repository_configurations(
repository_configurations=[
RepositoryConfiguration(
name="test", base_url="https://www.example.com"
),
RepositoryConfiguration(
name="test2", base_url="https://www.differentexample.com"
),
],
default_repository="otter",
)
| 5,347,170 |
def test_amb_file():
"""
The amb file records the appearance of non-ATGC characters (e.g. N)
"""
amb_file = expanduser("~/.genome/F1L3/") + "F1L3.fa.gz.amb"
with open(amb_file, "r") as f:
assert '5252759 94 1 575 10 N' == ' '.join(f.read().splitlines())
| 5,347,171 |
async def log_rtsp_urls(device: VivintCamera):
"""Logs the rtsp urls of a Vivint camera."""
_LOGGER.info(
"%s rtsp urls:\n direct hd: %s\n direct sd: %s\n internal hd: %s\n internal sd: %s\n external hd: %s\n external sd: %s",
device.name,
await device.get_direct_rtsp_url(hd=True),
await device.get_direct_rtsp_url(hd=False),
await device.get_rtsp_url(internal=True, hd=True),
await device.get_rtsp_url(internal=True, hd=False),
await device.get_rtsp_url(internal=False, hd=True),
await device.get_rtsp_url(internal=False, hd=False),
)
| 5,347,172 |
def ascending_coin(coin):
"""Returns the next ascending coin in order.
>>> ascending_coin(1)
5
>>> ascending_coin(5)
10
>>> ascending_coin(10)
25
>>> ascending_coin(2) # Other values return None
"""
if coin == 1:
return 5
elif coin == 5:
return 10
elif coin == 10:
return 25
| 5,347,173 |
def start_game() -> None:
"""Start the game by asking
the number of people and setting their attributes."""
number_of_people: int = ask_number_of_people()
ask_and_set_player_attributes(number_of_people)
| 5,347,174 |
def to_numpy(qg8_tensor):
"""
Convert qg8_tensor to dense numpy array
"""
dtype = dtype_to_name(qg8_tensor.dtype_id)
ndarray = np.zeros(qg8_tensor.dims, dtype=dtype)
if np.iscomplexobj(ndarray):
ndarray[tuple(qg8_tensor.indices)] = np.asfortranarray(qg8_tensor.re)\
+ 1j*np.asfortranarray(qg8_tensor.im)
else:
ndarray[tuple(qg8_tensor.indices)] = np.asfortranarray(qg8_tensor.re)
return ndarray
| 5,347,175 |
def _get_results(report):
"""Limit the number of documents to REPORT_MAX_DOCUMENTS so as not to crash the server."""
query = _build_query(report)
try:
session.execute(f"SET statement_timeout TO {int(REPORT_COUNT_TIMEOUT * 1000)}; commit;")
if query.count() == 0:
return None
except OperationalError:
pass
session.execute("SET statement_timeout TO 0; commit;")
results = query.order_by(Result.start_time.desc()).limit(REPORT_MAX_DOCUMENTS).all()
return [result.to_dict() for result in results]
| 5,347,176 |
def mark(metric_name):
"""
Mark event in metrics collector.
"""
if settings.COLLECT_METRICS:
logger.info('New mark event: {}'.format(metric_name), extra={
'metric_name': metric_name
})
counter = Metrology.meter(metric_name)
counter.mark()
| 5,347,177 |
def connect_input(source_node, source_attr_name, target_node, target_attr_name):
"""Basic connection from source to target ndoe
:param source_node:
:param source_attr_name:
:param target_node:
:param target_attr_name:
:return:
"""
inputs = source_node.attr(source_attr_name).inputs(p=1)
if inputs:
inputs[0] >> target_node.attr(target_attr_name)
else:
# or set it to the same value
target_node.attr(target_attr_name).set(
source_node.attr(source_attr_name).get()
)
| 5,347,178 |
def test_missing_node_name_raises(topology, validator):
"""
    Verify that a topology missing the node name fails schema validation
"""
test_topology = topology["missing_node_name"]
with pytest.raises(jsonschema.exceptions.ValidationError) as e:
validator.validate(test_topology)
assert "'name' is a required property" in str(e.value)
assert "'required' in schema['properties']['nodes']['items']" in str(e.value)
| 5,347,179 |
def text_match_one_hot(df, column=None, text_phrases=None, new_col_name=None, return_df=False, case=False,
                       suppress_warnings: bool = False):
"""Given a dataframe, text column to search and a list of text phrases, return a binary
column with 1s when text is present and 0 otherwise
"""
# Ignore regex group match warning
warnings.filterwarnings("ignore", 'This pattern has match groups')
# Check params
    assert text_phrases, "Must specify 'text_phrases' as a list of strings"
if (column not in df.columns.values.tolist()):
if not suppress_warnings:
warnings.warn(f'Column "{column}" not found in dataframe. No matches attempted')
return
# Create regex pattern to match any phrase in list
# The first phrase will be placed in its own groups
regex_pattern = '({})'.format(text_phrases[0])
# If there's more than one phrase
# Each phrase is placed in its own group () with an OR operand in front of it |
# and added to the original phrase
if len(text_phrases) > 1:
        subsequent_phrases = "".join(['|({})'.format(phrase) for phrase in text_phrases[1:]])
        regex_pattern += subsequent_phrases
# Cast to string to ensure .str methods work
df_copy = df.copy()
df_copy[column] = df_copy[column].astype(str)
matches = df_copy[column].str.contains(regex_pattern, na=False, case=case).astype(int)
## Alter name
if not new_col_name:
# If none provided use column name and values matched
new_col_name = column+'_match_for: '+str(text_phrases)[1:-1].replace(r"'", "")
matches.name = new_col_name
if return_df:
df_copy = df.copy()
df_copy[new_col_name] = matches
return df_copy
else:
return matches
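
# Hedged usage sketch (hypothetical column and phrases; assumes the module imports pandas
# as pd and warnings, as the function body requires): flags rows whose 'notes' column
# mentions either phrase, case-insensitively.
example_df = pd.DataFrame({"notes": ["Late delivery", "damaged box", "all good"]})
flags = text_match_one_hot(example_df, column="notes",
                           text_phrases=["late", "damaged"],
                           new_col_name="late_or_damaged")
print(flags.tolist())  # [1, 1, 0]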
| 5,347,180 |
def pack_feed_dict(name_prefixs, origin_datas, paddings, input_fields):
"""
Args:
        name_prefixs: A prefix string or a list of prefix strings.
        origin_datas: A data list or a list of data lists.
        paddings: A padding id or a list of padding ids.
        input_fields: The input fields dict.

    Returns: A dict for the while loop.
"""
data = dict()
data["feed_dict"] = dict()
def map_fn(n, d, p):
# n: name prefix
# d: data list
# p: padding symbol
data[concat_name(n, Constants.IDS_NAME)] = d
n_samples = len(d)
n_devices = len(input_fields)
n_samples_per_gpu = n_samples // n_devices
if n_samples % n_devices > 0:
n_samples_per_gpu += 1
def _feed_batchs(_start_idx, _inpf):
if _start_idx * n_samples_per_gpu >= n_samples:
return 0
x, x_len = padding_batch_data(
d[_start_idx * n_samples_per_gpu:(_start_idx + 1) * n_samples_per_gpu], p)
data["feed_dict"][_inpf[concat_name(n, Constants.IDS_NAME)]] = x
data["feed_dict"][_inpf[concat_name(n, Constants.LENGTH_NAME)]] = x_len
return len(x_len)
parallels = repeat_n_times(
n_devices, _feed_batchs,
range(n_devices), input_fields)
data["feed_dict"]["parallels"] = parallels
if isinstance(name_prefixs, six.string_types):
map_fn(name_prefixs, origin_datas, paddings)
else:
[map_fn(n, d, p) for n, d, p in zip(name_prefixs, origin_datas, paddings)]
return data
| 5,347,181 |
async def test_singleswitch_requests(zigpy_device_from_quirk, quirk):
"""Test tuya single switch."""
switch_dev = zigpy_device_from_quirk(quirk)
switch_cluster = switch_dev.endpoints[1].on_off
tuya_cluster = switch_dev.endpoints[1].tuya_manufacturer
with mock.patch.object(
tuya_cluster.endpoint, "request", return_value=foundation.Status.SUCCESS
) as m1:
status = await switch_cluster.command(0x0000)
m1.assert_called_with(
61184,
1,
b"\x01\x01\x00\x00\x00\x01\x01\x00\x01\x00",
expect_reply=True,
command_id=0,
)
assert status == 0
status = await switch_cluster.command(0x0001)
m1.assert_called_with(
61184,
2,
b"\x01\x02\x00\x00\x00\x01\x01\x00\x01\x01",
expect_reply=True,
command_id=0,
)
assert status == 0
status = await switch_cluster.command(0x0002)
assert status == foundation.Status.UNSUP_CLUSTER_COMMAND
| 5,347,182 |
def get_directory(**kwargs):
"""
Wrapper function for PyQt4.QtGui.QFileDialog.getExistingDirectory().
    Returns the absolute path of the chosen directory.
Parameters
----------
None
Returns
-------
    filename : str, the absolute path of the chosen directory.
"""
from PyQt4 import QtGui
filename = str(QtGui.QFileDialog.getExistingDirectory())
return filename
| 5,347,183 |
def single_particle_relative_pzbt_metafit(fitfn, exp_list, **kwargs):
"""Fit to single-particle energies plus zero body term, relative to the
first point
"""
return single_particle_metafit_int(
fitfn, exp_list,
dpath_sources=DPATH_FILES_INT, dpath_plots=DPATH_PLOTS,
transform=relative_y_zbt,
code='sprpz', mf_name='single_particle_relative_pzbt_metafit',
xlabel='A',
ylabel='Relative Single Particle Energy + Zero Body Term (MeV)',
**kwargs
)
| 5,347,184 |
def hetmat_from_permuted_graph(hetmat, permutation_id, permuted_graph):
"""
Assumes subdirectory structure and that permutations inherit nodes but not
edges.
"""
permuted_hetmat = initialize_permutation_directory(hetmat, permutation_id)
permuted_hetmat = hetmat_from_graph(
permuted_graph,
permuted_hetmat.directory,
save_metagraph=False,
save_nodes=False,
)
return permuted_hetmat
| 5,347,185 |
def can_see_all_content(requesting_user: types.User, course_key: CourseKey) -> bool:
"""
Global staff, course staff, and instructors can see everything.
There's no need to run processors to restrict results for these users.
"""
return (
GlobalStaff().has_user(requesting_user) or
CourseStaffRole(course_key).has_user(requesting_user) or
CourseInstructorRole(course_key).has_user(requesting_user)
)
| 5,347,186 |
def part2(steps, workers=2, extra_time=0):
""" Time is in seconds """
workers = [Worker() for _ in range(workers)]
steps_to_time = {
step: alphabet.index(step) + 1 + extra_time
for step in alphabet
}
time = 0
graph = build_graph(steps)
chain = find_orphans(graph)
while chain or Worker.busy(workers):
for worker in workers:
if time == worker.current_step_end_time:
worker.finish_step()
new_children = []
for i, node in enumerate(chain):
if node.ready:
for worker in workers:
if worker.idle:
current_node = chain.pop(i)
new_children += current_node.children
current_step = current_node
end_time = time + steps_to_time[current_node.step]
worker.begin_step(current_step, end_time)
break
chain = list(set(new_children).union(set(chain)))
chain = sorted(chain, key=lambda node: node.step)
time += 1
return time - 1
| 5,347,187 |
def test_class_weight_auto_classifies():
"""Test that class_weight="auto" improves f1-score"""
# This test is broken; its success depends on:
# * a rare fortuitous RNG seed for make_classification; and
# * the use of binary F1 over a seemingly arbitrary positive class for two
# datasets, and weighted average F1 for the third.
# Its expectations need to be clarified and reimplemented.
raise SkipTest('This test requires redefinition')
classifiers = all_estimators(type_filter='classifier')
with warnings.catch_warnings(record=True):
classifiers = [c for c in classifiers
if 'class_weight' in c[1]().get_params().keys()]
for n_classes, weights in zip([2, 3], [[.8, .2], [.8, .1, .1]]):
# create unbalanced dataset
X, y = make_classification(n_classes=n_classes, n_samples=200,
n_features=10, weights=weights,
random_state=0, n_informative=n_classes)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
for name, Classifier in classifiers:
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
continue
if name.startswith("RidgeClassifier"):
# RidgeClassifier behaves unexpected
# FIXME!
continue
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
continue
yield (check_class_weight_auto_classifiers, name, Classifier,
X_train, y_train, X_test, y_test, weights)
| 5,347,188 |
def to_local_df(df: Any, schema: Any = None, metadata: Any = None) -> LocalDataFrame:
"""Convert a data structure to :class:`~fugue.dataframe.dataframe.LocalDataFrame`
    :param df: :class:`~fugue.dataframe.dataframe.DataFrame`, pandas DataFrame, or
list or iterable of arrays
:param schema: |SchemaLikeObject|, defaults to None, it should not be set for
:class:`~fugue.dataframe.dataframe.DataFrame` type
:param metadata: dict-like object with string keys, defaults to None
:raises ValueError: if ``df`` is :class:`~fugue.dataframe.dataframe.DataFrame`
but you set ``schema`` or ``metadata``
:raises TypeError: if ``df`` is not compatible
:return: the dataframe itself if it's
:class:`~fugue.dataframe.dataframe.LocalDataFrame` else a converted one
:Examples:
>>> a = to_local_df([[0,'a'],[1,'b']],"a:int,b:str")
>>> assert to_local_df(a) is a
>>> to_local_df(SparkDataFrame([[0,'a'],[1,'b']],"a:int,b:str"))
"""
assert_arg_not_none(df, "df")
if isinstance(df, DataFrame):
aot(
schema is None and metadata is None,
ValueError("schema and metadata must be None when df is a DataFrame"),
)
return df.as_local()
if isinstance(df, pd.DataFrame):
return PandasDataFrame(df, schema, metadata)
if isinstance(df, List):
return ArrayDataFrame(df, schema, metadata)
if isinstance(df, Iterable):
return IterableDataFrame(df, schema, metadata)
raise TypeError(f"{df} cannot convert to a LocalDataFrame")
| 5,347,189 |
def create_local_meta(name):
"""
Create the metadata dictionary for this level of execution.
Parameters
----------
name : str
String to describe the current level of execution.
Returns
-------
dict
Dictionary containing the metadata.
"""
local_meta = {
'name': name,
'timestamp': None,
'success': 1,
'msg': '',
}
return local_meta
| 5,347,190 |
def matches_uri_ref_syntax(s):
"""
This function returns true if the given string could be a URI reference,
as defined in RFC 3986, just based on the string's syntax.
A URI reference can be a URI or certain portions of one, including the
empty string, and it can have a fragment component.
"""
if not _validation_setup_completed:
_init_uri_validation_regex()
return URI_REF_PATTERN.match(s) is not None
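
# Hedged usage sketch; relies on the module-level _init_uri_validation_regex and
# URI_REF_PATTERN helpers that this function references.
for candidate in ("http://example.org/a?b=c#frag", "", "relative/path"):
    print(repr(candidate), matches_uri_ref_syntax(candidate))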
| 5,347,191 |
def get_end_hour(dt=None):
"""根据日期、或时间取得该小时59:59的时间;参数可以是date或datetime类型"""
end = None
if not dt:
dt = datetime.date.today()
if isinstance(dt, datetime.date):
dt_str = dt.strftime("%Y-%m-%d %H") + ":59:59"
end = datetime.datetime.strptime(dt_str, "%Y-%m-%d %H:%M:%S")
return end
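
# Hedged usage sketch (assumes `import datetime`, which the function itself requires):
# a datetime keeps its hour, while a plain date resolves to 00:59:59 of that day.
print(get_end_hour(datetime.datetime(2021, 3, 19, 9, 47)))  # 2021-03-19 09:59:59
print(get_end_hour(datetime.date(2021, 3, 19)))             # 2021-03-19 00:59:59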
| 5,347,192 |
def all_ped_combos_strs(num_locs=4, val_set=("0", "1")):
"""Return a list of all pedestrian observation combinations (in string format) for a vehicle under the 4 location scheme"""
res = []
lsts = all_ped_combos_lsts(num_locs, val_set)
for lst in lsts:
res.append(" ".join(lst))
return res
| 5,347,193 |
def create_image(
image_request: ImageRequest,
background_tasks: BackgroundTasks,
db: Session = Depends(get_db),
):
"""
    (2) add a database record
    (3) give background_tasks a reference to the image record
"""
image = Images()
image.url = image_request.image
# image.output = "4" # <<< image_request.output
db.add(image)
db.commit()
background_tasks.add_task(predict_image, image.id)
return {
"code": "success",
"message": "image added",
}
| 5,347,194 |
def test_list_policies_freebsd():
"""
Test if it return a dictionary of policies nested by vhost
and name based on the data returned from rabbitmqctl list_policies.
"""
mock_run = MagicMock(
return_value={"retcode": 0, "stdout": "saltstack", "stderr": ""}
)
mock_pkg = MagicMock(return_value="3.7")
with patch.dict(
rabbitmq.__salt__, {"cmd.run_all": mock_run, "pkg.version": mock_pkg}
), patch.dict(rabbitmq.__grains__, {"os_family": "FreeBSD"}):
assert rabbitmq.list_policies() == {}
| 5,347,195 |
def get_key(rule_tracker, value):
"""
Given an event index, its corresponding key from the dictionary is returned.
Parameters:
rule_tracker (dict): Key-value pairs specific to a rule where key is an activity, pair is an event index
value (int): Index of event in event log
Returns:
key (int): Position of value in rule_tracker
"""
for key in rule_tracker:
if rule_tracker[key] == value:
return key
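
# Hedged usage sketch (hypothetical rule_tracker, not from the original source):
# reverse lookup of the activity that maps to a given event index.
rule_tracker = {"register": 0, "approve": 3, "archive": 7}
assert get_key(rule_tracker, 3) == "approve"
assert get_key(rule_tracker, 99) is None  # unknown indices fall through and return None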
| 5,347,196 |
def get_cxml(filename):
""" Create and return CXML object from File or LocalCache """
cxml = Cxml(filename)
return cxml
| 5,347,197 |
def save_layer(index, settings) -> Action:
"""Action to save layer settings"""
return {"kind": SAVE_LAYER, "payload": {"index": index, "settings": settings}}
| 5,347,198 |
def csvProcessing(args):
"""iterate through mudviz/plain dirs, collecting the associated usernames and cvs file paths"""
groupPath1 = args['dir']+'/mudviz'
groupPath2 = args['dir']+'/plain'
#avoid potential user-input trouble with extra forward slashes
if args['dir'][-1] == '/':
groupPath1 = args['dir'][:-1]+'/mudviz'
groupPath2 = args['dir'][:-1]+'/plain'
mudviz_paths = pathCollection(groupPath1)
plain_paths = pathCollection(groupPath2)
#printPathDict(plain_paths)
#somewhat arbitrary dict is used to simplify passing the groupname (mudviz/plain) to function
dirDict = {"mudviz":[mudviz_paths,groupPath1], "plain":[plain_paths,groupPath2]}
#printPathDict(mudviz_paths)
for directory in dirDict:
path_dict = dirDict[directory][0]
groupPath = dirDict[directory][1]
for user in path_dict:
#merge all user csv files into single user.csv file
path_dict[user] = csvMerge(directory, user, path_dict[user], groupPath) #NOTE: dict value is now a string, not a list
#add the headers/values columns
csvPrepend(directory, user, path_dict[user])
for directory in dirDict:
path_dict = dirDict[directory][0]
groupPath = dirDict[directory][1]
#merge user.csv's into a single group.csv for each group
print(csvMergeGroup(directory, path_dict, groupPath))
| 5,347,199 |