| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def collect_targets_from_attrs(rule_attrs, attrs):
"""Returns a list of targets from the given attributes."""
result = []
for attr_name in attrs:
_collect_target_from_attr(rule_attrs, attr_name, result)
return [target for target in result if is_valid_aspect_target(target)]
| 5,348,900 |
def quit(*modules):
"""Quit the given module."""
if len(modules) == 0:
modules = (sdl2.SDL_INIT_EVERYTHING,)
for module in modules:
try:
sdl2.SDL_QuitSubSystem(module)
except Exception:
pass
| 5,348,901 |
def extract_codes(text: str) -> Tuple[str, ...]:
"""Extract names of warnings from full warning text."""
match = CODES_PAT.search(text)
if not match:
raise ValueError("No warning code found")
return tuple(match.group(1).split(","))
| 5,348,902 |
def task_list(request, pk):
"""
View to get task list based on user list for forms
"""
user_model = User.objects.filter(is_staff=False)
task_model = Task.objects.filter(user=pk)
user_detail = User.objects.get(pk=pk)
query = request.GET.get('q')
if query:
task_model = task_model.filter(
Q(title__icontains=query)
)
return render(request, 'home.html',
{"user_model": user_model, 'task_model': task_model, 'user_detail': user_detail})
| 5,348,903 |
def load_credentials():
"""
load_credentials
:return: dict
"""
with open("credentials.json", "r", encoding="UTF-8") as stream:
content = json.loads(stream.read())
return content
| 5,348,904 |
def get_utm_string_from_sr(spatialreference):
"""
return utm zone string from spatial reference instance
"""
zone_number = spatialreference.GetUTMZone()
if zone_number > 0:
return str(zone_number) + 'N'
elif zone_number < 0:
return str(abs(zone_number)) + 'S'
else:
return str(zone_number)
| 5,348,905 |
def messages_count(name):
"""
Get message count for queue
curl -X GET -H 'Accept: application/json' http://localhost:8080/queues/C13470112/msgs/count
curl -X GET -H 'Accept: application/json' 83.212.127.232:8080/queues/C13470112/msgs/count
"""
conn = get_conn()
queue = conn.get_queue(name)
count = queue.count()
resp = "Queue "+name+" has "+str(count)+" messages\n"
return Response(response=resp, mimetype="application/json")
| 5,348,906 |
def return_intersect(cameraList):
"""
Calculates the intersection of the Camera objects in the *cameraList*.
Function returns an empty Camera if there exists no intersection.
Parameters:
cameraList : *list* of *camera.Camera* objects
A list of cameras from the camera.Camera class, each containing
a *poly* and a *coordsList*.
Returns:
intersectCam : *camera.Camera* object
An object from the camera.Camera class that is the
intersection between all cameras in the cameraList. If there
exists no intersection between any cameras in the cameraList,
an empty Camera will be returned.
"""
intersectCam = None
for camera in cameraList:
if intersectCam is None: # Initiates the intersectCam variable
intersectCam = camera
else:
intersectCam = intersectCam.intersect(camera)
return intersectCam
| 5,348,907 |
def get_returned_attr_set_node(tree):
"""
Get the NODE_ATTR_SET containing the attributes which are returned by the module
"""
# TODO: fix HACK, currently we assume the node containing `imports` is the returned attr set
# but this may not always be the case?
imports_node = get_imports_node(tree)
imports_key_node, _ = [e for e in imports_node.elems if isinstance(e, syntax_tree.Node)]
imports_key_value_node = tree.get_parent(imports_key_node)
returned_attr_set_node = tree.get_parent(imports_key_value_node)
return returned_attr_set_node
| 5,348,908 |
def choose_time_format_method(expression,format):
"""
:Summary: strftime("%s") is not a portable, documented format code in Python,
therefore it works on linux servers but not on windows. To handle this, this function
checks the python version and decides what conversion method to use.
The "format" parameter makes sure that the correct required type is always returned.
"""
# if we are running python3.3 or greater
if(version >= version_3_3):
# if the datetime object is offset aware
if(expression.tzinfo != None):
if(format == "str"):
return str(int(expression.timestamp()))
else:
return int(expression.timestamp())
# else if the datetime object is offset naive
else:
if(format == "str"):
return str(int((expression - datetime(1970, 1, 1)).total_seconds()))
else:
return int((expression - datetime(1970, 1, 1)).total_seconds())
# else if we are running python version lower than python3.3 i.e most linux servers
else:
if(format == "str"):
return expression.strftime("%s")
else:
return int(expression.strftime("%s"))
| 5,348,909 |
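A hedged usage sketch for choose_time_format_method above. The function relies on module-level version / version_3_3 values (not shown), so this only illustrates the call shape and the kind of value returned.

from datetime import datetime, timezone

aware = datetime(2021, 6, 1, 12, 0, tzinfo=timezone.utc)
naive = datetime(2021, 6, 1, 12, 0)
print(choose_time_format_method(aware, "str"))  # e.g. "1622548800" (epoch seconds as a string)
print(choose_time_format_method(naive, "int"))  # e.g. 1622548800 (epoch seconds as an int)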
def import_measurements(task, subject, gsrn, session):
"""
Imports measurements for a single MeteringPoint, and starts a
start_submit_measurement_pipeline() pipeline for each of the newly
imported measurements.
:param celery.Task task:
:param str subject:
:param str gsrn:
:param sqlalchemy.orm.Session session:
"""
__log_extra = {
'gsrn': gsrn,
'subject': subject,
'pipeline': 'import_measurements',
'task': 'import_measurements',
}
@atomic
def __import_measurements(session):
"""
Import and save to DB as an atomic operation
"""
return importer.import_measurements_for(meteringpoint, session)
# Load MeteringPoint from DB
try:
meteringpoint = MeteringPointQuery(session) \
.is_active() \
.has_gsrn(gsrn) \
.one()
except orm.exc.NoResultFound:
raise
except Exception as e:
raise task.retry(exc=e)
# Import measurements into DB
try:
measurements = __import_measurements()
except Exception as e:
logger.exception('Failed to import measurements from ElOverblik, retrying...', extra=__log_extra)
raise task.retry(exc=e)
# Submit each measurement to ledger in parallel
for measurement in measurements:
task = build_submit_measurement_pipeline(
measurement, meteringpoint, session)
task.apply_async()
# if measurements:
# tasks = [
# build_submit_measurement_pipeline(measurement, meteringpoint, session)
# for measurement in measurements
# ]
#
# group(*tasks).apply_async()
| 5,348,910 |
def main():
"""run synergy commands
"""
# args
MODEL_DIR = sys.argv[1]
MODEL = sys.argv[2]
INFER_DIR = sys.argv[3]
OUT_DIR = sys.argv[4]
motifs_file = "{}/motifs.sig/motifs.adjust.diff.rna_filt.dmim/summary/ggr.pwms_patterns_summary.txt".format(
INFER_DIR)
bash_script = "/datasets/software/git/tronn/scripts/ggr/kube/sims.predict.spacing.ggr.bash"
# prediction sample
prediction_sample_dir = "/datasets/inference.2019-02-05/motifs.input_x_grad.background"
# first get the desired motif list
motifs = pd.read_csv(motifs_file, sep="\t", header=0, index_col=0)
motifs = list(motifs.index)
# go through motifs
for motif in motifs:
# set up out dir (but if it exists don't run)
out_dir = "{}/{}".format(OUT_DIR, motif)
if os.path.isdir(out_dir):
continue
os.system("mkdir -p {}".format(out_dir))
# set up input file (grammar file)
pwm_idx = int(motif.split("_")[0].split("-")[1])
grammar_file = "{}/input.gml".format(out_dir)
graph = nx.Graph()
graph.add_node("{}-A".format(motif), pwmidx=pwm_idx)
graph.add_node("{}-B".format(motif), pwmidx=pwm_idx)
nx.write_gml(graph, grammar_file)
# set up run cmd
run_cmd = "{0} {1} {2} {3} {4} {5}".format(
bash_script,
MODEL_DIR,
MODEL,
prediction_sample_dir,
grammar_file,
out_dir)
print(run_cmd)
os.system(run_cmd)
return
| 5,348,911 |
def add_agg_series_to_df(
df: pandas.DataFrame, grouped_levels: List[str], bottom_levels: List[str]
) -> pandas.DataFrame:
"""
Add aggregate series columns to wide dataframe.
Parameters
----------
df : pandas.DataFrame
Wide dataframe containing bottom level series.
grouped_levels : List[str]
Grouped level, underscore delimited, column names.
bottom_levels : List[str]
Bottom level, underscore delimited, column names.
Returns
-------
pandas.DataFrame
Wide dataframe with all series in hierarchy.
"""
component_cols = _get_bl(grouped_levels, bottom_levels)
# Add series as specified grouping levels
for i, cols in enumerate(component_cols):
df[grouped_levels[i]] = df[cols].sum(axis=1)
return df
| 5,348,912 |
def conv_relu_pool_forward(x, w, b, conv_param, pool_param):
"""
Convenience layer that performs a convolution, a ReLU, and a pool.
Inputs:
- x: Input to the convolutional layer
- w, b, conv_param: Weights and parameters for the convolutional layer
- pool_param: Parameters for the pooling layer
Returns a tuple of:
- out: Output from the pooling layer
- cache: Object to give to the backward pass
"""
convOut, conv_cache = layers.conv_forward(x, w, b, conv_param)
reluOut, relu_cache = layers.relu_forward(convOut)
out, pool_cache = layers.max_pool_forward(reluOut, pool_param)
cache = (conv_cache, relu_cache, pool_cache)
return out, cache
| 5,348,913 |
def avg_pixelwise_var(images_seen: np.int16):
"""
Computes the variance for every pixel p across all images, resulting in a matrix holding
the variance for each pixel p, then calculates the average of that variance across all
pixels. This allows us to compensate for different fov sizes.
Note: images are normalized to [-1,1] before calculations
Params
------
images_seen
A numpy matrix holding numpy versions of all of our images
Returns
-------
The average pixelwise variation across all images, as a float
"""
# Computes the variance
images = (images_seen.astype(np.float32) - 127.5) / 127.5 # Normalize to [-1,1]
variance_matrix = np.var(images, 0)
# Returns the average of that variance
return(np.sum(variance_matrix)/variance_matrix.size)
| 5,348,914 |
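A minimal check of avg_pixelwise_var above on synthetic data; the tiny 8-bit image stack is an illustration only.

import numpy as np

# Two fake 2x2 images stacked along axis 0; pixels that differ across images
# contribute variance, constant pixels contribute zero.
images_seen = np.array([
    [[0, 255], [128, 128]],
    [[255, 0], [128, 128]],
], dtype=np.int16)
print(avg_pixelwise_var(images_seen))  # 0.5 for this toy stack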
def test_wrong_patterns_in_values(
code,
first,
second,
assert_errors,
parse_ast_tree,
default_options,
):
"""Testing safe in patterns."""
tree = parse_ast_tree(code.format(first, second))
visitor = ImplicitBoolPatternsVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ImplicitInConditionViolation])
| 5,348,915 |
def main(ip_address: str, count: int = 3):
"""
Ping IP_ADDRESS
"""
status = ping_ip(ip_address, count)
if status:
typer.secho(f"ICMP reply received from address {ip_address}", fg="green")
else:
typer.secho(f"No ICMP reply received from address {ip_address}", fg="red")
| 5,348,916 |
def get_r2_matrix(ts):
"""
Returns the matrix for the specified tree sequence. This is computed
via a straightforward Python algorithm.
"""
n = ts.get_sample_size()
m = ts.get_num_mutations()
A = np.zeros((m, m), dtype=float)
for t1 in ts.trees():
for sA in t1.sites():
assert len(sA.mutations) == 1
mA = sA.mutations[0]
A[sA.id, sA.id] = 1
fA = t1.get_num_samples(mA.node) / n
samples = list(t1.samples(mA.node))
for t2 in ts.trees(tracked_samples=samples):
for sB in t2.sites():
assert len(sB.mutations) == 1
mB = sB.mutations[0]
if sB.position > sA.position:
fB = t2.get_num_samples(mB.node) / n
fAB = t2.get_num_tracked_samples(mB.node) / n
D = fAB - fA * fB
r2 = D * D / (fA * fB * (1 - fA) * (1 - fB))
A[sA.id, sB.id] = r2
A[sB.id, sA.id] = r2
return A
| 5,348,917 |
def load_or_run(filepath, fun, *args, **kwargs):
"""
Load the saved result file if it exists; otherwise compute the result and save it.
ex)
res = load_or_run('file_loadorsave', funlongtime, ...., force=False)
:param filepath:
:param fun:
:param force:
:return:
"""
force = kwargs.pop('force', False)
compress = kwargs.pop('compress', True)
if not filepath.startswith('/') and not filepath.startswith('~'):
filepath = os.path.join('/tmp/snipy/load_or_run/', filepath)
if not force and os.path.exists(filepath):
# load the previously saved result
mmap_mode = 'r+' if not compress else None
return loadfile(filepath, mmap_mode=mmap_mode)
res = fun(*args, **kwargs)
savefile(res, filepath, compress=compress)
return res
| 5,348,918 |
def main():
""" Find the common kmers between the two input files """
args = get_args()
file1 = args.file1.read().split()
file2 = args.file2.read().split()
words1 = count_kmers(file1, args.kmer)
words2 = count_kmers(file2, args.kmer)
common = set(words1).intersection(set(words2))
for word in common:
if len(common) != 0:
print(f'{word:10} {words1.get(word):5} {words2.get(word):5}')
| 5,348,919 |
def init_build_env():
""" Cleans and initializes the build environment. """
BUILD_DIR.rmtree()
if not BUILD_DIR.exists():
BUILD_DIR.makedirs()
if not BUILD_WORK_DIR.exists():
BUILD_WORK_DIR.makedirs()
# FIXME - maybe a full tree not needed?
for subdir in ('RPMS', 'SPECS', 'SOURCES', 'BUILD', 'SRPMS'):
(RPM_TOPDIR / path(subdir)).makedirs()
DIST_DIR.rmtree()
if not DIST_DIR.exists():
DIST_DIR.makedirs()
| 5,348,920 |
def seq2seq_output_ids_to_file(output_ids, trg_vocab, out_file):
"""
Devectorize and Detokenize the translated token ids and write the
translations to a text file
"""
output_tokens = devectorize(output_ids.tolist(),
trg_vocab.id2tok,
trg_vocab.EOS_id,
strip_eos=True,
pp=True)
with open(out_file, "w") as fo:
for sent in output_tokens:
text = trg_vocab.detokenize(sent)
fo.write(text + "\n")
| 5,348,921 |
def omdb_title(
api_key: str,
id_imdb: str = None,
media: str = None,
title: str = None,
season: int = None,
episode: int = None,
year: int = None,
plot: str = None,
cache: bool = True,
) -> dict:
"""
Looks up media by id using the Open Movie Database.
Online docs: http://www.omdbapi.com/#parameters
"""
if (not title and not id_imdb) or (title and id_imdb):
raise MnamerException("either id_imdb or title must be specified")
elif plot and plot not in OMDB_PLOT_TYPES:
raise MnamerException(
"plot must be one of %s" % ",".join(OMDB_PLOT_TYPES)
)
url = "http://www.omdbapi.com"
parameters = {
"apikey": api_key,
"i": id_imdb,
"t": title,
"y": year,
"season": season,
"episode": episode,
"type": media,
"plot": plot,
}
parameters = clean_dict(parameters)
status, content = request_json(url, parameters, cache=cache)
error = content.get("Error") if isinstance(content, dict) else None
if status == 401:
raise MnamerException("invalid API key")
elif status != 200 or not isinstance(content, dict):
raise MnamerNetworkException("OMDb down or unavailable?")
elif error:
raise MnamerNotFoundException(error)
return content
| 5,348,922 |
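A usage sketch for omdb_title above; the API key is a placeholder and the lookup values are arbitrary examples.

# Either id_imdb or title must be given (not both); plot and cache are optional.
result = omdb_title(api_key="YOUR_OMDB_KEY", title="Blade Runner", year=1982)
print(result.get("Title"), result.get("Year"))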
def get_status(addr):
"""Get the current status of a minecraft server.
addr -- server address
Returns an mcstatus object.
"""
server = MinecraftServer.lookup(addr)
try:
return server.status()
except Exception:
return None
| 5,348,923 |
def calculate_losses(estimator, input_fn, labels):
"""Get predictions and losses for samples.
The assumptions are 1) the loss is cross-entropy loss, and 2) user have
specified prediction mode to return predictions, e.g.,
when mode == tf.estimator.ModeKeys.PREDICT, the model function returns
tf.estimator.EstimatorSpec(mode=mode, predictions=tf.nn.softmax(logits)).
Args:
estimator: model to make prediction
input_fn: input function to be used in estimator.predict
labels: array of size (n_samples, ), true labels of samples (integer valued)
Returns:
preds: probability vector of each sample
loss: cross entropy loss of each sample
"""
pred = np.array(list(estimator.predict(input_fn=input_fn)))
loss = log_loss(labels, pred)
return pred, loss
| 5,348,924 |
def text_cleaning(value, stopwords=None):
"""Applies the four cleaning funtions to a value.
Turns value into string, makes lowercase, strips trailing and leading spaces, and removes digits, punctuation, and stopwords
Args:
value (str): string to be cleaned
Returns:
str_out (str): string after cleaning
"""
value = str_lower_strip(value)
value = remove_digits(value)
value = remove_punctuation(value)
value = remove_stopwords(value, stopwords)
str_out = value
return str_out
| 5,348,925 |
def calc_pts_lag(npts=20):
"""
Returns Gauss-Laguerre quadrature points rescaled for line scan integration
Parameters
----------
npts : {15, 20, 25}, optional
The number of points to
Notes
-----
The scale is set internally as the best rescaling for a line scan
integral; it was checked numerically for the allowed npts.
Acceptable pts/scls/approximate line integral scan error:
(pts, scl ) : ERR
------------------------------------
(15, 0.072144) : 0.002193
(20, 0.051532) : 0.001498
(25, 0.043266) : 0.001209
The previous HG(20) error was ~0.13ish
"""
scl = { 15:0.072144,
20:0.051532,
25:0.043266}[npts]
pts0, wts0 = np.polynomial.laguerre.laggauss(npts)
pts = np.sinh(pts0*scl)
wts = scl*wts0*np.cosh(pts0*scl)*np.exp(pts0)
return pts, wts
| 5,348,926 |
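A brief sketch of how the points and weights from calc_pts_lag above might be used: summing wts * f(pts) approximates the integral of f over the positive half-line. The integrand and the analytic reference value are illustrative, not from the original code.

import numpy as np

pts, wts = calc_pts_lag(npts=20)
approx = np.sum(wts * np.exp(-pts**2))  # quadrature estimate of the integral of exp(-t^2) over [0, inf)
print(approx, np.sqrt(np.pi) / 2)       # should land close to the analytic value ~0.8862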
def APPEND(*ext, **kw):
"""Decorator to call XDWAPI with trailing arguments *ext.
N.B. Decorated function must be of the same name as XDWAPI's one.
"""
def deco(api):
@wraps(api)
def func(*args, **kw):
args = list(args)
if "codepage" in kw:
args.append(kw["codepage"])
args.extend(ext)
return TRY(getattr(DLL, api.__name__), *args)
return func
return deco
| 5,348,927 |
def stations_within_radius(stations, centre, r):
"""Returns a list of all stations (type MonitoringStation) within radius r of a geographic coordinate x."""
stations_inside_radius = []
for station, distance in stations_by_distance(stations, centre):
# Check if distance is inside the required radius
if distance < r:
stations_inside_radius.append(station)
# Return the list
return stations_inside_radius
| 5,348,928 |
def manage_topseller(request, template_name="manage/marketing/topseller.html"):
"""
"""
inline = manage_topseller_inline(request, as_string=True)
# amount options
amount_options = []
for value in (10, 25, 50, 100):
amount_options.append({
"value": value,
"selected": value == request.session.get("topseller-amount")
})
return render_to_string(template_name, request=request, context={
"topseller_inline": inline,
"amount_options": amount_options,
})
| 5,348,929 |
def create_stabil_mat(problem):
"""Using the stabilization material stub make it the true material."""
from sfepy.base.base import dict_to_struct, debug
from sfepy.fem.functions import Function
# Identity map...
ns = {'p' : 'p', 'q' : 'q',
'u' : 'u', 'b' : 'b', 'v' : 'v',
'fluid' : 'fluid', 'omega' : 'omega', 'i1' : 'i1', 'i2' : 'i2'}
variables = problem.get_variables()
# Indices to the state vector.
ii = {}
ii['u'] = variables.get_indx(ns['u'])
ii['us'] = variables.get_indx(ns['u'], stripped=True)
ii['ps'] = variables.get_indx(ns['p'], stripped=True)
stabil_mat = problem.materials['stabil']
stabil = dict_to_struct(stabil_mat.datas['special'], flag=(1,))
# The viscosity.
fluid_mat = problem.materials[ns['fluid']]
viscosity = fluid_mat.function()['viscosity']
# The Friedrich's constant.
c_friedrichs = problem.domain.get_diameter()
sigma = 1e-12 # 1 / dt.
# Element diameter modes.
diameter_modes = {'edge' : 0, 'volume' : 1, 'max' : 2}
def mat_fun(ts, coor, mode=None, region=None, ig=None,
b_norm=1.0):
if mode != 'qp': return
print('|b|_max (mat_fun):', b_norm)
gamma = viscosity + b_norm * c_friedrichs
data = {}
if stabil.gamma is None:
data['gamma'] = stabil.gamma_mul * gamma
else:
data['gamma'] = nm.asarray( stabil.gamma_mul * stabil.gamma,
dtype = nm.float64 )
data['gamma'] = nm.tile(data['gamma'], (coor.shape[0], 1, 1))
if stabil.delta is None:
term = problem.equations['balance'].terms['dw_lin_convect']
for ig in term.iter_groups():
# This sets term.ig - for 1 group only!!!
break
var = variables[ns['u']]
ap, vg = term.get_approximation(var)
delta = 1.0
mode = diameter_modes[stabil.diameter_mode]
cells = region.get_cells( ig )
diameters2 = problem.domain.get_element_diameters( ig, cells, vg,
mode )
val1 = min( 1.0, 1.0 / sigma )
val2 = sigma * c_friedrichs**2
val3 = (b_norm**2) * min( (c_friedrichs**2) / viscosity, 1.0 / sigma )
# print val1, gamma, val2, val3
delta = stabil.delta_mul * val1 * diameters2 / (gamma + val2 + val3)
n_qp = coor.shape[0] / diameters2.shape[0]
data['diameters2'] = nm.repeat(diameters2, n_qp)
data['diameters2'].shape = data['diameters2'].shape + (1, 1)
data['delta'] = nm.repeat(delta, n_qp)
data['delta'].shape = data['delta'].shape + (1, 1)
else:
val = stabil.delta_mul * stabil.delta
data['delta'] = nm.tile(val, (coor.shape[0], 1, 1))
if stabil.tau is None:
data['tau'] = stabil.tau_red * data['delta']
else:
data['tau'] = nm.asarray( stabil.tau_mul * stabil.tau,
dtype = nm.float64 )
data['tau'] = nm.tile(data['tau'], (coor.shape[0], 1, 1))
return data
stabil_mat.set_function(Function('stabil', mat_fun))
return stabil_mat, ns, ii
| 5,348,930 |
def embedded_services(request: FixtureRequest) -> Optional[str]:
"""
Enable parametrization for the same cli option
"""
return getattr(request, 'param', None) or request.config.getoption('embedded_services', None)
| 5,348,931 |
def vigenere(plaintext,cypher):
"""Implementation of vigenere cypher"""
i = 0
cyphertext = ""
for character in plaintext:
n = ord(cypher[i%len(cypher)].lower())-97
new_char = rot_char(character, n)
cyphertext += new_char
if new_char != ' ':
i += 1
return cyphertext
| 5,348,932 |
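A sketch exercising vigenere above. The rot_char helper it calls is not shown in the snippet, so the stand-in below is an assumed implementation (rotate lowercase letters, pass spaces through); with that assumption the classic test vector comes out as expected.

# Assumed stand-in for the module's own rot_char (not shown above).
def rot_char(character, n):
    if character == ' ':
        return ' '
    return chr((ord(character.lower()) - 97 + n) % 26 + 97)

print(vigenere("attack at dawn", "lemon"))  # -> "lxfopv ef rnhr" with this rot_char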
def get_local_unit_slip_vector_SS(strike, dip, rake):
"""
Compute the STRIKE SLIP components of a unit slip vector.
Args:
strike (float): Clockwise angle (deg) from north of the line at the
intersection of the rupture plane and the horizontal plane.
dip (float): Angle (degrees) between rupture plane and the horizontal
plane normal to the strike (0-90 using right hand rule).
rake (float): Direction of motion of the hanging wall relative to the
foot wall, as measured by the angle (deg) from the strike vector.
Returns:
Vector: Unit slip vector in 'local' N-S, E-W, U-D coordinates.
"""
strike = np.radians(strike)
dip = np.radians(dip)
rake = np.radians(rake)
sx = np.cos(rake) * np.sin(strike)
sy = np.cos(rake) * np.cos(strike)
sz = 0.0
return Vector(sx, sy, sz)
| 5,348,933 |
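A quick sanity check of the formula in get_local_unit_slip_vector_SS above: pure strike-slip motion (rake 0 deg) on a north-striking vertical fault should give a unit vector pointing due north. Vector comes from the surrounding package, so this is only an illustrative call.

v = get_local_unit_slip_vector_SS(strike=0.0, dip=90.0, rake=0.0)
# Expected components: sx = cos(0)*sin(0) = 0, sy = cos(0)*cos(0) = 1, sz = 0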
def caWaitStable(pvs, values, vallo, valhi, **kwargs):
"""read pvs and wait until the std less than epsilon
Parameters
-----------
pvs : list or tuple. A list of PVs
values : list or tuple. Same size as elemfld
vallo : list or tuple with low boundary values.
valhi : list or tuple with high boundary values.
sample : int, optional, default 3, averaged over to compare
timeout : int, optional, default 5, in seconds.
dt : float, default 0.1 second. waiting between each check.
verbose : int.
Examples
---------
>>> cors = getElements("COR")
>>> pvs = [cors[0].pv(field='x', handle="readback")[0], ]
>>> cors[0].x = 0
>>> waitReadback(pvs, [0.0, ], [-0.001,], [0.001,])
"""
nsample = kwargs.pop("sample", 3)
dt = kwargs.pop("dt", 0.2)
verbose = kwargs.get("verbose", 0)
n, t0 = len(pvs), datetime.now()
buf = np.zeros((nsample, n), 'd')
iloop = 0
while True:
for i in range(nsample):
# delay a bit
time.sleep(dt/(nsample+1.0))
buf[i,:] = caget(pvs, **kwargs)
avg = np.average(buf, axis=0)
#if verbose > 0:
# print "V:", avg
# print vallo
# print valhi
if all([vallo[i] <= avg[i] <= valhi[i] for i in range(n)]):
break
t1 = datetime.now()
if (t1 - t0).total_seconds() > kwargs.get("timeout", CA_TIMEOUT):
vdiff = [avg[i] - values[i] for i in range(n)]
raise RuntimeError("Timeout, tried {0} times, pv={1} "
"avg_vals= {2} lo= {3} hi={4}\n"
"above: {5}\nbelow: {6}".format(
iloop, pvs, avg, vallo, valhi, avg-vallo, valhi-avg))
iloop = iloop + 1
time.sleep(dt)
| 5,348,934 |
def get_country_code(country_name):
"""Gets the code of the country given its name"""
for code, name in COUNTRIES.items():
if name == country_name:
return code
| 5,348,935 |
def build_model(cfg, train_cfg=None, test_cfg=None):
"""Build model."""
if train_cfg is None and test_cfg is None:
return build(cfg, MODELS)
else:
return build(cfg, MODELS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
| 5,348,936 |
def send_plot_convergence(results, experiment=None, channel_name='convergence'):
"""Logs skopt plot_convergence figure to neptune.
Image channel `convergence` is created and the output of the
plot_convergence function is first converted to `neptune.Image` and
then sent to neptune.
Args:
results('scipy.optimize.OptimizeResult'): Results object that is typically an
output of the function like `skopt.forest_minimize(...)`
experiment(`neptune.experiments.Experiment`): Neptune experiment. Default is None.
Examples:
Run skopt training::
...
results = skopt.forest_minimize(objective, space,
base_estimator='ET', n_calls=100, n_random_starts=10)
Send skopt plot_convergence figure to neptune::
import neptune
import neptunecontrib.monitoring.skopt as sk_utils
neptune.init(project_qualified_name='USER_NAME/PROJECT_NAME')
sk_monitor.send_plot_convergence(results)
"""
_exp = experiment if experiment else neptune
fig, ax = plt.subplots(figsize=(16, 12))
sk_plots.plot_convergence(results, ax=ax)
with tempfile.NamedTemporaryFile(suffix='.png') as f:
fig.savefig(f.name)
_exp.send_image(channel_name, f.name)
| 5,348,937 |
def find_instruction_type(opcode: str) -> InstructionType:
"""Finds instruction type for object instruction
Parameters
----------
opcode : str
opcode of instruction in hex
Returns
-------
InstructionType
type of instruction using InstructionType enum
"""
# R type instructions always have opcode = 00
if opcode == "00":
i_type = InstructionType.R
# I type instructions have opcode > 03
elif opcode > "03":
i_type = InstructionType.I
else:
raise ValueError(f"Unrecognized opcode: {opcode}")
return i_type
| 5,348,938 |
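A small usage sketch for find_instruction_type above; InstructionType is assumed to be the enum referenced in the snippet.

print(find_instruction_type("00"))  # InstructionType.R
print(find_instruction_type("23"))  # InstructionType.I (e.g. a MIPS lw opcode)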
def gen_tier_id(inst, id_base, tier_type=None, alignment=None, no_hyphenate=False):
"""
Unified method to generate a tier ID string. (See: https://github.com/goodmami/xigt/wiki/Conventions)
"""
# In order to number this item correctly, we need to decide how many tiers of the same type
# there are. This is done by systematically adding filters to the list.
filters = []
# First, do we align with another item? (Either segmentation, alignment, or head/dep)
if alignment is not None:
filters.append(lambda x: aln_match(alignment)(x) or seg_match(alignment)(x) or ref_match(x, alignment, DS_HEAD_ATTRIBUTE))
# Next, does the type match ours?
if tier_type is not None:
filters.append(type_match(tier_type))
# Get the number of tiers that match this.
if not filters:
prev_tiers = []
num_tiers = 0
else:
prev_tiers = inst.findall(others=filters)
num_tiers = len(prev_tiers)
id_str = id_base
# Now, if we have specified the alignment, we also want to prepend
# that to the generated id string.
if alignment is not None:
if no_hyphenate:
return '{}{}'.format(alignment, id_str)
else:
id_str = '{}-{}'.format(alignment, id_str)
# Finally, if we have multiple tiers of the same type that annotate the
# same item, we should append a letter for the different analyses.
if num_tiers > 0 and inst.find(id=id_str) is not None:
while True:
letters = string.ascii_lowercase
assert num_tiers < 26, "More than 26 alternative analyses not currently supported"
potential_id = id_str + '_{}'.format(letters[num_tiers])
if inst.find(id=potential_id) is None:
id_str = potential_id
break
else:
num_tiers += 1
return id_str
| 5,348,939 |
def create_temporary_file(filename, contents=""):
""" Decorator for constructing a file which is available
during a single test and is deleted afterwards.
Example usage::
@grader.test
@create_temporary_file('hello.txt', 'Hello world!')
def hook_test(m):
with open('hello.txt') as file:
txt = file.read()
"""
def _inner(test_function):
before_test(create_file(filename, contents))(test_function)
after_test(delete_file(filename))(test_function)
return test_function
return _inner
| 5,348,940 |
def find_post_translational_modifications(filter=None, page=0, pageSize=100): # noqa: E501
"""Find values for an specific property, for example possible taxonomy values for Organism property
# noqa: E501
:param filter: Keyword to filter the list of possible values
:type filter: str
:param page: Number of the page with the possible values for the property
:type page: int
:param pageSize: Number of values with the possible values for the property
:type pageSize: int
:rtype: List[PostTranslationalModification]
"""
unimod_database = UnimodDatabase()
l = unimod_database.search_mods_by_keyword(keyword=filter)
list_found = l[(page * pageSize):(page * pageSize) + pageSize]
return list_found
| 5,348,941 |
def random_vector(A, b):
"""
Generates a random vector satisfying Ax <= b through rejection
sampling.
"""
dimension = A.shape[1]
not_feasible = True
while not_feasible == True:
config.reject_counter = config.reject_counter + 1
if config.reject_counter == config.milestone:
config.milestone = config.milestone * 10
print(config.reject_counter, 'random vectors have been generated so far')
rand_vec = np.random.uniform(-0.5, 0.5, dimension)
if np.all(np.dot(A, rand_vec) <= b) == True:
not_feasible = False
return rand_vec
| 5,348,942 |
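A usage sketch for random_vector above. The function mutates a module-level config object (reject_counter, milestone), so the setup below is an assumption made only so the example is self-contained.

import numpy as np

config.reject_counter = 0   # assumed module-level settings object used by random_vector
config.milestone = 10
# Constrain both coordinates to be <= 0.25; candidates are drawn uniformly from [-0.5, 0.5]^2.
A = np.array([[1.0, 0.0], [0.0, 1.0]])
b = np.array([0.25, 0.25])
x = random_vector(A, b)
assert np.all(np.dot(A, x) <= b)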
def mapi(mapper: Callable[[TSource, int], TResult]) -> Projection[TSource, TResult]:
"""Returns an observable sequence whose elements are the result of
invoking the mapper function and incorporating the element's index
on each element of the source."""
from .transform import mapi
return mapi(mapper)
| 5,348,943 |
def game_to_screen(position):
"""
Converts coordinates from game view into screen coordinates for mouse interaction
"""
return (GAME_LEFT + position[0], GAME_TOP + position[1])
| 5,348,944 |
def get_graph_params(filename, nsize=1):
"""Load and process graph adjacency matrix and upsampling/downsampling matrices."""
data = np.load(filename, encoding='latin1')
A = data['A']
U = data['U']
D = data['D']
U, D = scipy_to_pytorch(A, U, D)
A = [adjmat_sparse(a, nsize=nsize) for a in A]
return A, U, D
| 5,348,945 |
def register_task(kind, task_class, override=False):
"""
Register a new task implementation with the execution system
"""
if kind in _task_registry and not override:
raise KeyError('Task of type %s is already defined and '
'override is False' % kind)
_task_registry[kind] = task_class
| 5,348,946 |
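A minimal sketch of registering a task class with register_task above; DemoTask is a hypothetical placeholder class.

class DemoTask:
    """Hypothetical task implementation used only for illustration."""
    def run(self):
        return "done"

register_task("demo", DemoTask)
register_task("demo", DemoTask, override=True)  # re-registering the same kind requires override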
async def list_trainers_pokemon(message):
"""Lists all Pokemon caught by a trainer
Command from discord: p!ka list
p!ka - Bot command prefix
list - Specifies to list all Pokemon caught by trainer
"""
await list_pokemon(message.message, registered_trainers, sqlite_conn)
| 5,348,947 |
def _(dbmodel, backend):
"""
get_backend_entity for DummyModel DbComputer.
DummyModel instances are created when QueryBuilder queries the Django backend.
"""
from . import computers
djcomputer_instance = djmodels.DbComputer(
id=dbmodel.id,
uuid=dbmodel.uuid,
name=dbmodel.name,
hostname=dbmodel.hostname,
description=dbmodel.description,
transport_type=dbmodel.transport_type,
scheduler_type=dbmodel.scheduler_type,
metadata=dbmodel.metadata
)
return computers.DjangoComputer.from_dbmodel(djcomputer_instance, backend)
| 5,348,948 |
def persist(key, value):
""" Write into ini file """
parser.set(SECTION, key, value)
with open(INI_FILE, 'w') as f:
parser.write(f)
| 5,348,949 |
def retrieve(filename, conf, return_format='dict', save_to_local=False, delete_remote=False, timeout=60):
"""Retrieving Processed Session File from server via sFTP
1. Get xml file string from server and return object
2. If save_to_local, save to local file system
Args:
filename: filename of file in outbound folder at remote server with '.asc' as extension.
conf: An instance of utils.Configuration.
return_format: Return format. The default is ‘dict’. Could be one of ‘dict’, ‘object’ or ‘xml’.
save_to_local: whether save file to local. default is false.
delete_remote: If delete the remote file after download. The default is False
timeout: Timeout in second for ssh connection for sftp.
Returns:
response XML in desired format.
Raises:
Exception depends on when get it.
"""
if not isinstance(conf, utils.Configuration):
raise utils.VantivException('conf must be an instance of utils.Configuration')
if not isinstance(filename, six.string_types) or len(filename) < 4:
raise utils.VantivException('filename must be a string, and at least 4 chars')
if not isinstance(timeout, six.integer_types) or timeout < 0:
raise utils.VantivException('timeout must be an positive int')
response_xml = _get_file_str_from_sftp(filename, conf, delete_remote, timeout)
if save_to_local:
_save_str_file(response_xml, conf.batch_response_path, filename)
return _generate_response(response_xml, return_format, conf)
| 5,348,950 |
def test_Minimal():
"""Test the Minimal strategy."""
# Try to load the confounds, whithout PCA reduction
conf = lc.Minimal()
assert conf.strategy == ["high_pass", "motion", "wm_csf", "non_steady_state"]
assert hasattr(conf, "global_signal") == False
conf.load(file_confounds)
assert isinstance(conf.confounds_, pd.DataFrame)
# Check that all model categories have been successfully loaded
list_check = [
"trans_y",
"trans_z",
"rot_z",
"cosine00",
"csf",
"white_matter",
]
for check in list_check:
assert check in conf.confounds_.columns
# make sure global signal works
conf = lc.Minimal(global_signal="basic")
assert conf.strategy == ["high_pass", "motion", "wm_csf", "non_steady_state", "global"]
assert conf.global_signal == "basic"
| 5,348,951 |
def get_simulate_func_options(
params,
options,
method="n_step_ahead_with_sampling",
df=None,
n_simulation_periods=None,
):
"""Rewrite respy's get_simulation_function such that options can be passed
and therefore the seed can be changed before any run. Documentation is adapted
from :func:`respy.simulate.get_simulate_func()`
Parameters
----------
params : pandas.DataFrame
DataFrame containing the model parameters.
options : dict
Dictionary containing the model options.
method : {"n_step_ahead_with_sampling", "n_step_ahead_with_data", "one_step_ahead"}
The simulation method which can be one of three and is explained in more detail
in :func:`respy.simulate.simulate()`.
df : pandas.DataFrame or None, default None
DataFrame containing one or multiple observations per individual.
n_simulation_periods : int or None, default None
Simulate data for a number of periods. This options does not affect
``options["n_periods"]`` which controls the number of periods for which decision
rules are computed.
Returns
-------
simulate_function : :func:`simulate`
Simulation function where all arguments except the parameter vector
and the options are set.
"""
optim_paras, options = process_params_and_options(params, options)
n_simulation_periods, options = _harmonize_simulation_arguments(
method,
df,
n_simulation_periods,
options,
)
df = _process_input_df_for_simulation(df, method, options, optim_paras)
solve = get_solve_func(params, options)
n_observations = (
df.shape[0]
if method == "one_step_ahead"
else df.shape[0] * n_simulation_periods
)
shape = (n_observations, len(optim_paras["choices"]))
base_draws_sim = create_base_draws(
shape,
next(options["simulation_seed_startup"]),
"random",
)
base_draws_wage = create_base_draws(
shape,
next(options["simulation_seed_startup"]),
"random",
)
simulate_function = partial(
simulate,
base_draws_sim=base_draws_sim,
base_draws_wage=base_draws_wage,
df=df,
method=method,
n_simulation_periods=n_simulation_periods,
solve=solve,
)
return simulate_function
| 5,348,952 |
def read_xspec_log_files(es_dir, out_rel_name, boot_num=2):
"""
Read in all XSPEC log files (with chatter set to 4) that were generated in
sed_fit_bootstrap.sh, and append each bootstrap iteration's values to its
sed_pars.Parameter.
Parameters
----------
es_dir : str
The directory with all the energy spectroscopy files from
sed_fit_bootstrap.sh.
out_rel_name : str
The relative (i.e. local) name for the output files.
boot_num : int
Number of bootstrap iterations implemented.
Returns
-------
var_pars : np.array of sed_pars.Parameter()
2-D array of the SED parameters that vary with QPO phase, over
all the bootstrap iterations.
"""
all_par_vals = np.zeros((N_SPECTRA, N_PARAMS))
for i in range(1, boot_num + 1):
# for i in range(1, 10):
boot_log_file = es_dir + "/" + out_rel_name + "_b-" + str(i) + \
"_xspec.log"
# print log_file
if os.path.isfile(boot_log_file):
par_vals = xspec_boot_log_to_array(boot_log_file)
# print "Shape of par vals:", np.shape(par_vals)
all_par_vals = np.dstack((all_par_vals, par_vals))
else:
pass
# print "All par vals:", np.shape(all_par_vals)
all_par_vals = all_par_vals[:,:,1:]
# print "All par vals:", np.shape(all_par_vals)
good_boots = np.shape(all_par_vals)[-1]
# print "Good boots:", good_boots
n_varpars = 0
delete_index = []
for j in range(N_PARAMS):
if not check_equal(all_par_vals[:,j,0].flatten()):
n_varpars += 1
else:
delete_index.append(j)
# print "Shape all par vals:", np.shape(all_par_vals)
untied_varpar_vals = np.delete(all_par_vals, delete_index, axis=1)
# print "Shape untied par vals:", np.shape(untied_varpar_vals)
# print untied_varpar_vals
return untied_varpar_vals, n_varpars, good_boots
| 5,348,953 |
def transformLimits(*args, **kwargs):
"""
The transformLimits command allows us to set, edit, or query the limits of the transformation that can be applied to
objects. We can also turn any limits off which may have been previously set. When an object is first created, all the
transformation limits are off by default.Transformation limits allow us to control how much an object can be
transformed. This is most useful for joints, although it can be used any place we would like to limit the movement of an
object.Default values are:( -1, 1) for translation, ( -1, 1) for scaling, and (-45,45) for rotation. In query mode,
return type is based on queried flag.
Flags:
- enableRotationX : erx (bool, bool) [query]
enable/disable the lower and upper x-rotation limitsWhen queried, it returns boolean boolean
- enableRotationY : ery (bool, bool) [query]
enable/disable the lower and upper y-rotation limitsWhen queried, it returns boolean boolean
- enableRotationZ : erz (bool, bool) [query]
enable/disable the lower and upper z-rotation limitsWhen queried, it returns boolean boolean
- enableScaleX : esx (bool, bool) [query]
enable/disable the lower and upper x-scale limitsWhen queried, it returns boolean boolean
- enableScaleY : esy (bool, bool) [query]
enable/disable the lower and upper y-scale limitsWhen queried, it returns boolean boolean
- enableScaleZ : esz (bool, bool) [query]
enable/disable the lower and upper z-scale limitsWhen queried, it returns boolean booleanFlag can have multiple
arguments, passed either as a tuple or a list.
- enableTranslationX : etx (bool, bool) [query]
enable/disable the ower and upper x-translation limitsWhen queried, it returns boolean boolean
- enableTranslationY : ety (bool, bool) [query]
enable/disable the lower and upper y-translation limitsWhen queried, it returns boolean boolean
- enableTranslationZ : etz (bool, bool) [query]
enable/disable the lower and upper z-translation limitsWhen queried, it returns boolean boolean
- remove : rm (bool) [create]
turn all the limits off and reset them to their default values
- rotationX : rx (float, float) [query]
set the lower and upper x-rotation limitsWhen queried, it returns angle angle
- rotationY : ry (float, float) [query]
set the lower and upper y-rotation limitsWhen queried, it returns angle angle
- rotationZ : rz (float, float) [query]
set the lower and upper z-rotation limitsWhen queried, it returns angle angle
- scaleX : sx (float, float) [query]
set the lower and upper x-scale limitsWhen queried, it returns float float
- scaleY : sy (float, float) [query]
set the lower and upper y-scale limitsWhen queried, it returns float float
- scaleZ : sz (float, float) [query]
set the lower and upper z-scale limitsWhen queried, it returns float float
- translationX : tx (float, float) [query]
set the lower and upper x-translation limitsWhen queried, it returns linear linear
- translationY : ty (float, float) [query]
set the lower and upper y-translation limitsWhen queried, it returns linear linear
- translationZ : tz (float, float) [query]
set the lower and upper z-translation limitsWhen queried, it returns linear linear
Derived from mel command `maya.cmds.transformLimits`
"""
pass
| 5,348,954 |
def get_host_user_and_ssh_key_path(instance_name, project, zone):
"""Return a tuple of (hostname, username and ssh_key_path)."""
output = api.local(
'gcloud compute ssh --project "%s" --zone "%s" %s --dry-run' %
(project, zone, instance_name),
capture=True)
print(output)
m = re.match('/usr/bin/ssh .*-i ([^ ]+)(?: -o [^ ]+)* ([^ ]+)@([^ ]+)',
output)
return (m.group(3), m.group(2), m.group(1))
| 5,348,955 |
def TDataStd_TreeNode_Find(*args):
"""
* class methods working on the node =================================== Returns true if the tree node T is found on the label L. Otherwise, false is returned.
:param L:
:type L: TDF_Label &
:param T:
:type T: Handle_TDataStd_TreeNode &
:rtype: bool
"""
return _TDataStd.TDataStd_TreeNode_Find(*args)
| 5,348,956 |
def make_group_corr_mat(df):
"""
This function reads in each subject's aal roi time series files and creates roi-roi correlation matrices
for each subject and then stacks them all together. The final output is a 3d matrix of all subjects'
roi-roi correlations, a mean roi-roi correlation matrix and a roi-roi variance matrix.
**NOTE WELL** This returns correlations transformed by the Fisher z, aka arctanh, function.
"""
# for each subject do the following
for i, (sub, f_id) in enumerate(df[['SUB_ID', 'FILE_ID']].values):
#read each subjects aal roi time series files
ts_df = pd.read_table('DATA/{}_rois_aal.1D'.format(f_id))
#create a correlation matrix from the roi all time series files
corr_mat_r = ts_df.corr()
#the correlations need to be transformed to Fisher z, which is
#equivalent to the arctanh function.
corr_mat_z = np.arctanh(corr_mat_r)
#for the first subject, add a correlation matrix of zeros that is the same dimensions as the aal roi-roi matrix
if i == 0:
all_corr_mat = np.zeros([corr_mat_z.shape[0], corr_mat_z.shape[1], len(df)])
#now add the correlation matrix you just created for each subject to the all_corr_mat matrix (3D)
all_corr_mat[:, :, i] = corr_mat_z
#create the mean correlation matrix (ignore nas - sometimes there are some...)
av_corr_mat = np.nanmean(all_corr_mat, axis=2)
#create the group variance matrix (ignore nas - sometimes there are some...)
var_corr_mat = np.nanvar(all_corr_mat, axis=2)
return all_corr_mat, av_corr_mat, var_corr_mat
| 5,348,957 |
def get_tradedate(begin, end):
"""
get tradedate between begin date and end date
Params:
begin:
str,eg: '1999-01-01'
end:
str,eg: '2017-12-31'
Return:
pd.DataFrame
"""
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
query = "SELECT calendar_date FROM trade_calendar WHERE is_trade_day= 1 AND \
calendar_date>='" + begin + "' AND calendar_date<='" + end + "';"
cursor.execute(query)
date = pd.DataFrame(list(cursor.fetchall()))
date.columns = ['date']
date = pd.DataFrame(pd.to_datetime(date['date']))
return date
finally:
if conn:
conn.close()
| 5,348,958 |
def collect_tweet(update: Update, context: CallbackContext) -> int:
"""Tweet caption collection for tweet without attachments"""
logger.info(f"'{update.message.text}' tweet type selected")
update.message.reply_text("Enter the tweet")
return TWEET
| 5,348,959 |
def get_unhandled_crictical_errors(req:HttpRequest, n:int):
"""
Preprocess errors before injection
and gets `n` unhandled errors
Typical Return Value if `n` errors were found...
{"es":[
{
"id": "192.168.1.51",
"title":"hey there"
}
]
}
"""
errors:Dict = {} # define return object
# get the neccessary errors
errors_query = Error.objects.filter(isHandled=False, ecode__lt=2000, ecode__gt=1000)[:n]
es_tmp:list = []
for error in errors_query:
e_tmp: dict = {}
victim = None
# e_tmp["id"] = error.victim
code = error.ecode
# if ecode > 1500, then it belongs to the child
if code > 1500 and code < 2000:
victim = Child.objects.filter(ip=error.victim).first()
e_tmp["id"] = victim.nickname or victim.ip
# if not then belongs to smart task
elif code > 1000 and code < 1500:
e_tmp["id"] = STask.objects.filter(sid=error.victim).first().name
# rarely error record may be corrupted
else:
raise Exception(f"Given ecode {error.ecode} in the error {error.eid} obj is invalid")
e_tmp["title"] = get_error_title(error.ecode)
es_tmp.append(e_tmp)
del e_tmp, victim
# compile the return object
errors["es"] = es_tmp
del es_tmp
return JsonResponse(errors)
| 5,348,960 |
def ADO_mappings(N, K, level_cutoff):
"""
ADO (auxiliary density operators) are indexed by a N by (K + 1) matrix
consisting of non-negative integers.
ADO_mappings calculates all possible matrices "ado_index" of size
N by (K+1) where np.sum(m) < level_cutoff
Parameters
----------
N : integer
number of states
K : integer
number of exponentials to include in the spectral density
correlation function
level_cutoff : integer
number of levels at which to terminate the hierarchy expansion
Returns
-------
ind_to_mat : list of matrices
maps index to np.array
mat_to_ind : function
maps the np.array to the index
---------------------------------------------------------------------------
Define S to be the set of all matrices of size N by (K + 1) with
non-negative integer values.
Define level L_i as:
L_i = {m \in S | np.sum(m) == i}
L_i can be found using the multichoose function. We will preserve the order
that multichoose uses in ordering L_i
L_i corresponds to the set of ADOs in the ith hierarchy.
L_0 is a singleton set, corresponding to the RDO (reduced density matrix)
"""
bins = N * (K + 1)
permutations = []
for c in range(level_cutoff):
permutations.extend(multichoose(bins, c))
inverted_permutations = {tuple(v): i for i, v in enumerate(permutations)}
def mat_to_ind(mat):
"""maps np.array to index"""
vec = mat.flatten()
try:
return inverted_permutations[tuple(vec)]
except KeyError:
return None
ind_to_mat = [np.array(vec).reshape((N, K + 1)) for vec in permutations]
return ind_to_mat, mat_to_ind
| 5,348,961 |
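A short sketch of how the two return values of ADO_mappings above round-trip; it assumes the multichoose helper referenced in the docstring is available and yields the single all-zero composition at level 0.

import numpy as np

# Small system: N=2 states, K=1 exponential, truncate the hierarchy at level 3.
ind_to_mat, mat_to_ind = ADO_mappings(N=2, K=1, level_cutoff=3)
# Index 0 is the RDO: the all-zeros N x (K+1) matrix.
assert np.array_equal(ind_to_mat[0], np.zeros((2, 2)))
# mat_to_ind inverts ind_to_mat for every matrix inside the cutoff...
assert all(mat_to_ind(m) == i for i, m in enumerate(ind_to_mat))
# ...and returns None for matrices beyond it.
assert mat_to_ind(np.full((2, 2), 5)) is None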
def f_beta(precision, recall, beta):
"""
Returns the F score for precision, recall and a beta parameter
:param precision: a double with the precision value
:param recall: a double with the recall value
:param beta: a double with the beta parameter of the F measure, which gives more or less weight to precision vs. recall
:return: a double value of the f(beta) measure.
"""
if np.isnan(precision) or np.isnan(recall) or (precision == 0 and recall == 0):
return np.nan
return ((1 + beta ** 2) * precision * recall) / (((beta ** 2) * precision) + recall)
| 5,348,962 |
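A worked example of the F-beta formula implemented above: with beta = 1 it is the harmonic mean of precision and recall, while beta = 2 weights recall more heavily.

print(f_beta(0.5, 0.5, 1))   # 0.5, the F1 score for equal precision and recall
print(f_beta(0.5, 0.25, 2))  # (1 + 4)*0.5*0.25 / (4*0.5 + 0.25) ~ 0.2778
print(f_beta(0.0, 0.0, 1))   # nan, undefined when both precision and recall are zero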
def convert_image_to_dicom(image_file):
"""Read an image file, convert it to Dicom and return the file path"""
# Load pixel array from image.
img = Image.open(image_file)
if ('RGB' == img.mode) or ('RGBA' == img.mode):
# Assuming greyscale image, keep only one channel.
pix = np.array(img)[:, :, 0]
elif 'L' == img.mode:
# One black and white channel.
pix = np.array(img)[:, :]
else:
raise ValueError('Unhandled Image mode: {}'.format(img.mode))
# Write pixel array to Dicom file.
stk = sitk.GetImageFromArray(pix)
writer = sitk.ImageFileWriter()
writer.KeepOriginalImageUIDOn()
img_basename = os.path.splitext(os.path.basename(image_file))[0] + '_'
dicom_file = tempfile.NamedTemporaryFile(prefix=img_basename).name + '.dcm'
writer.SetFileName(dicom_file)
writer.Execute(stk)
return dicom_file
| 5,348,963 |
def get_highly_variable_genes(
adata,
normalized_counts_per_cell=10000,
min_counts=3,
min_cells=3,
min_gene_vscore_pctl=85,
):
"""
Get highly variable genes.
We assume that data preprocessing are already done, like removing low quality cells.
It first perform count normalization, then variable gene selection.
Parameters
----------
adata: :class:`~anndata.AnnData` object
normalized_counts_per_cell: `int`, optional (default: 10000)
count matrix normalization
min_counts: `int`, optional (default: 3)
Minimum number of UMIs per cell to be considered for selecting highly variable genes.
min_cells: `int`, optional (default: 3)
Minimum number of cells per gene to be considered for selecting highly variable genes.
min_gene_vscore_pctl: `int`, optional (default: 85)
Gene expression variability threshold, in the unit of percentile,
for selecting highly variable genes. Range: [0,100], with a higher
number selecting more variable genes.
Returns
-------
Modify adata.var['highly_variable'].
If 'highly_variable' existed before, save a copy at adata.obs['highly_variable_old']
"""
sc.pp.normalize_per_cell(adata, counts_per_cell_after=normalized_counts_per_cell)
verbose = logg._settings_verbosity_greater_or_equal_than(
2
) # the highest level is 3
logg.info("Finding highly variable genes...")
gene_list = adata.var_names
gene_idx = hf.filter_genes(
adata.X,
min_counts=min_counts,
min_cells=min_cells,
min_vscore_pctl=min_gene_vscore_pctl,
show_vscore_plot=verbose,
)
highvar_genes = gene_list[gene_idx]
if "highly_variable" in adata.var.keys():
adata.var["highly_variable_old"] = adata.var["highly_variable"].copy()
adata.var["highly_variable"] = False
adata.var.loc[highvar_genes, "highly_variable"] = True
logg.info(f"Keeping {len(highvar_genes)} genes")
| 5,348,964 |
async def sysdetails(sysd):
""" For .sysd command, get system info using neofetch. """
if not sysd.text[0].isalpha() and sysd.text[0] not in ("/", "#", "@", "!"):
try:
fetch = await asyncrunapp(
"neofetch",
"--stdout",
stdout=asyncPIPE,
stderr=asyncPIPE,
)
stdout, stderr = await fetch.communicate()
result = str(stdout.decode().strip()) + str(stderr.decode().strip())
await sysd.edit("`" + result + "`")
except FileNotFoundError:
await sysd.edit("**Install neofetch first!**")
| 5,348,965 |
def get_cfg(existing_cfg, _log):
"""
generates
"""
_sanity_check(existing_cfg, _log)
import ntpath, os, yaml
with open(os.path.join(os.path.dirname(__file__), "{}.yml".format(ntpath.basename(__file__).split(".")[0])),
'r') as stream:
try:
ret = yaml.safe_load(stream)
except yaml.YAMLError as exc:
raise AssertionError("Default config yaml for '{}' not found!".format(os.path.splitext(__file__)[0])) from exc
return ret
| 5,348,966 |
def download_alphafold_cif(
proteins: list,
out_folder: str,
out_format: str = "{}.cif",
alphafold_cif_url: str = 'https://alphafold.ebi.ac.uk/files/AF-{}-F1-model_v1.cif',
timeout: int = 60,
verbose_log: bool = False,
) -> tuple:
"""
Function to download .cif files of protein structures predicted by AlphaFold.
Parameters
----------
proteins : list
List (or any other iterable) of UniProt protein accessions for which to
download the structures.
out_folder : str
Path to the output folder.
alphafold_cif_url : str
The base link from where to download cif files.
The brackets {} are replaced by a protein name from the proteins list.
Default is 'https://alphafold.ebi.ac.uk/files/AF-{}-F1-model_v1.cif'.
timeout : int
Time to wait for reconnection of downloads.
Default is 60.
verbose_log: bool
Whether to write verbose logging information.
Default is False.
Returns
-------
: (list, list, list)
The lists of valid, invalid and existing protein accessions.
"""
socket.setdefaulttimeout(timeout)
valid_proteins = []
invalid_proteins = []
existing_proteins = []
if not os.path.exists(out_folder):
os.makedirs(out_folder)
for protein in tqdm.tqdm(proteins):
name_in = alphafold_cif_url.format(protein)
name_out = os.path.join(
out_folder,
out_format.format(protein)
)
if os.path.isfile(name_out):
existing_proteins.append(protein)
else:
try:
urllib.request.urlretrieve(name_in, name_out)
valid_proteins.append(protein)
except urllib.error.HTTPError:
if verbose_log:
logging.info(f"Protein {protein} not available for CIF download.")
invalid_proteins.append(protein)
logging.info(f"Valid proteins: {len(valid_proteins)}")
logging.info(f"Invalid proteins: {len(invalid_proteins)}")
logging.info(f"Existing proteins: {len(existing_proteins)}")
return(valid_proteins, invalid_proteins, existing_proteins)
| 5,348,967 |
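A usage sketch for download_alphafold_cif above; the accession and output folder are placeholders chosen for illustration.

valid, invalid, existing = download_alphafold_cif(
    proteins=["P69905"],       # placeholder UniProt accession
    out_folder="alphafold_cifs",
)
print(len(valid), "downloaded,", len(invalid), "unavailable,", len(existing), "already present")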
def test_staff_user_cannot_create_youth_profile_if_profile_does_not_exist(
rf, staff_user_gql_client, mocker, profile_api_response
):
"""Creating a youth profile will query Helsinki profile for the given profile id."""
mocker.patch.object(ProfileAPI, "fetch_profile", return_value={"id": ""})
profile_id = from_global_id(profile_api_response["id"])[1]
profile_global_id = to_global_id(type="YouthProfileNode", id=profile_id)
user = staff_user_gql_client.user
request = rf.post("/graphql")
request.user = user
today = date.today()
birth_date = today.replace(year=today.year - 13) - timedelta(days=1)
youth_profile_data = {
"birth_date": birth_date.strftime("%Y-%m-%d"),
"approver_email": "[email protected]",
}
t = Template(
"""
mutation {
createYouthProfile(
input: {
id: \"${id}\",
youthProfile: {
birthDate: \"${birth_date}\",
approverEmail: \"${approver_email}\",
}
profileApiToken: "token"
}
) {
youthProfile {
id
}
}
}
"""
)
query = t.substitute(
id=profile_global_id,
birth_date=youth_profile_data["birth_date"],
approver_email=youth_profile_data["approver_email"],
)
executed = staff_user_gql_client.execute(query, context=request)
assert (
executed["errors"][0].get("extensions").get("code")
== PROFILE_DOES_NOT_EXIST_ERROR
)
| 5,348,968 |
def test_show_tables():
"""
Tests show tables command.
"""
ret = {}
ret["stdout"] = "bad_hosts"
ret["retcode"] = 0
mock_cmd = MagicMock(return_value=ret)
with patch.dict(pf.__salt__, {"cmd.run_all": mock_cmd}):
res = pf.show("tables")
mock_cmd.assert_called_once_with(
"pfctl -s Tables", output_loglevel="trace", python_shell=False
)
assert not res["changes"]
| 5,348,969 |
def secret_age_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[SecretsManager.1] Secrets over 90 days old should be rotated"""
secret = list_secrets(cache=cache)
for secrets in secret["SecretList"]:
secretArn = str(secrets["ARN"])
secretName = str(secrets["Name"])
lastChangedDate = secrets["LastChangedDate"]
todaysDatetime = datetime.datetime.now(datetime.timezone.utc)
secretAgeFinder = todaysDatetime - lastChangedDate
# ISO Time
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
if secretAgeFinder >= datetime.timedelta(days=90):
finding = {
"SchemaVersion": "2018-10-08",
"Id": secretArn + "/secrets-manager-age-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": secretArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[SecretsManager.1] Secrets over 90 days old should be rotated",
"Description": secretName
+ " is over 90 days old and should be rotated. Refer to the remediation instructions if this configuration is not intended",
"Remediation": {
"Recommendation": {
"Text": "For more information on Secret Rotation refer to the Rotating Your AWS Secrets Manager Secrets section of the AWS Secrets Manager User Guide",
"Url": "https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsSecretsManagerSecret",
"Id": secretArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"AwsSecretsManagerSecret": {"Name": secretName}}
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": secretArn + "/secrets-manager-age-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": secretArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[SecretsManager.1] Secrets over 90 days old should be rotated",
"Description": secretName + " is over 90 days old and should be rotated.",
"Remediation": {
"Recommendation": {
"Text": "For more information on Secret Rotation refer to the Rotating Your AWS Secrets Manager Secrets section of the AWS Secrets Manager User Guide",
"Url": "https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsSecretsManagerSecret",
"Id": secretArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"AwsSecretsManagerSecret": {"Name": secretName}}
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
| 5,348,970 |
def _gecko_path():
"""Either get the executable or raise an error"""
if sys.platform == "win32":
return os.path.join(PCKG_PATH, "win32", "geckodriver.exe")
if sys.platform == 'linux':
return os.path.join(PCKG_PATH, "linux", "geckodriver")
if sys.platform == 'darwin':
return os.path.join(PCKG_PATH, "macos", "geckodriver")
raise OSError("not supported yet")
| 5,348,971 |
def generate_makefiles(target_config, verbose, parts, tracing):
""" Generate the makefiles to build everything. target_config is the
target configuration. verbose is set if the output is to be displayed.
parts is the number of parts the generated code should be split into.
tracing is set if the generated code should include tracing calls.
"""
# For the top-level .pro file.
toplevel_pro = 'PyQt5.pro'
subdirs = []
# Set the SIP platform, version and feature flags.
sip_flags = get_sip_flags(target_config)
# Go through the modules.
pyqt_modules = list(target_config.pyqt_modules)
# Add the internal modules if they are required.
if not target_config.no_tools:
pyqt_modules.append('pylupdate')
pyqt_modules.append('pyrcc')
for mname in pyqt_modules:
metadata = MODULE_METADATA[mname]
if metadata.qpy_lib:
sp_qpy_dir = source_path('qpy', mname)
qpy_c_sources = [os.path.relpath(f, mname)
for f in glob.glob(os.path.join(sp_qpy_dir, '*.c'))]
qpy_cpp_sources = [os.path.relpath(f, mname)
for f in glob.glob(os.path.join(sp_qpy_dir, '*.cpp'))]
qpy_headers = [os.path.relpath(f, mname)
for f in glob.glob(os.path.join(sp_qpy_dir, '*.h'))]
qpy_sources = qpy_c_sources + qpy_cpp_sources
else:
qpy_sources = []
qpy_headers = []
generate_sip_module_code(target_config, verbose, parts, tracing, mname,
sip_flags, metadata.public, qpy_sources, qpy_headers)
subdirs.append(mname)
# Generate the composite module.
qtmod_sipdir = os.path.join('sip', 'Qt')
mk_clean_dir(qtmod_sipdir)
qtmod_sipfile = os.path.join(qtmod_sipdir, 'Qtmod.sip')
f = open_for_writing(qtmod_sipfile)
f.write('''%CompositeModule PyQt5.Qt
''')
for mname in COMPOSITE_COMPONENTS:
if mname in target_config.pyqt_modules:
f.write('%%Include %s/%smod.sip\n' % (mname, mname))
f.close()
generate_sip_module_code(target_config, verbose, parts, tracing, 'Qt',
sip_flags, False)
subdirs.append('Qt')
wrappers = []
if not target_config.no_tools:
# Generate the pylupdate5 and pyrcc5 wrappers.
for tool in ('pylupdate', 'pyrcc'):
wrappers.append((tool,
generate_tool_wrapper(target_config, tool + '5',
'PyQt5.%s_main' % tool)))
# Generate the pyuic5 wrapper.
wrappers.append(('pyuic',
generate_tool_wrapper(target_config, 'pyuic5',
'PyQt5.uic.pyuic')))
# Generate the Qt Designer plugin.
if not target_config.no_designer_plugin and 'QtDesigner' in target_config.pyqt_modules:
if generate_plugin_makefile(target_config, verbose, 'designer', target_config.designer_plugin_dir, "Qt Designer"):
subdirs.append('designer')
# Generate the qmlscene plugin.
if not target_config.no_qml_plugin and 'QtQml' in target_config.pyqt_modules:
if generate_plugin_makefile(target_config, verbose, 'qmlscene', target_config.qml_plugin_dir, "qmlscene"):
subdirs.append('qmlscene')
rewrite_qmldir(target_config, 'Charts',
source_path('examples', 'quick', 'tutorials', 'extending',
'chapter6-plugins'))
# Generate the QScintilla API file.
if target_config.qsci_api:
inform("Generating the QScintilla API file...")
f = open_for_writing('PyQt5.api')
for mname in target_config.pyqt_modules:
if MODULE_METADATA[mname].public:
api = open(mname + '.api')
for l in api:
f.write('PyQt5.' + l)
api.close()
os.remove(mname + '.api')
f.close()
# Generate the Python dbus module.
if target_config.pydbus_module_dir != '':
mname = 'dbus'
mk_dir(mname)
sp_src_dir = source_path(mname)
lib_dirs = ['-L' + l for l in target_config.dbus_lib_dirs]
lib_names = ['-l' + l for l in target_config.dbus_libs]
libs = ' '.join(lib_dirs + lib_names)
generate_module_makefile(target_config, verbose, mname,
include_paths=target_config.dbus_inc_dirs, libs=libs,
install_path=target_config.pydbus_module_dir,
src_dir=sp_src_dir)
subdirs.append(mname)
# Generate the top-level .pro file.
inform("Generating the top-level .pro file...")
out_f = open_for_writing(toplevel_pro)
root_dir = qmake_quote(target_config.pyqt_module_dir + '/PyQt5')
out_f.write('''TEMPLATE = subdirs
CONFIG += ordered nostrip
SUBDIRS = %s
init_py.files = %s
init_py.path = %s
INSTALLS += init_py
''' % (' '.join(subdirs), source_path('__init__.py'), root_dir))
# Install the uic module.
out_f.write('''
uic_package.files = %s
uic_package.path = %s
INSTALLS += uic_package
''' % (source_path('pyuic', 'uic'), root_dir))
# Install the tool main scripts and wrappers.
if wrappers:
wrapper_exes = []
for tool, wrapper in wrappers:
if tool != 'pyuic':
out_f.write('''
%s.files = %s
%s.path = %s
INSTALLS += %s
''' % (tool, source_path('sip', tool, tool + '_main.py'), tool, root_dir, tool))
wrapper_exes.append(wrapper)
out_f.write('''
tools.files = %s
tools.path = %s
INSTALLS += tools
''' % (' '.join(wrapper_exes), qmake_quote(target_config.pyqt_bin_dir)))
# Install the stub files.
if target_config.py_version >= 0x030500 and target_config.pyqt_stubs_dir:
out_f.write('''
pep484_stubs.files = %s Qt.pyi
pep484_stubs.path = %s
INSTALLS += pep484_stubs
''' % (' '.join([mname + '.pyi' for mname in target_config.pyqt_modules]),
qmake_quote(target_config.pyqt_stubs_dir)))
# Install the QScintilla .api file.
if target_config.qsci_api:
out_f.write('''
qscintilla_api.files = PyQt5.api
qscintilla_api.path = %s
INSTALLS += qscintilla_api
''' % qmake_quote(target_config.qsci_api_dir + '/api/python'))
out_f.close()
# Make the wrappers executable on platforms that support it. If we did it
# after running qmake then (on Linux) the execute bits would be stripped on
# installation.
if target_config.py_platform != 'win32':
for tool, wrapper in wrappers:
inform("Making the %s wrapper executable..." % wrapper)
sbuf = os.stat(wrapper)
mode = sbuf.st_mode
mode |= (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
os.chmod(wrapper, mode)
# Generate the makefiles.
inform("Generating the Makefiles...")
run_qmake(target_config, verbose, toplevel_pro, recursive=True)
| 5,348,972 |
def readShort(f):
"""Read 2 bytes as BE integer in file f"""
read_bytes = f.read(2)
return struct.unpack(">h", read_bytes)[0]
| 5,348,973 |
def get_color(thing):
"""Get color for thing.
:param thing: Thing to get color for.
:return: Color tuple if rule exists otherwise None.
"""
for rule in _COLOR_RULES:
color = rule(thing)
if color is not None:
return color
return None
| 5,348,974 |
def build_table(infos):
""" Builds markdown table. """
table_str = '| '
for key in infos[0].keys():
table_str += key + ' | '
table_str += '\n'
table_str += '| '
for key in infos[0].keys():
table_str += '--- | '
table_str += '\n'
for info in infos:
table_str += '| '
for value in info.values():
table_str += str(value) + ' | '
table_str += '\n'
return table_str
| 5,348,975 |
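A minimal usage sketch for build_table; the rows below are made up, and any list of dicts sharing the same keys should behave the same way.
infos = [
    {"name": "alpha", "count": 3},
    {"name": "beta", "count": 7},
]
# Prints a markdown table: a header row, a separator row, then one row per dict.
print(build_table(infos))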
def notify_telegram(title, content, token=None, chat=None, mention_user=None, **kwargs):
"""
Sends a telegram notification and returns *True* on success. The communication with the telegram
API might have some delays and is therefore handled by a thread.
"""
# test import
import telegram # noqa: F401
cfg = Config.instance()
# get default token and chat
if not token:
token = cfg.get_expanded("notifications", "telegram_token")
if not chat:
chat = cfg.get_expanded("notifications", "telegram_chat")
if not token or not chat:
logger.warning("cannot send Telegram notification, token ({}) or chat ({}) empty".format(
token, chat))
return False
# append the user to mention to the title
# unless explicitly set to empty string
mention_text = ""
if mention_user is None:
mention_user = cfg.get_expanded("notifications", "telegram_mention_user")
if mention_user:
mention_text = " (@{})".format(mention_user)
# request data for the API call
request = {
"parse_mode": "Markdown",
}
# standard or attachment content?
if isinstance(content, six.string_types):
request["text"] = "{}{}\n\n{}".format(title, mention_text, content)
else:
# content is a dict, add some formatting
request["text"] = "{}{}\n\n".format(title, mention_text)
for key, value in content.items():
request["text"] += "_{}_: {}\n".format(key, value)
# extend by arbitrary kwargs
request.update(kwargs)
# threaded, non-blocking API communication
thread = threading.Thread(target=_notify_telegram, args=(token, chat, request))
thread.start()
return True
| 5,348,976 |
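A hedged usage sketch for notify_telegram, assuming the surrounding framework's Config and the python-telegram-bot dependency are available; the token and chat values are placeholders (in practice they would normally come from the notifications section of the config).
sent = notify_telegram(
    "Job finished",
    {"task": "MyTask", "status": "done"},    # dict content gets key/value formatting
    token="123456:ABC-placeholder",          # placeholder bot token
    chat="987654321",                        # placeholder chat id
)
print("notification dispatched:", sent)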
def convert_to_dapr_duration(td: timedelta) -> str:
"""Converts date.timedelta to Dapr duration format.
Args:
td (datetime.timedelta): python datetime object.
Returns:
str: dapr duration format string.
"""
total_minutes, secs = divmod(td.total_seconds(), 60.0)
hours, mins = divmod(total_minutes, 60.0)
return f'{hours:.0f}h{mins:.0f}m{secs:.0f}s'
| 5,348,977 |
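A small worked example using only the standard library:
from datetime import timedelta

# 1 hour, 2 minutes and 3 seconds -> '1h2m3s'
print(convert_to_dapr_duration(timedelta(hours=1, minutes=2, seconds=3)))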
def test_main_ls():
"""
If the ls command is issued, check the appropriate function is called.
"""
mock_serial = mock.MagicMock()
mock_class = mock.MagicMock()
mock_class.__enter__.return_value = mock_serial
with mock.patch(
"microfs.ls", return_value=["foo", "bar"]
) as mock_ls, mock.patch(
"microfs.get_serial", return_value=mock_class
), mock.patch.object(
builtins, "print"
) as mock_print:
microfs.main(argv=["ls"])
mock_ls.assert_called_once_with()
mock_print.assert_called_once_with("foo bar")
| 5,348,978 |
def get_slot(handler_input, slot_name):
# type: (HandlerInput, AnyStr) -> Optional[Slot]
"""Return the slot information from intent request.
The method retrieves the slot information
:py:class:`ask_sdk_model.slot.Slot` from the input intent request
for the given ``slot_name``. More information on the slots can be
found here :
https://developer.amazon.com/docs/custom-skills/request-types-reference.html#slot-object
If there is no such slot, then a ``None``
is returned. If the input request is not an
:py:class:`ask_sdk_model.intent_request.IntentRequest`, a
:py:class:`TypeError` is raised.
:param handler_input: The handler input instance that is generally
passed in the sdk's request and exception components
:type handler_input: ask_sdk_core.handler_input.HandlerInput
:param slot_name: Name of the slot that needs to be retrieved
:type slot_name: str
:return: Slot information for the provided slot name if it exists,
or a `None` value
:rtype: Optional[ask_sdk_model.slot.Slot]
:raises: TypeError if the input is not an IntentRequest
"""
request = handler_input.request_envelope.request
if isinstance(request, IntentRequest):
if request.intent.slots is not None:
return request.intent.slots.get(slot_name, None)
else:
return None
raise TypeError("The provided request is not an IntentRequest")
| 5,348,979 |
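A sketch of get_slot used inside an ASK SDK request handler; the intent and slot names ('CityIntent', 'city') are hypothetical and assume a matching interaction model.
from ask_sdk_core.dispatch_components import AbstractRequestHandler
from ask_sdk_core.utils import is_intent_name


class CityIntentHandler(AbstractRequestHandler):
    def can_handle(self, handler_input):
        return is_intent_name("CityIntent")(handler_input)

    def handle(self, handler_input):
        # 'city' is a hypothetical slot name from the interaction model.
        city_slot = get_slot(handler_input, "city")
        city = city_slot.value if city_slot else "unknown"
        return handler_input.response_builder.speak(
            "You asked about {}.".format(city)).response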
def password_to_key(
hash_implementation: Callable[[bytes], TDigestable], padding_length: int
) -> Callable[[bytes, bytes], bytes]:
"""
Create a helper function to convert passwords to SNMP compliant keys
according to :rfc:`3414`.
>>> hasher = password_to_key(hashlib.sha1, 20)
>>> key = hasher(b"mypasswd", b"target-engine-id")
>>> key.hex()
'999ec23ca66b9d3f187ab5208840c30b0450b452'
:param hash_implementation: A callable that creates an object with a
".digest()" method from a bytes-object. Usable examples are
`hashlib.md5` and `hashlib.sha1`
:param padding_length: The padding length to be used during hashing (as
defined in the SNMP rfc)
:returns: A callable which can be used to derive an SNMP compliant key
from a password.
"""
@lru_cache(maxsize=None)
def hasher(password: bytes, engine_id: bytes) -> bytes:
"""
Derive a key from a password and engine-id.
:param password: The user password
:param engine_id: The target engine ID
:returns: The derived key
"""
# Repeat the password for a total of 1MB worth of data (as per SNMP rfc)
hash_size = 1024 * 1024
num_words = hash_size // len(password)
tmp = (password * (num_words + 1))[:hash_size]
hash_instance = hash_implementation(tmp)
key = hash_instance.digest()
localised_buffer = (
key[:padding_length] + engine_id + key[:padding_length]
)
final_key = hash_implementation(localised_buffer).digest()
return final_key
hasher.__name__ = f"<hasher:{hash_implementation}>" # type: ignore
return hasher
| 5,348,980 |
def categorize_tag_key_characters(OSM_FILE = "data\\round_rock.xml", category = 'Summary'):
"""Categorizes attributes into those with:
all lower character, all lower after colon(:),
containing special/problem characters and
all all others that were not listed in above
which includes uppercase characters and/or
multiple colons.
Keyword arguments:
OSM_File -- .osm or .xml file (default "data\\round_rock.xml")
category -- print specific keys of categories of characters from regex search
(default 'Summary' ['All', 'lower', 'lower_colon', 'porblemchars', 'other'])
"""
if category == 'All':
        category = ('lower', 'lower_colon', 'problemchars', 'other')
    category_list = [category] if isinstance(category, str) else list(category)
lower = re.compile(r'^([a-z]|_)*$')
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
lower_set = set()
lower_colon_set = set()
problemchars_set = set()
other_set = set()
def key_type(element, keys):
if element.tag == "tag":
if lower.match(element.attrib['k']):
lower_set.add(element.attrib['k'])
keys["lower"] += 1
elif lower_colon.match(element.attrib['k']):
lower_colon_set.add(element.attrib['k'])
keys["lower_colon"] += 1
elif problemchars.match(element.attrib['k']):
problemchars_set.add(element.attrib['k'])
keys["problemchars"] += 1
else:
other_set.add(element.attrib['k'])
keys["other"] += 1
return keys
def process_map(filename):
keys = {"lower": 0, "lower_colon": 0, "problemchars": 0, "other": 0}
for _, element in ET.iterparse(filename):
keys = key_type(element, keys)
print(keys)
print(
"\nThere are:\n\
{} unique keys in lower,\n\
{} unique keys in lower_colon,\n\
{} unique keys in problemchars and\n\
{} unique keys in other.\n"
.format(len(lower_set), len(lower_colon_set), len(problemchars_set), len(other_set))
)
if 'lower' in category_list:
print('\n\nlower set has {} items. The unique items are: \n\n{} \n\n'
.format(keys["lower"], sorted(lower_set)))
if 'lower_colon' in category_list:
print('lower_colon set has {} items. The unique items are: \n\n{} \n\n'
.format(keys["lower_colon"], sorted(lower_colon_set)))
if 'problemchars' in category_list:
print('problemchars set has {} items. The unique items are: \n\n{} \n\n'
.format(keys["problemchars"], sorted(problemchars_set)))
if 'other' in category_list:
print('other set has {} items. The unique items are: \n\n{} \n\n'
.format(keys["other"], sorted(other_set)))
return keys
keys_dicts = process_map(OSM_FILE)
return keys_dicts
| 5,348,981 |
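Possible calls, assuming OSM extracts exist at the given paths; the second file name is hypothetical.
# Default file, summary counts only.
keys = categorize_tag_key_characters()

# Hypothetical extract, printing the unique keys in every category.
keys = categorize_tag_key_characters("data\\some_city.xml", category="All")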
def rotate_left(value, count, nbits, offset):
"""
Rotate a value to the left (or right)
@param value: value to rotate
@param count: number of times to rotate. negative counter means
rotate to the right
@param nbits: number of bits to rotate
@param offset: offset of the first bit to rotate
@return: the value with the specified field rotated
all other bits are not modified
"""
assert offset >= 0, "offset must be >= 0"
assert nbits > 0, "nbits must be > 0"
mask = 2**(offset+nbits) - 2**offset
tmp = value & mask
if count > 0:
for x in xrange(count):
if (tmp >> (offset+nbits-1)) & 1:
tmp = (tmp << 1) | (1 << offset)
else:
tmp = (tmp << 1)
else:
for x in xrange(-count):
if (tmp >> offset) & 1:
tmp = (tmp >> 1) | (1 << (offset+nbits-1))
else:
tmp = (tmp >> 1)
value = (value-(value&mask)) | (tmp & mask)
return value
| 5,348,982 |
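Two worked calls over a 4-bit field starting at bit 0 (note the body uses xrange, so it targets Python 2 as written):
# 0b0110 rotated left once within the 4-bit field -> 0b1100
assert rotate_left(0b0110, 1, 4, 0) == 0b1100
# 0b1001 rotated left once wraps the top bit around -> 0b0011
assert rotate_left(0b1001, 1, 4, 0) == 0b0011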
def set_categories(categories):
""" Take a dictionary mapping video category IDs to name and retrieval
time. All items are stored into cache node 'videoCategories', but
for the ones with a retrieval time too long ago, the v3 API is queried
before.
"""
timestamp = time.time()
idlist = [cid for cid, item in categories.items()
if item.get('updated', 0) < timestamp - g.lifespan]
if len(idlist) > 0:
query = {'id': ','.join(idlist),
'part': 'snippet'}
catinfo = call_gdata('videoCategories', query)
try:
for item in catinfo.get('items', []):
cid = item['id']
title = item.get('snippet', {}).get('title', 'unknown')
categories[cid] = {'title':title, 'updated':timestamp}
except Exception:
raise IOError("Error fetching category name for IDs %s" % idlist)
cache('categories').update(categories)
| 5,348,983 |
def geo_to_string(value):
"""
Convert geo objects to strings, because they don't support equality.
"""
if isinstance(value, list):
return [geo_to_string(x) for x in value]
if isinstance(value, dict):
result = {}
for dict_key, dict_value in value.iteritems():
result[dict_key] = geo_to_string(dict_value)
return result
if isinstance(value, aerospike.GeoJSON):
return str(value)
return value
| 5,348,984 |
def setMaxSharedMemory():
"""
Check and verify that the kernel.shmmax kernel parameter is above 35mb
"""
# First verify that kernel.shmmax is not set and is below the requested value.
logging.debug("loading %s", basedefs.FILE_SYSCTL)
txtHandler = utils.TextConfigFileHandler(basedefs.FILE_SYSCTL)
txtHandler.open()
# Compare to basedefs.CONST_SHMMAX
currentShmmax = txtHandler.getParam("kernel.shmmax")
if currentShmmax and (int(currentShmmax) >= basedefs.CONST_SHMMAX):
logging.debug("current shared memory max in kernel is %s, there is no need to update the kernel parameters", currentShmmax)
return
# If we got here, it means we need to update kernel.shmmax in sysctl.conf
logging.debug("setting SHARED MEMORY MAX to: %s", basedefs.CONST_SHMMAX)
txtHandler.editParam("kernel.shmmax", basedefs.CONST_SHMMAX)
txtHandler.close()
# Execute sysctl -a
utils.execExternalCmd("%s -e -p" % basedefs.EXEC_SYSCTL, True, output_messages.ERR_EXP_FAILED_KERNEL_PARAMS)
| 5,348,985 |
def magma_dposv_gpu(uplo, n, nhrs, a_gpu, lda, b_gpu, ldb):
"""
    Solve a linear system with a symmetric positive-definite coefficient matrix.
"""
uplo = _uplo_conversion[uplo]
info = c_int_type()
status = _libmagma.magma_dposv_gpu(uplo, n, nhrs, int(a_gpu), lda,
int(b_gpu), ldb, ctypes.byref(info))
magmaCheckStatus(status)
| 5,348,986 |
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description="I'm a snitch",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('FILE1',
help='Input file 1',
metavar='FILE',
type=argparse.FileType('rt'))
parser.add_argument('FILE2',
help='Input file 2',
metavar='FILE',
type=argparse.FileType('rt'))
parser.add_argument('-o',
'--outfile',
help='Output filename',
metavar='FILE',
type=argparse.FileType('wt'),
default=sys.stdout)
return parser.parse_args()
| 5,348,987 |
def page_not_found(request, template_name='404.html'):
"""
Default 404 handler.
Templates: `404.html`
Context:
request_path
The path of the requested URL (e.g., '/app/pages/bad_page/')
"""
t = loader.get_template(template_name) # You need to create a 404.html template.
return http.HttpResponseNotFound(t.render(RequestContext(request, {'request_path': request.path})))
| 5,348,988 |
def test_create_new_file_version__use_existing_file_handle(mocker, syn):
"""Verify the behavior of create_new_file_version when we can use an existing file handle"""
key = _MigrationKey('syn123', _MigrationType.FILE, 1, None, None)
from_file_handle_id = 1234
to_file_handle_id = 4321
storage_location_id = 9876
file_size = 123456
mock_file = mock.MagicMock(spec=synapseclient.File)
mock_syn_get = mocker.patch.object(syn, 'get')
mock_syn_store = mocker.patch.object(syn, 'store')
mock_multipart_copy = mocker.patch.object(migrate_functions, 'multipart_copy')
mock_syn_get.return_value = mock_file
assert to_file_handle_id == _create_new_file_version(
syn,
key,
from_file_handle_id,
to_file_handle_id,
file_size,
storage_location_id
)
mock_syn_get.assert_called_once_with(key.id, downloadFile=False)
mock_syn_store.assert_called_once_with(mock_file)
assert mock_multipart_copy.called is False
| 5,348,989 |
def load_data(filename):
"""
Load shopping data from a CSV file `filename` and convert into a list of
evidence lists and a list of labels. Return a tuple (evidence, labels).
evidence should be a list of lists, where each list contains the
following values, in order:
- Administrative, an integer
- Administrative_Duration, a floating point number
- Informational, an integer
- Informational_Duration, a floating point number
- ProductRelated, an integer
- ProductRelated_Duration, a floating point number
- BounceRates, a floating point number
- ExitRates, a floating point number
- PageValues, a floating point number
- SpecialDay, a floating point number
- Month, an index from 0 (January) to 11 (December)
- OperatingSystems, an integer
- Browser, an integer
- Region, an integer
- TrafficType, an integer
- VisitorType, an integer 0 (not returning) or 1 (returning)
- Weekend, an integer 0 (if false) or 1 (if true)
labels should be the corresponding list of labels, where each label
is 1 if Revenue is true, and 0 otherwise.
"""
with open("shopping.csv") as f:
reader = csv.reader(f)
next(reader)
months = ["Jan", "Feb", "Mar", "Apr", "May", "June",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
data = []
for row in reader:
data.append({
"evidence": [int(row[0]), float(row[1]), int(row[2]), float(row[3]), int(row[4]), float(row[5]), float(row[6]), float(row[7]), float(row[8]), float(row[9]),
months.index(row[10]), int(row[11]), int(row[12]), int(row[13]), int(row[14]), 0 if row[15] == "New_Visitor" else 1, 0 if row[16] == "FALSE" else 1],
"label": 0 if row[17] == "FALSE" else 1
})
evidence = [row["evidence"] for row in data]
labels = [row["label"] for row in data]
return (evidence, labels)
| 5,348,990 |
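A hedged usage sketch in the scikit-learn style this data layout suggests; the classifier choice and split size are illustrative.
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

evidence, labels = load_data("shopping.csv")
X_train, X_test, y_train, y_test = train_test_split(
    evidence, labels, test_size=0.4)

model = KNeighborsClassifier(n_neighbors=1)   # one plausible model choice
model.fit(X_train, y_train)
print(model.score(X_test, y_test))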
def instance_mock(cls, request, name=None, spec_set=True, **kwargs):
"""
Return a mock for an instance of *cls* that draws its spec from the class
and does not allow new attributes to be set on the instance. If *name* is
missing or |None|, the name of the returned |Mock| instance is set to
*request.fixturename*. Additional keyword arguments are passed through to
the Mock() call that creates the mock.
"""
if name is None:
name = request.fixturename
return create_autospec(cls, _name=name, spec_set=spec_set, instance=True,
**kwargs)
| 5,348,991 |
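A sketch of how such a helper is typically wrapped in a pytest fixture; the Document class is hypothetical and exists only to supply a spec.
import pytest


class Document(object):
    """Hypothetical class used only to illustrate the spec argument."""
    def save(self, path):
        raise NotImplementedError


@pytest.fixture
def document_(request):
    # The mock draws its spec from Document and is named after the fixture.
    return instance_mock(Document, request)


def test_saves_to_path(document_):
    document_.save("out.docx")
    document_.save.assert_called_once_with("out.docx")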
def plot_data(y):
""" y is a 1D vector """
x = np.arange(y.size)
_ = plt.plot(x, y, 'o')
| 5,348,992 |
def auto_fp16(apply_to=None, out_fp32=False):
"""Decorator to enable fp16 training automatically.
This decorator is useful when you write custom modules and want to support
mixed precision training. If inputs arguments are fp32 tensors, they will
be converted to fp16 automatically. Arguments other than fp32 tensors are
ignored.
Args:
apply_to (Iterable, optional): The argument names to be converted.
`None` indicates all arguments.
out_fp32 (bool): Whether to convert the output back to fp32.
Example:
>>> import torch.nn as nn
>>> class MyModule1(nn.Module):
>>>
>>> # Convert x and y to fp16
>>> @auto_fp16()
>>> def forward(self, x, y):
>>> pass
>>> import torch.nn as nn
>>> class MyModule2(nn.Module):
>>>
>>> # convert pred to fp16
>>> @auto_fp16(apply_to=('pred', ))
>>> def do_something(self, pred, others):
>>> pass
"""
def auto_fp16_wrapper(old_func):
@functools.wraps(old_func)
def new_func(*args, **kwargs):
# check if the module has set the attribute `fp16_enabled`, if not,
# just fallback to the original method.
if not isinstance(args[0], torch.nn.Module):
raise TypeError('@auto_fp16 can only be used to decorate the '
'method of nn.Module')
if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
return old_func(*args, **kwargs)
# get the arg spec of the decorated method
args_info = getfullargspec(old_func)
# get the argument names to be casted
args_to_cast = args_info.args if apply_to is None else apply_to
# convert the args that need to be processed
new_args = []
# NOTE: default args are not taken into consideration
if args:
arg_names = args_info.args[:len(args)]
for i, arg_name in enumerate(arg_names):
if arg_name in args_to_cast:
new_args.append(
cast_tensor_type(args[i], torch.float, torch.half))
else:
new_args.append(args[i])
# convert the kwargs that need to be processed
new_kwargs = {}
if kwargs:
for arg_name, arg_value in kwargs.items():
if arg_name in args_to_cast:
new_kwargs[arg_name] = cast_tensor_type(
arg_value, torch.float, torch.half)
else:
new_kwargs[arg_name] = arg_value
# apply converted arguments to the decorated method
output = old_func(*new_args, **new_kwargs)
# cast the results back to fp32 if necessary
if out_fp32:
output = cast_tensor_type(output, torch.half, torch.float)
return output
return new_func
return auto_fp16_wrapper
| 5,348,993 |
def get_loci_score(state, loci_w, data_w, species_w, better_loci,
species_counts, total_individuals, total_species,
individuals):
"""
Scoring function with user-specified weights.
    :param state: list of 0/1 flags, one per locus, marking which loci are currently included.
    :param loci_w: the included proportion of loci from the original data set (higher is better).
    :param data_w: 1 - the proportion of missing data for the selected loci (higher is better).
    :param species_w: the average proportion of species represented per locus (higher is better).
    :param better_loci: list of loci, each a newline-separated block of ">individual sequence" records.
    :param species_counts: dict keyed by species name (only the keys are used here).
    :param total_individuals: total number of individuals in the data set.
    :param total_species: total number of species in the data set.
    :param individuals: iterable of individual identifiers.
    :return: tuple of (score components list, dict of missing-locus counts per individual).
"""
num_loci = sum(state)
species_loci_counts = {species: 0 for species in species_counts}
individual_count = 0
missing_counts = {individual: 0 for individual in individuals}
total_loci = len(better_loci)
for i in range(total_loci):
if state[i] == 0:
continue
found_species = set()
found_individuals = set()
lines = better_loci[i].split("\n")
for line in lines:
if line == "":
continue
(individual, sequence) = line[1:].split()
found_individuals.add(individual)
individual_count += 1
species = individual.split("_")[-1]
found_species.add(species)
for species in found_species:
species_loci_counts[species] += 1
# Keep track of the amount of missing data for each individual.
for individual in individuals:
if individual not in found_individuals:
missing_counts[individual] += 1
num_missing = num_loci * total_individuals - individual_count
score_comps = [loci_w * float(num_loci) / float(total_loci),
data_w * (1 - float(num_missing) / float(num_loci * total_individuals)),
species_w * float(sum([species_loci_counts[species] for species in species_loci_counts])) / (float(num_loci) * float(total_species))]
return score_comps, missing_counts
| 5,348,994 |
def make_per_cell_fastqs(
reads,
outdir,
channel_id,
output_format,
cell_barcode_pattern,
good_barcodes_filename):
"""Write the filtered cell barcodes in reads
from barcodes_with_significant_umi_file
fastq.gzs to outdir
Parameters
----------
reads : str
read records from fasta path
greater than or equal to min_umi_per_cell
outdir: str
write the per cell barcode fastq.gzs to outdir
channel_id: str
prefix to fastq
output_format: str
format of output files, can be either fastq or fastq.gz
cell_barcode_pattern: regex pattern
cell barcode pattern to detect in the record name
barcodes_with_significant_umi_file: list
list of containing barcodes that have significant umi counts
Returns
-------
Write the filtered cell barcodes in reads
from barcodes_with_significant_umi_file
fastq.gzs to outdir
"""
if channel_id is None:
channel_id = ""
good_barcodes = read_barcodes_file(good_barcodes_filename)
fastqs = []
record_count = 0
    for record in screed.open(reads):
        record_count += 1
    if record_count == 0:
        return fastqs
good_cell_barcode_records = get_good_cell_barcode_records(
reads, good_barcodes, cell_barcode_pattern)
for cell_barcode, records in good_cell_barcode_records.items():
if channel_id == "":
filename = "{}/{}.{}".format(
outdir, cell_barcode, output_format)
else:
filename = "{}/{}_{}.{}".format(
outdir, channel_id, cell_barcode, output_format)
write_fastq(records, filename)
fastqs.append(filename)
return fastqs
| 5,348,995 |
def _find_app(settings, app):
"""Looks for an installed application in the user's local $PATH.
Raises an ImportError if the application is not found.
"""
# Save in settings
name = app['name'].lower()
settings[name] = None
# Retrieve local paths
split_token = ';' if mh.__iswin__ else ':'
local_paths = os.environ['PATH'].rsplit(split_token)
# Look for app in local paths
for local_path in local_paths:
path = os.path.join(local_path, app['exec'])
# If the path exists, store & exit the loop
if os.path.isfile(path):
settings[name] = path
break
# Try .exe for windows
if mh.__iswin__:
win_path = path + '.exe'
if os.path.isfile(win_path):
settings[name] = win_path
break
# If app not found, raise ImportError
if settings[name] is None:
error = '{0} application not found'.format(app['name'])
raise ImportError(error)
| 5,348,996 |
def plot_roc_curve(data, cls_name, title='ROC curve'):
"""
:param data: list [(fpr, tpr), (), ...]
:param cls_name: tuple of names for each class
:param title: plot title
:return:
"""
def cal_auc(tpr, fpr):
return np.trapz(tpr, fpr)
def plot_single_curve(fpr, tpr, cls_ind):
auc = cal_auc(tpr, fpr)
plt.plot(fpr, tpr, label="%s ROC curve (area = %.2f)" % (cls_name[cls_ind], auc))
return auc
assert isinstance(data, list)
if len(cls_name) == 2:
assert len(data) == 1
else:
assert len(data) == len(cls_name)
fig = plt.figure()
args = [(fpr, tpr, i) for i, (fpr, tpr) in enumerate(data)]
if len(cls_name) > 2:
auc = np.mean(list(map(lambda x: plot_single_curve(*x), args)))
else:
fpr, tpr = data[0]
auc = cal_auc(tpr, fpr)
plt.plot(fpr, tpr, label="%s vs. %s ROC curve (area = %.2f)" % (cls_name[1], cls_name[0], auc))
ax = plt.gca()
ax.plot([0, 1], [0, 1], ls="--", c=".3")
plt.title(title + ' (mean area = %.4f)' % auc)
plt.ylabel('True positive rate')
plt.xlabel('False positive rate')
plt.legend()
return fig, auc
| 5,348,997 |
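An illustrative binary-class call; the rates below are made up, and matplotlib/numpy are assumed to be imported at module level as in the function body.
import numpy as np

fpr = np.array([0.0, 0.1, 0.3, 1.0])   # hypothetical false positive rates
tpr = np.array([0.0, 0.6, 0.9, 1.0])   # hypothetical true positive rates

fig, auc = plot_roc_curve([(fpr, tpr)], cls_name=('negative', 'positive'))
print("AUC:", auc)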
def train(elastic_coordinator, train_step, state):
"""
This is the main elastic data parallel loop. It starts from an initial 'state'.
Each iteration calls 'train_step' and returns a new state. 'train_step'
has the following interface:
state, worker_stats = train_step(state)
When 'train_step' exhausts all the data, a StopIteration exception should be
thrown.
"""
assert isinstance(state, torchelastic.State)
failure_count = 0
rank = 0
checkpoint_util = CheckpointUtil(elastic_coordinator)
while not elastic_coordinator.should_stop_training():
# See: https://github.com/pytorch/elastic/issues/7
if failure_count >= MAX_FAILURES:
e = RuntimeError(
"Exceeded max number of recoverable failures: {}".format(failure_count)
)
elastic_coordinator.on_error(e)
raise e
start_time = time.time()
snapshot = state.capture_snapshot()
try:
store, rank, world_size = elastic_coordinator.rendezvous_barrier()
elastic_coordinator.init_process_group()
# load checkpoint if necessary
state = checkpoint_util.load_checkpoint(state, rank)
state_sync_start_time = time.time()
state.sync(world_size, rank)
publish_metric(
"torchelastic",
"state_sync.duration.ms",
get_elapsed_time_ms(state_sync_start_time),
)
checkpoint_util.set_checkpoint_loaded()
elastic_coordinator.barrier()
log.info("Rank {0} synced state with other nodes".format(rank))
except StopException:
log.info("Rank {0} received stopped signal. Exiting training.".format(rank))
break
except RuntimeError as e:
# See: https://github.com/pytorch/elastic/issues/7
elastic_coordinator.on_error(e)
state.apply_snapshot(snapshot)
failure_count += 1
continue
except (NonRetryableException, Exception) as e:
elastic_coordinator.on_error(e)
raise
finally:
publish_metric(
"torch_elastic",
"outer_train_loop.duration.ms",
get_elapsed_time_ms(start_time),
)
# Note that the loop might not even start if the rendezvous was closed
# due to one of the trainer processes completing earlier.
while not elastic_coordinator.should_stop_training():
start_time = time.time()
snapshot = state.capture_snapshot()
try:
train_step_start_time = time.time()
state, worker_stats = train_step(state)
publish_metric(
"torchelastic",
"train_step.duration.ms",
get_elapsed_time_ms(train_step_start_time),
)
elastic_coordinator.monitor_progress(state, worker_stats)
checkpoint_util.save_checkpoint(state, rank)
if elastic_coordinator.should_rendezvous(state):
log.info("Rank {0} will re-rendezvous".format(rank))
# Executor told us, for whatever reason, to re-rendezvous.
# This can occur if another node encounters an error,
# if a new node becomes available to train,
# or potentially even if it's time to checkpoint.
break
elastic_coordinator.report_progress(state)
except StopIteration:
log.info("Rank {0} finished all the iterations".format(rank))
# Current trainer process completed processing assigned subset of
# examples. Other trainer processes need to stop as well.
# This sends an explicit signal on training completion.
elastic_coordinator.signal_training_done()
break
except RuntimeError as e:
# See: https://github.com/pytorch/elastic/issues/7
elastic_coordinator.on_error(e)
state.apply_snapshot(snapshot)
failure_count += 1
break
except Exception as e:
elastic_coordinator.on_error(e)
raise
finally:
publish_metric(
"torchelastic",
"inner_train_loop.duration.ms",
get_elapsed_time_ms(start_time),
)
if elastic_coordinator.should_stop_training():
return state
else:
# This is an error condition and should not happen.
raise Exception(
"Exiting without training complete. rank: {0},"
" should_stop_training: {1}".format(
rank, elastic_coordinator.should_stop_training()
)
)
| 5,348,998 |
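A hedged sketch of the train_step contract the docstring describes; the State subclass attributes (data_iter, iteration) and the helper method are placeholders, not part of the torchelastic API.
def train_step(state):
    # Advance the state by one batch; an exhausted iterator raises
    # StopIteration, which is exactly the signal the loop above expects.
    batch = next(state.data_iter)                  # hypothetical iterator on the State subclass
    loss = state.model_forward_backward(batch)     # hypothetical helper doing fwd/bwd/step
    state.iteration += 1
    worker_stats = None                            # or a torchelastic WorkerStats instance
    return state, worker_stats

# state = MyState(...)                             # a torchelastic.State subclass
# final_state = train(elastic_coordinator, train_step, state)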
def make_animation(data_list, **kwargs):
"""
Creates an animation from list of McStasData objects
Parameters
----------
data_list : list of McStasData
List of McStasData objects for animation
Keyword arguments
-----------------
filename : str
Filename for saving the gif
fps : float
Number of frames per second
"""
figsize, data_list = _handle_kwargs(data_list, **kwargs)
if "fps" in kwargs:
period_in_ms = 1000 / kwargs["fps"]
else:
period_in_ms = 200
# find limits for entire dataset
maximum_values = []
minimum_values = []
is_1D = False
is_2D = False
for data in data_list:
if isinstance(data.metadata.dimension, int):
is_1D = True
elif len(data.metadata.dimension) == 2:
is_2D = True
min_value, max_value = _find_min_max_I(data)
# When data empty, min and max value is 0, skip
if not (min_value == 0 and max_value == 0):
minimum_values.append(min_value)
maximum_values.append(max_value)
if is_1D and is_2D:
raise ValueError(
"Both 1D and 2D data in animation, only one allowed.")
if len(minimum_values) == 0:
raise ValueError(
"No data found for animation!")
maximum_value = np.array(maximum_values).max()
minimum_value = np.array(minimum_values).min()
if "orders_of_mag" in kwargs:
orders_of_mag = kwargs["orders_of_mag"]
mag_diff = np.log10(maximum_value) - np.log10(minimum_value)
if mag_diff > orders_of_mag:
minimum_value_log10 = np.log10(maximum_value) - orders_of_mag
minimum_value = 10**(minimum_value_log10)
kwargs["fixed_minimum_value"] = minimum_value
kwargs["fixed_maximum_value"] = maximum_value
fig, ax0 = plt.subplots(figsize=figsize)
im = _plot_fig_ax(data_list[0], fig, ax0, **kwargs)
def animate_2D(index):
data = data_list[index]
intensity = data.Intensity
im.set_array(intensity.ravel())
return im,
anim = animation.FuncAnimation(fig, animate_2D,
frames=len(data_list),
interval=period_in_ms,
blit=False, repeat=True)
plt.show()
# The animation doesn't play unless it is saved. Bug.
if "filename" in kwargs:
filename = kwargs["filename"]
if not filename.endswith(".gif"):
filename = filename + ".gif"
# check if imagemagick available?
print("Saving animation with filename : \"" + filename + "\"")
anim.save(filename, writer="imagemagick")
| 5,348,999 |
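A usage sketch assuming data_list already holds McStasData objects for the same monitor across a scan (e.g. gathered with mcstasscript's load functions); the filename and fps values are arbitrary.
# data_list: list of McStasData objects, one per scan step, assumed prepared earlier.
make_animation(data_list, filename="monitor_scan.gif", fps=5, orders_of_mag=4)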