def getterm(stream):
"""
    This function travels through a given data stream and records all lines
    until it finds the start of a new term or the end of a useful section
    of the OBO file.
"""
block = []
for line in stream:
if line.strip() == "[Term]" or line.strip() == "[Typedef]":
break
else:
if line.strip() != "":
block.append(line.strip())
return block
|
def _deepmerge(source, destination):
"""
Merge the first provided ``dict`` into the second.
:param dict source: The ``dict`` to merge into ``destination``
:param dict destination: The ``dict`` that should get updated
:rtype: dict
:returns: ``destination`` modified
"""
for key, value in source.items():
if isinstance(value, dict):
# get node or create one
node = destination.setdefault(key, {})
_deepmerge(value, node)
else:
destination[key] = value
return destination
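A quick usage sketch (hypothetical values) showing that nested keys are merged rather than overwritten:
base = {'a': {'x': 1}, 'b': 2}
override = {'a': {'y': 3}}
merged = _deepmerge(override, base)
# merged is base, now {'a': {'x': 1, 'y': 3}, 'b': 2}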
|
def unique_keys(keys):
"""
From a list of given keys, return the unique keys.
:param keys: List of keys
:return: List of unique keys
"""
unique = []
for k in keys:
if k not in unique:
unique.append(k)
return unique
|
def _stringify(data):
"""
    Recursively rebuild nested dicts and lists, converting unicode keys and
    values to ``str`` (on Python 3, leaf values are returned unchanged).
"""
if isinstance(data, dict):
data_str = {}
for _k, v in data.items():
data_str[_stringify(_k)] = _stringify(v)
return data_str
elif isinstance(data, list):
return [_stringify(element) for element in data]
else:
return data
|
def empty_string(value):
"""Convert a :obj:`None <python:None>` ``value`` to an empty string."""
# pylint: disable=unused-argument
return ''
|
def add_stoichiometry(stm, rxns, rctnids, obfunc):
"""Add stoichiometry information to matrix"""
for r in rxns:
rid = r.getId()
rctnids.append(rid)
for sp in r.getListOfReactants():
species_name = sp.getSpecies()
# Be sure to skip boundary compounds
if species_name not in stm:
continue
stm[species_name][rid] = -float(sp.getStoichiometry())
for sp in r.getListOfProducts():
species_name = sp.getSpecies()
# Be sure to skip boundary compounds
if species_name not in stm:
continue
stm[species_name][rid] = float(sp.getStoichiometry())
# Add objective function value
rk = r.getKineticLaw()
coeff = float(rk.getParameter("OBJECTIVE_COEFFICIENT").getValue())
obfunc.append(coeff)
return stm, obfunc
|
def parse_data(action, data):
"""Parse an action's data field, return action with updated vals."""
d = data['data'][0]
# Post data
if 'post' in d:
action['action'] = 'post'
action['description'] = d['post']
# Comment data
if 'comment' in d:
action['action'] = 'comment'
action['action_type'] = 'comment'
if type(d['comment']) is str:
action['description'] = d['comment']
elif type(d['comment']) is dict:
action['description'] = d['comment']['comment']
action['person'] = d['comment']['author']
return action
|
def merge(user, default):
"""Merges a user configuration with the default one."""
if not user:
return default
if isinstance(user, dict) and isinstance(default, dict):
for kk, vv in default.items():
if kk not in user:
user[kk] = vv
else:
user[kk] = merge(user[kk], vv)
return user
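A minimal sketch (hypothetical config) showing that user values win and missing keys fall back to the defaults:
default = {'host': 'localhost', 'opts': {'retries': 3, 'verbose': False}}
user = {'opts': {'verbose': True}}
merged = merge(user, default)
# merged == {'opts': {'verbose': True, 'retries': 3}, 'host': 'localhost'}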
|
def get_kwargs(kwargs, key, default):
"""
    Get an element in kwargs or return the default.
:param kwargs: dictionary of keyworded arguments
:type kwargs: dict
:param key: key to retrieve
:type key: str
:param default: default value to return
:type default: mixed
:return: the retrieved value from kwargs or default
:rtype: mixed
"""
if kwargs is not None:
        if key in kwargs:
return kwargs[key]
return default
|
def millions(x, pos):
"""x value, pos positions"""
return "$%1.0fM" % (x * 10 ** (-6))
|
def parse_scg_ink_file(contents, scg_id):
"""Parse a SCG INK contents.
Parameters
----------
scg_id : string
The path to a SCG INK file.
Returns
-------
HandwrittenData
The recording as a HandwrittenData object.
"""
stroke_count = 0
stroke_point_count = -1
recording = []
current_stroke = []
time = 0
got_annotations = False
annotations = []
lines = contents.split("\n")
for i, line in enumerate(lines):
line = line.strip()
if i == 0 and line != 'SCG_INK':
raise ValueError(("%s: SCG Ink files have to start with 'SCG_INK'."
" The file started with %s.") %
(scg_id, line))
elif i == 1:
try:
stroke_count = int(line)
except ValueError:
raise ValueError(("%s: Second line has to be the number of "
"strokeswhich has to be an integer, but "
"was '%s'") % (scg_id, line))
if stroke_count <= 0:
raise ValueError(("%s: Stroke count was %i, but should be "
"> 0.") % (scg_id, stroke_count))
elif i == 2:
try:
stroke_point_count = int(line)
except ValueError:
raise ValueError("%s: Third line has to be the number of "
"points which has to be an integer, but was "
"'%s'" % (scg_id, line))
if stroke_point_count <= 0:
raise ValueError(("%s: Stroke point count was %i, but should "
"be > 0.") % (scg_id, stroke_count))
elif i > 2:
if stroke_point_count > 0:
x, y = [int(el) for el in line.strip().split(" ")]
current_stroke.append((x, y))
time += 20
stroke_point_count -= 1
elif line == 'ANNOTATIONS' or got_annotations:
got_annotations = True
annotations.append(line)
elif stroke_count > 0:
try:
stroke_point_count = int(line)
except ValueError:
raise ValueError(("%s: Line %i has to be the number of "
"points which has to be an integer, "
" but was '%s'") %
(scg_id, i + 1, line))
if stroke_point_count <= 0:
raise ValueError(("%s: Stroke point count was %i, but "
"should be > 0.") %
(scg_id, stroke_count))
if stroke_point_count == 0 and len(current_stroke) > 0:
time += 200
recording.append(current_stroke)
stroke_count -= 1
current_stroke = []
return recording
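A small worked example with synthetic input, one stroke of two points:
sample = "SCG_INK\n1\n2\n10 20\n30 40"
strokes = parse_scg_ink_file(sample, "example.scgink")
# strokes == [[(10, 20), (30, 40)]]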
|
def _choose_batch_size(inputs, batch_size):
"""Returns batch size based on inputs and batch_size parameter."""
if len(inputs) > 0:
input_batch_size = len(inputs[0])
if batch_size == -1:
batch_size = input_batch_size
if input_batch_size != batch_size:
raise ValueError(
f"Requested batch_size={batch_size}, but input 0 has batch_size={input_batch_size}")
if batch_size == -1:
raise RuntimeError(
"Operators with no inputs need to have 'batch_size' parameter specified.")
return batch_size
|
def toAtariAction(action):
"""
action_table = [[0, 0, 0], # NOOP
[1, 0, 0], # LEFT (forward)
[1, 0, 1], # UPLEFT (forward jump)
[0, 0, 1], # UP (jump)
[0, 1, 1], # UPRIGHT (backward jump)
[0, 1, 0]] # RIGHT (backward)
"""
left = action[0]
right = action[1]
jump = action[2]
if left == right:
left = 0
right = 0
if left == 1 and jump == 0:
return 1
if left == 1 and jump == 1:
return 2
if right == 1 and jump == 0:
return 5
if right == 1 and jump == 1:
return 4
if jump == 1:
return 3
return 0
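For example, mapping against the table in the docstring:
assert toAtariAction([1, 0, 1]) == 2  # LEFT + jump -> UPLEFT
assert toAtariAction([1, 1, 0]) == 0  # conflicting LEFT+RIGHT cancel to NOOP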
|
def get_type_name(type_name, sub_type=None):
"""
"""
if type_name in ("string", "enum"):
return "str"
if type_name == "boolean":
return "bool"
if type_name in ("integer", "long"):
return "int"
if type_name == "time":
return "float"
if type_name == "object":
return "dict"
return type_name
|
def lister(value):
"""
Returns a (comma separated) string value as a list
"""
return value.split(',') if value else []
|
def gcd(a, b):
"""Returns the greatest common divisor of a and b, using the Euclidean
algorithm."""
if a <= 0 or b <= 0:
raise ValueError('Arguments must be positive integers')
while b != 0:
tmp = b
b = a % b
a = tmp
return a
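For example:
assert gcd(48, 18) == 6
assert gcd(7, 13) == 1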
|
def checkMatches(kps):
"""
Take in keypoints
If they are above 100, there are enough to continue, return True
Else, False
"""
if len(kps) >= 100:
print("There are enough matches to continue...")
return True
else:
print("There are not enough matches to continue.")
return False
|
def is_valid(n: str) -> bool:
"""Tests if n is float or int"""
try:
float(n) or int(n)
return True
except ValueError:
return False
|
def _expand_gen_repr(args):
"""Like repr but any generator-like object has its iterator consumed
and then called repr on."""
new_args_list = []
for i in args:
# detect iterable objects that do not have their own override of __str__
if hasattr(i, '__iter__'):
to_str = getattr(i, '__str__')
if to_str.__objclass__ == object:
# the repr for a generator is just type+address, expand it out instead.
new_args_list.append([_expand_gen_repr([j])[0] for j in i])
continue
# normal case: uses the built-in to-string
new_args_list.append(i)
return new_args_list
|
def strcmp(s1, s2):
    """
    Return True if the two strings are equal, character by character.
    """
    if len(s1) != len(s2):
        return False
    for a, b in zip(s1, s2):
        if a != b:
            return False
    return True
|
def find_bleeding_location_columns(columns):
"""This method find bleeding column sites.
.. note: bleeding_severe is not a location
and therefore should not be included.
Also we should double check that
they are all boolean variables.
.. warning: Include other, severe, severity?
.. note: Bleeding other might be an string.
"""
# Create locations
locations = ['skin', 'mucosal', 'nose', 'gum',
'urine', 'vaginal', 'vensite', 'gi']
# Return variables
return [e for e in columns
if 'bleeding_' in e and
e.split('_')[1] in locations]
|
def lanthoc(b3, b5, b7):
"""
Leaf Anthocyanid Content (Wulf and Stuhler, 2015).
.. math:: LAnthoC = b7/(b3 - b5)
:param b3: Green.
:type b3: numpy.ndarray or float
:param b5: Red-edge 1.
:type b5: numpy.ndarray or float
:param b7: Red-edge 3.
:type b7: numpy.ndarray or float
:returns LAnthoC: Index value
.. Tip::
Wulf, H.; Stuhler, S. 2015. Sentinel-2: Land Cover, \
Preliminary User Feedback on Sentinel-2A Data. \
in: Proceedings of the Sentinel-2A Expert Users \
Technical Meeting, Frascati, Italy, 29-30 September 2015.
"""
LAnthoC = b7/(b3 - b5)
return LAnthoC
|
def mass_transfer_coefficient_mackay_matsugu(wind_speed, diameter, schmdt_nmbr,
n = 0.25):
"""
Return the mass transfer coefficient [m/s]
source : (Mackay and Matsugu, 1973)
Parameters
----------
wind_speed : Wind speed 10 meters above the surface [m/s]
diameter : Pool diameter [m]
schmdt_nmbr : Schmidt number []
n : Wind profile (0.25->1), the default is 0.25.
"""
C = 0.0292 * schmdt_nmbr ** -0.47
q = (2-n)/(2+n)
r = -n/(2+n)
return C * ((wind_speed * 3600) ** q) * (diameter ** r) / 3600
|
def frequency(myMoves, opMoves, myType, opType):
"""--> frequency of the oponent playing myType (either 0 or 1) if
I played opType (either 0 or 1) the round before."""
#startRound = opMoves.index(0)
#i = len(myMoves)-1 - startRound
i = len(myMoves)-1
N = 0; M = 0
while i > 0:
if myMoves[-(i+1)] == myType:
N += 1
if opMoves[-i] == opType:
M += 1
i -= 1
if N == 0: return 1.0 # rather arbitrary choice !!!
else: return float(M) / float(N)
|
def format_float_as_percentage(value):
"""Format float value as a percentage string."""
return f"{value:.0%}"
|
def pad(seq, n, fillvalue=None):
"""
You want a sequence at least n items long, say for an unpacking
operation. You might not have n items. This will right-pad your
sequence into one of the desired length, using your
desired fillvalue (by default, None).
"""
length = len(seq)
if length == n:
return seq
else:
needed = n - length
return seq + type(seq)([fillvalue] * needed)
# itertools equivalent:
# return chain(iterable, repeat(fillvalue, n-len(iterable)))
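For example, right-padding for safe unpacking (the padded type matches the input type):
a, b, c = pad([1, 2], 3)
# a == 1, b == 2, c is None
assert pad((1,), 3, fillvalue=0) == (1, 0, 0)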
|
def verify_probability(probability):
""" Makes sure it's a probability. """
    return 0 <= probability <= 1
|
def covert_idx_to_hashable_tuple(idx):
"""Converts idxs to hashable type for set, slice is not hashable"""
return (idx[0], idx[1], idx[2], str(idx[3]), str(idx[4]))
|
def sunshine_duration(code: str, unit: str = 'minutes') -> str:
"""
Translates a 5-digit sunlight duration code
"""
return f'Duration of sunlight: {int(code[1:])} {unit}'
|
def clear_buffers(player_list) -> list:
"""Clear score buffers for every player.
Function that clears score buffers for every player and sets them to 0
by calling instance method reset_buffer() on every
player in player_list
Args:
player_list (list): List of player instances
Returns:
player_list (list): Modified list of player instances
"""
for player in player_list:
player.reset_buffer()
return player_list
|
def add_token(sent, tag_infos):
""" add special tokens specified by tag_infos to each element in list
tag_infos: list of tuples (tag_type,tag)
each tag_info results in a token of the form: __{tag_type}__{tag}__
"""
tokens = []
for tag_type, tag in tag_infos:
token = '__' + tag_type + '__' + tag + '__'
tokens.append(token)
return ' '.join(tokens) + ' ' + sent
|
def sublime_basic_command(path):
"""
Form bash command to open file in Sublime Text.
    @type path: str
    @rtype: str
"""
return str.format(
"subl \"{path}\"",
path=path
)
|
def gen_participant_json(passwords, expression, randomize=True):
"""
    Function to create new user records, suitable for storing in a JSON file
    :param passwords: list with passwords
    :param expression: string that contains the fixed part of the username
    :param randomize: indicates whether the passwords should be assigned randomly
    :return: dict mapping "1", "2", ... to {"username": ..., "password": ...} dicts
"""
from random import shuffle
# First create a dict
users = {}
if randomize:
shuffle(passwords)
for i, pss in enumerate(passwords, 1):
users["{}".format(i)] = {"username": "{0}{1}".format(expression, i), "password": pss}
return users
|
def format_mac(mac: str) -> str:
"""Format the mac address string for entry into dev reg."""
to_test = mac
if len(to_test) == 17 and to_test.count(":") == 5:
return to_test.lower()
if len(to_test) == 17 and to_test.count("-") == 5:
to_test = to_test.replace("-", "")
elif len(to_test) == 14 and to_test.count(".") == 2:
to_test = to_test.replace(".", "")
if len(to_test) == 12:
# no : included
return ":".join(to_test.lower()[i : i + 2] for i in range(0, 12, 2))
# Not sure how formatted, return original
return mac
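All of the accepted spellings normalize to the same colon-separated lowercase form:
assert format_mac("AA:BB:CC:DD:EE:FF") == "aa:bb:cc:dd:ee:ff"
assert format_mac("AA-BB-CC-DD-EE-FF") == "aa:bb:cc:dd:ee:ff"
assert format_mac("aabb.ccdd.eeff") == "aa:bb:cc:dd:ee:ff"
assert format_mac("not-a-mac") == "not-a-mac"  # unrecognized input is returned as-is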
|
def find_function(functions, pc):
"""Given a PC, figure out which function it is in.
Args:
        functions: address-sorted list of (addr: int, name: str)
        pc: the program counter to look up
    Returns:
        str: Name of the containing function, or None if not found.
Raises:
Nothing
"""
low = 0
high = len(functions)
while low < high:
mid = int((low + high) / 2)
if pc < functions[mid][0]:
high = mid
else:
low = mid + 1
    if low == 0 or low == len(functions):
        # pc precedes the first known function, or lies past the last entry
        return None
    return functions[low - 1][1]
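A usage sketch with a hypothetical, address-sorted symbol table:
symbols = [(0x1000, "init"), (0x2000, "main"), (0x3000, "cleanup")]
assert find_function(symbols, 0x1500) == "init"
assert find_function(symbols, 0x2000) == "main"
assert find_function(symbols, 0x0500) is None  # pc before the first function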
|
def mock_ssh(host, command):
"""Avoid network connection."""
return ['/bin/sh', '-c', command]
|
def recur(n, a, c, b):
"""recursive function that keeps getting called until all disks are at the target destination"""
count = 0
if n >= 1:
first = recur(n - 1, a, b, c)
c.append(a.pop())
count += 1
second = recur(n - 1, b, c, a)
count += first + second
return count
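A quick check with three disks on peg a; 2**3 - 1 = 7 moves land them on peg c:
a, b, c = [3, 2, 1], [], []
moves = recur(3, a, c, b)
# moves == 7; c == [3, 2, 1]; a and b are empty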
|
def num_permutations(list_of_lists):
"""
    Returns the number of value combinations (the size of the Cartesian
    product) from an input list of lists, representing domains.
"""
if not list_of_lists:
return 0
count = 1
for lst in list_of_lists:
count *= len(lst)
return count
|
def metade(p):
"""
    -> Halves the given value
    :param p: initial value
    :return: the value divided in half
"""
res = p / 2
return res
|
def linegraph(G):
"""Graph, the vertices of which are edges of G,
with two vertices being adjacent iff the corresponding
edges share a vertex."""
L = {}
for x in G:
for y in G[x]:
nx = [frozenset([x,z]) for z in G[x] if z != y]
ny = [frozenset([y,z]) for z in G[y] if z != x]
L[frozenset([x,y])] = frozenset(nx+ny)
return L
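A small sketch on the path graph a-b-c as an adjacency dict; its line graph joins the two edges through their shared vertex b:
G = {'a': ['b'], 'b': ['a', 'c'], 'c': ['b']}
L = linegraph(G)
# L[frozenset({'a', 'b'})] == frozenset({frozenset({'b', 'c'})})
# L[frozenset({'b', 'c'})] == frozenset({frozenset({'a', 'b'})})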
|
def iota(n):
"""
    Return the list of integers [0, 1, ..., n-1].
"""
return list(range(n))
|
def calc_ratio(divident, divisor, min_divident=None, min_divisor=None):
"""
    Return the quotient of true division for `divident` and `divisor`.
    If `divident` is below `min_divident` or `divisor` is below `min_divisor`
    (or the division fails), return zero.
"""
try:
assert(min_divident is None or divident >= min_divident)
assert(min_divisor is None or divisor >= min_divisor)
return divident/divisor
except (ValueError, TypeError, ZeroDivisionError, AssertionError):
return 0.0
|
def newItem(url=None, post=None, metatags=None):
""" return a new 'item' """
# This is not the only place where items are made. In
# the main sourcefile, check out the __getItems() function
item = {}
item['url'] = url
    if post is not None:
item['post'] = post
return item
|
def find(predicate, seq):
"""Method shamelessly taken from https://github.com/Rapptz/discord.py """
for element in seq:
if predicate(element):
return element
return None
|
def check_unique_list_of_words(words, arg_name):
"""Check that words is a list and each element is a str without any duplication"""
if not isinstance(words, list):
raise ValueError(arg_name + " needs to be a list of words of type string.")
words_set = set()
for word in words:
if not isinstance(word, str):
raise ValueError("each word in " + arg_name + " needs to be type str.")
if word in words_set:
raise ValueError(arg_name + " contains duplicate word: " + word + ".")
words_set.add(word)
return words_set
|
def validate_clusters(clusters):
"""
Prevent cluster from being empty.
@param clusters : clusters to be validated
@return : whether no empty cluster
"""
return all(len(cluster) > 0 for cluster in clusters.values())
|
def _ark(actual, predicted, k=10):
"""
Computes the average recall at k.
Parameters
----------
actual : list
A list of actual items to be predicted
predicted : list
An ordered list of predicted items
k : int, default = 10
Number of predictions to consider
Returns:
-------
    score : float
        The average recall at k.
"""
    if len(predicted) > k:
        predicted = predicted[:k]
    score = 0.0
    num_hits = 0.0
    for i, p in enumerate(predicted):
if p in actual and p not in predicted[:i]:
num_hits += 1.0
score += num_hits / (i+1.0)
if not actual:
return 0.0
return score / len(actual)
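For example, two relevant items and one miss in the top 3:
score = _ark([1, 2], [1, 3, 2], k=3)
# hits at ranks 1 and 3: (1/1 + 2/3) / len(actual) = (5/3) / 2
assert abs(score - 5.0 / 6.0) < 1e-9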
|
def _is_git_url_mismatch(mismatch_item):
"""Returns whether the given mismatch item is for a GitHub URL."""
_, (required, _) = mismatch_item
return required.startswith('git')
|
def fitness_bc(agent, environment, raw_fitness, path):
""" Returns as a tuple the raw fitness and raw observation of an agent evaluation. """
if len(path) > 0:
return raw_fitness, path
return raw_fitness, [0]
|
def replace_all(text, dic):
"""
    :param text: input string of template
    :param dic: mapping of substrings to their replacements
    replacing method - from stackOverflow
    :return: String with all occurrences replaced (e.g. parens for brackets)
"""
for i, j in dic.items():
text = text.replace(i, j)
return text
|
def _parse_list_response(response, outer_key, inner_key):
"""Responses from the niaopendata service aren't entirely consistent across
the board, so we can't rely on a single consistent format even for a single
endpoint. This function checks and can parse all known return formats into
simple lists suitable for returning to callers.
"""
outer = response.get(outer_key)
if outer is None:
return []
inner = outer[inner_key]
if isinstance(inner, dict):
return [inner]
return inner
|
def gen_params(guess):
"""Return dict of parameters with guess string."""
return {'file': 'foo', 'signature': guess}
|
def isolateNameToLabel(names):
"""Function to process isolate names to labels
appropriate for visualisation.
Args:
names (list)
List of isolate names.
Returns:
labels (list)
List of isolate labels.
"""
# useful to have as a function in case we
# want to remove certain characters
labels = [name.split('/')[-1].replace('.','_').replace(':','').replace('(','_').replace(')','_') \
for name in names]
return labels
|
def all_equal_length(*arrays):
"""Checks whether input arrays have equal length.
:param arrays: one or more lists/numpy arrays/pd.Series
:return: true if lengths are all equal, otherwise false
"""
length = None
for arr in arrays:
try:
if length is not None and len(arr) != length:
return False
length = len(arr)
except TypeError: # continue if `arr` has no len method. constants and None are excluded from length check
pass
return True
|
def calc_series_range_well(wellnumber, imgperwell):
"""
This function can be used when the number of positions or scenes
per well is equal for every well
    Well numbers start at zero and have nothing to do with the actual wellID, e.g. C2
"""
seriesseq = range(wellnumber * imgperwell, wellnumber * imgperwell + imgperwell, 1)
return seriesseq
|
def euclid_dist(coords1, coords2, weights=None):
"""
Given two equal-length lists of coordinates in multi-dimensional space,
return the Euclidean distance between the two points.
"""
assert len(coords1) == len(coords2), "Coordinate vectors differ in length"
squared_diffs = [(coords1[i] - coords2[i])**2 for i in range(0,len(coords1))]
if weights is not None:
assert len(weights) == len(squared_diffs), "Weight vector is different length than coordinate vectors"
squared_diffs = [weights[i]*squared_diffs[i] for i in range(0,len(weights))]
euclidean_distance = sum(squared_diffs)**.5
return euclidean_distance
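For example, the classic 3-4-5 triangle, with and without weights:
assert euclid_dist([0, 0], [3, 4]) == 5.0
assert euclid_dist([0, 0], [3, 4], weights=[1, 0]) == 3.0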
|
def valid_day(tms):
"""Checks if day of month is valid"""
year, month, day = tms[:3]
    if day < 1 or day > 31:
        return 0
    if month in [4, 6, 9, 11] and day > 30:
        return 0
    # February: allow day 29 only in (simple year % 4) leap years
    if month == 2 and day > (29 if year % 4 == 0 else 28):
        return 0
return 1
|
def create_new_label_columns(label_columns: list) -> list:
"""
Extracts names for new label columns for one-hot-encoded data
"""
transformed_label_columns = []
for column in label_columns:
transformed_label_columns.append(column + "_" + "none")
transformed_label_columns.append(column + "_" + "favor")
transformed_label_columns.append(column + "_" + "against")
return transformed_label_columns
|
def getKey(data, key, default=None):
"""
Returns the key from the data if available or the given default.
:param data: Data structure to inspect
:type data: dict
:param key: Key to lookup in dictionary
:type key: str
:param default: Default value to return when key is not set
:type default: any
"""
if key in data:
return data[key]
else:
return default
|
def _to_xml(number):
"""
convert Python number to XML String representation
Keyword Parameters:
number -- Python numeric variable
>>> _to_xml(0)
'0'
>>> _to_xml(0.09834894752)
'0.09834894752'
>>> _to_xml(None)
''
"""
if number is None:
return ''
return str(number)
|
def psit(t, xp, x, mu, phi, sigma):
""" score of the model (gradient of log-likelihood at theta=theta_0)
"""
if t == 0:
return -0.5 / sigma ** 2 + \
(0.5 * (1. - phi ** 2) / sigma ** 4) * (x - mu) ** 2
else:
return -0.5 / sigma ** 2 + (0.5 / sigma ** 4) * \
((x - mu) - phi * (xp - mu)) ** 2
|
def get_parent_id_list(term_list):
"""Takes a list with string elements of a node, return a list containing string
identifiers of its parent nodes.
Args:
term_list: for example:
['id: MI:0000',
'name: molecular interaction',
'def: "Controlled vocabularies originally created for protein protein
interactions, extended to other molecules interactions." [PMID:14755292]',
'subset: Drugable',
'subset: PSI-MI_slim']
Returns:
id_string_list: a list of string identifiers of its parent nodes. For example:
["MI:0013", "MI:1349"]
"""
id_string_list = []
for term in term_list:
        # term containing parent information is "is_a: MI:0013 ! biophysical"
# or "relationship: part_of MI:1349 ! chembl"
if term.startswith('is_a'):
id_string_list.append(term.split(' ')[1])
elif term.startswith('relationship'):
id_string_list.append(term.split(' ')[2])
return id_string_list
|
def sgd(w, dw, params=None):
"""
Perform Vanilla SGD for parameter update.
Arguments:
w: numpy array of current weight
dw: numpy array of gradient of loss w.r.t. current weight
params: dictionary containing hyper-parameters
- lr: float of learning rate
Outputs:
next_w: updated weight
params: updated dictionary of hyper-parameters
"""
    # set default parameters (avoid mutating a shared default argument)
    if params is None:
        params = {}
    params.setdefault('lr', 1e-2)
# update w
next_w = w - params['lr'] * dw
return next_w, params
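A usage sketch, assuming numpy arrays for the weight and gradient:
import numpy as np
w = np.array([1.0, 2.0])
dw = np.array([0.1, -0.1])
next_w, params = sgd(w, dw, {'lr': 0.1})
# next_w == array([0.99, 2.01]); params carries lr forward for the next step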
|
def list_text_size(info):
"""Calculate size and duration of the claims."""
number = len(info["claims"])
size = info["size"]
seconds = info["duration"]
size_gb = size/(1024**3)
hrs = seconds / 3600
days = hrs / 24
hr = seconds // 3600
mi = (seconds % 3600) // 60
sec = (seconds % 3600) % 60
m = [f"Claims: {number}",
f"Total size: {size_gb:.4f} GB",
f"Total duration: {hr} h {mi} min {sec} s, "
f"or {days:.4f} days"]
text = "\n".join(m)
return text
|
def load_nonfitbit_data(nonfitbit_id):
"""
"""
# for n in nonfitbit_id:
return nonfitbit_id
|
def convert(value):
"""Converts some specific json objects to python object"""
if isinstance(value, dict):
return {convert(k): convert(v) for k, v in value.items()}
elif isinstance(value, list):
return [convert(element) for element in value]
else:
return value
|
def term_info_as_list(term_info):
"""
Given a dictionary as returned by `parse_term_code`, return a list
suitable for use as a sort key.
"""
return [term_info["year"], term_info["spring"]]
|
def get_name_from_selector(selector):
"""
A basic method to get the name from a name selector.
"""
if selector.startswith("name="):
return selector[len("name="):]
if selector.startswith("&"):
return selector[len("&"):]
return selector
|
def enterSnapMode(mode, wp, idx):
"""Checks if we are in snap mode. This happens if we are already snapping, or
if we have reached a prescribed window depth.
"""
if mode or not wp.depthType():
return True
t = wp.depthType()
return wp[t][idx] >= wp.windowDepth()
|
def is_in_path(id, path):
"""Determines whether id is in the path as returned from /entity/{id}/path
:param id: synapse id string
:param path: object as returned from '/entity/{id}/path'
:returns: True or False
"""
return id in [item['id'] for item in path['path']]
|
def _get_title_and_df_keys_from_tab_value(chart_title):
"""Remove extra quotes from Jinja template and separate multi-column chart titles"""
chart_title = chart_title.replace('"', '')
axes_titles = chart_title.split(' / ')
df_keys = [item.lower().replace(' ', '_') for item in axes_titles]
return chart_title, df_keys, axes_titles
|
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
# Note, not Iterable because we don't want to handle strings like that.
if isinstance(element, list) or isinstance(element, tuple):
return [prefix + e for e in element]
else:
return prefix + element
|
def queryAll(node, matcher, deep=True, inner=False, result=None):
"""
Recurses the tree starting with the given node and returns a list of nodes
matched by the given matcher method
- node: any node
- matcher: function which should return a truish value when node matches
- deep: whether inner scopes should be scanned, too
- inner: used internally to differentiate between current and inner nodes
- result: can be used to extend an existing list, otherwise a new list is created and returned
"""
    if result is None:
result = []
# Don't do in closure functions
if inner and node.type == "script" and not deep:
return None
if matcher(node):
result.append(node)
for child in node:
queryAll(child, matcher, deep, True, result)
return result
|
def lt(a,b):
""" return 1 if True, else 0 """
    if a < b:
return 1
else:
return 0
|
def get_domain_max(d):
""" Retrieves the upper bound of an integer or interval variable domain.
The domain can be either:
* a single number value,
* a list of numbers, or tuple of 2 numbers representing an interval.
This method returns the last number of the domain.
Args:
d: Domain
Returns:
Domain upper bound
"""
if isinstance(d, (tuple, list)):
d = d[-1]
if isinstance(d, (tuple, list)):
d = d[-1]
return d
|
def rivers_with_station(stations):
"""Return a set with the names of the rivers with a monitoring station"""
# make an empty list for the items to go into
river_names = []
for i in stations:
river_names.append(i.river)
# convert to a set to remove duplicates
river_names_set = set(river_names)
return river_names_set
|
def is_usb_serial(device, vid=None, pid=None, vendor=None, serial=None, *args,
**kwargs):
"""Checks device to see if its a USB Serial device.
The caller already filters on the subsystem being 'tty'.
If serial_num or vendor is provided, then it will further check to
see if the serial number and vendor of the device also matches.
"""
if 'ID_VENDOR' not in device:
return False
if vid is not None:
if device['ID_VENDOR_ID'] != vid:
return False
if pid is not None:
if device['ID_MODEL_ID'] != pid:
return False
if vendor is not None:
if 'ID_VENDOR' not in device:
return False
if not device['ID_VENDOR'].startswith(vendor):
return False
if serial is not None:
if 'ID_SERIAL_SHORT' not in device:
return False
if not device['ID_SERIAL_SHORT'].startswith(serial):
return False
return True
|
def to_bool(boolstr):
""" str to bool """
return boolstr.lower() == "true"
|
def ranking_to_pairwise_comparisons(distance_pairs, ranked_stimuli):
""" Convert ranking data to comparisons of pairs of pairs of stimuli
    :param distance_pairs: strings of the form 'a,stim1<b,stim2', naming the
        two stimulus pairs to compare (the stimulus is the token after each comma)
    :type distance_pairs: list
    :param ranked_stimuli: list of lists; each inner list is one 'repeat',
        with stimuli ordered by rank
    :type ranked_stimuli: list
"""
# ranked_stimuli is a list of lists. each list is a 'repeat'
rank = {}
comparisons = {}
for stimulus_list in ranked_stimuli:
for index in range(len(stimulus_list)):
rank[stimulus_list[index]] = index
for pair in distance_pairs:
dists = pair.split('<')
stim1 = dists[0].split(',')[1]
stim2 = dists[1].split(',')[1]
if pair not in comparisons:
comparisons[pair] = 1 if rank[stim1] < rank[stim2] else 0
else:
if rank[stim1] < rank[stim2]:
comparisons[pair] += 1
return comparisons
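A sketch using the pair encoding inferred from the parsing above ('x,stim' halves joined by '<'); one repeat in which s1 outranks s2:
ranked = [['s1', 's2', 's3']]  # one repeat; s1 has the best rank (index 0)
pairs = ['ref,s1<ref,s2']
counts = ranking_to_pairwise_comparisons(pairs, ranked)
# counts == {'ref,s1<ref,s2': 1} because rank['s1'] < rank['s2']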
|
def aggregate_by_player_id(statistics, playerid, fields):
"""
Inputs:
statistics - List of batting statistics dictionaries
playerid - Player ID field name
fields - List of fields to aggregate
Output:
Returns a nested dictionary whose keys are player IDs and whose values
are dictionaries of aggregated stats. Only the fields from the fields
input will be aggregated in the aggregated stats dictionaries.
"""
players = {}
# create nested dict with outer keys of player ids and inner dict of fields
for dic in statistics:
if dic[playerid] not in players:
players[dic[playerid]] = {playerid: dic[playerid]}
for field in fields:
players[dic[playerid]][field] = 0
# loop through statistics again, incrementing field values
for dic in statistics:
for field in fields:
players[dic[playerid]][field] += int(dic[field])
return players
|
def tofloat(val):
"""Function to convert the values read from
several fields as defined in the ENSDF manual
(section V.14).
Returns the energy value in keV.
"""
if val.strip():
return float(val)
else:
        return 0.0
|
def verify_capacity(capacity, obj_name):
"""
:type capacity: int
:param capacity: Capacity to check
:type obj_name: str
    :param obj_name: Object name to check. Example: pool, volume, etc.
:rtype: int
:return: Fixed capacity format
:raises: ValueError: Invalid capacity
"""
capacity = int(capacity)
if capacity < 1:
raise ValueError(
'{0} capacity must be >= 1 GB ("{1}" was given)'.format(obj_name,
capacity))
return capacity
|
def datt1(b4, b5, b8):
"""
Vegetation Index proposed by Datt 1 (Datt, 1999a).
.. math:: DATT1 = (b8 - b5) / (b8 - b4)
:param b4: Red.
:type b4: numpy.ndarray or float
:param b5: Red-edge 1.
:type b5: numpy.ndarray or float
:param b8: NIR.
:type b8: numpy.ndarray or float
:returns DATT1: Index value
.. Tip::
Datt, B. 1999a. Remote sensing of water content in Eucalyptus leaves. \
Australian Journal of Botany 47, 909-923. doi:10.1071/BT98042.
"""
DATT1 = (b8 - b5) / (b8 - b4)
return DATT1
|
def check_value_in_dictionary_of_exceptions(name, exceptions_dict, default):
"""Performs a dictionary lookup to try to map the player's name/school to the correct Basketball-Reference page."""
return exceptions_dict.get(name, default)
|
def _url(server_url, physical_port, action):
"""
Helper function to build an url for given port and target
Args:
server_url: a str, the url for mux server, like http://10.0.0.64:8080/mux/vms17-8
physical_port: physical port on switch, an integer starting from 1
action: a str, either "output" or "drop"
Returns:
The url for posting flow update request, like http://10.0.0.64:8080/mux/vms17-8/1/drop(output)
"""
return server_url + "/{}/{}".format(physical_port - 1, action)
|
def q_function_approximation(x: str = 'x') -> str:
"""
    Returns a string representing a simple exponential approximation for the Q function.
**Parameters**
- `x`: str
The argument of the Q function.
**Returns**
`out`: str
TeX compatible string.
"""
return f'Q({x}) \\approx \\frac{{1}}{{2}} e^{{-\\frac{{{x}^2}}{{2}}}}'
|
def object_split(object):
"""
>>> object_split("Sequence:F02E9.2a")
('Sequence', 'F02E9.2a')
"""
return tuple(object.split(":"))
|
def fixed_parse(s):
"""
    Parse a floating point string in a log file.
    This is a simple variant on float() that returns None in the case of a
    single dash being passed to s.
    :param str s: The string containing the floating point number to parse
    :returns: A float value, or None if s is '-'
"""
return float(s) if s != '-' else None
|
def seg_units2chars(seg_units):
"""seg_units is [list, dict] for use in segmenting words."""
# Initialize chars as a copy of the list of simple chars in seg_units
chars = list(seg_units[0])
for chs in seg_units[1].values():
chars.extend(chs)
return chars
|
def is_fake(row_values):
"""
Check whether row is fake tuple based on "tid" attribute in row and computed hash
from other attributes in row.
:param row_values: dict with keys as column names and values as values from server DB (decrypted using `decrypt_row`)
:return: bool
"""
return int(row_values["tid"]) >= 0
|
def unescape(val, maxLength = 0):
"""
Unquotes several HTML-quoted characters in a string.
:param val: The value to be unescaped.
:type val: str
:param maxLength: Cut-off after maxLength characters.
A value of 0 means "unlimited". (default)
:type maxLength: int
:returns: The unquoted string.
:rtype: str
"""
    val = val \
        .replace("&lt;", "<") \
        .replace("&gt;", ">") \
        .replace("&quot;", "\"") \
        .replace("&#39;", "'")
if maxLength > 0:
return val[0:maxLength]
return val
|
def p_XOR_fuzzy_3d(z, u, v):
"""
    Compute XOR probability given p(z), p(u), p(v)
"""
return 3 * z * u * v - 2 * (z * u + u * v + z * v) + z + u + v
|
def alter_context(context):
""" Modify the context and return it """
# An extra variable
context['ADD'] = '127'
return context
|
def get_span_labels(sentence_tags, inv_label_mapping=None):
"""
Desc:
        get from token-level labels to a list of entities;
        it does not matter whether the tagging scheme is BMES, BIO, or BIOES
Returns:
a list of entities
[(start, end, labels), (start, end, labels)]
"""
if inv_label_mapping:
sentence_tags = [inv_label_mapping[i] for i in sentence_tags]
span_labels = []
last = "O"
start = -1
for i, tag in enumerate(sentence_tags):
pos, _ = (None, "O") if tag == "O" else tag.split("-")
if (pos == "S" or pos == "B" or tag == "O") and last != "O":
span_labels.append((start, i - 1, last.split("-")[-1]))
if pos == "B" or pos == "S" or last == "O":
start = i
last = tag
if sentence_tags[-1] != "O":
        span_labels.append((start, len(sentence_tags) - 1, sentence_tags[-1].split("-")[-1]))
return span_labels
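For example, with mixed B/I/S-style tags and no inverse mapping:
tags = ["B-PER", "I-PER", "O", "S-LOC"]
assert get_span_labels(tags) == [(0, 1, "PER"), (3, 3, "LOC")]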
|
def typecheck(obj, *args):
"""Check type of nested objects"""
if isinstance(obj, args[0]):
if len(args) == 1:
return True
if hasattr(args[0], "__iter__"):
for i in obj:
if not typecheck(i, *args[1:]):
return False
return True
return False
|
def getSidebarItems(params):
"""Retrieves a list of sidebar entries for this view.
Params usage:
The params dictionary is provided to the menu_text's format.
sidebar: The sidebar value is returned directly if non-False
sidebar_defaults: The sidebar_defaults are used to construct the
sidebar items for this View. It is expected to be a tuple of
      three items: the item's url, its menu_text, and its
      access_type; see getSidebarMenus on how access_type is used.
sidebar_additional: The sidebar_additional values are appended
to the list of items verbatim, and should be in the format
expected by getSidebarMenus.
Args:
params: a dict with params for this View.
"""
# Return the found result
if params['sidebar']:
default = params['sidebar']
result = default[:]
for item in params['sidebar_additional']:
result.append(item)
return result
    # Construct defaults manually
defaults = params['sidebar_defaults']
result = []
for item in params['sidebar_additional']:
result.append(item)
for url, menu_text, access_type in defaults:
url = url % params['url_name'].lower()
item = (url, menu_text % params, access_type)
result.append(item)
return result
|
def linmag(vali, magstart, magend, dur):
"""
    function for generating a ramp of values that is linear in magnification
vali: initial value (globally)
magstart: magnification at the start of the ramp
magend: magnification at the end of the ramp
dur: number of steps (duration)
"""
out = []
for i in range(dur):
out.append(float(vali) / ((magend - magstart) / dur * i + magstart))
return out
|
def _get_ngrams(n, text):
"""Calcualtes n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
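For example, the bigrams of a four-token text:
assert _get_ngrams(2, ["to", "be", "or", "not"]) == {("to", "be"), ("be", "or"), ("or", "not")}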
|
def splitrip(obj, split):
"""Pass."""
vals = [x.strip() for x in obj.split(split) if x.strip()]
return vals
|
def issubclass_of_any(var, list_types):
""" Checks if var is instance of any of the classes in the list. """
    return any(issubclass(type(var), type_elem) for type_elem in list_types)
|