def filter2query(tags):
"""
tags @list : ex.: [{key: value, ...}, ...]
"""
return [[{"k": k, "v": v} for k,v in tags_.items()] for tags_ in tags]
|
def appendPositives(num_list):
"""assumes num_list is a list of numerics
returns a list of numerics, the positive numerics of num_list"""
pos_list = []
for num in num_list:
if num > 0:
pos_list.append(num)
return pos_list
|
def get_IW_var_from_hyp(hyp):
"""
prior_iWishart.psi
    Seems OK, although only if `self.natparam[:self.d, :self.d]`
    means the latest "matrix part" of the hyperparameter
"""
return -2 * hyp[0]
|
def non_private_tags_in_dicom_dataset(ds):
"""Return all non-private tags from a DICOM dataset."""
non_private_tags = []
for elem in ds:
if not elem.tag.is_private and not (
# Ignore retired Group Length elements
elem.tag.element == 0
and elem.tag.group > 6
):
non_private_tags.append(elem.tag)
return non_private_tags
|
def get_progress_for_repre(doc, active_site, remote_site):
"""
Calculates average progress for representation.
If site has created_dt >> fully available >> progress == 1
Could be calculated in aggregate if it would be too slow
Args:
doc(dict): representation dict
Returns:
(dict) with active and remote sites progress
{'studio': 1.0, 'gdrive': -1} - gdrive site is not present
-1 is used to highlight the site should be added
{'studio': 1.0, 'gdrive': 0.0} - gdrive site is present, not
uploaded yet
"""
progress = {active_site: -1,
remote_site: -1}
if not doc:
return progress
files = {active_site: 0, remote_site: 0}
doc_files = doc.get("files") or []
for doc_file in doc_files:
if not isinstance(doc_file, dict):
continue
sites = doc_file.get("sites") or []
for site in sites:
if (
# Pype 2 compatibility
not isinstance(site, dict)
# Check if site name is one of progress sites
or site["name"] not in progress
):
continue
files[site["name"]] += 1
norm_progress = max(progress[site["name"]], 0)
if site.get("created_dt"):
progress[site["name"]] = norm_progress + 1
elif site.get("progress"):
progress[site["name"]] = norm_progress + site["progress"]
else: # site exists, might be failed, do not add again
progress[site["name"]] = 0
# for example 13 fully avail. files out of 26 >> 13/26 = 0.5
avg_progress = {}
avg_progress[active_site] = \
progress[active_site] / max(files[active_site], 1)
avg_progress[remote_site] = \
progress[remote_site] / max(files[remote_site], 1)
return avg_progress
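# A usage sketch with a made-up representation doc:
repre_doc = {"files": [{"sites": [{"name": "studio", "created_dt": "2021-01-01"},
                                  {"name": "gdrive", "progress": 0.5}]}]}
assert get_progress_for_repre(repre_doc, "studio", "gdrive") == {"studio": 1.0, "gdrive": 0.5}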
|
def od2list(od):
"""Convert an ordered dictionary to a list."""
return list(od.values())
|
def flatten(items):
"""Flattens a list"""
return [item for sublist in items for item in sublist]
|
def get_filestem(url):
"""Get the stem of the file at this url."""
return url.rsplit("/", 1)[-1].split(".", 1)[0]
|
def rotateList(e, n):
    """
    Rotate list e left by n positions, in place
    """
    if not e:
        return e
    n %= len(e)
    for i in range(n):
        e.append(e.pop(0))
    return e
|
def get_filter_from_filter_list(step_list):
"""
Build a Shotgun query filter from a list of Steps.
:returns: A Shotgun filter which can be directly added to a Shotgun query.
"""
if step_list is None:
# No Steps saved yet, allow all Steps.
return []
if not step_list:
# All steps off, ask for a non-existing step
return ["step.Step.id", "is", -1]
# General case, build the filter from the Step list.
step_filter = ["step.Step.id", "in", [x["id"] for x in step_list]]
return step_filter
|
def compute_product(n):
"""
Processes each chunk of 5 digits and returns the product of these 5 digits
If there is a zero on the sequence, even at the most significative digit,
e.g., the left-most digit, the result will be zero
"""
a = 1
for m in range(1, 6, 1):
a *= n % 10
n //= 10
return a
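# Quick check: the five least significant digits of 123456 are 6,5,4,3,2.
assert compute_product(123456) == 6 * 5 * 4 * 3 * 2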
|
def knot_insertion_kv(knotvector, u, span, r):
""" Computes the knot vector of the rational/non-rational spline after knot insertion.
Part of Algorithm A5.1 of The NURBS Book by Piegl & Tiller, 2nd Edition.
:param knotvector: knot vector
:type knotvector: list, tuple
:param u: knot
:type u: float
:param span: knot span
:type span: int
:param r: number of knot insertions
:type r: int
:return: updated knot vector
:rtype: list
"""
# Initialize variables
kv_size = len(knotvector)
kv_updated = [0.0 for _ in range(kv_size + r)]
# Compute new knot vector
for i in range(0, span + 1):
kv_updated[i] = knotvector[i]
for i in range(1, r + 1):
kv_updated[span + i] = u
for i in range(span + 1, kv_size):
kv_updated[i + r] = knotvector[i]
# Return the new knot vector
return kv_updated
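# A usage sketch: inserting u=2.5 once; span=4 is assumed to be the index
# of the knot span containing u in this knot vector.
kv = [0, 0, 0, 1, 2, 3, 4, 4, 4]
assert knot_insertion_kv(kv, 2.5, 4, 1) == [0, 0, 0, 1, 2, 2.5, 3, 4, 4, 4]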
|
def relative_coordinate(x, y, x_shift, y_shift):
"""
This function ...
:param x:
:param y:
:param x_delta:
:param y_delta:
:return:
"""
rel_x = x - x_shift
rel_y = y - y_shift
return (rel_x, rel_y)
|
def filter_roidb(roidb):
"""
    Filter out images without bounding boxes.
"""
print('before filtering, there are %d images...' % (len(roidb)))
i = 0
while i < len(roidb):
if len(roidb[i]['boxes']) == 0:
del roidb[i]
i -= 1
i += 1
print('after filtering, there are %d images...' % (len(roidb)))
return roidb
|
def str2framework(s):
"""Make sure RLlib uses a compatible framework.
RLlib natively supports tf, tf2, and torch, but
using lazy evaluation with tf leads to issues.
As of now, options must be torch or tf2
:s: Framework string
"""
if (not s):
return None
s = s.lower()
assert s in ["tf","tf2","torch"],\
"ERROR: framework {} not supported: Please used tf, tf2, or torch"\
.format(s)
return s
|
def GetNextQtnodeBounds(qtnode, x, y, size):
"""Calculate next level boundary for qtnode.
If the qtnode has further precision, call this routine
recursively.
Args:
qtnode: The remaining string of the qtnode.
x: Current left of the qtnode (degrees).
y: Current bottom of the qtnode (degrees).
size: Current size of sides of qtnode (degrees).
Returns:
List of lower left and upper right boundary.
Raises:
Exception: if qtnode is not well formed.
"""
if qtnode:
size /= 2
if qtnode[0] == "3":
return GetNextQtnodeBounds(qtnode[1:], x, y + size, size)
elif qtnode[0] == "2":
return GetNextQtnodeBounds(qtnode[1:], x + size, y + size, size)
elif qtnode[0] == "1":
return GetNextQtnodeBounds(qtnode[1:], x + size, y, size)
elif qtnode[0] == "0":
return GetNextQtnodeBounds(qtnode[1:], x, y, size)
else:
raise Exception("Error: unexpected qtnode value %s" % qtnode[0])
else:
return [x, y, x + size, y + size]
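# A usage sketch, assuming the root qtnode spans x, y in [-180, 180]
# with side length 360 (the usual Keyhole quadtree convention):
assert GetNextQtnodeBounds("03", -180.0, -180.0, 360.0) == [-180.0, -90.0, -90.0, 0.0]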
|
def to_flat_dict(d, delim='.', copy=True):
"""TLDR;
While there are entries in the dictionary that have a dict as a value:
pop them at the outer level and create a delimitted path as a key, eg:
{'a': {'b': {'c': 0}}} -> {'a.b': {'c': 0}}
# by same process
{'a.b': {'c': 0}} -> {'a.b.c': 0}
"""
flat = dict(d) if copy else d
# we copy the keys since we are modifying the dict in place
# we reverse to retain order
incomplete = list(flat)[::-1]
while(incomplete):
k = incomplete.pop()
if isinstance(flat[k], dict):
val = flat.pop(k)
# Reverse to retain order since we are popping
for subk, subv in tuple(val.items())[::-1]:
new_key = delim.join((k, subk))
flat[new_key] = subv
incomplete.append(new_key)
# elif isinstance(flat[k], (list, tuple)):
# val = flat.pop(k)
# for subk, subv in tuple(enumerate(val))[::-1]:
# new_key = delim.join((k, str(subk)))
# flat[new_key] = subv
# incomplete.append(new_key)
else:
# Re-insert entry to retain order in dict
# Python guarantees dict ordered by insertion starting in 3.6.
val = flat.pop(k)
flat[k] = val
return flat
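# A usage sketch; insertion order is preserved while flattening:
assert to_flat_dict({'a': {'b': {'c': 0}}, 'd': 1}) == {'a.b.c': 0, 'd': 1}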
|
def parse_json(json_dict):
"""
@param json_dict: should have keys: multiply_1, multiply_2, output
@return: strings of multiply_1, multiply_2, output
"""
try:
return str(json_dict['multiply_1']), str(json_dict['multiply_2']), str(json_dict['output'])
except Exception:
        raise KeyError('Error while parsing: Multiply')
|
def truncate(s, length: int):
"""Truncates sequence `s` to length `l`."""
return s[:length] if len(s) > length else s
|
def normalise_arch(arch):
""" recognise architecture name using aliases known to cabal """
arch = arch.lower()
if arch == "powerpc": return "ppc"
if arch in ["powerpc64", "powerpc64le"]: return "ppc64"
if arch in ["sparc64", "sun4"]: return "sparc"
if arch in ["mipsel", "mipseb"]: return "mips"
if arch in ["armeb", "armel"]: return "arm"
if arch == "arm64": return "aarch64"
return arch
|
def character_mapping(string_a, string_b):
"""
    Return a boolean indicating whether it is possible to map every unique character from string a to string b with a one-to-one relation.
O(n log n) time
O(n) space
"""
dico_a = {}
dico_b = {}
for char in string_a:
dico_a[char] = dico_a[char]+1 if char in dico_a else 1
for char in string_b:
dico_b[char] = dico_b[char]+1 if char in dico_b else 1
count_a = sorted(dico_a.values(), reverse=True)
count_b = sorted(dico_b.values(), reverse=True)
return count_a == count_b and len(string_a) == len(string_b)
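# Quick checks: "abb" maps onto "cdd" (a->c, b->d), but "ab" cannot map onto "cc".
assert character_mapping("abb", "cdd") is True
assert character_mapping("ab", "cc") is False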
|
def union(groups):
    """returns the union of all groups"""
    # Build the union without mutating the caller's list.
    return set().union(*map(set, groups))
|
def library_fine(d1, m1, y1, d2, m2, y2):
"""Hackerrank Problem: https://www.hackerrank.com/challenges/library-fine/problem
Your local library needs your help! Given the expected and actual return dates for a library book, create a program
that calculates the fine (if any). The fee structure is as follows:
1. If the book is returned on or before the expected return date, no fine will be charged (i.e.: fine = 0).
2. If the book is returned after the expected return day but still within the same calendar month and year as the
expected return date, fine = 15 Hackos x (the number of days late).
3. If the book is returned after the expected return month but still within the same calendar year as the expected
return date, the fine = 500 Hackos x (the number of months late).
4. If the book is returned after the calendar year in which it was expected, there is a fixed fine of 10,000 Hackos.
Charges are based only on the least precise measure of lateness. For example, whether a book is due January 1, 2017
or December 31, 2017, if it is returned January 1, 2018, that is a year late and the fine would be 10,000 Hackos.
Args:
d1 (int): return day
m1 (int): return month
y1 (int): return year
d2 (int): due day
m2 (int): due month
y2 (int): due year
Returns:
int: the amount of hackos owed based on the book date returned and date due
"""
if y1 > y2:
return 10000
elif y1 == y2:
if m1 > m2:
return 500 * (m1 - m2)
elif m1 == m2 and d1 > d2:
return 15 * (d1 - d2)
return 0
|
def escape_cell(cell):
"""
Escape table cell contents.
:param cell: Table cell (as unicode string).
:return: Escaped cell (as unicode string).
"""
cell = cell.replace(u'\\', u'\\\\')
cell = cell.replace(u'\n', u'\\n')
cell = cell.replace(u'|', u'\\|')
return cell
|
def key_from_dict(d):
"""Create a hashable key from a `dict` by converting the `dict` to a tuple."""
return tuple(sorted(d.items()))
|
def divide_to_keys(dict_all_embeddings):
"""
Distinguish between types of embedding methods - ours and state-of-the-art
:param dict_all_embeddings: dict of all embeddings
:return: Names of our methods, names of state-of-the-art methods
"""
keys_ours = []
keys_state_of_the_art = []
keys = list(dict_all_embeddings.keys())
for key in keys:
if " + " in key:
keys_ours.append(key)
else:
keys_state_of_the_art.append(key)
return keys_ours, keys_state_of_the_art
|
def get_region_plot_settings(region):
"""Common definition of region plot settings
"""
region_plot_settings_lut = [
{
'name': 'Binh Dinh',
'bbox': (108.5, 109.4, 14.75, 13.5),
'weight_legend': {
'x_l': 108.53,
'x_r': 108.58,
'base_y': 13.84,
'y_step': 0.035,
'y_text_nudge': 0.01,
'x_text_nudge': 0.04
},
'scale_legend': 10,
'figure_size': (7, 10)
},
{
'name': 'Lao Cai',
'bbox': (103.5, 104.7, 22.9, 21.8),
'weight_legend': {
'x_l': 103.53,
'x_r': 103.58,
'base_y': 22.18,
'y_step': 0.04,
'y_text_nudge': 0.01,
'x_text_nudge': 0.04
},
'scale_legend': 10,
'figure_size': (10, 10)
},
{
'name': 'Thanh Hoa',
'bbox': (104.35, 106.1, 20.7, 19.1),
'weight_legend': {
'x_l': 104.4,
'x_r': 104.47,
'base_y': 19.68,
'y_step': 0.06,
'y_text_nudge': 0.01,
'x_text_nudge': 0.04
},
'scale_legend': 10,
'figure_size': (10, 10)
}
]
for region_plot_settings in region_plot_settings_lut:
if region == region_plot_settings['name']:
return region_plot_settings
raise Exception('Region plot settings not defined for this region')
|
def s_expected_milestone_linear(params, substep, state_history, prev_state, policy_input, **kwargs):
"""
State for generating signal from marketing.
"""
key = 'expected_milestone_linear'
prev_value = prev_state['expected_milestone_linear']
new_value = params['CLAIMS_MAGNITUDE']
value = prev_value + new_value
return (key, value)
|
def toJacobian(Xp_Yp):
"""
Convert a tuple of Xp Yp coordinates to a Jacobian.
Args:
Xp_Yp: write your description
"""
Xp, Yp = Xp_Yp
return (Xp, Yp, 1)
|
def single_dimensional_fitness(individual):
"""A simple single-dimensional fitness function."""
return pow(individual, 2)
|
def tails(path, last_lines=50):
    """
    return the last `last_lines` lines of a file, much like the Unix
    tail command
    """
    assert last_lines >= 0
    # Binary mode is required: text-mode files in Python 3 cannot seek
    # relative to the end of the file.
    with open(path, 'rb') as file_object:
        pos, lines = last_lines + 1, []
        while len(lines) <= last_lines:
            try:
                file_object.seek(-pos, 2)
            except OSError:
                # File shorter than `pos` bytes: rewind and read all of it.
                file_object.seek(0)
                break
            finally:
                lines = list(file_object)
            pos *= 2
    return b"".join(lines[-last_lines:]).decode()
|
def ngrams(sequence, N):
"""Return all `N`-grams of the elements in `sequence`"""
assert N >= 1
return list(zip(*[sequence[i:] for i in range(N)]))
|
def sort_diclist(undecorated, sort_on):
"""
Sort a list of dictionaries by the value in each
dictionary for the sorting key
Parameters
----------
undecorated : list of dicts
sort_on : str, numeric
key that is present in all dicts to sort on
Returns
---------
ordered list of dicts
Examples
---------
>>> lst = [{'key1': 10, 'key2': 2}, {'key1': 1, 'key2': 20}]
>>> sort_diclist(lst, 'key1')
[{'key2': 20, 'key1': 1}, {'key2': 2, 'key1': 10}]
>>> sort_diclist(lst, 'key2')
[{'key2': 2, 'key1': 10}, {'key2': 20, 'key1': 1}]
"""
    decorated = [
        (len(dict_[sort_on]) if hasattr(dict_[sort_on], '__len__')
         else dict_[sort_on], index)
        for (index, dict_) in enumerate(undecorated)
    ]
    decorated.sort()
    return [undecorated[index] for (key, index) in decorated]
|
def get_seating_row(plan, seat_number):
"""
Given a seating plan and a seat number, locate and return the dictionary
object for the specified row
:param plan: Seating plan
:param seat_number: Seat number e.g. 3A
:raises ValueError: If the row and/or seat number don't exist in the seating plan
:return: The row as a dictionary containing seat class and seats keys
"""
row_number = seat_number[:-1]
if row_number not in plan.keys():
raise ValueError(f"Row {row_number} does not exist in the seating plan")
row = plan[row_number]
if seat_number not in row["seats"].keys():
raise ValueError(f"Seat {seat_number} does not exist in row {row_number}")
return row
|
def _create_ner_key(start_offset, end_offset):
"""
Create a key out of start and end offsets. Useful for identifying which NER mentions are involved
in a relation.
Arguments:
start_offset (int) : start index of the ner mention (inclusive)
end_offset (int) : end index of the ner mention (inclusive)
Returns:
str : string representation of an NER mention's position in a string.
"""
return "-".join([str(start_offset), str(end_offset)])
|
def get_full_vname(namespaces: list, table_name: str, vname: str):
"""Generates the string that is the full variable name
This full variable name is what is used in the C++ code across
all of the different namespaces and if there's a table prefix.
Putting it all here reduces repeated code.
Parameters
----------
namespaces : list
The list of namespaces that belong to this parameter.
table_name : str
The "table" name for this parameter. This is typically
when there's a block of parameters that all use the same
prefix. I.e. bssn::BH1_MASS and bssn::BH2_MASS can be defined
in the template file in one table alone.
vname : str
The name of the variable (the key for the corresponding table)
Returns
-------
str
The output string of the full variable name
"""
out_str = "::".join(namespaces)
out_str += "::" + table_name + vname
return out_str
|
def parse_rawstring(s):
"""
It parses reference strings contained in some fields.
These strings contain references to :class:`genes.molecule` instances, and one of its atoms
"""
molecule, res_or_atom = s.split('/')
molecule = molecule.strip()
try:
res_or_atom = int(res_or_atom)
except ValueError:
pass # is str
return molecule, res_or_atom
|
def nonone(value, replace_value):
"""Returns ``value`` if ``value`` is not ``None``. Returns ``replace_value`` otherwise.
"""
if value is None:
return replace_value
else:
return value
|
def _get_default_image_size(model):
"""Provide default image size for each model."""
image_size = (224, 224)
if model in ["inception", "xception", "inceptionresnet"]:
image_size = (299, 299)
elif model in ["nasnetlarge"]:
image_size = (331, 331)
return image_size
|
def find_path(graph, start, end, path=None):
"""
In given 'graph', find a path from node 'start' to 'end'.
'graph' is a dictionary with nodes as keys and a list of
connections (i.e.: other nodes) as value. 'start' and 'end'
are nodes. A node is represented by a string (its name or
label). 'path' is the way we have walked already (if any).
The returned value is a list of nodes conforming the path.
"""
    # Guard against the mutable default argument pitfall.
    if path is None:
        path = []
    # If we're there, just add the latest node and return
if start == end:
return path + [end]
# If end is one of our connections, shortcut to it (don't look further)
if end in graph[start]:
return path + [start, end]
# Loop on the connections of our starting point
for node in graph[start]:
# If the node is in the path already, forget it (loops)
if node in path: continue
# Try out walks from each connected node
# (including this node in the already walked path)
newpath = find_path(graph, node, end, path + [start])
# If the walk was OK, return it as a result, otherwise go on
if newpath: return newpath
# If no connection worked out fine, return impossible
return None
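# A usage sketch on a tiny graph:
toy_graph = {'A': ['B', 'C'], 'B': ['D'], 'C': ['D'], 'D': []}
assert find_path(toy_graph, 'A', 'D') == ['A', 'B', 'D']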
|
def des_exposure_time(bands=''):
"""
    Return the single-epoch exposure time for each DES band in the
    comma-separated `bands` string
"""
# https://arxiv.org/pdf/1801.03181.pdf
return [45.0 if b == 'Y' else 90.0 for b in bands.split(',')]
|
def board_copy(board):
"""return a shallow copy of board"""
return board.copy()
|
def get_input_size(dataset_name):
"""
Returns the size of input
"""
dataset_name = dataset_name.lower()
mapping = {
'mnist': 28 * 28,
'adult': 6,
'pums': 4,
'power': 8
}
if dataset_name not in mapping:
err_msg = f"Unknown dataset '{dataset_name}'. Please choose one in {list(mapping.keys())}."
raise ValueError(err_msg)
return mapping[dataset_name]
|
def get_song_and_artist(tracks):
"""
:param tracks: a list of song information from the Spotify API
    :return: a list of {song: artist} dictionaries
"""
songs = []
for s in tracks:
song = s['track']['name']
artist = s['track']['artists'][0]['name']
        songs.append({song: artist})
return songs
|
def get_qualified_types(qualifier_types):
"""We want all qualifiers to point to non qualifier types.
There are sometimes e.g. 'const restrict int' types. We want
the 'const' type point to 'int', not 'restrict'.
Returns dictionary {qualifier_type: modified_non_qualifier_type}.
"""
non_qualifier_types = {}
for k, t in qualifier_types.items():
modified_type = t['modified_type']
while modified_type in qualifier_types:
modified_type = qualifier_types[modified_type]['modified_type']
non_qualifier_types[k] = modified_type
return non_qualifier_types
|
def filter_line(line):
"""Filter input .pb.go line to ignore non-problematic differences."""
# Strip the compiler and plugin version numbers. The expected lines
# look like:
#
# // <tab>protoc-gen-go v1.26.0\n
# // <tab>protoc v3.12.4\n
#
# Note that protoc-gen-go-grpc does not embed its version number
# in its output, so isn't checked here.
for version_prefix in ('// \tprotoc ', '// \tprotoc-gen-go '):
if line.startswith(version_prefix):
return version_prefix + '\n'
return line
|
def groups_balanced(arg):
"""
Match [, {, and ( for balance
>>> groups_balanced("(a) and (b)")
True
>>> groups_balanced("((a) and (b))")
True
>>> groups_balanced("((a) and (b)")
False
>>> groups_balanced(" [a] and [b] ")
True
>>> groups_balanced("((a) and [(b)])")
True
>>> groups_balanced("((a) and [(b))]")
False
"""
arg = arg.strip()
open_list = ["(", "[", "{"]
close_list = [")", "]", "}"]
stack = []
for i in arg:
if i in open_list:
stack.append(i)
elif i in close_list:
pos = close_list.index(i)
if ((len(stack) > 0) and
(open_list[pos] == stack[len(stack)-1])):
stack.pop()
else:
return False
if len(stack) == 0:
return True
else:
return False
|
def get_clean_config(config):
"""Get cleaned config from original config.
:param config: configuration to be cleaned.
:returns: clean configuration without key referring to None or empty dict.
"""
if config is None:
return None
if isinstance(config, dict):
extracted_config = {}
for key, value in config.items():
sub_config = get_clean_config(value)
if sub_config is not None:
extracted_config[key] = sub_config
if not extracted_config:
return None
return extracted_config
else:
return config
|
def decs_osimage_package_facts(arg_osimage_facts, arg_check_mode=False):
"""Package a dictionary of OS image according to the decs_osimage module specification. This
dictionary will be returned to the upstream Ansible engine at the completion of the module run.
@param arg_osimage_facts: dictionary with OS image facts as returned by API call to .../images/list
@param arg_check_mode: boolean that tells if this Ansible module is run in check mode.
@return: dictionary with OS image specs populated from arg_osimage_facts.
"""
ret_dict = dict(id=0,
name="none",
size=0,
type="none",
state="CHECK_MODE",
)
if arg_check_mode:
# in check mode return immediately with the default values
return ret_dict
if arg_osimage_facts is None:
# if void facts provided - change state value to ABSENT and return
ret_dict['state'] = "ABSENT"
return ret_dict
ret_dict['id'] = arg_osimage_facts['id']
ret_dict['name'] = arg_osimage_facts['name']
ret_dict['size'] = arg_osimage_facts['size']
ret_dict['type'] = arg_osimage_facts['type']
ret_dict['state'] = arg_osimage_facts['status']
return ret_dict
|
def values_by_key(dct, keys, fill_val=None):
""" return dictionary values for specific keys, filling missing entries
"""
return tuple(dct[key] if key in dct else fill_val for key in keys)
|
def get_gender(filepath):
"""files are coded so first two characters decide gender.
odd = man, even = woman
code woman as 1, men as 0"""
if int(filepath.split("/")[-1][0:2]) % 2 == 0:
return "woman"
return "man"
|
def _reconstruct(start, period, dE):
"""
Reconstruct occurences,
starting from `start`, and
correcting `period` with a delta for all deltas in `dE`,
`len(dE)` occurences are reconstructed
Parameters
----------
start: int or datetime
starting point for the event
period: int or timedelta
period between two occurences
d_E: np.array of [int|timedelta]
inters occurences deltas
"""
occurences = [start]
current = start
for d_e in dE:
e = current + period + d_e
occurences.append(e)
current = e
return occurences
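# A usage sketch with plain ints standing in for timestamps and deltas:
assert _reconstruct(0, 7, [0, 1, -2]) == [0, 7, 15, 20]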
|
def linear_search_iterative(array, item):
"""Incrementing index until item is found in the array iteratively.
array: list
item: str
Best case running time: O(1) if the item is at the beginning of the array.
Worst case running time: O(n) if the item is last in the array.
"""
for index, value in enumerate(array):
if item == value:
return index # found
return None
|
def generate_param_getter(param_name: str, param_type: str) -> str:
"""Generates Python code for a parameter getter.
Args:
param_name: The name of the parameter
param_type: The type of the parameter.
Returns:
str: Python code for the parameter getter.
"""
return " @property\n" \
" def " + param_name + "(self) -> '" + param_type + "':\n" \
" return self._" + param_name + "\n\n"
|
def parse_instruction(context, instr):
""" Helps the GUI go from lists of instruction data to a cleanly formatted string """
if instr is not None:
docs = context.get_doc_url(instr.split(' '))
instruction = context.escape(instr.replace(' ', ' '))
shortForm = context.newline.join("<a href=\"{href}\">{form}</a>".format(href=url, form=context.escape(short_form)) for short_form, url in docs)
return instruction, shortForm
else:
return 'None', 'None'
|
def problem_19_4(a, b):
""" Write a method which finds the maximum of two numbers. You should not
use if-else or any other comparison operator.
Example: input: 5, 10; output: 10
Solution: identify the first bit which is different in the two numbers. The
number which has that bit set to 1 is larger.
"""
bin_a = bin(a)[2:]
bin_b = bin(b)[2:]
bin_len = max(len(bin_a), len(bin_b))
bin_a = '0'*(bin_len-len(bin_a))+bin_a
bin_b = '0'*(bin_len-len(bin_b))+bin_b
    for i in range(bin_len):
        if bin_a[i] != bin_b[i]:
            # The number whose bit is set at the first difference is larger.
            return a if bin_a[i] == '1' else b
    return a
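# Quick checks:
assert problem_19_4(5, 10) == 10
assert problem_19_4(7, 7) == 7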
|
def is_number_in_intervals(intervals: list, target: int):
"""
Traverses intervals and checks if the given number is inside an interval.
Example:
interval: [[1, 6], [45, 48], [110, 112]]
number: 2
return: True
"""
for interval in intervals:
# print(f"interval={interval} type(interval)={type(interval)} target={target} type(target)={type(target)}")
if interval[0] <= target <= interval[1]:
return True
return False
|
def convert_datetime_weekday_to_zero_sunday(weekday):
"""
Python weekday yields values of 0-6 with Mon as 0 and Sun as 6; we want to scale this so Sun is 0 and Sat is 6
"""
return (weekday + 1) % 7
|
def pixel(x, y, width_x, width_y):
"""Spatial representation of a pixel.
Parameters
----------
x : numpy.ndarray
x coordinates
y : numpy.ndarray
y coordinates
width_x : float
x diameter of the pixel, in microns
width_y : float
y diameter of the pixel, in microns
Returns
-------
numpy.ndarray
spatial representation of the pixel
"""
width_x = width_x / 2
width_y = width_y / 2
return (x <= width_x) & (x >= -width_x) & (y <= width_y) & (y >= -width_y)
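# A usage sketch with a 2-micron-wide pixel (illustrative values):
import numpy as np
coords = np.array([-1.0, 0.0, 2.0])
assert (pixel(coords, coords, 2.0, 2.0) == np.array([True, True, False])).all()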
|
def binstring_to_bit_list(binstring):
"""
Converts a string into a list, where each char in the string becomes an item in the list,
    with matching left to right ordering. Each char in the input string must be able to be cast
    to an int, as each item in the output list is cast to an int.
    Args:
        binstring: a string composed entirely of chars that are able to be cast to ints
    Returns:
        A list of ints containing the cast chars from the input string, with identical frequency
        and ordering.
    Raises:
        ValueError: The input string contains a character that cannot be cast to an int.
    """
    bit_list = []
    for bit in binstring:
        try:
            bit_list.append(int(bit))
        except ValueError:
            raise ValueError(
                "All characters in the input string must be able to be cast to an int!")
    return bit_list
|
def get_sequencing_data():
""" for strain sequencing pie chart """
result = {}
result["data"] = list([
120000,
7000,
7600,
750,
350,
1300,
])
result["labels"] = list([
"archive",
"gDNA extracted",
"sent for sequencing",
"sequenced - on progress",
"sequenced - failed QC",
"sequenced - annotated",
])
return result
|
def get_paths(index, stack): # O(logN)
"""
>>> get_paths(10, [])
['left', 'right', 'left']
"""
if index == 1: # O(1)
return stack # O(1)
    is_odd = index % 2  # O(1)
    if not is_odd:  # O(1)
        parent_node = index // 2  # O(1)
        stack.append('left')  # O(1)
        return get_paths(parent_node, stack)  # O(logN)
    parent_node = (index - 1) // 2  # O(1)
    stack.append('right')  # O(1)
    return get_paths(parent_node, stack)  # O(logN)
|
def hex_to_rgb(color):
"""Convert a hex color to rgb integer tuple."""
if color.startswith('#'):
color = color[1:]
if len(color) == 3:
color = ''.join([c*2 for c in color])
if len(color) != 6:
return False
try:
r = int(color[:2],16)
g = int(color[2:4],16)
b = int(color[4:],16)
except ValueError:
return False
else:
return r,g,b
|
def get_policy_condition(statement):
    """Check if a policy statement has a condition and return it."""
    if "Condition" in statement:
        return statement["Condition"]
    else:
        return False
|
def convert_string_to_list(_string, separator=','):
"""
    splits the string with the given separator and removes whitespace
"""
return [x.strip() for x in _string.split(separator)]
|
def _latest_common_snapshot(some, others):
"""
Pick the most recent snapshot that is common to two snapshot lists.
:param list some: One ``list`` of ``Snapshot`` instances to consider,
ordered from oldest to newest.
:param list others: Another ``list`` of ``Snapshot`` instances to consider,
ordered from oldest to newest.
    :return: The ``Snapshot`` instance which occurs closest to the end of both
        ``some`` and ``others``. If no ``Snapshot`` appears in both, ``None``
        is returned.
"""
others_set = set(others)
for snapshot in reversed(some):
if snapshot in others_set:
return snapshot
return None
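# A usage sketch; strings stand in for Snapshot instances here:
assert _latest_common_snapshot(['a', 'b', 'c'], ['b', 'd']) == 'b'
assert _latest_common_snapshot(['a'], ['d']) is None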
|
def _split_training_and_validation_sets(texts, labels, validation_split):
"""Splits the texts and labels into training and validation sets.
# Arguments
texts: list, text data.
labels: list, label data.
validation_split: float, percentage of data to use for validation.
# Returns
A tuple of training and validation data.
"""
num_training_samples = int((1 - validation_split) * len(texts))
return ((texts[:num_training_samples], labels[:num_training_samples]),
(texts[num_training_samples:], labels[num_training_samples:]))
|
def con_celsius_to_kelvin(degree_celsius):
"""
Parameters
----------
degree_celsius : float
temperature in Celsius
Returns
-------
degree_kelvin : float
temperature in Kelvin
"""
degree_kelvin = degree_celsius + 273.15
return degree_kelvin
|
def fix_author(author):
"""Change surname-firstname order.
Parameters
----------
author : str
Author as string
Returns
-------
fixed_author : str
Changed author string.
Examples
--------
>>> author = 'Lybeck, Mikael'
>>> fix_author(author)
'Mikael Lybeck'
"""
author_parts = author.split(', ')
if len(author_parts) == 2:
fixed_author = author_parts[1] + ' ' + author_parts[0]
else:
fixed_author = author
return fixed_author
|
def convertByte(byte_to_convert):
"""
Converts byte to most biggest unit.
"""
TBYTE = 1024 * 1024 * 1024 * 1024
GBYTE = 1024 * 1024 * 1024
MBYTE = 1024 * 1024
KBYTE = 1024
if byte_to_convert / TBYTE >= 1:
return str(round(byte_to_convert / TBYTE, 2)) + " TB"
elif byte_to_convert / GBYTE >= 1:
return str(round(byte_to_convert / GBYTE, 2)) + " GB"
elif byte_to_convert / MBYTE >= 1:
return str(round(byte_to_convert / MBYTE, 2)) + " MB"
elif byte_to_convert / KBYTE >= 1:
return str(round(byte_to_convert / KBYTE, 2)) + " KB"
else:
return str(round(byte_to_convert, 0)) + " B"
|
def is_none(param):
    """Test if a parameter is the None value or the string 'None'.
    :param param: Parameter to test
    :return: Boolean value
    """
    if param is None or (isinstance(param, str) and param.lower() == 'none'):
        return True
    else:
        return False
|
def cumulative_sum(arr):
"""
Returns an array of len(arr)-1
Each element shows the sum of all numbers in the indices prior to the given element
"""
current_val = 0
answer = []
for j in arr:
current_val += j
answer.append(current_val)
return answer
|
def _route2url(route):
"""Convert host from openshift route to https:// url"""
return f"https://{route['spec']['host']}"
|
def pe6(n=100):
"""
s1 = n*(n+1)/2
s2 = n*(n+1)*(2*n+1)/6
s1*s1 - s2
= n*(n+1)*(n*(n+1)/4 - (2*n+1)/6)
= n*(n+1)*(3*n*(n+1) - 2*(2*n+1))/12
= n*(n+1)*(3*n*n - n - 2)/12
= n*(n+1)*(n-1)*(3*n+2)/12
>>> pe6()
25164150
"""
return(n * (n + 1) * (n - 1) * (3 * n + 2) // 12)
|
def parse_tf_ids(target_tfs_filename):
"""
If user has provided a file with Ensembl transcript ids, parse these to a list.
"""
with open(target_tfs_filename, 'r') as target_tfs_file:
target_tfs_list = target_tfs_file.read().splitlines()
target_tfs_list = [x.upper() for x in target_tfs_list if len(x)>0]
return target_tfs_list
|
def ppebwr_round_probs(L):
"""
Return 'round probabilities' for use in ppebwr method.
These are just scaled precinct sizes.
"""
V = float(sum([x[0] for x in L]))
return [x[0]/V for x in L]
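# A usage sketch; L is assumed to be a list of (precinct_size, ...) tuples:
assert ppebwr_round_probs([(10,), (30,)]) == [0.25, 0.75]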
|
def create_keys(ids):
"""Keys creator
    This function creates a list of integers to be used as internal SNAPPy ids
    to avoid issues with complex fasta file names.
Args:
ids (list): List of fasta headers in the msa file.
Returns:
List of pairs (keys, ids)
"""
d = len(str(len(ids)))
new_keys = [f"%0{d}d" % x for x in list(range(len(ids)))]
return [new_keys, ids]
|
def get_fix_param(var, val):
"""
var: variable name.
val: variable value to fix.
"""
out = {}
for i, j in zip(var, val):
out[i] = j
return out
|
def distr_equal_propn(label_data, num_workers):
"""
Idea:
    1. For each label, distribute an equal allocation to workers.
    2. Apply the worker's allocation to label_data and store in distr_labeldata,
       where the keys are workers and the values are the labeled data with X and Y.
       (Step 2 is not performed here; this function returns the step-1 allocation.)
Inputs:
label_data - dict: output of segregate_labels
num_workers - scalar: number of workers
"""
#Step 1: Distribute allocation to workers
distr_propn = dict() #A dict of dicts: labels and then worker allocations
labels = label_data.keys()
#Initial allocation
for label in labels:
ndata = len(label_data[label]['X']) #number of data points for the given label
propn = [ndata // num_workers] * num_workers
distr_propn[label] = dict(zip(list(range(num_workers)), propn))
assert round(sum(propn), 1) <= ndata, "Allocation of proportions should at most be the length of label data"
return distr_propn
|
def decimal2dms(decimal_degrees):
""" Converts a floating point number of degrees to the equivalent
number of degrees, minutes, and seconds, which are returned
as a 3-element list. If 'decimal_degrees' is negative,
only degrees (1st element of returned list) will be negative,
minutes (2nd element) and seconds (3rd element) will always be positive.
Example:
>>> decimal2dms(121.135)
[121, 8, 6.0000000000184173]
>>> decimal2dms(-121.135)
[-121, 8, 6.0000000000184173]
"""
degrees = int(decimal_degrees)
decimal_minutes = abs(decimal_degrees - degrees) * 60
minutes = int(decimal_minutes)
seconds = (decimal_minutes - minutes) * 60
return [degrees, minutes, seconds]
|
def etags_section_filename(header):
"""Return the file name in the section header.
A section header is a filename and a section length separated by a
comma.
"""
return header.split(',')[0]
|
def insertion_sort(L):
"""
Insertion sort is a simple algorithm which builds the final sorted list 1
item at a time.
It compares the current element with it's neighbor to the left. If the
current element is smaller than the neighbor, it then compares with the
neighbor before that and so on until the beginning of the list is reached.
Performance
===========
Worst: O(n^2)
Average: O(n^2)
Best: O(n)
Space
=====
Worst: O(1)
"""
# Start from the second element so we can compare the current element with
# the previous element.
for i in range(1, len(L)):
val = L[i]
k = i
# Start from the current position of the array and iterate backwards
# toward the beginning of the array comparing adjascent values as you
# go.
while k > 0 and val < L[k - 1]:
L[k] = L[k - 1]
k -= 1
L[k] = val
return L
|
def pack_varint(data):
    """Pack a VARINT for the protocol."""
    # Emit the value in 7-bit groups, least significant group first;
    # 0x40 is added to every byte except the last as a continuation flag.
    return bytes([(0x40 * (i != data.bit_length() // 7)) +
                  ((data >> (7 * i)) % 128) for i in range(1 + data.bit_length() // 7)])
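# Quick check: 300 = 0b100101100 splits into 7-bit groups 0101100 (0x2c)
# and 10 (0x02); the low byte carries the 0x40 continuation flag.
assert pack_varint(300) == bytes([0x6c, 0x02])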
|
def mode(ary):
"""
mode function
find the object with the biggest number of appears in the given ary
:param ary: input ary (some iterable object)
:return: the number with the largest appearence number
"""
dct = {}
for value in ary:
if value in dct:
dct[value]+=1
else:
dct[value] =1
return max(dct, key=lambda k: dct[k])
|
def lowercase_first_letter(s: str) -> str:
"""
Given a string, returns that string with a lowercase first letter
"""
if s:
return s[0].lower() + s[1:]
return s
|
def wrap(text, line_length):
"""Wrap a string to a specified line length.
Args:
text: The string to wrap.
line_length: The line length in characters.
Returns:
A wrapped string.
Raises:
ValueError: If line_length is not positive.
"""
# DON'T DO THIS:
# assert line_length > 0, "line_length must be positive"
# INSTEAD, DOCUMENT EXCEPTIONS
if line_length < 1:
raise ValueError("line_length {} is not positive".format(line_length))
words = text.split()
    if words and max(map(len, words)) > line_length:
        raise ValueError("line_length must be at least as long as the longest word")
lines_of_words = []
current_line_length = line_length
for word in words:
if current_line_length + len(word) > line_length:
lines_of_words.append([]) # new line
current_line_length = 0
lines_of_words[-1].append(word)
current_line_length += len(word) + len(' ')
lines = [' '.join(line_of_words) for line_of_words in lines_of_words]
result = '\n'.join(lines)
assert all(len(line) <= line_length for line in result.splitlines())
return result
|
def escape_float(value):
"""
Python's float are usually double:
https://docs.python.org/3.6/library/stdtypes.html#numeric-types-int-float-complex
"""
return '"%s"^^xsd:double' % value
|
def get_outputcsv(clusters,grps):
"""
args: clusters of utgs, reads in utgs
returns: list of lists, in each list there are all the reads in one cluster
"""
csv_out = []
for clus in clusters:
one_cl = []
for el in clus:
one_cl.extend(grps[el])
csv_out.append(list(set(one_cl)))
return csv_out
|
def color_str_yellow(s):
"""Color string YELLOW for writing to STDIN."""
return "\033[93m{}\033[00m".format(s)
|
def is_ascii(s):
"""Return True if the given string contains only ASCII characters."""
return len(s) == len(s.encode())
|
def get_column_names(outcomes, races, genders, pctiles):
""" Generate column names for outcomes and factors selected"""
col_names = []
for outcome in outcomes:
for race in races:
for gender in genders:
for pctile in pctiles:
col_names.append(outcome + '_' + race + '_' + gender + '_' + pctile)
return col_names
|
def bipolar_diverge(maximum):
""" Returns [0, 1, -1, ...., maximum, -maximum ] """
sequence = list(sum(list(zip(list(range(maximum + 1)), list(range(0, -maximum - 1, -1)))), ()))
sequence.pop(0)
return sequence
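# Quick check:
assert bipolar_diverge(2) == [0, 1, -1, 2, -2]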
|
def Gmat2A(G):
"""Extract A from reciprocal metric tensor (G)
:param G: reciprocal maetric tensor (3x3 numpy array
:return: A = [G11,G22,G33,2*G12,2*G13,2*G23]
"""
return [G[0][0], G[1][1], G[2][2], 2. * G[0][1], 2. * G[0][2], 2. * G[1][2]]
|
def validate_slots(slots):
"""
:param slots:
:return:
"""
keys = ['book', 'chapter', 'verse']
return [ slots[key] for key in keys]
|
def in_box(coords, box):
""" Returns True if a geotag is inside a specified coordinate box"""
    return box[0][0] < coords[0] < box[1][0] and box[1][1] < coords[1] < box[0][1]
|
def get_defaults(info):
"""
Extracts the default values from the info structure.
"""
trial_defaults = {}
for item in info:
trial_defaults[item['tag']] = item['value']
return trial_defaults
|
def sol(arr, n):
"""
Idea is simple, while going from left to right keep a track of max
till i, thus all elements to the left of i are smaller.
While going from right to left keep a track if min till i thus all
elements to the right are greater
"""
ltr = [0]*n
rtl = [0]*n
ma = arr[0]
for i in range(n):
ma = max(arr[i], ma)
ltr[i] = ma
mi = arr[n-1]
for i in range(n-1, -1, -1):
mi = min(arr[i], mi)
rtl[i] = mi
for i in range(n):
        if ltr[i] == rtl[i] and i and i != n - 1:
            # Boundaries of 'i' are checked as we want at least one element
            # on either side of the result
return arr[i]
return -1
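# A usage sketch: 6 is the only interior element that is greater than
# everything to its left and smaller than everything to its right.
sample = [5, 1, 4, 3, 6, 8, 10, 7, 9]
assert sol(sample, len(sample)) == 6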
|
def get_n_matched_associations(feedback: str) -> int:
"""
Return the number of matched associations in the feedback of a single submission.
"""
total_matches = feedback.count("Matched")
class_matches = feedback.count("Class Matched")
attr_matches = feedback.count("Attribute Match") + feedback.count("Attribute Misplaced Match")
return max(0, total_matches - class_matches - attr_matches)
|
def to_text(obj, encoding='utf-8', errors='strict'):
"""Makes sure that a string is a text string.
Args:
obj: An object to make sure is a text string.
encoding: The encoding to use to transform from a byte string to
a text string. Defaults to using 'utf-8'.
errors: The error handler to use if the byte string is not
decodable using the specified encoding. Any valid codecs error
handler may be specified.
Returns: Typically this returns a text string.
"""
if isinstance(obj, str):
return obj
return str(obj, encoding=encoding, errors=errors)
|
def capfirst(x):
"""Capitalize the first letter of a string. Kindly borrowed from Django"""
return x and str(x)[0].upper() + str(x)[1:]
|