def all_black_colors(N):
"""
Generate all black colors
"""
black = [(0, 0, 0) for i in range(N)]
return black
|
def utmify_email_url(url: str, campaign: str) -> str:
"""
    Appends PostHog UTM parameters (utm_source, utm_medium=email and the given campaign) to the URL.
Example:
{% utmify_email_url 'http://app.posthog.com' 'weekly_report' %}
=> "http://app.posthog.com?utm_source=posthog&utm_medium=email&utm_campaign=weekly_report"
"""
return f"{url}{'&' if '?' in url else '?'}utm_source=posthog&utm_medium=email&utm_campaign={campaign}"
|
def sign_extend(val, bits=32):
""" Sign extension. High-order bit of val is left extended.
:param val: VexValue
"""
sign_bit = 1 << (bits - 1)
return (val & (sign_bit - 1)) - (val & sign_bit)
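A quick sanity check, assuming sign_extend above is in scope; plain ints stand in for VexValue here:
assert sign_extend(0xFFFFFFFF, bits=32) == -1   # sign bit set -> negative
assert sign_extend(0x7FFFFFFF, bits=32) == 0x7FFFFFFF
assert sign_extend(0x80, bits=8) == -128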
|
def unescaper(msg):
""" unescape message
this function undoes any escape sequences in a received message
@param msg: the message to unescape
@return: the unescaped message
"""
out = []
escape = False
for x in msg:
if x == 0x5c:
escape = True
continue
if escape:
x = 0x5c ^ x ^ 0xa3
escape = False
out.append(x)
return bytes(out)
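A hedged round-trip sketch; the framing convention (escape byte 0x5c, payload stored as 0x5c ^ b ^ 0xa3) is inferred from the decoder above:
# 0x41 ('A') sent escaped, followed by a literal 0x42 ('B').
escaped = bytes([0x5c, 0x5c ^ 0x41 ^ 0xa3, 0x42])
assert unescaper(escaped) == b'AB'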
|
def weight_point_in_circle(
point: tuple,
center: tuple,
radius: int,
corner_threshold: float = 1.5
):
"""
Function to decide whether a certain grid coordinate should be a full, half or empty tile.
Arguments:
point (tuple): x, y of the point to be tested
center (tuple): x, y of the origin (center) point
        radius (int): radius of certainly full tiles, does not include half tiles
corner_threshold (float): threshold that decides if the tile should be a half tile instead of empty
Returns:
int: the type of the tested tile
0 if empty tile
1 if full tile
2 if half tile
"""
diff_x, diff_y = map(lambda x, y: abs(x - y), center, point) # subtract point from center then abs for both x and y
if (diff_y > radius) or (diff_x > radius):
return 0 # eliminate any obviously out of bounds tiles
# precalculate pythagoras distance squared
dist_squared = (diff_x * diff_x) + (diff_y * diff_y)
    # precalculate radius squared
radius_squared = radius * radius
# precalculate rounded distance
rounded_distance = round(dist_squared)
if rounded_distance < radius_squared: # distance within radius
return 1 # full tile
elif rounded_distance < radius_squared * corner_threshold and diff_x < radius: # distance on edge
return 2 # half tile
# outside of any thresholds
return 0
|
def lc_prefix(prefix, s):
"""return the longest common prefix of prefix and s"""
if prefix is None:
return s
n = 0
for i, j in zip(prefix, s):
if i != j:
break
n += 1
return prefix[:n]
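The None seed makes this convenient to fold over a list of strings; a small sketch assuming lc_prefix above is in scope:
from functools import reduce

assert reduce(lc_prefix, ["flower", "flow", "flight"], None) == "fl"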
|
def fill_tabs(string: str):
"""Replaces every occurence of \\t with four spaces"""
return string.replace("\t", " ")
|
def count_keys(num_clbits):
"""Return ordered count keys."""
return [bin(j)[2:].zfill(num_clbits) for j in range(2 ** num_clbits)]
|
def custom_format(source, language, class_name, options, md, **kwargs):
"""Custom format."""
return '<div lang="%s" class_name="class-%s", option="%s">%s</div>' % (language, class_name, options['opt'], source)
|
def get_venv_name(dockenv_name):
"""
Helper function to split the user-defined virtual env name
out of the full name that includes the dockenv tag
:param dockenv_name: The full name to pull the virtual env name
out of
"""
    # Format of tag name is dockenv-**NAME**:latest
venv_start = len("dockenv-")
if dockenv_name.endswith(":latest"):
venv_end = len(":latest") * -1
return dockenv_name[venv_start:venv_end]
return dockenv_name[venv_start:]
|
def reducemap(func, sequence, initial=None, include_zeroth = False):
"""
A version of reduce that also returns the intermediate values.
:param func: A function of the form x_i_plus_1 = f(x_i, params_i)
Where:
x_i is the value passed through the reduce.
params_i is the i'th element of sequence
        x_i_plus_1 is the value that will be passed to the next step
:param sequence: A list of parameters to feed at each step of the reduce.
:param initial: Optionally, an initial value (else the first element of the sequence will be taken as the initial)
:param include_zeroth: Include the initial value in the returned list.
:return: A list of length: len(sequence), (or len(sequence)+1 if include_zeroth is True) containing the computed result of each iteration.
"""
if initial is None:
val = sequence[0]
sequence = sequence[1:]
else:
val = initial
results = [val] if include_zeroth else []
for s in sequence:
val = func(val, s)
results.append(val)
return results
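A brief usage sketch showing a running sum, with and without an explicit initial value:
from operator import add

assert reducemap(add, [1, 2, 3, 4]) == [3, 6, 10]
assert reducemap(add, [1, 2, 3, 4], initial=0, include_zeroth=True) == [0, 1, 3, 6, 10]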
|
def content_to_md(content, ref_count, faulty_pdfs):
""" Returns the content (reference list for each PDF file) in a markdown formated string
Keyword arguments:
content -- a dict with each key being a PDF filepath and reference list (str) as value
ref_count -- a dict with each key being a PDF filepath and reference count as value
faulty_pdfs -- a list of PDF files, either without references or with parsing issues
"""
content_md = ''
for key, value in content.items():
content_md += ('### File: ' + key + ' (reference count: ' + str(ref_count[key]) +
')\n```\n' + value + '```\n')
if len(faulty_pdfs) != 0:
content_md += ('\n :x: Could not find reference list for PDF files: ' + ' '.join(faulty_pdfs))
return content_md
|
def _FindLatestMinorVersion(debuggees):
"""Given a list of debuggees, find the one with the highest minor version.
Args:
debuggees: A list of Debuggee objects.
Returns:
If all debuggees have the same name, return the one with the highest
integer value in its 'minorversion' label. If any member of the list does
not have a minor version, or if elements of the list have different
names, returns None.
"""
if not debuggees:
return None
best = None
best_version = None
name = None
for d in debuggees:
if not name:
name = d.name
elif name != d.name:
return None
minor_version = d.labels.get('minorversion', 0)
if not minor_version:
return None
minor_version = int(minor_version)
if not best_version or minor_version > best_version:
best_version = minor_version
best = d
return best
|
def recall_score(true_entities, pred_entities):
"""Compute the recall."""
nb_correct = len(true_entities & pred_entities)
nb_true = len(true_entities)
score = nb_correct / nb_true if nb_true > 0 else 0
return score
|
def map_clobs(columns):
"""
The map_clobs function will inject the following if there
are clob data types. --map-column-java col6=String,col8=String
This is needed as clob columns will not come into Hadoop unless
we specifically map them.
"""
hasclobs = False
clobs = ""
for c in columns:
if c.get("datatype").lower() == "clob":
if not hasclobs:
hasclobs = True
clobs = "--map-column-java "
clobs = clobs + c.get("name") + "=String,"
return clobs[:-1]
|
def search_for_pod_name(details: dict, operator_id: str):
"""Get operator pod name.
Args:
details (dict): workflow manifest from pipeline runtime
operator_id (str): operator id
Returns:
        dict: name, status and message of the pod, or None if the operator is not found
"""
try:
if 'nodes' in details['status']:
for node in [*details['status']['nodes'].values()]:
if node['displayName'] == operator_id:
return {'name': node['id'], 'status': node['phase'], 'message': node['message']}
except KeyError:
pass
|
def iterate(iterator, n):
"""
Returns the nth element returned by the iterator if there are sufficient elements,
or None if the iterator has been exhausted.
:param iterator: The iterator to extract elements from
:param n: The nth element returned by the iterator will be returned
:return: An element returned by the iterator
"""
# throw away n-1 elements
for index in range(1, n):
next(iterator, None)
return next(iterator, None)
|
def blend(c, a):
"""blend semi-transparent color with white"""
return 255 + (c - 255) * a
|
def merge(left, right):
"""Two lists are merged."""
results = []
while len(left) and len(right):
if left[0] < right[0]:
results.append(left.pop(0))
else:
results.append(right.pop(0))
return results + left + right
|
def check_if_similarity_is_valid(similarity):
"""Raise error if not true"""
similarity_error_message = "Invalid value, it should be between 0.0 and 1.0."
try:
similarity = float(similarity)
except ValueError:
raise ValueError(similarity_error_message)
if not isinstance(similarity, float) or (float(similarity) < 0 or float(similarity) > 1):
raise ValueError(similarity_error_message)
return similarity
|
def normalize(position):
"""Accepts `position` of arbitrary precision and returns the block
containing that position.
Parameters
----------
position : tuple of len 3
Returns
-------
block_position : tuple of ints of len 3
"""
x, y, z = position
x, y, z = (int(round(x)), int(round(y)), int(round(z)))
return x, y, z
|
def clean_str(
s: str,
l: list,
r: list,
) -> str:
"""Replace substrings within a given string
Parameters
----------
s : str
The string
l : list
The list of substrings
r : list
The list of replacement substrings
Returns
-------
str
        The string with the substrings replaced
"""
# Loop through every substring in the list
for i in range(0, len(l)):
# Remove all occurrences of the substring
s = s.replace(l[i], r[i])
return s
|
def _tf2idxf(tf_d, tidx_d):
""" _tf2idxf(): Don't use it directly, use tf2idxf instead.
    This function takes a TF dictionary representing the TF Vector,
    and a TF-Index. It returns an Index-Frequency dictionary where each term of the TF
    dictionary has been replaced with its index number from the TF-Index. If a term of the
    TF dictionary is not in the TF-Index, the term is simply dropped. Therefore,
    the Index-Frequency dictionary will no longer include the terms missing from the TF-Index. """
idxed_d = dict()
for term, freq in tf_d.items():
if term in tidx_d:
idxed_d[tidx_d[term]] = freq
# else: DROP THE TERM
return idxed_d
|
def vni_row_name(network_type):
"""Determine name of row for VNI in allocations table.
:param network_type: Network type to determine row name for.
:type network_type: str
:returns: Row name
:rtype: str
:raises: ValueError
"""
if network_type in ('gre',):
return '{}_id'.format(network_type)
elif network_type in ('geneve', 'vxlan'):
return '{}_vni'.format(network_type)
raise ValueError('Unsupported network_type: {}'.format(network_type))
|
def Command(*_args, **_kw):
"""Fake Command"""
return ["fake"]
|
def space_tokenize_with_eow(sentence):
"""Add </w> markers to ensure word-boundary alignment."""
return [t + "</w>" for t in sentence.split()]
|
def highest(x, a, b=0):
"""Calculate the highest sum of X."""
a, b = a + x, b + x
return a if a > b else b
|
def _first_unvisited(graph, visited):
"""
Return first unvisited node.
@type graph: graph
@param graph: Graph.
@type visited: list
@param visited: List of nodes.
@rtype: node
@return: First unvisited node.
"""
for each in graph:
if (each not in visited):
return each
return None
|
def unpack(dict_of_list, dict_):
"""
Unpack predictions into a dictionary.
Dictionary keys are keys of *dict_*
"""
ret = {k: [None] * len(v) for k, v in dict_of_list.items()}
for k, p in dict_.items():
ret[k[0]][k[1]] = p
return ret
|
def add_word_continuation_tags(tags):
"""In place, add a continutation tag to each word:
<cc/> -word continues current dialogue act and the next word will also
continue it
<ct/> -word continues current dialogue act and is the last word of it
<tc/> -word starts this dialogue act tag and the next word continues it
<tt/> -word starts and ends dialogue act (single word dialogue act)
"""
tags = list(tags)
for i in range(0, len(tags)):
if i == 0:
tags[i] = tags[i] + "<t"
else:
tags[i] = tags[i] + "<c"
if i == len(tags)-1:
tags[i] = tags[i] + "t/>"
else:
tags[i] = tags[i] + "c/>"
return tags
|
def append_cmd_line_args_to(cmd):
"""Appends the syntax for command line arguments ("$*") to the cmd
Args:
cmd: A string representing the command.
Returns:
'cmd' with the syntax for command line arguments ("$*") appended
"""
return cmd + " $*"
|
def parse_input(trace_doc, dbg_path_resolver):
"""Return a list of frame dicts from an object of {backtrace: list(), processInfo: dict()}."""
def make_base_addr_map(somap_list):
"""Return map from binary load address to description of library from the somap_list.
The somap_list is a list of dictionaries describing individual loaded libraries.
"""
return {so_entry["b"]: so_entry for so_entry in somap_list if "b" in so_entry}
base_addr_map = make_base_addr_map(trace_doc["processInfo"]["somap"])
frames = []
for frame in trace_doc["backtrace"]:
if "b" not in frame:
print(
f"Ignoring frame {frame} as it's missing the `b` field; See SERVER-58863 for discussions"
)
continue
soinfo = base_addr_map.get(frame["b"], {})
elf_type = soinfo.get("elfType", 0)
if elf_type == 3:
addr_base = "0"
elif elf_type == 2:
addr_base = frame["b"]
else:
addr_base = soinfo.get("vmaddr", "0")
addr = int(addr_base, 16) + int(frame["o"], 16)
# addr currently points to the return address which is the one *after* the call. x86 is
# variable length so going backwards is difficult. However llvm-symbolizer seems to do the
# right thing if we just subtract 1 byte here. This has the downside of also adjusting the
# address of instructions that cause signals (such as segfaults and divide-by-zero) which
# are already correct, but there doesn't seem to be a reliable way to detect that case.
addr -= 1
frames.append(
dict(
path=dbg_path_resolver.get_dbg_file(soinfo), buildId=soinfo.get("buildId", None),
offset=frame["o"], addr="0x{:x}".format(addr), symbol=frame.get("s", None)))
return frames
|
def _num_32_bit_words_for_bit_fields(bit_fields):
"""
Gets the number of 32 bit unsigned ints needed store a list of bit fields.
"""
num_buckets, cur_bucket = 0, 0
for field in bit_fields:
if field.size + cur_bucket > 32:
num_buckets += 1
cur_bucket = 0
cur_bucket += field.size
return num_buckets + (cur_bucket > 0)
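A small sketch of the packing rule; the hypothetical Field namedtuple only needs the size attribute the function reads:
from collections import namedtuple

Field = namedtuple("Field", "size")

# 16 + 16 fills the first 32-bit word exactly; the 8-bit field spills into a second word.
assert _num_32_bit_words_for_bit_fields([Field(16), Field(16), Field(8)]) == 2
assert _num_32_bit_words_for_bit_fields([Field(32)]) == 1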
|
def short(products, field):
"""Creates a bash completion list for data
Parameters
----------
products: list
list of products data
field: str
the field name that should be formated
Returns
-------
    list
        list of unique field values with spaces converted to underscores
"""
li = []
for p in products:
if not p[field].replace(" ", "_") in li:
li.append(p[field].replace(" ", "_"))
return li
|
def unique_list(obj, key=None):
"""
    Remove duplicate elements from a list, preserving order
:param obj: list
:param key: key function
:return: list
"""
checked = []
seen = set()
for e in obj:
_e = key(e) if key else e
if _e not in seen:
checked.append(e)
seen.add(_e)
return checked
|
def get_cookie_name(request, name, usage, software='MOIN'):
"""
Determine the full cookie name for some software (usually 'MOIN') using
it for some usage (e.g. 'SESSION') for some wiki (or group of wikis)
determined by name.
Note:
-----
We do not use the path=... information in the cookie any more, because it can
    easily cause confusion if there are multiple cookies with the same name, but
    different paths (like e.g. / and /foo).
Instead of using the cookie path, we use differently named cookies, so we get
the right cookie no matter at what URL the wiki currently is "mounted".
If name is None, we use some URL components to make up some name.
For example the cookie name for the default desktop wiki: MOIN_SESSION_8080_ROOT
If name is siteidmagic, we just use cfg.siteid, which is unique within a wiki farm
created by a single farmconfig. If you only run ONE(!) wikiconfig wiki, it
is also unique, of course, but not if you run multiple wikiconfig wikis under
same domain.
If name is not None (and not 'siteidmagic'), we just use the given name (you
want to use that to share stuff between several wikis - just give same name
and it will use the same cookie. same thing if you don't want to share, just
give a different name then [e.g. if cfg.siteid or 'siteidmagic' doesn't work
for you]).
Moving a wiki to a different URL will break all sessions. Exchanging URLs
of wikis might lead to confusion (requiring the client to purge the cookies).
"""
if name is None:
url_components = [
# cookies do not store the port, thus we add it to the cookie name:
request.environ['SERVER_PORT'],
# we always store path=/ into cookie, thus we add the path to the name:
('ROOT' + request.script_root).replace('/', '_'),
]
name = '_'.join(url_components)
    elif name == 'siteidmagic':
name = request.cfg.siteid # == config name, unique per farm
return "%s_%s_%s" % (software, usage, name)
|
def truncate_errors(errors, limit=float("inf")):
"""If limit was specified, truncate the list of errors.
Give the total number of times that the error was found elsewhere.
"""
if len(errors) > limit:
start1, end1, err1, msg1, replacements = errors[0]
if len(errors) == limit + 1:
msg1 += " Found once elsewhere."
else:
msg1 += f" Found {len(errors)} times elsewhere."
errors = [(start1, end1, err1, msg1, replacements)] + errors[1:limit]
return errors
|
def journey_data_years(all_y = False):
"""Return available journey data years
Either return all the years, if all_y = True, or just
those that are relevant from the TTM viewpoint.
"""
if not (all_y):
return (2012, 2014, 2015, 2016)
else:
return (2007, 2009, 2010, 2012, 2014, 2015, 2016)
|
def unpack_experiment_groups(experiment_groups):
"""
"""
experiments = []
for experiment_group in experiment_groups:
name = experiment_group["name"]
for experiment_dir in experiment_group["dirs"]:
experiments.append({
"name": name,
"dir": experiment_dir
})
return experiments
|
def convert_boolean(value: bool) -> str:
"""Convert from python bool to postgresql BOOLEAN"""
if value is True:
return 'TRUE'
elif value is False:
return 'FALSE'
else:
return 'NULL'
|
def find_largest_digit(n):
"""
A recursive function for comparing the digits
:param n: int, several digits
:return: int, the biggest digit
"""
# Confirm that the parameter is a positive number
if n < 0:
num = n*-1
else:
num = n
# Base case, the first digit is compared
if num//10 == 0:
return num%10
# Compare the digits, return the biggest one
else:
return max(num%10, find_largest_digit(num//10))
|
def zbin(zval):
"""
return bin index for this track -- so that we take at most 1 track/bin/jet
"""
zmax = [0.1, 0.2, 0.3, 0.4, 0.5, 0.7, 1.0]
for i,z in enumerate(zmax):
if zval < z: return i+1
|
def create_context_element_attribute(name, type, value):
"""
Function to create a context attribute body
:param name (string): Name of the attribute
:param type (string): Type of the attribute
:param value (string): Value of the attribute
    :return (dict): Context attribute body. The attribute in JSON will be like this:
{
"name" : "temperature",
"type" : "float",
"value" : "23"
}
"""
return {"name": name,
"type": type,
"value": value}
|
def producttype_to_sat(product_type: str) -> str:
"""Returns the satellite for the given product_type.
Args:
product_type: The type of the input satellite product
(e.g. S2_ESA_L2A or L8_USGS_L1)
Returns:
The name of the satellite that matches the input product_type.
"""
return product_type.split('_')[0]
|
def game_over(current_board):
"""
Brain of the game.
Goes through the classical rules of tic-tac-toe game.
Inspects the current board and checks if the horizontal, vertical or diagonal elements
are same or not. Also decides explicitly whether the game is draw or not.
:param current_board: the current board
:return: 'True' if game is over or,
'False' if game is not over or,
'None' if game is a draw
"""
# horizontal check--> (stable)
for row in current_board:
if len(['col' for col in row if col == row[0]]) == 3:
return True
# vertical check--> (is stable for now)
for c in range(len(current_board[0])):
if (len(['r' for r in range(len(current_board)) if current_board[r][c] == current_board[0][c]])) == 3:
return True
# working with diagonals
# left_diagonal = [col for row in board for col in row if row.index(col) == board.index(row)] # -->Not stable
left_diagonal = list()
for r in range(len(current_board)):
for c in range(len(current_board[r])):
if r == c and current_board[r][c] == current_board[0][0]:
left_diagonal.append(current_board[r][c])
right_diagonal = list()
index_of_each_row = len(current_board) - 1
for row in current_board:
for member in row:
if member == row[index_of_each_row]:
right_diagonal.append(member)
break
index_of_each_row -= 1
# diagonal check--> (is stable for now)
if len(["x" for x in left_diagonal if x == left_diagonal[0]]) == 3 or len(
["x" for x in right_diagonal if x == right_diagonal[0]]) == 3:
return True
# draw decider
draw = list()
for row in current_board:
for member in row:
if not member.isnumeric():
draw.append(member)
# draw check
if len(draw) == 9:
return None
return False
|
def ilen(iterable):
"""Return the number of items in ``iterable``."""
return sum(1 for _ in iterable)
|
def flatten(d):
"""Return a dict as a list of lists.
>>> flatten({"a": "b"})
[['a', 'b']]
>>> flatten({"a": [1, 2, 3]})
[['a', [1, 2, 3]]]
>>> flatten({"a": {"b": "c"}})
[['a', 'b', 'c']]
>>> flatten({"a": {"b": {"c": "e"}}})
[['a', 'b', 'c', 'e']]
>>> flatten({"a": {"b": "c", "d": "e"}})
[['a', 'b', 'c'], ['a', 'd', 'e']]
>>> flatten({"a": {"b": "c", "d": "e"}, "b": {"c": "d"}})
[['a', 'b', 'c'], ['a', 'd', 'e'], ['b', 'c', 'd']]
"""
if not isinstance(d, dict):
return [[d]]
returned = []
for key, value in d.items():
# Each key, value is treated as a row.
nested = flatten(value)
for nest in nested:
current_row = [key]
current_row.extend(nest)
returned.append(current_row)
return returned
|
def appenddir(path: str, dirname: str) -> str:
"""
Append a dirname to a path string
:param str path: path where to append
:param str dirname: path to be appended
:return: path/dirname
"""
if path.endswith('/'):
return '{}{}'.format(path, dirname)
else:
return '{}/{}'.format(path, dirname)
|
def convert_device_name(framework, device_ids):
"""Convert device to either cpu or cuda."""
gpu_names = ["gpu", "cuda"]
cpu_names = ["cpu"]
if framework not in cpu_names + gpu_names:
raise KeyError("the device should either "
"be cuda or cpu but got {}".format(framework))
assert type(device_ids) is list
device_ids_new = []
for device in device_ids:
device_ids_new.append(int(device))
if framework in gpu_names:
return "cuda", device_ids_new
else:
return "cpu", device_ids_new
|
def lorentz_berthelot_rule(sig_A, sig_B, eps_A, eps_B):
"""
    Lorentz-Berthelot rules for combining LJ parameters.
Parameters
-----------
sig_A: float
sig_B: float
input sigma values
eps_A: float
eps_B: float
input epsilon values
Returns
--------
float
sigma
float
epsilon
"""
sig = (sig_A + sig_B)/2.0
eps = (eps_A * eps_B)**0.5
return sig, eps
|
def bytes_to_uint8_array(val, width=70):
    """
    Formats a byte string for use as a uint8_t* literal in C/C++
    """
    if len(val) == 0:
        return '{}'
    # Normalize to a sequence of ints so both bytes (Python 3) and str (Python 2) input work.
    byte_values = [b if isinstance(b, int) else ord(b) for b in val]
    lines = []
    line = '{' + str(byte_values[0])
    for x in byte_values[1:]:
        token = str(x)
        if len(line) + len(token) > width:
            lines.append(line + ',')
            line = token
        else:
            line += ',%s' % token
    lines.append(line)
    return '\n'.join(lines) + '}'
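A short usage sketch with Python 3 bytes input; width caps the character length of each emitted line:
assert bytes_to_uint8_array(b"\x01\x02\xff") == "{1,2,255}"
assert bytes_to_uint8_array(b"") == "{}"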
|
def tags_equal(act, exp):
"""Check if each actual tag in act is equal to one or more expected tags in exp."""
return all(a == e if isinstance(e, str) else a in e for a, e in zip(act, exp))
|
def get_parse_context(i, deps, data):
"""
get the two elements from data at position 'i' linked through deps [left|right] (to last added edges)
"""
if i == -1 or not len(deps):
return '', ''
deps = deps[i]
valency = len(deps)
if valency == 1:
return data[deps[-1]], ''
else:
return data[deps[-1]], data[deps[-2]]
|
def check_point(point):
"""Validates a 2-d point with an intensity.
Parameters
----------
point : array_like
(x, y, [intensity])
Coordinates of the point to add. If optional `intensity` is not
supplied, it will be set to `None`.
Returns
-------
point : :obj:tuple of :obj:float
The same point converted to floats, assuming no errors were raised.
"""
if len(point) != 2:
intensity = point[-1]
point = point[:-1]
else:
intensity = None
if len(point) != 2:
raise ValueError("Coordinates must be two-dimensional.")
try:
point = tuple([float(p) for p in point]) # Convert everything to floats.
except ValueError:
raise ValueError("Coordinates must be convertible to floats.")
return point + tuple((intensity,))
|
def commaSplitNum(num : str) -> str:
"""Insert commas into every third position in a string.
For example: "3" -> "3", "30000" -> "30,000", and "561928301" -> "561,928,301"
:param str num: string to insert commas into. probably just containing digits
:return: num, but split with commas at every third digit
:rtype: str
"""
outStr = num
for i in range(len(num), 0, -3):
outStr = outStr[0:i] + "," + outStr[i:]
return outStr[:-1]
|
def tower_builder(n_floors):
"""
Takes in an integer and builds a tower of * from the given int
:param n_floors: the number of floors for the tower
:return: a list with the number of floors represented with *
:rtype: list
"""
return [("*" * (i * 2 - 1)).center(n_floors * 2 - 1)
for i in range(1, n_floors + 1)]
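A quick illustration of the output shape:
assert tower_builder(3) == ["  *  ", " *** ", "*****"]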
|
def calculate_total_clones(haplotypes):
"""
# ========================================================================
CALCULATE TOTAL CLONES
PURPOSE
-------
    Calculates the total number of clones across multiple haplotypes. Note
that there may be multiple clones associated with each haplotype.
INPUT
-----
[HAPLOTYPE LIST] [haplotypes]
The list of Haplotypes from which to build the distance matrix.
RETURN
------
[INT]
        The total number of clones across all the haplotypes in the list.
# ========================================================================
"""
total = 0
for haplotype in haplotypes:
total += haplotype.count
return total
|
def split_reads(read, d):
"""
    :param read: the read (sequence) to split
    :param d: step size; every d positions a length-d chunk is emitted along with its +1 and +2 shifted variants
    :return: list of sub-reads
"""
prev_c = 0
c = 0
reads = []
for c in range(d, len(read), d):
# ToDo: Make generic logic for pairs
first = read[prev_c: c]
second = read[prev_c + 1: c + 1]
third = read[prev_c + 2: c + 2]
reads.append(first)
reads.append(second)
reads.append(third)
prev_c = c
return reads
|
def vacantTheSpot(parking_lot, posn):
"""Vacant that parking spot"""
if posn in parking_lot.keys():
driver_age = parking_lot[posn]['driverAge']
vrn = parking_lot[posn]['VRN']
parking_lot[posn] = {}
print(f"Slot number {posn} vacated, the car with vehicle registration number '{vrn}' left the space, the driver of the car was of age {driver_age}")
return parking_lot, vrn
else:
print("Invalid Slot Number")
return parking_lot, -1
|
def is_equal2(a: int, b: int) -> bool:
"""Determine if a and b are equal numbers.
Determine if both numbers by comparing.
"""
if type(a) == int and type(b) == int:
return a == b
return False
|
def cmake_cache_option(name, boolean_value, comment=""):
"""Generate a string for a cmake configuration option"""
value = "ON" if boolean_value else "OFF"
return 'set({0} {1} CACHE BOOL "{2}")\n'.format(name, value, comment)
|
def prepare_demographics_dict_from_row(row_dict, census_mapping):
"""
Extract demographic information from a row_dict using a census_mapping.
Note that the census_mapping used here should be the same or a subset of the one
used during extraction.
"""
return {
category: {
names['human_readable_name']: float(row_dict[names['joined_column_name']] or 0.0)
for names in census_mapping[category].values()
if names['joined_column_name'] in row_dict
}
for category in census_mapping
}
|
def f_to_c(temp_f):
"""Convert F to C."""
return (temp_f - 32) * 5 / 9
|
def getUserAndArea(user):
"""Factor out the magic user hack for use in other classes"""
area = 'user'
tokens = user.split('_')
if tokens and len(tokens) > 1:
user = tokens[0]
area = tokens[1]
return user, area
|
def extend(iterable, extension):
"""Extends the iterable using another iterable.
:param iterable: collection of data to transform
:type iterable: list
:param extension: contains the additional iterable to extend the current one
    :type extension: iterable
"""
iterable.extend(extension)
return iterable
|
def Babylonian(x, N=10):
"""
t=sqrt(x)
"""
t = (1 + x) / 2
for i in range(2, N + 1):
t = (t + x / t) / 2
return t
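A short convergence check; with the default N=10 Newton steps the estimate is well below a 1e-9 error:
import math

assert abs(Babylonian(2) - math.sqrt(2)) < 1e-9
assert abs(Babylonian(25) - 5.0) < 1e-9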
|
def timepoints_by_route_direction(rating):
"""
Return a dictionary mapping (route ID, direction name) -> list of timepoints
"""
return {
(
timepoint_pattern.route_id,
timepoint_pattern.direction_name,
): timepoint_pattern.timepoints
for timepoint_pattern in rating["ppat"]
}
|
def _get_working_shape_and_iterations(requested_shape, max_power_of_two=13):
"""Returns the necessary size for a square grid which is usable in a DS algorithm.
The Diamond Square algorithm requires a grid of size n x n where n = 2**x + 1, for any
integer value of x greater than two. To accomodate a requested map size other than these
dimensions, we simply create the next largest n x n grid which can entirely contain the
requested size, and return a subsection of it.
This method computes that size.
PARAMETERS
----------
requested_shape
A 2D list-like object reflecting the size of grid that is ultimately desired.
max_power_of_two
an integer greater than 2, reflecting the maximum size grid that the algorithm can EVER
attempt to make, even if the requested size is too big. This limits the algorithm to
sizes that are manageable, unless the user really REALLY wants to have a bigger one.
The maximum grid size will have an edge of size (2**max_power_of_two + 1)
RETURNS
-------
    A tuple ((n, n), power), where n = 2**power + 1 is the edge length of the working grid.
"""
if max_power_of_two < 3:
max_power_of_two = 3
largest_edge = max(requested_shape)
for power in range(1, max_power_of_two+1):
d = (2**power) + 1
if largest_edge <= d:
return (d, d), power
#failsafe: no values in the dimensions array were allowed, so print a warning and return
# the maximum size.
d = 2**max_power_of_two + 1
print("DiamondSquare Warning: Requested size was too large. Grid of size {0} returned""".format(d))
return (d, d), max_power_of_two
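A usage sketch: a 200 x 100 request needs the next 2**x + 1 edge that covers the largest dimension:
# 2**7 + 1 = 129 is too small for an edge of 200, so the 257 x 257 grid (power 8) is chosen.
assert _get_working_shape_and_iterations((200, 100)) == ((257, 257), 8)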
|
def suppress(action='ignore', **kwarg):
"""
Sets up the .suppress tuple properly, pass options to this method as you
would the stdlib warnings.filterwarnings()
So, to use this with a .suppress magic attribute you would do the
following:
>>> from twisted.trial import unittest, util
>>> import warnings
>>>
>>> class TestFoo(unittest.TestCase):
... def testFooBar(self):
... warnings.warn("i am deprecated", DeprecationWarning)
... testFooBar.suppress = [util.suppress(message='i am deprecated')]
...
>>>
Note that as with the todo and timeout attributes: the module level
attribute acts as a default for the class attribute which acts as a default
for the method attribute. The suppress attribute can be overridden at any
level by specifying C{.suppress = []}
"""
return ((action,), kwarg)
|
def validate_phone(phone:str) -> bool:
"""
returns if the given phone number is valid
"""
phone = phone.replace("-", "").replace("(", "").replace(")", "")
return phone.isdigit() and len(phone) == 10
|
def _r(noun):
"""Return a Nock-like repr() of the given noun.
>>> _r((42, 0, 1))
'[42 0 1]'
"""
if isinstance(noun, int):
return repr(noun)
else:
return '[%s]' % ' '.join(_r(i) for i in noun)
|
def integerBreak(n):
"""
:type n: int
:rtype: int
"""
if n<=3:
return n-1
rest=n%3
if rest==0:
return 3**(n//3)
elif rest==1:
return 3**(n//3-1)*4
else:
return 3**(n//3)*2
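The closed form breaks n into as many 3s as possible, swapping 3 + 1 for 4 when the remainder is 1; two known values as a sketch:
assert integerBreak(10) == 36   # 3 + 3 + 4 -> 3 * 3 * 4
assert integerBreak(8) == 18    # 3 + 3 + 2 -> 3 * 3 * 2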
|
def sortable_version(version):
"""Converts '1.2.3' into '00001.00002.00003'"""
# Version must be a string to split it.
version = str(version)
return '.'.join(bit.zfill(5) for bit in version.split('.'))
|
def get_support(itemsets, itemset, probability=False):
"""
Get the support of itemset in itemsets.
Parameters
----------
itemsets : list
All available itemsets.
itemset : set
The itemset of which the support is calculated.
probability : bool
Return the probability of an itemset containing the itemset, otherwise
return the absolute number of itemsets which contain itemset.
Returns
-------
Either a probability or the total number of itemsets which contain the
itemset.
"""
containing_itemsets = 0
total_itemsets = 0
for itemset_tmp in itemsets:
if itemset.issubset(itemset_tmp):
containing_itemsets += 1
total_itemsets += 1
if probability:
return float(containing_itemsets) / float(total_itemsets)
else:
return containing_itemsets
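A small sketch of the support computation over three transactions:
transactions = [{"a", "b"}, {"a"}, {"b", "c"}]
assert get_support(transactions, {"a"}) == 2
assert get_support(transactions, {"a"}, probability=True) == 2 / 3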
|
def decimal_to_alpha(index):
"""Converts an int to an alphabetical index"""
from string import ascii_lowercase
alphanum = ''
    index += 1 # because the alphabet has no 0 and starts with 'a'
while index:
index -= 1 # 'a' needs to be used as next 'decimal' unit when reaching 'z': ..., 'y', 'z', 'aa', 'ab', ...
reste = index % 26
index = index // 26
alphanum = ascii_lowercase[reste] + alphanum
return alphanum
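A few spot checks of the bijective base-26 conversion:
assert decimal_to_alpha(0) == "a"
assert decimal_to_alpha(25) == "z"
assert decimal_to_alpha(26) == "aa"
assert decimal_to_alpha(27) == "ab"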
|
def sec_to_time(sec):
""" Returns the formatted time H:MM:SS
"""
mins, sec = divmod(sec, 60)
hrs, mins = divmod(mins, 60)
return f"{hrs:d}:{mins:02d}:{sec:02d}"
|
def wavedec_keys(level):
"""Subband keys corresponding to a wavedec decomposition."""
approx = ''
coeffs = {}
for lev in range(level):
for k in ['a', 'd']:
coeffs[approx + k] = None
approx = 'a' * (lev + 1)
if lev < level - 1:
coeffs.pop(approx)
return list(coeffs.keys())
|
def can_run_for_president(age, is_natural_born_citizen):
"""Can someone of the given age and citizenship status run for president in the US?"""
# The US Constitution says you must be a natural born citizen *and* at least 35 years old
return is_natural_born_citizen and (age >= 35)
|
def removeUnencodedText(text):
"""
    Remove non-ASCII characters (code points >= 128)
Author: Anna Yannakopoulos (2020)
Args:
text (str): Input text
Returns:
        str: Input text with non-ASCII characters removed
"""
return "".join([i if ord(i) < 128 else "" for i in text])
|
def cast_float(str):
    """a helper method for converting strings to their float value
    Args:
        str: a string containing a number
    Returns:
        the float value of the string given or None if not a float
    """
    if str != "" and str is not None:
        try:
            v = float(str)
        except ValueError:
            v = None
    else:
        v = None
    return v
|
def is_outerwall(line: str) -> bool:
"""Check if current line is the start of an outer wall section.
Args:
line (str): Gcode line
Returns:
bool: True if the line is the start of an outer wall section
"""
return line.startswith(";TYPE:WALL-OUTER")
|
def msecs_to_hours_mins_secs(msecs):
""" Convert milliseconds to hours, minutes and seconds.
msecs is an integer. Hours, minutes and seconds output is a string."""
secs = int(msecs / 1000)
mins = int(secs / 60)
remainder_secs = str(secs - mins * 60)
if len(remainder_secs) == 1:
remainder_secs = "0" + remainder_secs
hours = int(mins / 60)
remainder_mins = str(mins - hours * 60)
if len(remainder_mins) == 1:
remainder_mins = "0" + remainder_mins
hours = str(hours)
if len(hours) == 1:
hours = "0" + hours
res = hours + "." + remainder_mins + "." + remainder_secs
return res
|
def parse(input_dict,nested_lists=None):
"""recursively parse a dictionary with nested lists into a flat dictionary"""
out={}
if dict not in [type(x) for x in input_dict.values()]:
return input_dict
else:
for k,v in input_dict.items():
if type(v) in [str,int,float,bool]:
out[k]=input_dict[k]
elif type(v)==dict:
nested_dict = v
# if nested dict has an 'id', use it as a foreign key
            # except in the case of the 'game' table, which has a pk field
if 'id' in nested_dict.keys() and 'pk' not in nested_dict.keys():
out[f"{k}_id"]=nested_dict['id']
else:
for key,value in nested_dict.items():
out[f"{k}_{key}"]=value
elif type(v)==list:
if nested_lists != None:
nested_lists.append({k:v})
return parse(out,nested_lists)
|
def check_order_of_symbols_and_freqs_list(symbols_and_freqs_list):
"""Checks whether provided symbols_freq_list is in decreasing order or not."""
assume_max_freq = symbols_and_freqs_list[0][1]
for elem in symbols_and_freqs_list:
if elem[1] > assume_max_freq:
raise ValueError('Provided list is not in decreasing order!')
return True
|
def get(data,key):
"""
Returns the value corresponding to a key, if it exists.
Otherwise it returns an empty string.
"""
try:
value = data[key]
except KeyError:
value = ''
return value
|
def size_of_varint_py(value):
""" Number of bytes needed to encode an integer in variable-length format.
"""
value = (value << 1) ^ (value >> 63)
if value <= 0x7f:
return 1
if value <= 0x3fff:
return 2
if value <= 0x1fffff:
return 3
if value <= 0xfffffff:
return 4
if value <= 0x7ffffffff:
return 5
if value <= 0x3ffffffffff:
return 6
if value <= 0x1ffffffffffff:
return 7
if value <= 0xffffffffffffff:
return 8
if value <= 0x7fffffffffffffff:
return 9
return 10
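The shift on the first line is ZigZag encoding, so small negative values stay small; a few spot checks:
assert size_of_varint_py(0) == 1    # zigzag 0
assert size_of_varint_py(-1) == 1   # zigzag 1
assert size_of_varint_py(63) == 1   # zigzag 126 still fits in 7 bits
assert size_of_varint_py(64) == 2   # zigzag 128 needs a second byte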
|
def is_prime_sieve(n):
"""
create a 0-indexed list where sieve[i] is true if i is prime, through n
(size of list returned is n + 1)
"""
# True if prime
is_prime = [True] * (n + 1)
    # 0 and 1 are definitionally not prime
is_prime[0:2] = [False, False]
for i in range(2, n):
for add in range(2 * i, n + 1, i):
# mark multiples of i as not prime, excluding i itself
is_prime[add] = False
return is_prime
|
def ini_elec_levels(spc_dct_i, spc_info):
""" get initial elec levels
"""
if 'elec_levels' in spc_dct_i:
elec_levels = spc_dct_i['elec_levels']
else:
elec_levels = [[0., spc_info[2]]]
return elec_levels
|
def gcd(num1, num2):
"""Find greater common divider.
Find greater common divider
for two integer numbers
Args:
num1 (int): number one
num2 (int): number two
Returns:
int: Greater Common Divider
"""
while num1 != num2:
if num1 > num2:
num1 -= num2
else:
num2 -= num1
return (num1)
|
def get_coverage_archive_name(benchmark):
"""Gets the archive name for |benchmark|."""
return 'coverage-build-%s.tar.gz' % benchmark
|
def make_splice_string(nodename, context):
"""Generates splice string from a list of context.
E.g. make_splice_string("renorm4", [-4, 4])
returns "Append(Offset(renorm4, -4), Offset(renorm4, 4))"
"""
assert type(context) == list, "context argument must be a list"
string = ["Offset({0}, {1})".format(nodename, i) for i in context]
string = "Append(" + ", ".join(string) + ")"
return string
|
def set0 (strlist,pos):
""" removes elements of a list from pos to the end, save these as a separate list """
hold = []
while len(strlist) > pos:
hold.append(strlist.pop(len(strlist)-1))
hold.reverse()
return strlist,hold
|
def lags(fs):
"""Extract lags from a collection of features."""
return type(fs)(f.lag for f in fs)
|
def getLocation( min_point: tuple, max_point: tuple ) -> tuple:
"""Gets the bottom center location from the bounding box of an occupant."""
# Unpack the tuples into min/max values
xmin, ymin = min_point
xmax, ymax = max_point
# Take midpoint of x-coordinate and ymax for bottom middle of box
x_result = ( xmin + xmax ) // 2
y_result = ymax
# Return location
return ( x_result, y_result )
|
def select_matching_signature(funcs, *args, **kwargs):
"""
Select and call the function that accepts ``*args, **kwargs``.
*funcs* is a list of functions which should not raise any exception (other
than `TypeError` if the arguments passed do not match their signature).
`select_matching_signature` tries to call each of the functions in *funcs*
with ``*args, **kwargs`` (in the order in which they are given). Calls
that fail with a `TypeError` are silently skipped. As soon as a call
succeeds, `select_matching_signature` returns its return value. If no
function accepts ``*args, **kwargs``, then the `TypeError` raised by the
last failing call is re-raised.
Callers should normally make sure that any ``*args, **kwargs`` can only
bind a single *func* (to avoid any ambiguity), although this is not checked
by `select_matching_signature`.
Notes
-----
`select_matching_signature` is intended to help implementing
signature-overloaded functions. In general, such functions should be
avoided, except for back-compatibility concerns. A typical use pattern is
::
def my_func(*args, **kwargs):
params = select_matching_signature(
[lambda old1, old2: locals(), lambda new: locals()],
*args, **kwargs)
if "old1" in params:
warn_deprecated(...)
old1, old2 = params.values() # note that locals() is ordered.
else:
new, = params.values()
# do things with params
which allows *my_func* to be called either with two parameters (*old1* and
*old2*) or a single one (*new*). Note that the new signature is given
last, so that callers get a `TypeError` corresponding to the new signature
if the arguments they passed in do not match any signature.
"""
# Rather than relying on locals() ordering, one could have just used func's
# signature (``bound = inspect.signature(func).bind(*args, **kwargs);
# bound.apply_defaults(); return bound``) but that is significantly slower.
for i, func in enumerate(funcs):
try:
return func(*args, **kwargs)
except TypeError:
if i == len(funcs) - 1:
raise
|
def avg(window, *args, **kwargs):
"""
Returns window average
:param window:
:return:
"""
return float(sum(w[1] for w in window)) / len(window)
|
def remove_dup(a):
""" remove duplicates using extra array """
res = []
count = 0
for i in range(0, len(a)-1):
if a[i] != a[i+1]:
res.append(a[i])
count = count + 1
res.append(a[len(a)-1])
print('Total count of unique elements: {}'.format(count + 1))
return res
|
def remainder(x, y):
"""Difference between x and the closest integer multiple of y.
Return x - n*y where n*y is the closest integer multiple of y.
In the case where x is exactly halfway between two multiples of
y, the nearest even value of n is used. The result is always exact."""
from math import copysign, fabs, fmod, isfinite, isinf, isnan, nan
x = float(x)
y = float(y)
# Deal with most common case first.
if isfinite(x) and isfinite(y):
if y == 0.0:
# return nan
# Merging the logic from math_2 in CPython's mathmodule.c
# nan returned and x and y both not nan -> domain error
raise ValueError("math domain error")
absx = fabs(x)
absy = fabs(y)
m = fmod(absx, absy)
# Warning: some subtlety here. What we *want* to know at this point is
# whether the remainder m is less than, equal to, or greater than half
# of absy. However, we can't do that comparison directly because we
        # can't be sure that 0.5*absy is representable (the multiplication
# might incur precision loss due to underflow). So instead we compare
# m with the complement c = absy - m: m < 0.5*absy if and only if m <
# c, and so on. The catch is that absy - m might also not be
# representable, but it turns out that it doesn't matter:
# - if m > 0.5*absy then absy - m is exactly representable, by
# Sterbenz's lemma, so m > c
# - if m == 0.5*absy then again absy - m is exactly representable
# and m == c
# - if m < 0.5*absy then either (i) 0.5*absy is exactly representable,
# in which case 0.5*absy < absy - m, so 0.5*absy <= c and hence m <
# c, or (ii) absy is tiny, either subnormal or in the lowest normal
# binade. Then absy - m is exactly representable and again m < c.
c = absy - m
if m < c:
r = m
elif m > c:
r = -c
else:
# Here absx is exactly halfway between two multiples of absy,
# and we need to choose the even multiple. x now has the form
# absx = n * absy + m
# for some integer n (recalling that m = 0.5*absy at this point).
# If n is even we want to return m; if n is odd, we need to
# return -m.
# So
# 0.5 * (absx - m) = (n/2) * absy
# and now reducing modulo absy gives us:
# | m, if n is odd
# fmod(0.5 * (absx - m), absy) = |
# | 0, if n is even
# Now m - 2.0 * fmod(...) gives the desired result: m
            # if n is even, -m if n is odd.
# Note that all steps in fmod(0.5 * (absx - m), absy)
# will be computed exactly, with no rounding error
# introduced.
assert m == c
r = m - 2.0 * fmod(0.5 * (absx - m), absy)
return copysign(1.0, x) * r
# Special values.
if isnan(x):
return x
if isnan(y):
return y
if isinf(x):
# return nan
# Merging the logic from math_2 in CPython's mathmodule.c
# nan returned and x and y both not nan -> domain error
raise ValueError("math domain error")
assert isinf(y)
return x
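A few spot checks against the stdlib; math.remainder (Python 3.7+) implements the same IEEE 754 remainder, including the round-half-to-even tie rule:
import math

assert remainder(5.0, 3.0) == math.remainder(5.0, 3.0) == -1.0
# 7.5 is exactly halfway between 6 and 9; the even multiple (n = 2) is chosen.
assert remainder(7.5, 3.0) == math.remainder(7.5, 3.0) == 1.5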
|
def split_arguments(args):
"""
Split specified arguments to two list.
This is used to distinguish the options of the program and
execution command/arguments.
Parameters
----------
args : list
Command line arguments
Returns
-------
list : options, arguments
options indicate the optional arguments for the program and
arguments indicate the execution command/arguments
"""
prev = False
for i, value in enumerate(args[1:]):
if value.startswith('-'):
prev = True
elif prev:
prev = False
else:
return args[:i+1], args[i+1:]
return args, []
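A short sketch of the split on a typical command line; the loop's heuristic assumes each option takes exactly one value:
opts, cmd = split_arguments(["prog", "--verbose", "on", "ssh", "-p", "22", "host"])
assert opts == ["prog", "--verbose", "on"]
assert cmd == ["ssh", "-p", "22", "host"]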
|
def _parse_option(option_name, option_value):
"""Parse an option value and return the parsed value.
Args:
option_name: Name of the option.
option_value: Value of the option.
Returns:
The parsed option value
"""
if option_name in ['test_package_names', 'permitted_emails']:
option_value = (option_value or '').strip(', ')
names = [name.strip() for name in option_value.split(',')]
return [name for name in names if name]
else:
return option_value
|