def text_cleaner(string):
"""
    This function eliminates newline ('\\n') characters from a string
:param string: str
:return: str
"""
return string.replace('\n', '')
|
def time_to_decimal(time):
""" Takes HH:MM and converts to decimal (24 hour format).
Times after 12:00 and before 8:00 are assumed to be PM.
>>> time_to_decimal('8:30')
8.5
>>> time_to_decimal('1:30')
13.5
"""
time = time.split(":")
new_time = int(time[0])
# Times after 12:00 but before 8:00 are PM
if new_time < 8:
new_time += 12
new_time += int(time[1]) / 60
return new_time
|
def is_cyclic(graph):
"""
Check if graph has cycles.
Parameters
----------
graph : dict
must be represented as a dictionary mapping vertices to
iterables of neighbouring vertices.
Returns
-------
bool
Return ``True`` if the directed graph has a cycle.
Examples
--------
>>> is_cyclic({1: (2,), 2: (3,), 3: (1,)})
True
>>> is_cyclic({1: (2,), 2: (3,), 3: (4,)})
False
"""
path = set()
visited = set()
def visit(vertex):
if vertex in visited:
return False
visited.add(vertex)
path.add(vertex)
for neighbour in graph.get(vertex, ()):
if neighbour in path or visit(neighbour):
return True
path.remove(vertex)
return False
return any(visit(vertex) for vertex in graph)
|
def serialize(dictionary):
"""
    Turn a dictionary into an argument-like string of key="value" pairs joined by commas.
"""
data = []
for key, value in dictionary.items():
data.append('{0}="{1}"'.format(key, value))
return ', '.join(data)
|
def nextpow2(i):
"""
    Find the smallest power of 2 greater than or equal to i
"""
n = 1
while n < i:
n *= 2
return n
|
def string_to_hexescaped(s):
"""
    Takes a string and converts each char to a printed hex-escaped string, joining them together
    @param s: input string
    @return: printable hex escaped string
"""
return ''.join('\\x%02x' % ord(c) for c in s)
|
def compute_f1_span(g_index_set, p_index_set):
"""Compute F1 score for a given pair of token list"""
correct = float(len(g_index_set.intersection(p_index_set)))
if correct == 0.0:
return 0.0
precision = correct / len(p_index_set)
recall = correct / len(g_index_set)
return 2 * (precision * recall) / (precision + recall)
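
# Illustrative usage (example index sets, not from the original source): with gold
# indices {1, 2, 3} and predicted indices {2, 3, 4}, precision and recall are both
# 2/3, so the span F1 is 2/3.
print(round(compute_f1_span({1, 2, 3}, {2, 3, 4}), 4))  # 0.6667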
|
def tap(x, f, *fs):
"""
Example usage:
tap(x, lambda x: ...)
tap(x, f, g, h)
tap(f=lambda x: ..., x=x)
"""
for f in [f, *fs]:
f(x)
return x
|
def _pre_validate_int(value, name):
"""
Converts the given `value` to `int`.
Parameters
----------
value : `Any`
The value to convert.
name : `str`
The name of the value.
Returns
-------
value : `int`
Raises
------
TypeError
If `value` was not given as `int` instance.
"""
if type(value) is int:
pass
elif isinstance(value, int):
value = int(value)
else:
raise TypeError(f'`{name}` can be `int` instance, got {value.__class__.__name__}.')
return value
|
def make_entries_by_month(entries):
"""Sort entries by month.
This is a bit convoluted (could use explicit objects) but we want
to support at some point a JSON API easily, where having a raw
dict will be easier.
"""
r = dict()
current_date = None
total = 0
count = 0
for entry in entries:
current_date = entry.time.strftime("%Y-%m-01")
        if current_date not in r:
r[current_date] = {
'month_total': 0,
'entries': []
}
count += 1
total += entry.amount
r[current_date]['month_total'] += entry.amount
r[current_date]['entries'].append({
'current_total': total,
'current_count': count,
'current_month_total': r[current_date]['month_total'],
'block': entry.block,
'time': entry.time.strftime('%Y-%m-%d %H:%M:%S'),
'amount': entry.amount,
})
return count, total, r
|
def get_missing(index, cmp_index):
"""Get the set of entries that are not in the index and the index that are not in the index .
Args:
index (list): Lines on the ffindex
cmp_index (list): Lines in the other ffindex
Returns:
list: List of missing
"""
missing = set()
index_set = set()
for entry in index:
index_set.add(entry[0])
cmp_index_set = set()
for entry in cmp_index:
cmp_index_set.add(entry[0])
for key in cmp_index_set:
if key not in index_set:
missing.add(key)
return missing
|
def construct_non_none_tuple(a, b):
"""Returns a tuple of two non-none values, or None if either arguments are None"""
if a is not None and b is not None:
return a, b
else:
return None
|
def compress_indices(indices, size):
"""Return compressed indices.
The function is used for compressing COO row indices to CSR
compressed representation. The input indices must be sorted.
"""
nse = len(indices)
compressed = [0] * (size + 1)
k = 1
last_index = 0
for i in range(nse):
index = indices[i]
for n in range(last_index, index):
compressed[k] = i
k += 1
last_index = index
for n in range(k, size + 1):
compressed[n] = nse
return compressed
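
# Illustrative usage (example values, not from the original source): sorted COO
# row indices [0, 0, 1, 3] for a matrix with 4 rows compress to the CSR row
# pointer array [0, 2, 3, 3, 4] (row counts 2, 1, 0, 1).
print(compress_indices([0, 0, 1, 3], size=4))  # [0, 2, 3, 3, 4]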
|
def to_label(name):
"""
Convert snake_case name to Title case label
"""
return " ".join(x.title() for x in name.split("_"))
|
def _synapse_error_msg(ex):
"""
Format a human readable error message
"""
if isinstance(ex, str):
return ex
return '\n' + ex.__class__.__name__ + ': ' + str(ex) + '\n\n'
|
def sf_mag(a: tuple) -> int:
"""
Calculates the magnitude of a snailfish number
:param a: a snailfish number
:return: the magnitude as int
>>> sf_mag((9, 1))
29
>>> sf_mag((1, 9))
21
>>> sf_mag(((9, 1),(1, 9)))
129
>>> sf_mag(((1,2),((3,4),5)))
143
>>> sf_mag(((((0,7),4),((7,8),(6,0))),(8,1)))
1384
>>> sf_mag(((((1,1),(2,2)),(3,3)),(4,4)))
445
>>> sf_mag(((((3,0),(5,3)),(4,4)),(5,5)))
791
>>> sf_mag(((((5,0),(7,4)),(5,5)),(6,6)))
1137
>>> sf_mag(((((8,7),(7,7)),((8,6),(7,7))),(((0,7),(6,6)),(8,7))))
3488
>>> sf_mag(((((6,6),(7,6)),((7,7),(7,0))),(((7,7),(7,7)),((7,8),(9,9)))))
4140
"""
if type(a[0]) is int:
left = a[0]
else:
left = sf_mag(a[0])
if type(a[1]) is int:
right = a[1]
else:
right = sf_mag(a[1])
return left * 3 + right * 2
|
def is_unit_str(ustr):
"""Check if a string defines a unit"""
ustr = ustr.strip()
    return len(ustr) >= 2 and ustr[0] == "[" and ustr[-1] == "]"
|
def areSimilar(a, b):
"""
>>> areSimilar([1, 2, 3], [3, 2, 1])
True
"""
swap_candidates = []
for index, (el_a, el_b) in enumerate(zip(a, b)):
if el_a != el_b:
swap_candidates.append(index)
if len(swap_candidates) == 0:
return True
elif len(swap_candidates) != 2:
return False
s1 = swap_candidates[0]
s2 = swap_candidates[1]
a_tmp = a[:s1] + [a[s2]] + a[(s1 + 1) : s2] + [a[s1]] + a[(s2 + 1) :]
return a_tmp == b
|
def clues_pay(text):
""" Check for messages about payments """
text = text.lower()
for clue in ('credits', 'paym', 'expired', 'exceeded'):
if clue in text:
return True
return False
|
def memoFib(n, memo = {}):
""" Fibonacci numbers solved with memoization using dictionary"""
if n == 0 or n == 1:
return 1
try:
return memo[n]
except KeyError:
result = memoFib(n-1, memo) + memoFib(n-2, memo)
memo[n] = result
return result
|
def swap(pair):
"""Swap the items in a pair. Return tuple.
>>> swap((1, 2))
(2, 1)
"""
x, y = pair
return y, x
|
def winCheck(choice1: int, choice2: int) -> bool:
    """Returns True if the first player's choice beats the second player's
    - choice1: int {choice of the first player}
    - choice2: int {choice of the second player}
    """
    return (
        choice1 == 0 and choice2 == 2
    ) or (
        choice1 == 1 and choice2 == 0
    ) or (
        choice1 == 2 and choice2 == 1
    )
|
def fix_w2l_text(output):
"""W2L text has some predictabile peculiarities, this function strips those out
Args:
output (string): Raw output of W2L inference
Returns:
str: clean tet of what was said
"""
text = output.decode("utf-8")
if len(text) > 1:
text = text.split(",")[2][:-2]
else:
return
# Remove some bad outputs
if len(text) > 0:
if text[0] == "h" and len(text) == 1:
text = ""
if len(text) > 1:
if text[:2] == "h " or text == " transcriptio":
text = ""
return text
|
def pop(tupleo, *index):
"""
    pop(...) for a tuple, tupleo.
    pop(tupleo, index) -> tuple -- remove the item at index (default last) from tuple tupleo and return the resulting tuple.
    Raises IndexError if the tuple is empty or index is out of range
"""
if type(tupleo) != tuple:
raise TypeError("{} is not tuple".format(tupleo))
convertlist = list(tupleo)
if index:
convertlist.pop(*index)
else:
        convertlist.pop()
return tuple(convertlist)
|
def is_number(s):
"""
Check if string is a number
Parameters
----------
s : str
string to test
Returns
-------
True if numeric, False otherwise
"""
try:
float(s)
return True
except ValueError:
return False
|
def equation_quart(x, a, b, c, d, e):
"""Equation form for quart """
return a + b*x + c*x*x + d*x*x*x + e*x*x*x*x
|
def get_naip_tnm_url(xmin, ymin, xmax, ymax,
bbox_epsg=3857,
img_epsg=3857,
img_format='jpgpng',
width=500, height=500):
"""
Returns the URL for making a request to get an image from The National Map's
NAIP REST service.
Parameters
----------
    xmin, ymin, xmax, ymax : numeric
the coordinates of the bounding box of the image to return, assumed to be
in coordinate reference system EPSG:3857
img_format : string
image format to request, valid options are 'jpg', 'jpgpng', 'png', 'png8',
'png24', 'png32', 'bmp', 'gif', 'tiff'.
Returns
-------
url : string
the url that will return the NAIP image
"""
url = ''.join(['https://services.nationalmap.gov/arcgis/rest/services/',
'USGSNAIPImagery/ImageServer/exportImage?',
'bbox={}%2C{}%2C{}%2C{}&'.format(xmin, ymin, xmax, ymax),
'bboxSR={}&'.format(bbox_epsg),
'size={}%2C{}&'.format(width, height),
'imageSR={}&'.format(img_epsg),
'time=&',
'format={}&'.format(img_format),
'pixelType=U8&',
'noData=&',
'noDataInterpretation=esriNoDataMatchAny&',
                   'interpolation=+RSP_BilinearInterpolation&',
'compression=&',
'compressionQuality=&',
'bandIds=&',
'mosaicRule=&',
'renderingRule=&',
'f=image'])
return url
|
def estimate_subsidies(region, available_for_cross_subsidy):
"""
Estimates either the contribution to cross-subsidies, or the
quantity of subsidy required.
Parameters
----------
region : Dict
        Contains all variables for a single region.
available_for_cross_subsidy : int
The amount of capital available for cross-subsidization.
Returns
-------
region : Dict
        Contains all variables for a single region.
available_for_cross_subsidy : int
The amount of capital available for cross-subsidization.
"""
if region['deficit'] > 0:
if available_for_cross_subsidy >= region['deficit']:
region['used_cross_subsidy'] = region['deficit']
available_for_cross_subsidy -= region['deficit']
elif 0 < available_for_cross_subsidy < region['deficit']:
region['used_cross_subsidy'] = available_for_cross_subsidy
available_for_cross_subsidy = 0
else:
region['used_cross_subsidy'] = 0
else:
region['used_cross_subsidy'] = 0
required_state_subsidy = (region['total_cost'] -
(region['total_revenue'] + region['used_cross_subsidy']))
if required_state_subsidy > 0:
region['required_state_subsidy'] = required_state_subsidy
else:
region['required_state_subsidy'] = 0
return region, available_for_cross_subsidy
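
# Illustrative usage (hypothetical region dict with made-up numbers; the real model
# passes many more keys): a 10-unit deficit is fully covered by the 25 units
# available, and the remaining shortfall becomes the required state subsidy.
region = {'deficit': 10, 'total_cost': 100, 'total_revenue': 80}
region, remaining = estimate_subsidies(region, 25)
print(region['used_cross_subsidy'], region['required_state_subsidy'], remaining)  # 10 10 15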
|
def remove_blank_lines(lines):
"""Returns a new list with all blank lines removed."""
return [line for line in lines if line.strip()]
|
def _replace_code(code, replacements):
"""
Replaces code with new code.
:param str code: code to replace
:param list replacements: list of tuples containing (start, end, replacement)
"""
new_code = ''
index = 0
for start, end, code_string in sorted(replacements):
new_code += code[index:start] + code_string
index = end
new_code += code[index:]
return new_code
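
# Illustrative usage (example values, not from the original source): replacements
# are (start, end, new_text) spans over the original string, applied in order.
print(_replace_code("a = 1\nb = 2", [(4, 5, "10"), (10, 11, "20")]))
# a = 10
# b = 20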
|
def norm_rsplit(text, n):
""" Function to make the text to lower case and split it"""
return text.lower().rsplit(" ", n)[-n:]
|
def _default_axis_names(n_dims):
"""Name of each axis.
Parameters
----------
n_dims : int
Number of spatial dimensions.
Returns
-------
tuple of str
Name of each axis.
Examples
--------
>>> from landlab.grid.base import _default_axis_names
>>> _default_axis_names(1)
('x',)
>>> _default_axis_names(2)
('x', 'y')
>>> _default_axis_names(3)
('x', 'y', 'z')
"""
_DEFAULT_NAMES = ("x", "y", "z")
return _DEFAULT_NAMES[:n_dims]
|
def coco_as_image_size(dataset, label):
"""Convert a COCO detection label to the image size.
Args:
        dataset: dataset object whose ``coco`` attribute provides the COCO API.
        label (list of dict): an image label in the COCO detection format.
Returns:
tuple: width, height of image.
"""
if not label:
return None
image_id = label[0]['image_id']
image = dataset.coco.loadImgs(image_id)[0]
return image['width'], image['height']
|
def color(string, color=None):
"""
Change text color for the Linux terminal.
Note: this is duplicate code copied from helpers.py because it cannot be imported into this file due to a circular
reference. There are plans to refactor these circular references out, but this is the near term solution.
"""
attr = []
# bold
attr.append("1")
if color:
if color.lower() == "red":
attr.append("31")
elif color.lower() == "green":
attr.append("32")
elif color.lower() == "yellow":
attr.append("33")
elif color.lower() == "blue":
attr.append("34")
return "\x1b[%sm%s\x1b[0m" % (";".join(attr), string)
else:
if string.strip().startswith("[!]"):
attr.append("31")
return "\x1b[%sm%s\x1b[0m" % (";".join(attr), string)
elif string.strip().startswith("[+]"):
attr.append("32")
return "\x1b[%sm%s\x1b[0m" % (";".join(attr), string)
elif string.strip().startswith("[*]"):
attr.append("34")
return "\x1b[%sm%s\x1b[0m" % (";".join(attr), string)
elif string.strip().startswith("[>]"):
attr.append("33")
return "\x1b[%sm%s\x1b[0m" % (";".join(attr), string)
else:
return string
|
def _trim_separator(mac: str) -> str:
"""removes separator from MAC address"""
return mac.translate(str.maketrans("", "", ":-."))
|
def possibilities_list(sum_, dice_amount) -> list:
"""
    Returns the list of all the possible dice combinations for a given sum and
    number of six-sided dice, computed recursively.
"""
poss = []
if dice_amount == 2:
for d1 in range(1, 7):
for d2 in range(1, 7):
if d1 + d2 == sum_:
poss.append([d1, d2])
else:
for dn in range(1, 7):
if sum_ - dn < 2:
continue
for n in possibilities_list(sum_ - dn, dice_amount - 1):
poss.append([dn] + n)
return poss
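
# Illustrative usage (example values, not from the original source): the three
# ways two six-sided dice can sum to 4.
print(possibilities_list(4, 2))  # [[1, 3], [2, 2], [3, 1]]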
|
def validate_update_product(product, data):
""" this funtion validates the updated product data """
# Check for empty product_name
if data['product_name'] == '':
data['product_name'] = product['product_name']
# Check for empty product_category
if data['product_category'] == '':
data['product_category'] = product['product_category']
# check for a valid product_name
if data['product_name'].strip(' ').isdigit():
return {'warning': 'Enter a non digit product_name'}, 400
if not data["product_name"].strip():
return {"warning": "Enter a valid product_name"}, 400
# check for valid product_category
if data['product_category'].strip(' ').isdigit():
return {'warning': 'Enter non digit product_category'}, 400
if not data["product_category"].strip():
return {"warning": "Enter valid product_category"}, 400
# Check for large/long inputs
if len(data['product_name']) > 50:
return {'warning': 'product_name is too long'}, 400
|
def get_clean_paragraph(count):
"""
Creates a clean paragraph dict
Returns
--------
dict
        A dict with title, content and the count
"""
return {
"title": "",
"content": "",
"count": count
}
|
def _matching_not_matching(on, **kwargs):
"""
Change the text for matching/not matching
"""
text = "matching" if not on else "not matching"
classname = "colour-off" if not on else "colour-on"
return text, classname
|
def _format_call_number(value: str) -> str:
""" Call numbers have a space between sub-fields h and i. """
results = ""
if "^^^" in value:
results = value.split("^^^")[0] + " " + value.split("^^^")[1]
else:
results = value
return results
|
def computeFraction(feature_1, feature_2):
"""
Parameters:
Two numeric feature vectors for which we want to compute a ratio
between
Output:
Return fraction or ratio of feature_1 divided by feature_2
"""
fraction = 0.
if feature_1 == "NaN":
fraction = 0.0
elif feature_2 == "NaN":
fraction = 0.0
else:
fraction = int(feature_1) / float(feature_2)
return fraction
|
def fill_root(tree):
"""Add a root node to a tree if absent.
Parameters
----------
tree : dict
Taxonomy tree.
Returns
-------
str
Root node identifier
Notes
-----
A root is defined as having parent as itself, a behavior derived from the
    NCBI convention. Only one root must be present in a tree, so that all taxa
    can be traced back to the same root.
In custom trees, there may or may not be a clearly defined root. This
    function aims at defining a root for any given tree. Specifically, if
there is one root node (highest hierarchy), this node will be "sealed"
(making parent as itself). If there are multiple crown nodes ("crown"
describes the root of a clade instead of the entire tree), a new root
node will be generated and serve as the parent of all crown nodes.
"""
crown, toadd, tested = [], set(), set()
for taxon in tree:
this = taxon
while True:
if this in tested:
break
tested.add(this)
# parent is missing
try:
parent = tree[this]
except KeyError:
crown.append(this)
toadd.add(this)
break
# parent is None
if parent is None:
crown.append(this)
break
# parent is itself
if parent == this:
crown.append(this)
break
this = parent
# fill non-existent root or crown nodes
for node in toadd:
tree[node] = None
# this happens only when tree is empty
if len(crown) == 0:
return None
# there is only one crown node
elif len(crown) == 1:
        # make the parent of root itself
root = crown[0]
tree[root] = root
return root
# there are more than one crown node
else:
# in NCBI convention, root should have identifier "1"
i = 1
# in case "1" is already in tree, find an unused integer
while True:
if str(i) not in tree:
break
i += 1
root = str(i)
tree[root] = root
# coalesce all crown nodes to root
for x in crown:
tree[x] = root
return root
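
# Illustrative usage (hypothetical taxonomy dicts, not from the original source):
# a single crown node is sealed as its own parent; multiple crown nodes are
# coalesced under a newly created root "1".
single = {'a': 'b', 'c': 'b', 'b': None}
print(fill_root(single), single['b'])           # b b
multi = {'a': None, 'b': None}
print(fill_root(multi), sorted(multi.items()))  # 1 [('1', '1'), ('a', '1'), ('b', '1')]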
|
def do_intersect(bb1, bb2):
"""
Helper function that returns True if two bounding boxes overlap.
"""
if bb1[0] + bb1[2] < bb2[0] or bb2[0] + bb2[2] < bb1[0]:
return False
if bb1[1] + bb1[3] < bb2[1] or bb2[1] + bb2[3] < bb1[1]:
return False
return True
|
def to_ge_level(level):
"""Convert the given Home Assistant light level (0-255) to GE (0-100)."""
return int(round((level * 100) / 255))
|
def to_int(varlist, dryrun=False):
"""
    varlist: List of context parameter values (numeric strings)
    Returns:
        A dict mapping each value to its integer conversion,
        or to None for every value when dryrun is True.
"""
if dryrun:
return {v: None for v in varlist}
return {v: int(v) for v in varlist}
|
def bubble_sort(arr):
"""
Passes over a list comparing two elements and repeats with a smaller,
sliced off end of the list each iteration until sorted.
"""
for i in range(len(arr)):
for j in range(len(arr) - i - 1):
# swap if left element is greater than right element
            if arr[j] > arr[j+1]:
                arr[j], arr[j+1] = arr[j+1], arr[j]
return arr
|
def encryptionColor(enc): #OLDNAME Return_Enc_type
"""
Take in the encryption used by the AP and return the proper color scheme based on that value.
Returns a list containing the AP fill color and AP font color
"""
fontColor = "black" #Default Font Color to be used
if enc == "OPN":
color = "firebrick2"
elif enc == "WEP":
color = "gold2"
elif enc in ["WPA","WPA2WPA","WPA2","WPAOPN"]:
color = "green3"
else: #No AP should ever get to this point as they will either be encrypted or open
color = "black"
fontColor = "white"
APcolorList = (color,fontColor) #OLDNAME colorLS
return APcolorList
|
def part_1(passwords):
"""
    Absolute cake walk: just do a linear search on each password to count # of occurrences.
"""
valid = 0
for (min_occur, max_occur, key_letter, password) in passwords:
if min_occur <= password.count(key_letter) <= max_occur:
valid += 1
return valid
|
def is_not_object(x):
"""Helper function for selecting only object cols"""
return str(x[1]) != 'object'
|
def fizzbuzz(num):
"""
A pretty vanilla actual fizzbuzz solution to build our raw dataset.
"""
if num % 3 == 0:
if num % 5 == 0:
return ('Fizz', 'Buzz')
else:
return('Fizz', '')
if num % 5 == 0:
return ('', 'Buzz')
return ('', '')
|
def to_ascii(code_array):
"""String corresponding to an array of ASCII codes"""
return "".join([chr(code) for code in code_array])
|
def get_languages(s):
"""
A function to obtain language settings via Accept-Language header.
"""
langs = [''.join(x.split(';')[:1]) for x in s]
return langs
|
def hash_patient_id(id):
"""Hash a patient id (created by synthea) to a smaller integer"""
return abs(hash(id) % 1000000)
|
def hasPrecedence(operator1, operator2):
"""Checks if operator2 has precedence over operator1
Args:
operator1 (str): first operator
operator2 (str): second operator
Returns:
(bool): true if operator2 has precedence over operator1 else false
"""
if operator2 == '(' or operator2 == ')':
return False
elif (operator1 == '*' or operator1 == '/') and (operator2 == '+' or operator2 == '-'):
return False
else:
return True
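
# Illustrative usage (example operators, not from the original source): '*' binds
# tighter than '+', and parentheses never take precedence here.
print(hasPrecedence('+', '*'))  # True
print(hasPrecedence('*', '+'))  # False
print(hasPrecedence('+', '('))  # False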
|
def chunk_tasks(n_tasks, n_batches, arr=None, args=None, start_idx=0):
"""Split the tasks into some number of batches to sent out to MPI workers.
Parameters
----------
n_tasks : int
The total number of tasks to divide.
n_batches : int
The number of batches to split the tasks into. Often, you may want to do
``n_batches=pool.size`` for equal sharing amongst MPI workers.
arr : iterable (optional)
Instead of returning indices that specify the batches, you can also
directly split an array into batches.
args : iterable (optional)
Other arguments to add to each task.
start_idx : int (optional)
What index in the tasks to start from?
"""
if args is None:
args = []
args = list(args)
tasks = []
if n_batches > 0 and n_tasks > n_batches:
# chunk by the number of batches, often the pool size
base_chunk_size = n_tasks // n_batches
rmdr = n_tasks % n_batches
i1 = start_idx
for i in range(n_batches):
i2 = i1 + base_chunk_size
if i < rmdr:
i2 += 1
if arr is None: # store indices
tasks.append([(i1, i2), i1] + args)
else: # store sliced array
tasks.append([arr[i1:i2], i1] + args)
i1 = i2
else:
if arr is None: # store indices
tasks.append([(start_idx, n_tasks+start_idx), start_idx] + args)
else: # store sliced array
tasks.append([arr[start_idx:n_tasks+start_idx], start_idx] + args)
return tasks
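
# Illustrative usage (example values, not from the original source): 10 tasks
# split over 3 batches yield index ranges of sizes 4, 3 and 3; passing `arr`
# returns the sliced data instead of the index pairs.
print(chunk_tasks(10, 3))                    # [[(0, 4), 0], [(4, 7), 4], [(7, 10), 7]]
print(chunk_tasks(5, 2, arr=list("abcde")))  # [[['a', 'b', 'c'], 0], [['d', 'e'], 3]]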
|
def dups(list1, list2, sort=True):
"""Returns a list containing items in both lists"""
dup_list = []
for item in list1:
if item in list2:
dup_list.append(item)
if sort:
dup_list.sort()
return dup_list
|
def imes_nav_decode(dwrds: list) -> dict:
"""
Helper function to decode RXM-SFRBX dwrds for IMES navigation data.
:param list dwrds: array of navigation data dwrds
:return: dict of navdata attributes
:rtype: dict
"""
return {"dwrds": dwrds}
|
def to_bool(value):
"""
Helper function for translating strings into booleans
@see test/TestReadConfig.py
"""
valid = {
'true': True, 't': True, '1': True, 'y' : True,
'false': False, 'f': False, '0': False, 'n' : False
}
if not isinstance(value, str):
raise ValueError('Cannot check boolean value. Not a string.')
lower_value = value.lower()
if lower_value in valid:
return valid[lower_value]
else:
raise ValueError('Not a boolean string: "%s"' % value)
|
def fromPoint3d( pt ):
"""Converts a Point3d to a tuple"""
if pt is None: return None
return (pt.x, pt.y, pt.z)
|
def _sortnodesellipsis(store, nodes, cl, lookup):
"""Sort nodes for changegroup generation."""
# Ellipses serving mode.
#
# In a perfect world, we'd generate better ellipsis-ified graphs
# for non-changelog revlogs. In practice, we haven't started doing
# that yet, so the resulting DAGs for the manifestlog and filelogs
# are actually full of bogus parentage on all the ellipsis
# nodes. This has the side effect that, while the contents are
# correct, the individual DAGs might be completely out of whack in
# a case like 882681bc3166 and its ancestors (back about 10
# revisions or so) in the main hg repo.
#
# The one invariant we *know* holds is that the new (potentially
# bogus) DAG shape will be valid if we order the nodes in the
# order that they're introduced in dramatis personae by the
# changelog, so what we do is we sort the non-changelog histories
# by the order in which they are used by the changelog.
key = lambda n: cl.rev(lookup(n))
return sorted(nodes, key=key)
|
def clean_string(string: str) -> str:
"""
input -> output
"updatedAt" -> "updated_at"
"UpdatedAt" -> "updated_at"
"base URL" -> "base_url"
"UPdatedAt" -> "u_pdated_at"
"updated_at" -> "updated_at"
" updated_at " -> "updated_at"
"updatedat" -> "updatedat"
"""
fix = {
"api method name": "Api Method Name",
"modifying user": "Modifying User",
"request id": "Request Id",
}
string = fix.get(string, string)
abbreviations = ("URL", "GUID", "IP", "ID", "IDs", "API", "SFDC", "CRM", "SLA")
if any(map(lambda w: w in string.split(), abbreviations)):
return string.lower().replace(" ", "_")
return "".join("_" + c.lower() if c.isupper() else c for c in string if c != " ").strip("_")
|
def genomic_del3_rel_38(genomic_del3_dup3_loc):
"""Create test fixture relative copy number variation"""
return {
"type": "RelativeCopyNumber",
"_id": "ga4gh:VRC.14ULmT3TSo6-du1BQehVkX14OH_92j4F",
"subject": genomic_del3_dup3_loc,
"relative_copy_class": "complete loss"
}
|
def merge_two_dicts(x, y):
"""
Given two dicts, merge them into a new dict as a shallow copy.
"""
z = x.copy()
z.update(y)
return z
|
def dd2dms(degrees):
"""convert degrees to degrees, minutes, seconds"""
from math import trunc
d = trunc(degrees)
m = trunc((degrees-d)*60.)
s = ((degrees-d)*60.-m)*60.
return (d,m,s)
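
# Illustrative usage (example value, not from the original source): 30.5 decimal
# degrees is 30 degrees, 30 minutes, 0 seconds.
print(dd2dms(30.5))  # (30, 30, 0.0)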
|
def sum_of_divisors_v1(n):
"""Naive version: Find the sum of all divisors of a given integer."""
sum_div = 0
for i in range(1, n+1):
# check if n is divisible by current number from 1 to n inclusive
if n % i == 0:
sum_div += i
return sum_div
|
def parse_creator_string(creator):
"""
    Parses the creator information string from the SPDX document into a list
    :param creator: creator information string
    :return: list of creator strings
"""
print('Begin Parsing Creator')
creators = []
creator_list = creator.replace(" ", "").split(',')
for c in creator_list:
if c[0] != '[':
creators.append(c)
else:
c = c[7:]
c = c[:-1]
creators.append(c)
print('Completed Parsing Creator')
return creators
|
def serialize_nlcd_landcover_v2(analysis, type):
"""Convert the output of the biomass_loss analysis to json"""
return {
'id': None,
'type': type,
'attributes': analysis
}
|
def triangular_number(n):
"""
Compute the nth triangular number
"""
return n * (n + 1) // 2
|
def find_file_end(chunks, pos):
"""Find a chunk after the one specified which is not a file block."""
pos = pos + 1
while pos < len(chunks)-1:
if chunks[pos][0] != 0x100 and chunks[pos][0] != 0x102:
# This is not a block
return pos
else:
pos = pos + 1
return pos
|
def get_id(ip='unknown', id='unknown'):
""" returns a properly formatted id """
return("{}-{}".format(ip, id))
|
def _get_first_polygon(results, query):
"""
Choose first result with geometry type multi/polygon from list of results.
Parameters
----------
results : list
list of results from downloader._osm_polygon_download
query : str
the query string or structured dict that was geocoded
Returns
-------
result : dict
the chosen result
"""
for result in results:
if result["geojson"]["type"] in {"Polygon", "MultiPolygon"}:
return result
# if we never found a polygon, throw an error
raise ValueError(f'OSM did not return any polygonal geometries for query "{query}"')
|
def parse_address(address):
"""Parse and address on the format host:port.
Returns a tuple (host, port). Raises ValueError if format is
invalid or port is not an integer or out of range.
"""
words = address.split(':')
if len(words) != 2:
raise ValueError('address must contain exactly one colon')
host, port = words
try:
port = int(port)
except ValueError:
raise ValueError('port number must be an integer')
# Note: port 0 is not allowed.
if not 0 < port < (2**16):
raise ValueError('port number out of range')
return (host, port)
|
def version2string(tversion):
""" Converts version tuple to string """
s = ""
if tversion[3] != 0:
s = "{}.{}.{}.{}".format(tversion[0], tversion[1], tversion[2], tversion[3])
elif tversion[2] != 0:
s = "{}.{}.{}".format(tversion[0], tversion[1], tversion[2])
else:
s = "{}.{}".format(tversion[0], tversion[1])
if tversion[4] != "":
s = s + "-" + tversion[4]
return s
|
def factorial_recursion(number):
"""
>>> factorial_recursion(3)
6
>>> factorial_recursion(5)
120
>>> factorial_recursion(0)
1
>>> factorial_recursion(1)
1
"""
    if isinstance(number, float):
raise ValueError("Non integer number is not allowed")
if number < 0:
raise ValueError("Negative number is not allowed")
if number in {0,1}:
return 1
return number * factorial_recursion(number-1)
|
def Di_from_WT(Do, WT):
"""Calculate pipe inner diameter from outer diameter and wall thickness.
"""
return Do - 2 * WT
|
def csv_escape(text):
"""Escape functional characters in text for embedding in CSV."""
if text is None:
return ''
return '~' + text.strip('\n -') + '~'
|
def set_sieve(n):
"""
Sets are mutable, unordered collections which are useful
for quick membership testing and math operations.
"""
primes = set(range(2, n+1))
for i in range(2, int(n**0.5)+1):
if i in primes:
primes -= set(range(i*i, n+1, i))
return primes
|
def average_gate_infidelity_to_RB_decay(gate_infidelity, dimension):
"""
Inversion of eq. 5 of [RB] arxiv paper.
:param gate_infidelity: The average gate infidelity.
:param dimension: Dimension of the Hilbert space, 2^num_qubits
:return: The RB decay corresponding to the gate_infidelity
"""
return (gate_infidelity - 1 + 1/dimension)/(1/dimension -1)
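
# Illustrative worked example (values chosen for demonstration, not from the
# original source): for a single qubit (dimension 2), an average gate infidelity
# of 0.01 corresponds to an RB decay of (0.01 - 1 + 0.5) / (0.5 - 1) = 0.98.
print(round(average_gate_infidelity_to_RB_decay(0.01, 2), 4))  # 0.98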
|
def clip(value, min_val, max_val):
"""Clip a value between lower and upper bounds.
Args:
value (number): the value to clip
min_val (number): the lower bound
max_val (number): the upper bound
Return:
The clipped value
"""
return min(max_val, max(min_val, value))
|
def filter_statuses(statuses):
"""
Squash statuses to latest state
1. context="first", state="success", update_time=1
2. context="second", state="success", update_time=2
3. context="first", stat="failure", update_time=3
=========>
1. context="second", state="success"
2. context="first", stat="failure"
"""
filt = {}
for status in sorted(statuses, key=lambda x: x.updated_at):
filt[status.context] = status
return filt
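
# Illustrative usage (hypothetical Status records built with a namedtuple; the
# real objects come from a Git hosting API): the later "failure" for context
# "first" overrides its earlier "success".
from collections import namedtuple
Status = namedtuple("Status", ["context", "state", "updated_at"])
statuses = [
    Status("first", "success", 1),
    Status("second", "success", 2),
    Status("first", "failure", 3),
]
latest = filter_statuses(statuses)
print({c: s.state for c, s in latest.items()})  # {'first': 'failure', 'second': 'success'}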
|
def is_valid_completion(completion):
"""
param: completion (str) - a string representing the task's completion status
returns: a Boolean True if the completion status is valid; False, otherwise
"""
    return completion == "yes" or completion == "no"
|
def _create_blank(width, height):
"""Returns a 2D list filled with None elements.
Creates and returns a list of lists of the given dimensions
filled with None elements.
Args:
width: The width of the blank terrain to generate.
height: The height of the blank terrain to generate.
"""
# Use a list comprehension to generate a blank of the given dimensions
empty_terrain = [[None for i in range(width)] for j in range(height)]
# Return the list
return empty_terrain
|
def get_obj_id(obj):
"""Get the obj id."""
return str(id(obj))
|
def send(dest, msg, transactionid=None):
"""STOMP send command.
dest:
This is the channel we wish to subscribe to
msg:
This is the message body to be sent.
transactionid:
This is an optional field and is not needed
by default.
"""
transheader = ''
if transactionid:
transheader = 'transaction: %s\n' % transactionid
return "SEND\ndestination: %s\n%s\n%s\x00\n" % (dest, transheader, msg)
|
def format_req_str(req) -> str:
"""includes fix for formatting git url"""
res = str(req).strip()
if res.startswith("-e git+git@"):
res = res.replace("-e git+git@", "-e git+ssh://git@")
return res
|
def form_intro(pol_areas, description=None):
"""
Form the introduction line
Parameters
----------
pol_areas: list of all the policy areas included in the reform used to
create a description of the reform
description: user provided description of the reform
"""
    # these are all of the possible strings used in the introduction sentence
    intro_text = {
        1: "modifying the {} section of the tax code",
        2: "modifying the {} and {} sections of the tax code",
        3: "modifying the {}, {}, and {} sections of the tax code",
        4: ("modifying a number of sections of the tax code, "
            "including {}, {}, and {}")
    }
}
if not description:
num_areas = min(len(pol_areas), 4)
intro_line = intro_text[num_areas]
if num_areas == 1:
return intro_line.format(pol_areas[0])
elif num_areas == 2:
return intro_line.format(pol_areas[0], pol_areas[1])
else:
return intro_line.format(pol_areas[0], pol_areas[1], pol_areas[2])
else:
return description
|
def collapse(window):
"""
DESCRIPTION:
This function:
1. Divides the sequence by segment fragment.
2. Collapses the repeated letters within a segment.
:param window: [dict] the instance to predict with sequence, the
target value and signal, the ONT values.
:return: [dict] the same window but the sequence has just one
letter per segment (not per signal value).
"""
window['fragments'] = [len(segment) for segment in window['sequence'].split('*') if segment]
window['sequence'] = ''.join([
''.join(set(segment))
for segment in window['sequence'].split('*')
])
return window
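
# Illustrative usage (hypothetical window dict; real windows also carry the
# target and signal data): three '*'-separated segments of lengths 3, 2 and 1
# collapse to one letter each.
window = {'sequence': 'AAA*TT*G'}
print(collapse(window))  # {'sequence': 'ATG', 'fragments': [3, 2, 1]}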
|
def _split_repo_url(url):
"""Split a repository URL into an org / repo combination."""
if "github.com/" in url:
end = url.split("github.com/")[-1]
org, repo = end.split("/")[:2]
else:
raise ValueError(
f"Currently Binder/JupyterHub repositories must be on GitHub, got {url}"
)
return org, repo
|
def _object_format(o):
""" Object arrays containing lists should be printed unambiguously """
if type(o) is list:
fmt = 'list({!r})'
else:
fmt = '{!r}'
return fmt.format(o)
|
def rest_netloc(node_number=0, host='localhost', first_port=8080):
"""
Returns a 'host:port' string of the addresses where the node's rest
interface is expected to be listening.
It assumes nodes are listening on consecutive ports as setup by the
elrond scripts v2. The node does not need to be running.
"""
return '%s:%i' % (host, first_port + node_number)
|
def validate_regon(regon):
"""Validate REGON.
:param regon: regon to validate
    :return: True if regon is valid, False otherwise
"""
regon_digits = list(map(int, regon))
args = (8, 9, 2, 3, 4, 5, 6, 7)
sum_v = sum(map(lambda x: x[0] * x[1], zip(args, regon_digits)))
checksum_digit = sum_v % 11
if checksum_digit > 9:
return regon_digits[-1] == 0
return regon_digits[-1] == checksum_digit
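
# Illustrative usage (example 9-digit REGON, not from the original source): the
# weighted sum 1*8 + 2*9 + 3*2 + 4*3 + 5*4 + 6*5 + 7*6 + 8*7 = 192 gives checksum
# 192 % 11 = 5, so "123456785" validates and "123456784" does not.
print(validate_regon("123456785"))  # True
print(validate_regon("123456784"))  # False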
|
def flatten(lst):
"""
Recursively flatten a tuple or list.
"""
return sum(([x] if not isinstance(x, (list, tuple)) else flatten(x) for x in lst), [])
|
def get_node(path, name_space, client):
""" This helper method returns a specified node """
if name_space:
path_formatted = 'ns={};i={}'.format(name_space, path)
n = client.get_node(path_formatted)
return n
|
def mailbox_test(str):
"""Tests if the address inside brackets is valid"""
testcase = str
start = str.find('<')
end = str.find('>')
testcase = testcase[start+1:end]
at_test = testcase.find('@')
if at_test == -1:
return False
else:
"""
This is code that was removed for being too strict for the grader in HW1. I'm keeping it here just in case I need it later.
"""
testcase2 = testcase[0:at_test]
testcase3 = testcase[at_test+1:]
if testcase2.isalnum() == False:
return False
if testcase3.find('@') != -1 or testcase3.find(' ') != -1:
return False
return True
|
def rms_is_schedulable(tasks):
"""Check the task set schedulability for RMS.
Check whether the specified task set is schedulable under RMS algorithm.
:param tasks: list of task descriptors.
:return: The return value. True for success, False otherwise.
:rtype: bool.
"""
totalUse = sum(task['exec_time']/float(task['period']) for task in tasks)
n = len(tasks)
    if n == 0: return
    # check schedulability based on total use and number of tasks
    print("RMS schedulability:", totalUse, " <= ", n*(2**(1/n)-1))
if (totalUse > 1.0):
print("ERROR: total CPU usage > 100%.")
return False
if (totalUse <= n*(2**(1/n)-1)):
print("The tasks are provably schedulable.")
else:
print("The tasks might be scheludable, with no guarantees.")
return True
|
def get_unic(vector):
"""Return a vector with the same element having only one occurence.
Args:
vector (list): List of element which may contains double elements
Returns:
vector_unic (list): List of unic values ordered in ascending way
"""
vector_unic = []
for elem in vector:
if elem not in vector_unic:
vector_unic.append(elem)
vector_unic.sort()
return vector_unic
|
def get_rb_blob_attribute(blobdict, attr):
"""Get Attribute `attr` from dict `blobdict`
Parameters
----------
blobdict : dict
Blob Description Dictionary
attr : string
Attribute key
Returns
-------
ret : Attribute Value
"""
try:
value = blobdict['BLOB']['@' + attr]
except KeyError:
        raise KeyError('Attribute @' + attr + ' is missing from Blob. ' +
                       'There may be some problems with your file')
return value
|
def stream_idxs_zeros(subscriptions):
"""Initialize an integer index for each subscription."""
idxs = {name: 0 for name in subscriptions}
return idxs
|
def is_empty(coll):
"""Returns true if coll has no items.
"""
return (coll is None) or (len(coll) == 0)
|
def parse_numeric_columns(columns:str)->list:
"""parse a comma separated list (no spaces) of columns and return the columns as a list."""
return columns.split(',')
|