content
stringlengths 42
6.51k
|
---|
def get_table_name(line):
    """Extract the table name from a CREATE TABLE line of a SQL dump.

    Takes the token following the word TABLE (case-insensitive) and
    returns it lower-cased with surrounding whitespace removed.
    """
    name_start = line.upper().find('TABLE') + 6
    # Look for the terminating space starting 2 chars into the name.
    name_end = line[name_start:].find(' ', 2) + name_start
    return line[name_start:name_end].strip().lower()
|
def consume_whitespace(s: str, offset):
    """Advance *offset* past whitespace and ';'-to-end-of-line comments.

    Returns the first offset at or beyond which a non-space,
    non-comment character begins (or len(s) when exhausted).
    """
    length = len(s)
    while True:
        # Skip plain whitespace.
        while offset < length and s[offset].isspace():
            offset += 1
        # Anything other than a ';' comment ends the scan.
        if offset >= length or s[offset] != ";":
            return offset
        # Comment: skip through to the end of the line.
        while offset < length and s[offset] not in "\n\r":
            offset += 1
|
def _process_vals(vals):
    """Unwrap a single-element list; pass everything else through unchanged.

    :param vals: None or a single value in a list of a list
    >>> _process_vals(None) is None
    True
    >>> _process_vals([])
    []
    >>> _process_vals([0])
    0
    >>> _process_vals(["0", "1"])
    ['0', '1']
    """
    if not vals:
        # None and the empty list are returned as-is.
        return vals
    return vals[0] if len(vals) == 1 else vals
|
def sunion_empty(ls):
    """Union of all sets in *ls*; an empty list yields the empty set."""
    try:
        combined = set.union(*ls)
    except TypeError:
        # set.union() with no positional args raises TypeError.
        combined = set()
    return combined
|
def rwrap(some_string):
    """Wrap *some_string* in ANSI escape codes so terminals render it red."""
    return "\033[91m{}\033[0m".format(some_string)
|
def setdefaultx(dct, key, *values):
    """Like dict.setdefault, applied with the first non-None candidate.

    Mutates and returns *dct*; at most one candidate is ever applied.
    """
    for candidate in values:
        if candidate is None:
            continue
        dct.setdefault(key, candidate)
        break
    return dct
|
def make_bytes(s: str) -> bytes:
    """Pack the '0'/'1' characters of *s* into bytes.

    Characters other than 0 and 1 (whitespace in particular) are
    ignored. Bits are packed little-endian within each byte; a trailing
    partial byte is zero-padded.
    """
    out = bytearray()
    current = 0
    bit_pos = 0
    for ch in s:
        if ch not in '01':
            continue  # coverage: ignore
        if ch == '1':
            current |= 1 << bit_pos
        bit_pos += 1
        if bit_pos == 8:
            out.append(current)
            current = 0
            bit_pos = 0
    if bit_pos:
        out.append(current)
    return out
|
def exp_duration_handler(inputs):
    """
    Build dermal/inhalation class inputs for the selected exposure
    duration (Short, Intermediate, or Long term).
    ONLY SHORT TERM IS CURRENTLY ALLOWED ON THE FRONTEND
    :param inputs: dict of frontend form values
    :return: dict with 'dermal' and 'inhal' sub-dicts
    """
    # Later flags win; short term is the default.
    suffix = '_st'
    if inputs['expDurationType_st']:
        suffix = '_st'
    if inputs['expDurationType_it']:
        suffix = '_it'
    if inputs['expDurationType_lt']:
        suffix = '_lt'
    dermal = {
        'abs_frac': float(inputs['dermal_abs_frac' + suffix]) / 100.,
        'bw_adult': inputs['bw_dermal_NC' + suffix],
        'nc_POD': inputs['dermal_NC_POD' + suffix],
        'nc_LOC': inputs['dermal_NC_LOC' + suffix],
    }
    inhal = {
        'abs_frac': float(inputs['inhalation_abs_frac' + suffix]) / 100.,
        'bw_adult': inputs['bw_inhalation_NC' + suffix],
        'nc_POD': inputs['inhalation_NC_POD' + suffix],
        'nc_LOC': inputs['inhalation_NC_LOC' + suffix],
    }
    return {'dermal': dermal, 'inhal': inhal}
|
def no_overlap(s1, d1, s2, d2):
    """
    no_overlap(s1, d1, s2, d2)
    Constraint that task 1 (start s1, duration d1) and task 2 (start s2,
    duration d2) do not overlap: one must finish before the other starts.
    Returns the disjunction wrapped in a single-element list.
    """
    task1_first = s1 + d1 <= s2
    task2_first = s2 + d2 <= s1
    return [task1_first | task2_first]
|
def get_execution_timer_name(exec_uuid: int):
    """Name of the timer used to record pure execution time for *exec_uuid*."""
    return "{}-execution".format(exec_uuid)
|
def validateFilename(value):
    """
    Validate a SimpleGridDB filename.

    :param value: Path of the spatial database file.
    :returns: *value* unchanged when the file can be opened.
    :raises ValueError: If *value* is empty.
    :raises IOError: If the file cannot be opened for reading.
    """
    if 0 == len(value):
        raise ValueError("Name of SimpleGridDB file must be specified.")
    try:
        # BUG FIX: the original opened the file and never closed it,
        # leaking the handle; a with-block closes it immediately.
        with open(value, "r"):
            pass
    except IOError:
        raise IOError("Spatial database file '{}' not found.".format(value))
    return value
|
def is_tty(stream):  # taken from catkin_tools/common.py
    """True when *stream* exposes isatty() and it reports a tty."""
    if not hasattr(stream, 'isatty'):
        return False
    return stream.isatty()
|
def ygel(g):
    """Y position for a gelcoat amount *g*: offset 10 from a 600 baseline."""
    return (g - 600) + 10
|
def pagination_range(total_page, current_num=1, display=5):
    """Range of page numbers to show in a paginator, e.g. < 1 2 3 4 5 >.

    :param total_page: total number of pages
    :param current_num: the page currently displayed
    :param display: how many page links to show at once
    """
    try:
        current_num = int(current_num)
    except ValueError:
        current_num = 1
    half = int(display / 2)
    # Centre the window on the current page where possible.
    first = current_num - half if current_num > half else 1
    if first + display <= total_page:
        last = first + display
    else:
        # Window would run past the end: pin it to the final pages.
        last = total_page + 1
        first = last - display if last > display else 1
    return range(first, last)
|
def list_attr(data, key):
    """Fetch *key* from a querydict (via getlist) or a plain dict, always
    returning a list."""
    try:
        return data.getlist(key)
    except AttributeError:
        # Plain dict: normalise missing -> [] and scalar -> [scalar].
        if key not in data:
            return []
        value = data[key]
        return value if isinstance(value, list) else [value]
|
def trim_and_filter_page(page):
    """
    Use the "FOR THE PRESIDENT ONLY" tags near the start and end of each
    page to further trim the pages that have text.
    Not all pages (e.g., "Title Page" and Maps) have these lines,
    so cannot use to split pages. But, because it is missing from
    these page types, we can use it to filter out pages.
    THIS METHOD IS NOT RELIABLE; SOME TEXT PAGES HAVE THIS TAG COVERED UP

    :param page: list of text fragments making up one page
    :return: the page joined into a single string, trimmed to the text
        between the two tags when both are present
    """
    # Partial tag ("FO..." / "...NLY" clipped): OCR often loses the
    # first/last characters of "FOR THE PRESIDENT ONLY".
    pres_seq = "R THE PRESIDENT O"
    page = ' '.join(page)
    pos = page.find(pres_seq)
    if -1 < pos < 100:
        # Drop everything through the leading tag.
        page = page[(pos + len(pres_seq)):]
        end = page.find(pres_seq)
        if end != -1:
            # BUG FIX: only cut at the trailing tag when it exists; the
            # old code sliced with find()'s -1 and silently dropped the
            # final character whenever the tag appeared only once.
            page = page[:end]
    return page
|
def overlapping(start1, end1, start2, end2):
    """True when the two intervals overlap (shared endpoints don't count).

    >>> overlapping(0, 5, 6, 7)
    False
    >>> overlapping(1, 2, 0, 4)
    True
    >>> overlapping(5,6,0,5)
    False
    """
    interval1_before = (start1 <= start2 and start1 <= end2
                        and end1 <= end2 and end1 <= start2)
    interval1_after = (start1 >= start2 and start1 >= end2
                       and end1 >= end2 and end1 >= start2)
    return not (interval1_before or interval1_after)
|
def parse_mode(mode_data):
    """Split a mode string into the modes to add and those to remove.

    >>> parse_mode("+ocn-Ct")
    ('ocn', 'Ct')
    """
    added = ""
    removed = ""
    current = "+"  # '+' is the implicit initial directive
    for ch in mode_data:
        if ch == "+" or ch == "-":
            current = ch
        elif current == "-":
            # Note: spaces are kept here, matching historical behaviour.
            removed += ch
        elif ch != " ":
            added += ch
    return (added, removed)
|
def bullet_wall(data):
    """
    The wall is represented by two coordinates W1 (xw1, yw1) and W2 (xw2, yw2)
    on a coordinate plane. The bullet flies from point "A" (xa, ya), and the
    direction of its flight is given by the second point "B" (xb, yb).
    Determine whether the bullet hits the wall or not if gravity is not a
    factor. A != B.
    Input: A list with coordinates in next order W1, W2, A, B. Each coordinate
    is a list of x and y coordinates (int).
    Output: Whether the bullet hits the wall or not. A boolean.
    plan, use the bullet to construct a line using point slope form
    if one point is below the line and another point is above it, it
    intersects, else it does not
    """
    # Unpack: W1/W2 are the wall endpoints, A is the muzzle, B the aim point.
    xw1, yw1 = data[0]
    xw2, yw2 = data[1]
    xa, ya = data[2]
    xb, yb = data[3]
    #check if the wall is behind the gun
    # NOTE(review): the second operand reads (ya-yb)/(ya-yw1>0), i.e. it
    # divides by the *boolean* (ya-yw1>0) rather than by (ya-yw1). This
    # looks like a misplaced parenthesis for ((ya-yb)/(ya-yw1)) > 0 and
    # raises ZeroDivisionError whenever ya <= yw1. The first operand can
    # also divide by zero when xa == xw1. TODO confirm intent before fixing.
    if ((xa-xb)/(xa-xw1)>0 or (ya-yb)/(ya-yw1>0)):
        hits=(False, "The wall is behind the gun")
    elif ((xb,yb)==(xw1,yw1) or (xb,yb)==(xw2,yw2)):
        hits= (True, "The aim point is the wall")
    elif (xa-xb)!=0:
        # Non-vertical bullet path: build y = mgun*x + bgun through A and B.
        mgun=float(ya-yb)/(xa-xb)
        bgun=-(mgun*xa-ya)
        guneq = lambda x: mgun*x+bgun
        if (xw1-xw2)!=0:
            # Non-vertical wall: intersect the two lines and test whether
            # the intersection's x lies within the wall's x-span.
            mwall=float(yw1-yw2)/(xw1-xw2)
            # NOTE(review): bwall = mwall*xw1 - yw1 uses the opposite sign
            # convention from bgun = -(mgun*xa - ya); one of the two looks
            # inverted -- verify against the intended line equations.
            bwall=mwall*xw1-yw1
            xvalue=(bgun-bwall)/(mwall-mgun)
            # First tuple element is the hit boolean; the rest appear to be
            # debugging output (slopes, intercepts, the line lambda).
            hits= ((min(xw1,xw2) <= xvalue <= max(xw1,xw2)), mgun, bgun, guneq, mwall, bwall, xvalue)
        else:
            # Vertical wall: evaluate the bullet line at the wall's x and
            # test against the wall's y-span.
            hits= ((min(yw1,yw2) <= guneq(xw1) <= max(yw1,yw2)), mgun, bgun, guneq)
    else:
        # Vertical bullet path: hit iff the wall's x-span covers xa.
        hits= (min(xw1,xw2) <= xa <= max(xw1,xw2))
    # if the line is below the wall at one point and above it in the other in the direction
    # of the bullets trajectory, it hit the wall
    # issues to be aware of, if the wall extends to the right and below of
    return hits
|
def mixing_dict(xy, normalized=False):
    """Returns a dictionary representation of mixing matrix.

    Parameters
    ----------
    xy : list or container of two-tuples
        Pairs of (x,y) items.
    normalized : bool (default=False)
        Return counts if False or probabilities if True.

    Returns
    -------
    d: dictionary
        Counts or Joint probability of occurrence of values in xy.
    """
    counts = {}
    total = 0.0
    for x, y in xy:
        # Every value seen in either position gets a row of its own.
        counts.setdefault(x, {})
        counts.setdefault(y, {})
        counts[x][y] = counts[x].get(y, 0) + 1
        total += 1
    if normalized:
        for row in counts.values():
            for col in row:
                row[col] /= total
    return counts
|
def get_chain_length(chains, chain_idx, start_gen_idx):
    """
    Length of the chain with index *chain_idx*, counted from generation
    *start_gen_idx* (inclusive) to the end of the chain or the first
    empty bin (which is excluded).
    """
    length = 0
    for generation in chains[start_gen_idx:]:
        if len(generation[chain_idx]) == 0:
            break
        length += 1
    return length
|
def distance(p1, p0):
    """Euclidean distance between 3-D points *p1* and *p0*."""
    dx = p1[0] - p0[0]
    dy = p1[1] - p0[1]
    dz = p1[2] - p0[2]
    return (dx ** 2 + dy ** 2 + dz ** 2) ** 0.5
|
def parse_fasta(handle):
    """Minimal FASTA parser: maps record id -> concatenated sequence string."""
    records = {}
    seq_id = None
    chunks = []
    for raw in handle:
        if raw.startswith(">"):
            # Header line: flush the previous record, start a new one.
            if seq_id is not None:
                records[seq_id] = ''.join(chunks)
            seq_id = raw[1:].strip()
            chunks = []
        else:
            chunks.append(raw.strip())
    # Flush the final record, if any lines were seen.
    if seq_id is not None:
        records[seq_id] = ''.join(chunks)
    return records
|
def leftPadItems(alist):
    """Prefix every string in *alist* with a single space."""
    padded = []
    for item in alist:
        padded.append(' ' + item)
    return padded
|
def next_prime(old_prime, values):
    """
    Return the next prime after *old_prime*: the first number greater
    than it whose flag in *values* is still False (i.e. not yet marked
    as a multiple).
    @param:
        old_prime - prime number
        values - booleans marking the multiples found so far (1-based)
    """
    for candidate, is_multiple in enumerate(values, 1):
        if candidate > old_prime and is_multiple is False:
            return candidate
|
def noise_removal_w_list(input_text):
    """Remove the fixed noise words ("is", "a", "this", "...") from text.

    :param input_text: Noisy text.
    :return: Clean text.
    """
    noise_words = ("is", "a", "this", "...")
    kept = []
    for token in input_text.split():
        if token not in noise_words:
            kept.append(token)
    return " ".join(kept)
|
def _max_order_circular_harmonics(N, max_order):
    """Order of 2D HOA: N // 2 unless an explicit *max_order* is given."""
    if max_order is None:
        return N // 2
    return max_order
|
def constrain(value, min_val, max_val):
    """Clamp *value* to [min_val, max_val] (lower bound checked first)."""
    if value < min_val:
        return min_val
    if value > max_val:
        return max_val
    return value
|
def tic(name=None):
    """
    Start (or restart) a named timer, IDL TIC style.

    Returns
    -------
    clock_name : str or None
        The parameter which was passed as ``name``. Pass it, or the ``name``
        directly, to ``toc()`` to get the timing for that particular call to
        ``tic``.

    Notes
    -----
    - The ``/PROFILER`` keyword is not implemented.
    - http://www.harrisgeospatial.com/docs/TIC.html
    """
    import timeit
    # Timers are stored on the function object itself, keyed by name.
    try:
        timers = tic.start
    except AttributeError:
        timers = tic.start = {}
    timers[name] = timeit.default_timer()
    return name
|
def generate_bn_mat_gamma_0(n, k):
"""
Defined in the paper eq (24)
"""
if n == 0:
return 0.5
return (2.0 * k * (k + 1) + n * (2 * k + n + 1)) / ((2 * k + n) * (2 * k + n + 2))
|
def is_bit_set(a, offset):
    """Whether bit *offset* (0-based, LSB first) of *a* is set.

    Returns
        bool, True if bit in position offset in a is 1
    """
    return bool((a >> offset) & 1)
|
def is_monic(f):
    """A mapping is monic (injective) iff no two keys share a value."""
    # dict keys are unique by construction, so len(f) counts them.
    return len(f) == len(set(f.values()))
|
def get_column_alphabetical_index_from_zero_indexed_num(col_idx: int) -> str:
    """Convert a zero-indexed column number to its spreadsheet letter index
    (bijective base-26): 0 -> 'A', 25 -> 'Z', 26 -> 'AA', 27 -> 'AB',
    701 -> 'ZZ', 702 -> 'AAA'.
    """
    num_letters_alphabet = 26

    def get_letter_from_zero_indexed_idx(idx: int):
        ascii_start = 65  # ord('A')
        return chr(ascii_start + idx)

    if col_idx < num_letters_alphabet:
        return get_letter_from_zero_indexed_idx(col_idx)
    last_char = get_letter_from_zero_indexed_idx(col_idx % num_letters_alphabet)
    # BUG FIX: spreadsheet columns use *bijective* base-26, so the
    # recursion must shift down by one; the old code produced 'BA' for 26
    # instead of 'AA'.
    prefix_str = get_column_alphabetical_index_from_zero_indexed_num(
        col_idx // num_letters_alphabet - 1)
    return prefix_str + last_char
|
def intToBinaryString(integer: int) -> str:
    """Big-endian binary representation of a positive integer.

    Parameters
    ----------
    integer : int
        A positive integer

    Returns
    -------
    str
        The binary digits, most significant first (empty string for 0).
    """
    bits = []
    while integer > 0:
        bits.append(str(integer & 1))
        integer >>= 1
    # Bits were collected least-significant first; reverse for big-endian.
    return ''.join(reversed(bits))
|
def maybe_tuple(data):
    """Return tuple(data), unwrapped to its sole element when length is 1.

    Used to wrap `map_func` inputs.
    """
    as_tuple = tuple(data)
    if len(as_tuple) > 1:
        return as_tuple
    return as_tuple[0]
|
def set_override_certificate_errors(override: bool) -> dict:
    """Build the DevTools command that enables/disables certificate-error
    overriding. When enabled, all certificate error events must be handled
    by the DevTools client and answered with `handleCertificateError`.

    Parameters
    ----------
    override: bool
        If true, certificate errors will be overridden.
    """
    params = {"override": override}
    return {"method": "Security.setOverrideCertificateErrors",
            "params": params}
|
def prime_factorization(n):
    """
    Factor *n* into prime powers: each list entry is p**k for a prime p
    dividing n. (Modified from plain factorization to fit the
    application's needs.)
    """
    factors = []
    i = 2
    while i * i <= n:
        power = 1
        # Extract the full power of i that divides n.
        while n % i == 0:
            power *= i
            n = n // i
        if power > 1:
            factors.append(power)
        i += 1
    if n > 1:
        # Whatever is left is a prime larger than sqrt(original n).
        factors.append(n)
    return factors
|
def calc_pier_reaction(cur_axle_loc, mod_axle_wt, span1_begin, span1_end,
                       span2_begin, span2_end, num_axles, axle_id, direction):
    """Calculate the interior pier (floorbeam) reaction.
    Args:
        cur_axle_loc (list of floats): current x-coordinate of all axles on span
        mod_axle_wt (list of floats): weight of each axle
        span1_begin (float): x-coordinate of the beginning of span 1
        span1_end (float): x-coordinate of the end of span 1 (the pier)
        span2_begin (float): x-coordinate of the beginning of span 2
        span2_end (float): x-coordinate of the end of span 2
        num_axles (int): number of program defined axles (includes axles for
            approximate distributed load)
        axle_id: identifier passed through unchanged in the return tuple
        direction: direction flag passed through unchanged in the return tuple
    Returns:
        tuple: (Rpier, L_S1, L_S2, axle_id, direction) where Rpier is the
        reaction at the pier (floorbeam) and L_S1/L_S2 are the span-1 and
        span-2 contributions to it.
    Notes:
        This function is very similar to the calc_load_and_loc function. The
        main reason this function is required is the loads that act directly
        over the support at the beginning of span 1 and at the end of span 2 do
        not contribute to the reaction at the pier. However, they do contribute
        to the reactions of each span and in determining the correct moment and
        shear in each span.
    """
    Rpier = 0.0
    #these are *not* the reactions at the beginning and end supports of a two
    #span structure
    #these are the two reactions on the center pier for the adjacent span
    L_S1 = 0.0 #center pier reaction from loads on span 1
    L_S2 = 0.0 #center pier reaction from loads on span 2
    span1_length = span1_end - span1_begin
    span2_length = span2_end - span2_begin
    for i in range(num_axles):
        #if load is not over the support at the beginning of span 1
        #and if the load is not over the support at the end of span 2
        if cur_axle_loc[i] > span1_begin and cur_axle_loc[i] < span2_end:
            #if axle is directly over pier
            # (split evenly between the two span contributions)
            if cur_axle_loc[i] == span1_end:
                r = mod_axle_wt[i]
                L_S1 = L_S1 + r/2
                L_S2 = L_S2 + r/2
            #if the load is on span 1, calc the reaction at the pier
            # (lever-arm proportion of the axle weight toward the pier end)
            if cur_axle_loc[i] < span1_end:
                r = (cur_axle_loc[i] - span1_begin)/span1_length*mod_axle_wt[i]
                L_S1 = L_S1 + r
            #if the load is on span 2, calc the reaction at the pier
            if cur_axle_loc[i] > span2_begin:
                r = (span2_end - cur_axle_loc[i])/span2_length*mod_axle_wt[i]
                L_S2 = L_S2 + r
    Rpier = L_S1 + L_S2
    return Rpier, L_S1, L_S2, axle_id, direction
|
def get_overlap(a, b):
    """
    Length of the overlap between ranges a=(lo, hi) and b=(lo, hi);
    0 when they are disjoint.
    """
    lo = max(a[0], b[0])
    hi = min(a[1], b[1])
    return max(0, hi - lo)
|
def _ValidateFeatureOptions(feature_options):
    """Checks that the feature_options is properly formatted.

    Args:
      feature_options (dict): A dictionary mapping feature to its
        configurations.
        For example:
        {
            'TouchCrashedComponent': {
                'blacklist': ['Internals>Core'],
            }
    Returns:
      True if ``feature_options`` is properly formatted, False otherwise.
    """
    if not isinstance(feature_options, dict):
        return False
    # Each required feature must be a dict carrying a list-valued blacklist.
    for feature in ('TouchCrashedDirectory', 'TouchCrashedComponent'):
        options = feature_options.get(feature)
        if not isinstance(options, dict):
            return False
        if not isinstance(options.get('blacklist'), list):
            return False
    return True
|
def news_helper(news) -> dict:
    """Convert one news record from a database query into a plain dict."""
    return dict(
        id=str(news['_id']),
        title=news['title'],
        link=news['link'],
        image_url=news['image_url'],
        source=news['source'],
    )
|
def hex2rgb(value):
    """Converts a hexadecimal color string to an RGB 3-tuple.

    EXAMPLE
    -------
    >>> hex2rgb('#0000FF')
    (0, 0, 255)
    """
    digits = value.lstrip('#')
    # Each channel takes one third of the remaining digits (1 or 2 chars).
    step = len(digits) // 3
    channels = []
    for pos in range(0, len(digits), step):
        channels.append(int(digits[pos:pos + step], 16))
    return tuple(channels)
|
def process_coordinates(result):
    """
    Re-shape a neighborhood -> [(lon, lat), ...] dictionary into
    neighborhood -> {"Lat": [...], "Lon": [...]}.

    Parameter: result dictionary, contains neighborhoods and lists of
        (longitude, latitude) coordinate pairs
    Return: polygon dictionary keyed by neighborhood, each value holding
        a list of latitudes and a list of longitudes
    """
    polygons = {}
    for neighborhood, coordinates in result.items():
        # Index 1 is latitude, index 0 is longitude in the input pairs.
        lats = [point[1] for point in coordinates]
        lons = [point[0] for point in coordinates]
        polygons[neighborhood] = {"Lat": lats, "Lon": lons}
    return polygons
|
def fix_tftp_filepath(file_path):
    """Strip the default TFTP root directory from *file_path*.

    The TFTP configuration file sets the default TFTP directory, normally
    /var/lib/tftpboot (the equivalent of in.tftpd -s or --secure), so a
    copy of that directory embedded in the path would be doubled up
    (e.g. '/var/lib/tftpboot/var/lib/tftpboot/startup-config.tftp').
    Everything up to and including the *last* 'var/lib/tftpboot/' is
    removed; otherwise only a single leading '/' is dropped.

    :param file_path: The raw file path.
    :type file_path: str
    :returns: The corrected file path.
    :raises ValueError: If the file_path is None, empty or whitespace.
    """
    if file_path is None or not file_path.strip():
        raise ValueError('Invalid file path.')
    file_path = file_path.strip()
    default_dir_str = 'var/lib/tftpboot/'
    offset = file_path.rfind(default_dir_str)
    if offset != -1:
        # Keep only what follows the last occurrence of the TFTP root.
        file_path = file_path[offset + len(default_dir_str):]
    elif file_path.startswith('/'):
        # No root in the path: drop the leading slash so the path is
        # relative to the TFTP directory. (Skipping this step caused a
        # lot of hate and discontent towards the PMA!)
        file_path = file_path[1:]
    return file_path
|
def string_to_bool(string: str) -> bool:
    """True when *string* is a truthy word: yes/y/true/t (case-insensitive)."""
    # The original tuple listed "yes" twice; membership is unchanged.
    return string.lower() in ("yes", "y", "true", "t")
|
def stringToFields(jobFields):
    """Convert a jobState string such as "['a', 'b']" into a list of
    stripped field names."""
    cleaned = jobFields.replace('[', '').replace(']', '').replace("'", '')
    return [part.strip() for part in cleaned.split(',')]
|
def func_pro_pn_limit(string,bool_repeats=True):
    """process pn_limit
    Raw input should be a string
    1) '1,p, 2, p, 3:P, 4 - Positive, 5 :N, 6- negative, 7, 8, 9 n'
    2) '1p, 2 3, p 4, - , p, 5 ,6n, 7 8 ,, 9 n'
    3) '1,2,3,4p, 5,6,7,8,9n'
    4) '1-p, 2:p, 3-p, 4p, 5n, 6n, 7-n, 8n, 9n'
    5) '1:p, 2-p, 3p, 4p, 5:n, 6n, 7n, 8-n, 9n'
    6) '1p, 2p, 3p, 4p, 5n, 6n, 7n, 8n, 9n'
    The number of space or tab doesn't matter.
    All of those six inputs are equivalent.
    The comma is chosen as the delimiter, the key word 'p' and 'n'
    represent 'positive' and 'negative' respectively, both its
    initial and full word are OK, they are case-insensitive.
    the bool_repeats are used to define the double-definition
    can be accepted or not for the same entry.
    Note:
        the return value is not sequence based
    """
    # Normalise: punctuation delimiters become spaces, and the tails of the
    # spelled-out keywords are dropped so only bare 'p'/'n' markers remain.
    line = string.replace(',',' ').replace(':',' ').replace('-',' ').lower()
    line = line.replace('ositive',' ').replace('egative',' ')
    # Compact copy used only for format validation.
    subline = line.replace(' ','')
    try:
        # Probe: once 'p'/'n' are removed, only digits may remain
        # (int() raises ValueError otherwise).
        dump_value = int(subline.replace('p','').replace('n',''))
        # The first character in subline can't be 'n' or 'p',
        # 'pn' or 'np' should not exist, and the last char has
        # to be either 'n' or 'p'
        if subline[0] == 'p' or subline[0] == 'n' or \
            'pn' in subline or 'np' in subline or \
            (subline[-1] != 'n' and subline[-1] != 'p'):
            raise ValueError
    except ValueError:
        print(string)
        raise ValueError('Wrong defined')
    # Walk `line` splitting it at each 'p'/'n' marker: the text before a
    # marker is the run of numbers that belongs to that sign.
    nl = pl = ''
    i = 0
    j = 0
    while i < len(line):
        if line[i] == 'p':
            if j < i:
                pl += line[j:i]
            j = i + 1
        elif line[i] == 'n':
            if j < i:
                nl += line[j:i]
            j = i + 1
        i += 1
    # Build [number, sign] pairs; negatives first, then positives
    # (the result is explicitly not sequence based -- see docstring).
    nvlist = []
    for i in nl.split():
        nvlist.append([int(i),'n'])
    for i in pl.split():
        nvlist.append([int(i),'p'])
    if not bool_repeats:
        # Reject double definitions of the same entry number.
        t = [i[0] for i in nvlist]
        if len(t) != len(set(t)):
            print(string)
            raise ValueError('Wrong defined')
    return nvlist
|
def writeFragments( outfile_fasta, outfile_index,
                    fragments, mangler, size,
                    write_all = False):
    """write mangled fragments to outfile in chunks of size.
    returns remaining chunk.
    if write_all is True, all of fragments are written and
    the position of the last position is added to the index
    as well.
    """
    s = "".join(fragments)
    if len(s) > size:
        # NOTE(review): x runs over 0, size, 2*size, ... up to len(s)-2,
        # so the final slice s[x:x+size] may already include the partial
        # tail; that tail is then handled *again* below via s[-x:] when
        # len(s) is not a multiple of size -- confirm whether the double
        # write is intended.
        for x in range(0, len(s)-1, size):
            outfile_index.write( "\t%i" % outfile_fasta.tell() )
            outfile_fasta.write( mangler(s[x:x+size]) )
        x = len(s) % size
        if x:
            if write_all:
                # Flush the remainder and bracket it with index positions.
                outfile_index.write("\t%i" % outfile_fasta.tell() )
                outfile_fasta.write( mangler( s[-x:] ) )
                outfile_index.write("\t%i" % outfile_fasta.tell() )
                return ""
            else:
                return s[-x:]
        else:
            # Exact multiple of size: nothing left over.
            # NOTE(review): when write_all is True this branch does not
            # record the final file position -- TODO confirm.
            return ""
    # NOTE(review): when len(s) <= size control falls off the end and the
    # function returns None, while the docstring promises the remaining
    # chunk -- verify against callers.
|
def toUnicode(text):
    """Best-effort conversion of bytes (or any non-str value) to str."""
    if type(text) != str:
        try:
            text = str(text, encoding='UTF-8')
        except (UnicodeDecodeError, TypeError):
            # Undecodable bytes, or not a bytes-like object at all.
            text = "\n[WARNING] : Failed to decode bytes!\n"
    return text
|
def int_or_none(item):
    """
    Tries to convert ``item`` to :py:func:`int`. If it is not possible, returns
    ``None``.

    :param object item: Element to convert into :py:func:`int`.

    >>> int_or_none(1)
    ... 1
    >>> int_or_none("1")
    ... 1
    >>> int_or_none("smth")
    ... None
    """
    if isinstance(item, int):
        return item
    try:
        return int(item)
    except (TypeError, ValueError):
        # BUG FIX: narrowed from a bare ``except`` that also swallowed
        # KeyboardInterrupt and SystemExit.
        return None
|
def top_sort(dafsa):
    """Generates list of nodes in topological sort order."""
    # In-degree counts keyed by id(node); children are read from node[1],
    # so nodes are assumed to be (label, children) pairs -- TODO confirm
    # against the dafsa builder.
    incoming = {}
    def count_incoming(node):
        """Counts incoming references."""
        if node:  # falsy children (e.g. None terminators) are skipped
            if id(node) not in incoming:
                # First visit: record one reference and recurse into the
                # children exactly once.
                incoming[id(node)] = 1
                for child in node[1]:
                    count_incoming(child)
            else:
                incoming[id(node)] += 1
    for node in dafsa:
        count_incoming(node)
    # Each root was counted once for appearing in dafsa itself; remove
    # that count so true roots end up with in-degree zero.
    for node in dafsa:
        incoming[id(node)] -= 1
    waiting = [node for node in dafsa if incoming[id(node)] == 0]
    nodes = []
    # Kahn's algorithm: emit zero in-degree nodes, decrementing children.
    while waiting:
        node = waiting.pop()
        assert incoming[id(node)] == 0
        nodes.append(node)
        for child in node[1]:
            if child:
                incoming[id(child)] -= 1
                if incoming[id(child)] == 0:
                    waiting.append(child)
    return nodes
|
def group_labels_func(labels, preds, group_keys):
    """Divide labels and preds into groups according to values in group_keys.

    Args:
        labels (list): ground truth label list.
        preds (list): prediction score list.
        group_keys (list): group key list.

    Returns:
        all_labels: labels after group.
        all_preds: preds after group.
    """
    all_keys = list(set(group_keys))
    group_labels = {key: [] for key in all_keys}
    group_preds = {key: [] for key in all_keys}
    for label, pred, key in zip(labels, preds, group_keys):
        group_labels[key].append(label)
        group_preds[key].append(pred)
    all_labels = [group_labels[key] for key in all_keys]
    all_preds = [group_preds[key] for key in all_keys]
    return all_labels, all_preds
|
def gc(seq):
    """ Return the GC content of as an int

    >>> x = tuple('TTTTTATGGAGGTATTGAGAACGTAAGATGTTTGGATAT')
    >>> gc(x)
    30
    """
    gc_total = seq.count('G') + seq.count('C')
    return int(gc_total / len(seq) * 100)
|
def _consume_single_get(response_iterator):
    """Consume a gRPC stream that should contain a single response.

    The stream corresponds to a ``BatchGetDocuments`` request made for a
    single document.

    Args:
        response_iterator: streaming iterator returned from a
            ``BatchGetDocuments`` request.

    Returns:
        The single "get" response in the batch.

    Raises:
        ValueError: If anything other than exactly one response is returned.
    """
    # Calling list() drains the whole stream.
    all_responses = list(response_iterator)
    if len(all_responses) == 1:
        return all_responses[0]
    raise ValueError(
        "Unexpected response from `BatchGetDocumentsResponse`",
        all_responses,
        "Expected only one result",
    )
|
def zigzag(value: int) -> int:
    """Zig-zag encode a parameter to turn signed ints into unsigned ints.

    Maps 0, -1, 1, -2, 2, ... onto 0, 1, 2, 3, 4, ... as required by
    Geometry commands that take parameters; the parameter count is
    (number_of_commands * number_of_arguments). See
    https://developers.google.com/protocol-buffers/docs/encoding#types

    Args:
        value: the integer value to encode
    Returns:
        The encoded representation of the value.
    """
    doubled = value << 1
    sign_mask = value >> 31  # arithmetic shift: 0 or -1 for 32-bit range
    return doubled ^ sign_mask
|
def _get_user_id_from_access_token(access_token):
    """
    Get the user's integer identifier from the token's ``sub`` claim.

    Returns None when the claim is present but not an integer; a missing
    claim is returned as-is (None).
    """
    raw_sub = access_token.get("sub")
    if not raw_sub:
        return raw_sub
    try:
        return int(raw_sub)
    except ValueError:
        # Non-integer sub is an issue: fence should only issue access
        # tokens with the user's id as the sub field.
        return None
|
def get_class_bases(klass):
    """Base classes of *klass*, excluding a sole ``object`` base."""
    bases = klass.__bases__
    if bases == (object,):
        return []
    return list(bases)
|
def happiness_detect(info):
    """Checks to see if a smiley is in the message."""
    # BUG FIX: the original list was missing a comma between "(:" and
    # "xD"; Python's implicit string-literal concatenation silently fused
    # them into the single entry "(:xD", so neither emoticon matched.
    smileys = (":)", ":D", "C:", "=D", "=)", "C=", "(=", "(:", "xD",
               ":p", ";p", "=p", ":(", "D:", "=(", "D=", "):", ")=",
               "=C", ":C", ":P")
    for emotion in smileys:
        if emotion in info["message"]:
            return True
    return False
|
def list_to_string(list):
    """
    Converts a list of strings to a single space-separated string.

    :param list: List of strings to convert. (Parameter name kept for
        backward compatibility even though it shadows the builtin.)
    :return: String
    """
    # str.join is O(n) vs. the old quadratic += concatenation loop, and
    # yields the identical stripped result.
    return " ".join(list).strip()
|
def to_bash_variable(param: str) -> str:
    """
    Convert a command variable into a bash variable name
    (upper-case, dashes replaced by underscores).
    """
    return param.replace('-', '_').upper()
|
def get_db_img_name(img_name, processing):
    """Creates image name given processing type.

    Args:
        img_name (str): image name, e.g. 'photo.png'
        processing (str): processing applied, inserted before the extension

    Returns:
        str: Created image name, e.g. 'photo_gray.png'
    """
    # BUG FIX: rsplit splits only at the final dot, so names containing
    # dots (e.g. 'a.b.png') work; a bare split('.') raised ValueError.
    img_name, filetype = img_name.rsplit('.', 1)
    return img_name + processing + "." + filetype
|
def get_provenance_record(caption):
    """Create a provenance record describing the diagnostic data and plot."""
    record = {'caption': caption}
    record['statistics'] = ['mean', 'diff']
    record['domains'] = ['global']
    record['authors'] = ['schlund_manuel']
    record['references'] = ['gregory04grl']
    record['realms'] = ['atmos']
    record['themes'] = ['phys']
    return record
|
def snake2camel(text):
    """Convert snake case to camel case.

    Assumes valid snake-case input (a hybrid of snake and camel case
    would need preprocessing first). A leading underscore is dropped
    without capitalising the first kept letter.

    Parameters
    ----------
    text: str
        Snake case string, e.g. vader_sentiment_score.

    Returns
    -------
    str: `text` converted to camel case, e.g. vaderSentimentScore.
    """
    out = []
    previous = ''
    for ch in text:
        if ch == '_':
            previous = ch
            continue
        # Capitalise after '_' except at the very start of the output.
        out.append(ch.upper() if (previous == '_' and out) else ch)
        previous = ch
    return ''.join(out)
|
def top_n(data, n=2):
    """
    Indices of the *n* largest values, returned in ascending index order.

    # Arguments
        data {list}: [0.3,0.1,0.5,0.01]
    # Returns
        mask {list}: [0,2]
    """
    # Pair each value with its index, rank by value (descending, stable).
    ranked = sorted(enumerate(data), key=lambda pair: pair[1], reverse=True)
    top_indices = [idx for idx, _ in ranked[:n]]
    return sorted(top_indices)
|
def serialize_list(items):
    """Serializes a list of SQLAlchemy objects, exposing their attributes.

    :param items: List of objects that inherit from Mixin (or None)
    :returns: List of dictionaries (empty for falsy input)
    """
    if not items:
        # Covers both None and the empty list.
        return []
    return [item.to_dict() for item in items]
|
def minmax(minValue, maxValue, value):
    """returns the value if it lies in the range [minValue, maxValue], the range borders otherwise"""
    capped = min(maxValue, value)
    return max(minValue, capped)
|
def to_c_type(bit_width):
    """Nearest <stdint.h> type (or bool) that can hold *bit_width* bits."""
    thresholds = (
        (32, "std::uint64_t"),
        (16, "std::uint32_t"),
        (8, "std::uint16_t"),
        (1, "std::uint8_t"),
    )
    for minimum, type_name in thresholds:
        if bit_width > minimum:
            return type_name
    return "bool"
|
def selection_sort(some_list):
    """
    In-place selection sort; see
    https://en.wikipedia.org/wiki/Selection_sort

    The list is split into a sorted prefix and an unsorted tail: each
    pass finds the minimum of the tail and swaps it to the prefix's end.
    O(N^2). Returns (iteration count, the sorted list).
    """
    iters = 0
    length = len(some_list)
    for boundary in range(length - 1):
        iters += 1
        smallest = boundary  # reset the minimum for each pass
        for candidate in range(boundary + 1, length):
            iters += 1
            if some_list[candidate] < some_list[smallest]:
                smallest = candidate
        if smallest != boundary:
            some_list[boundary], some_list[smallest] = (
                some_list[smallest], some_list[boundary])
    return iters, some_list
|
def factorial(n):
    """Iteratively compute n! (returns 1 for n <= 0)."""
    result = 1
    for value in range(2, n + 1):
        result *= value
    return result
|
def phase_increment(f_out, phase_bits, f_clk):
    """
    Phase increment for a DDS accumulator to produce the desired frequency.

    :param f_out: desired output frequency
    :param phase_bits: width of the phase accumulator in bits
    :param f_clk: clock frequency
    :return: integer phase increment (truncated toward zero)
    """
    accumulator_span = 2 ** phase_bits
    return int(f_out * accumulator_span / f_clk)
|
def translate_from_farenheit_to_celsius(farenheit: float) -> float:
    """
    Translate from Farenheit unit to Celsius unit
    :param farenheit: the value to translate
    :return: the translated value
    """
    shifted = farenheit - 32
    return shifted * 5. / 9.
|
def get_date(csl_item):
    """
    Return date in iso-like format, such as:
    2019
    2019-05
    2019-05-01
    (None when the item has no parsable issued date.)
    """
    try:
        parts = csl_item['issued']['date-parts'][0]
        return '-'.join('%02d' % int(part) for part in parts)
    except Exception:
        return None
|
def fake_query_tsh_sample_rate(tsh_id):
    """Stand-in for a future get/fetch (or class method); only the fake
    id 'tshes-44' is recognised."""
    if tsh_id != 'tshes-44':
        raise Exception('using fake query/get/fetch -- so only a fake, tshes-44, works')
    return 250.0
|
def comment_string_with_block(string, block_comment):
    """Wrap *string* in the (begin, end) block-comment pair, first
    stripping any embedded delimiters that would break the comment."""
    if not string:
        return string
    begin_mark, end_mark = block_comment
    cleaned = string.replace(begin_mark, '').replace(end_mark, '')
    return '{} {} {}'.format(begin_mark, cleaned, end_mark)
|
def get_count_words(text):
    """Map each whitespace-separated word in *text* to its occurrence count."""
    counts = {}
    # split() with no args handles newlines and runs of spaces alike.
    for word in text.split():
        counts[word] = counts.get(word, 0) + 1
    return counts
|
def IndexLstToIndexStr(index):
    """
    IndexLstToIndexStr
    ==================
    Transform a flat list of indexes into a subscript string,
    e.g. [1, 1, 2] -> '[1][1][2]'.
    @param index: a flat list of indexes
    @return: a string
    """
    if not index:
        return '[]'  # preserve the old behaviour for an empty list
    # BUG FIX: the old str(list).replace(',', '][') kept repr()'s spaces,
    # yielding '[1][ 1][ 2]' instead of '[1][1][2]'.
    return ''.join('[%s]' % i for i in index)
|
def kineticEnergy(I, w):
    """
    Rotational kinetic energy K = (1/2) * I * w**2.

    variables:
        I = moment of inertia
        w = magnitude of angular velocity (w_z)
    """
    return 0.5 * I * w ** 2
|
def get_dup_applicability_payload(file_token, device_ids=None, group_ids=None, baseline_ids=None):
    """Returns the DUP applicability JSON payload.

    Exactly one of device_ids / group_ids / baseline_ids is honoured, in
    that order of precedence; the other report lists remain empty.
    """
    payload = {
        'SingleUpdateReportBaseline': [],
        'SingleUpdateReportGroup': [],
        'SingleUpdateReportTargets': [],
        'SingleUpdateReportFileToken': file_token,
    }
    if device_ids is not None:
        payload['SingleUpdateReportTargets'] = [int(i) for i in device_ids]
    elif group_ids is not None:
        payload['SingleUpdateReportGroup'] = [int(i) for i in group_ids]
    elif baseline_ids is not None:
        payload['SingleUpdateReportBaseline'] = [int(i) for i in baseline_ids]
    return payload
|
def which_recovered(recovered_names, all_names):
    """Produce dictionary of recovered 'TRUE'/'FALSE' flags, keyed by name."""
    return {
        name: 'TRUE' if name in recovered_names else 'FALSE'
        for name in all_names
    }
|
def _escapeWildCard(klassContent):
    """Strip the ':*' wildcard marker from a class-content string.

    >>> _escapeWildCard('')
    ''
    >>> _escapeWildCard(':*')
    ''
    >>> _escapeWildCard(':Object')
    ':Object'
    """
    cleaned = klassContent.replace(':*', '')
    return cleaned
|
def enhance(desc, key, value):
    """Enhances a description text.

    Appends "key: value" on a new line when the description already has
    content; a None or empty description is replaced outright. A None
    value leaves the description untouched.
    """
    if value is None:
        return desc
    addition = '%s: %s' % (key, value)
    if desc is not None and desc != '':
        return desc + '\n' + addition
    return addition
|
def remove_u(input_string):
    """
    Convert unicode text: escape each whitespace-separated word and, when
    the escaped form contains a ``\\u`` sequence, keep only the part after
    the first such marker.
    """
    converted = []
    for word in input_string.split():
        escaped = word.encode("unicode-escape").decode("utf-8", "strict")
        if r"\u" in escaped:
            escaped = escaped.split("\\u")[1]
        converted.append(escaped)
    return " ".join(converted)
|
def _normalizePath(path):
    """Normalize a path by resolving '.' and '..' path elements.

    Raises ValueError on empty segments and IndexError when '..' would
    climb above the path's start.
    """
    # Special case: the root is already its own normal form.
    if path == u'/':
        return path
    prefix = u''
    if path.startswith('/'):
        prefix = u'/'
        path = path[1:]
    stack = []
    for piece in path.split(u'/'):
        if not piece:
            raise ValueError('path must not contain empty segments: %s'
                             % path)
        if piece == u'.':
            continue
        if piece == u'..':
            stack.pop()  # raises IndexError if there is nothing to pop
        else:
            stack.append(piece)
    return prefix + u'/'.join(stack)
|
def include_file(file_path: str, ignore_py: bool):
    """Decide whether *file_path* counts as a test file for this run.

    file_path -- path of the file within the tests directory
    ignore_py -- when True, files ending in .py are not treated as tests
    """
    if file_path.endswith(".test"):
        return True
    return file_path.endswith(".py") and not ignore_py
|
def chrom_to_number(chrom):
    """
    Converts chromosome based position, such as chr1,chr2
    etc,
    to 1,2

    Inputs without a 'chr' marker are returned unchanged instead of
    raising IndexError (the old split('chr')[1] crashed on e.g. '1').
    """
    # partition() splits on the first occurrence, matching the old
    # split('chr')[1] behaviour whenever the marker is present.
    _, marker, tail = chrom.partition('chr')
    return tail if marker else chrom
|
def Align16(i):
    """Round up to the nearest multiple of 16. See unit tests."""
    # Add 15 and clear the low four bits — same result as ((i-1)|15)+1.
    return (i + 15) & ~15
|
def get_commondir(dirlist):
    """Figure out the common portion/parent (commondir) of all the paths
    in DIRLIST and return a tuple consisting of commondir, dirlist. If
    a commondir is found, the dirlist returned is rooted in that
    commondir. If no commondir is found, dirlist is returned unchanged,
    and commondir is the empty string."""
    # A single path, or an explicit '/' entry, has no useful common parent.
    if len(dirlist) < 2 or '/' in dirlist:
        return '', dirlist

    # Shrink the candidate prefix against every remaining path.
    common = dirlist[0].split('/')
    for path in dirlist[1:]:
        parts = path.split('/')
        matched = 0
        while (matched < len(common) and matched < len(parts)
               and common[matched] == parts[matched]):
            matched += 1
        common = common[:matched]
    commondir = '/'.join(common)

    if not commondir:
        # nothing in common, so leave the list of directories untouched
        return commondir, dirlist

    # strip the common portion (plus the joining slash) from each entry;
    # a path identical to commondir becomes '.'
    strip = len(commondir) + 1
    newdirs = ['.' if d == commondir else d[strip:] for d in dirlist]
    return commondir, newdirs
|
def elementwise_absolute_error(true_val, pred_val):
    """Absolute error between a single true and predicted value.

    Parameters
    ----------
    true_val : float
        True value.
    pred_val : float
        Predicted value.

    Returns
    -------
    residual : float
        Absolute error, |true_val - pred_val|
    """
    residual = true_val - pred_val
    return abs(residual)
|
def v4_add(matrix1, matrix2):
    """Add corresponding numbers in given 2-D matrices.

    Built entirely from nested comprehensions, with no loop variables
    surviving the expression.
    """
    return [
        [x + y for x, y in zip(row_a, row_b)]
        for row_a, row_b in zip(matrix1, matrix2)
    ]
|
def wrap_angle(angle):
    """Normalize *angle* (degrees) into the half-open range [0, 360).

    The previous add/subtract version only corrected a single wrap, so
    inputs such as 720 or -400 escaped the range; modulo handles any
    magnitude in one step.
    """
    return angle % 360
|
def split_time(time):
    """Split time in seconds into hours, minutes, and seconds.

    Parameters
    ----------
    time : array, shape (n_steps,)
        Seconds since start

    Returns
    -------
    hours : array, shape (n_steps,)
        Hours since start
    minutes : array, shape (n_steps,)
        Minutes since start
    seconds : array, shape (n_steps,)
        Seconds since start
    """
    # divmod pairs the floor division with the matching remainder.
    hours, remainder = divmod(time, 3600.0)
    minutes, seconds = divmod(remainder, 60.0)
    return hours, minutes, seconds
|
def mel_to_hz(mel):
    """Convert mel-scale values to Hz.

    From Young et al. "The HTK book", Chapter 5.4.
    """
    exponent = mel / 2595.0
    return 700.0 * (10.0 ** exponent - 1.0)
|
def is_power2(num):
    """
    States if a number is a power of two (Author: A.Polino)
    """
    if num == 0:
        return False
    # A power of two has exactly one bit set, so num & (num-1) clears it.
    return (num & (num - 1)) == 0
|
def __get_accuracy(predictor, test_set, evaluate):
    """Calculates the accuracy of a given classification predictor using
    the given test set.

    :param predictor: Predictor to test.
    :param test_set: Test set to use for testing. Each point is a sequence
        whose last element is the expected output and whose remaining
        elements form the input features.
    :param evaluate: Function that is used to evaluate the predictor.
        Should take as arguments the predictor to evaluate and the input
        and returns corresponding prediction.
    :return: Measured accuracy of the predictor as a fraction in [0, 1].
    :raises ZeroDivisionError: if *test_set* is empty."""
    # 'features' avoids shadowing the builtin 'input', which the old
    # implementation did.
    correct_count = sum(
        1 for point in test_set
        if evaluate(predictor, point[0:-1]) == point[-1]
    )
    return correct_count / len(test_set)
|
def sample(population, k):
    """Return a *k* length list of unique elements chosen from the population sequence.
    Used for random sampling without replacement.

    The previous implementation ignored *k* (and randomness) entirely and
    always returned ``[population[0]]``; delegate to :func:`random.sample`
    so the documented contract actually holds.

    :raises ValueError: if k is negative or exceeds the population size.
    """
    import random

    return random.sample(list(population), k)
|
def vni_id_is_valid(vni):
    """Check if the vni id is in acceptable range (between 1 and 2^24)
    """
    return 1 <= vni <= 16777215
|
def calc_supply_age(
    prev_cs, cs, transferred_value, prev_age_ms, ms_since_prev_block
) -> int:
    """
    Calculate mean supply age in ms from given args.

    Implements a(n) = [ (s(n-1) - x(n)) * (a(n-1) + t(n)) ] / s(n) where:
    - s(n): circulating supply at height n (prev_cs = s(n-1), cs = s(n))
    - a(n): mean supply age at height n (prev_age_ms = a(n-1))
    - x(n): transferred ERG in block n (transferred_value), x(n) <= s(n-1)
    - t(n): time between current and previous block (ms_since_prev_block)
    Base case at height 1: a(1) = 0 and x(1) = 0.

    NOTE(review): despite the ``-> int`` annotation, true division makes
    this return a float — confirm whether callers expect truncation.
    """
    untouched_supply = prev_cs - transferred_value
    aged_ms = prev_age_ms + ms_since_prev_block
    return untouched_supply * aged_ms / cs
|
def parse_string_to_list(line):
    """Parse a line in the csv format into a list of strings.

    Newline/carriage-return characters are treated as plain whitespace,
    and blank fields are dropped.
    """
    cleaned = line.replace('\n', ' ').replace('\r', ' ')
    stripped = (piece.strip() for piece in cleaned.split(','))
    return [piece for piece in stripped if piece]
|
def numCreator(a, b, c, d):
    """convert the random numbers into a 4 digit int"""
    # Weight each digit by its decimal place and sum the results.
    weighted = zip((a, b, c, d), (1000, 100, 10, 1))
    return int(sum(digit * weight for digit, weight in weighted))
|
def is_sequence_of_gids(seq):
    """Checks whether the argument is a potentially valid sequence of
    GIDs (non-negative integers).

    Parameters
    ----------
    seq : object
        Object to check

    Returns
    -------
    bool:
        True if object is a potentially valid sequence of GIDs
        (vacuously True for an empty sequence; note bools pass the
        isinstance(int) check, as in the original)
    """
    for element in seq:
        if not isinstance(element, int) or element < 0:
            return False
    return True
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.