content
stringlengths 42
6.51k
|
---|
def get_legacy_msg_type(identifier):
    """
    Convert an SBP spec identifier to the message type #define identifier according to the legacy API
    Only works for real messages, will assert when called for any other type
    """
    assert identifier.startswith("MSG_")
    return "SBP_{}".format(identifier)
|
def greater_version(versionA, versionB):
    """
    Compare two dotted version strings and return the greater one.

    Falls back to whichever argument is truthy when either is None
    (AttributeError) or contains a non-numeric component (ValueError).

    Returns:
        str: the greater of the two versions (versionA on an exact tie).
    """
    try:
        list_a = versionA.split('.')
        list_b = versionB.split('.')
    except AttributeError:
        return versionA or versionB  # either A or B is None
    try:
        # Compare component-wise. zip() stops at the shorter version, so a
        # longer version with an equal prefix is decided by length below —
        # the original indexed list_b by list_a's index, which raised
        # IndexError when A had more components, and returned the *shorter*
        # version when A was an equal prefix of B (e.g. "1.2" vs "1.2.3").
        for digit_a, digit_b in zip(list_a, list_b):
            if int(digit_a) > int(digit_b):
                return versionA
            if int(digit_a) < int(digit_b):
                return versionB
    except ValueError:
        return versionA or versionB  # either A or B is ''
    # Equal prefix: the version with more components is greater.
    return versionA if len(list_a) >= len(list_b) else versionB
|
def get_term_frequency(hash_abundance):
    """Convert raw term abundance to relative frequency in document.

    Args:
        hash_abundance (dict): mapping of term -> raw count.

    Returns:
        dict: mapping of term -> count / total count. When the total is
        zero (empty dict, or all counts zero) every frequency is 0 instead
        of raising ZeroDivisionError.
    """
    total_counts = sum(hash_abundance.values())
    if not total_counts:
        # Avoid division by zero for empty/all-zero input.
        return {k: 0 for k in hash_abundance}
    return {k: v / total_counts for k, v in hash_abundance.items()}
|
def getattr_nested(obj, attr):
    """
    Resolve a dotted attribute path against an object.

    e.g. getattr_nested(obj, 'x.y.z') is equivalent to obj.x.y.z.
    Returns False as soon as any attribute in the chain is missing.
    """
    current = obj
    for name in attr.split('.'):
        if not hasattr(current, name):
            return False
        current = getattr(current, name)
    return current
|
def sdbm(key):
    """
    Bit-scrambling string hash taken from the sdbm database library
    (a public-domain reimplementation of ndbm).
    """
    digest = 0
    for ch in str(key):
        # Classic sdbm recurrence: h = c + h*65599 (expressed with shifts).
        digest = ord(ch) + (digest << 6) + (digest << 16) - digest
    return digest
|
def _write_outpt(parameters):
"""Write OUTPT block data."""
outpt = parameters["react"]["output"]
values = [outpt["format"]]
values += [x for x in outpt["shape"][:3]] if "shape" in outpt else []
out = ["{}\n".format(" ".join(str(x) for x in values))]
return out
|
def DetermineServiceFromUrl(url):
    """Takes a DFA service's URL and returns the service name.
    Args:
      url: string The DFA service's URL.
    Returns:
      string The name of the service this URL points to (the last path segment).
    """
    return url.rpartition('/')[2]
|
def convert_int_bool(t_input):
    """Convert a bool to an int, or an int / int-like string to a bool.

    Returns None for strings that are not integers and for any other type.
    """
    # bool must be tested before int: bool is a subclass of int.
    if isinstance(t_input, bool):
        return 1 if t_input else 0
    if isinstance(t_input, str):
        try:
            return int(t_input) != 0
        except ValueError:
            return None
    if isinstance(t_input, int):
        return t_input != 0
    return None
|
def _parse_ctime(ctime_lines, analyser_index=0):
"""
:param ctime_lines: e.g ``@CTIME %f %f %f``, first word ``@CTIME`` optional
When multiple CTIME lines are present in a scan header, this argument
is a concatenation of them separated by a ``\\n`` character.
:param analyser_index: MCA device/analyser index, when multiple devices
are in a scan.
:return: (preset_time, live_time, elapsed_time)
"""
ctime_lines = ctime_lines.lstrip("@CTIME ")
ctimes_lines_list = ctime_lines.split("\n")
if len(ctimes_lines_list) == 1:
# single @CTIME line for all devices
ctime_line = ctimes_lines_list[0]
else:
ctime_line = ctimes_lines_list[analyser_index]
if not len(ctime_line.split()) == 3:
raise ValueError("Incorrect format for @CTIME header line " +
'(expected "@CTIME %f %f %f").')
return list(map(float, ctime_line.split()))
|
def color_list():
    """Return the fixed palette of named colors used for plotting."""
    return [
        'darkturquoise', 'orange', 'firebrick', 'darkviolet', 'khaki',
        'olive', 'darkseagreen', 'darkcyan', 'indianred', 'darkblue',
        'orchid', 'gainsboro', 'mediumseagreen', 'lightgray',
        'mediumturquoise', 'lemonchiffon', 'cadetblue', 'lightyellow',
        'lavenderblush', 'coral', 'purple', 'aqua', 'mediumslateblue',
        'darkorange', 'mediumaquamarine', 'darksalmon', 'beige',
    ]
|
def isHappy(n):
    """
    Determine whether ``n`` is a happy number: repeatedly replace the number
    by the sum of the squares of its digits; happy numbers reach 1, unhappy
    numbers fall into a cycle that never contains 1.

    The original carried a redundant ``total == 100`` shortcut (100 -> 1
    anyway) and an always-true ``1 not in result`` guard; both removed.

    :type n: int
    :rtype: bool
    """
    seen = set()
    total = n
    while total != 1:
        total = sum(int(d) ** 2 for d in str(total))
        if total in seen:
            # Entered a cycle that never reaches 1.
            return False
        seen.add(total)
    return True
|
def Mceta(m1, m2):
    """Compute chirp mass and symmetric mass ratio from component masses."""
    product = m1 * m2
    total = m1 + m2
    # Chirp mass Mc = (m1 m2)^(3/5) / (m1 + m2)^(1/5)
    Mc = product**(3./5.) * total**(-1./5.)
    # Symmetric mass ratio eta = m1 m2 / (m1 + m2)^2
    eta = product / total / total
    return Mc, eta
|
def is_command(attribute):
    """
    Return True when the attribute name denotes a command method (``do_*``).
    """
    prefix = 'do_'
    return attribute[:len(prefix)] == prefix
|
def process_prefixes(images, prefixes):
    """
    Filter ``images`` in place, keeping only those whose repository name
    contains at least one of the requested prefixes.

    ``prefixes`` arrives from argparse as a nested list
    (``[['prefix1'], ['prefix2']]``) and is flattened first. The same list
    object is mutated via slice assignment and returned.
    """
    wanted = [p for group in prefixes for p in group]
    images[:] = [
        img for img in images
        if any(prefix in img['repository'] for prefix in wanted)
    ]
    return images
|
def inc_args(s):
    """Process a space-separated string of includes into a set of them."""
    return {token.strip() for token in s.split(" ") if token.strip()}
|
def hasIntersectionCost(criterion, frRow, exRow):
    """Returns 1 if the two do not share an answer on the specified question, else 0"""
    frQ = criterion['frQ']
    exQ = criterion['exQ']
    # "single*" flags mean the cell holds one answer; otherwise it's a
    # comma-separated list of answers.
    if criterion.get('singleFr'):
        frSet = {frRow[frQ]}
    else:
        frSet = set(frRow[frQ].split(','))
    if criterion.get('singleEx'):
        exSet = {exRow[exQ]}
    else:
        exSet = set(exRow[exQ].split(','))
    return 1 if frSet.isdisjoint(exSet) else 0
|
def is_subclass(T1, T2):
    """issubclass() that returns False instead of raising on non-class input."""
    try:
        result = issubclass(T1, T2)
    except TypeError:
        result = False
    return result
|
def generate_csv_url(sheet_url):
    """
    Utility function for generating csv URL from a google sheets link
    This function generates a link to a csv file from a link used to edit a google sheets file.
    The gid must be present in the URL.
    Parameters
    ----------
    sheet_url : str
        The URL for the google sheet file
    Returns
    -------
    str
        URL for the csv file
    Raises
    ------
    TypeError
        If sheet_url is not a string.
    ValueError
        If the URL does not contain the 'edit#gid' phrase.
    """
    # Guard clauses + isinstance (subclass-friendly, idiomatic) instead of
    # `type(...) == str` and nested if/else.
    if not isinstance(sheet_url, str):
        raise TypeError("sheet_url must be a string")
    if "edit#gid" not in sheet_url:
        raise ValueError("sheet_url must contain 'edit#gid' phrase")
    return sheet_url.replace("edit#gid", "export?format=csv&gid")
|
def date_text2num(text):
    """
    Transforms the text into the corresponding number and converts to int.
    Special words map to day offsets: '' -> -1, 'today' -> 0, 'tomorrow' -> 1.
    @param text: string date to fix.
    @return date: int date fixed.
    """
    aliases = {'': '-1', 'today': '0', 'tomorrow': '1'}
    return int(aliases.get(text, text))
|
def grain_to_dry_malt_weight(malt):
    """
    Convert a grain weight to the equivalent DME (dry malt extract) weight.

    :param float malt: Weight of Grain
    :return: DME Weight (3/5 of the grain weight)
    :rtype: float
    """
    return (malt * 3.0) / 5.0
|
def get_words_lists(all_paragraphs):
    """
    Convert paragraphs into lists of word strings, one list per paragraph.

    Each word's text is the concatenation of its symbols' ``text`` fields.
    """
    return [
        [''.join(symbol.text for symbol in word.symbols) for word in paragraph]
        for paragraph in all_paragraphs
    ]
|
def _GetNextString(s):
"""Get smallest string that larger than all strings prefix with s."""
return s[:-1] + chr(ord(s[-1]) + 1)
|
def print_seconds_nice(seconds, prefix=""):
    """
    Static method for interval print in human readable format
    :param seconds: seconds count
    :param prefix: prefix for print
    :return: string which contains human readable representation of interval
    """
    # Under a minute: print seconds exactly as given (no rounding).
    if seconds < 60:
        return "{}{}s".format(prefix, seconds)
    minutes = seconds // 60
    seconds -= minutes * 60
    # Under an hour: minutes plus seconds rounded to 2 decimals.
    if minutes < 60:
        seconds = round(seconds, 2)
        return "{}{}m {}s".format(prefix, minutes, seconds)
    hours = minutes // 60
    minutes -= hours * 60
    # Under a day: hours, minutes (int-cast), seconds.
    if hours < 24:
        minutes = int(minutes)
        seconds = round(seconds, 2)
        return "{}{}h {}m {}s".format(prefix, hours, minutes, seconds)
    # A day or more: days, hours, minutes, seconds.
    # NOTE(review): unlike the <24h branch, minutes is not int()-cast here,
    # so float inputs can print e.g. "3.0m" — confirm whether intended.
    days = hours // 24
    hours -= days * 24
    seconds = round(seconds, 2)
    return "{}{}d {}h {}m {}s".format(prefix, days, hours, minutes, seconds)
|
def iam(target, mixture):
    """Compute the ideal amplitude mask: the element-wise ratio target/mixture."""
    return target / mixture
|
def sum_responses(stimuli, responses, during_cs=None, during_ctx=None, during_us=None):
    """Calculate sum of responses for a single trial, optionally only counting timesteps occurring
    during the specified `during_cs`, `during_ctx`, or `during_us` values. For `during_cs`, the `cs` must match all provided values (will not count if it is a subset)."""
    # stimuli[timestep] is a (cs, ctx, us) triple; responses[timestep] is
    # the response value at that timestep.
    # NOTE(review): cs appears to be a sequence of (stimulus, magnitude)
    # pairs — confirm against callers.
    def include(stimulus):
        cs, ctx, us = stimulus
        result = True
        if during_cs is not None:
            # Require the same number of CS entries AND every entry listed
            # in during_cs with a positive magnitude (no subsets).
            result &= len(cs) == len(during_cs) and all(stim in during_cs and mag > 0 for stim, mag in cs)
        if during_ctx is not None:
            result &= ctx == during_ctx
        if during_us is not None:
            # Truthy during_us: require an active US; falsy: require none.
            result &= us > 0 if during_us else us == 0
        return result
    return sum(responses[i] for i, stimulus in enumerate(stimuli) if include(stimulus))
|
def payload_1(kernels, time_1, time_system, time_format, sclk_id):
    """Build the request payload from WGC API example 1 (time conversion to UTC calendar)."""
    payload = {
        "kernels": [{"type": "KERNEL_SET", "id": kernels}],
        "times": [time_1],
        "calculationType": "TIME_CONVERSION",
        "timeSystem": time_system,
        "timeFormat": time_format,
        "sclkId": sclk_id,
        "outputTimeSystem": "UTC",
        "outputTimeFormat": "CALENDAR",
    }
    return payload
|
def get_smallest_value_greater_or_equal_to(iter, value):
    """Returns the smallest element from the list or range that is greater or
    equal to the value. Returns None if not found.

    Raises:
        ValueError: if ``iter`` is neither a list nor a range.
    """
    # isinstance is the idiomatic (and subclass-friendly) check; the old
    # `type(iter) == list` comparison and the bogus `list or range`
    # annotation (which evaluates to just `list`) are gone.
    if isinstance(iter, list):
        candidates = [x for x in iter if x >= value]
        return min(candidates) if candidates else None
    elif isinstance(iter, range):
        if value in range(iter.start, iter.stop):
            # Value lies within the range: round up to the next step-aligned value.
            return value + (iter.step - ((value - iter.start) % iter.step)) % iter.step
        elif value < iter.start:
            # Value is below the range: the start is the smallest candidate.
            return iter.start
        else:
            # Value is beyond the range.
            return None
    else:
        raise ValueError("iter must be of type list or range")
|
def is_int(s) -> bool:
    """Check if the value can be interpreted as an Integer.

    Returns False for None and for any value that int() cannot convert —
    including non-numeric types, which previously leaked a TypeError
    (e.g. is_int([1]) crashed instead of returning False).
    """
    if s is None:
        return False
    try:
        int(s)
    except (ValueError, TypeError):
        return False
    return True
|
def accepted_formats(header):
    """
    Returns a list of accepted formats in HTTP request header
    :type header: tornado.web.RequestHandler.request.headers
    :param header: HTTP Request Header
    """
    try:
        # Drop any ";q=..." parameters and surrounding whitespace.
        return [part.split(";")[0].strip() for part in header.split(",")]
    except AttributeError:  # No Accept: header?
        return []
|
def reverse_amount(amount, account):
    """
    get counterparty amount: negate the value and attach the given issuer
    :param amount: dict with 'value' (numeric string) and 'currency' keys
    :param account: issuer for the reversed amount
    :return: {{value: string, currency: *, issuer: *}}
    """
    # The original had an unreachable `pass` after this return; removed.
    return {
        'value': str(-float(amount['value'])),
        'currency': amount['currency'],
        'issuer': account
    }
|
def convert_tuple_to_8_int(tuple_date):
    """ Converts a date tuple (Y,M,D) to 8-digit integer date (e.g. 20161231).
    """
    year, month, day = tuple_date
    return int('{}{:02}{:02}'.format(year, month, day))
|
def countEdgePixels(pixels_dict, rows, cols):
    """ Count the number of edge pixels in the cluster.

    A pixel is an edge pixel when at least one of its 8 neighbours is not
    part of the cluster. Pixel keys encode position as X = y * cols + x
    (as shown by the nX computation below).
    """
    # Offsets of the 8 neighbouring pixels.
    dir_x = [-1, -1, 0, 1, 1, 1, 0, -1]
    dir_y = [ 0, 1, 1, 1, 0, -1, -1, -1]
    ## The number of edge pixels in the cluster.
    num_edge_pixels = 0
    # Loop over the pixels in the cluster.
    for X in pixels_dict:
        # Decode the pixel coordinates. The original used a hard-coded 256
        # and true division (X / 256), which yields a float on Python 3 and
        # produced float nX keys that never matched the dict, making every
        # pixel look like an edge pixel. Use `cols` and integer division to
        # match the X = y * cols + x encoding used for nX below.
        x = X % cols
        y = X // cols
        # Scan the 8 directions for a neighbour missing from the cluster.
        for direction in range(8):
            nx = x + dir_x[direction]
            ny = y + dir_y[direction]
            # The next pixel's X value.
            nX = ny * cols + nx
            if nX not in pixels_dict:
                num_edge_pixels += 1
                break
    return num_edge_pixels
|
def split_function_name(fn):
"""
Given a method, return a tuple containing its fully-qualified
class name and the method name.
"""
qualname = fn.__qualname__
if '.' in qualname:
class_name, fn_name = qualname.rsplit('.', 1)
class_name = '%s.%s' % (fn.__module__, class_name)
else:
class_name = fn.__module__
fn_name = qualname
return (class_name, fn_name)
|
def get_progress(context, scope):
    """ Returns the number of calls to callbacks registered in the specified `scope`. """
    progress = context.get("progress", {})
    return progress.get(scope)
|
def append_space(prompt):
    """Adds a space to the end of the given string if none is present."""
    return prompt if prompt.endswith(' ') else prompt + ' '
|
def _format_dimensions(dimensions):
"""
Formats a dictionary as a comma-delimited list of key=value tokens.
This was copied from docker-collectd-plugin.
"""
return ",".join(["=".join((key.replace(".", "_"), value)) for key, value in dimensions.items()])
|
def should_check_integrity(f):
    """Returns True if f should be checked for integrity."""
    # Skip bookkeeping files and anything hidden (dot-prefixed).
    skipped = ("README.md", "TRAINING_LOG", "checksum.md5", "data")
    if f in skipped:
        return False
    return not f.startswith(".")
|
def is_name_excluded(name, exclusions_list):
    """
    :param name: The name of the policy, role, user, or group
    :param exclusions_list: List of exclusions
    :return: True when the name matches an exclusion — exact match,
        prefix pattern ("Admin*"), or suffix pattern ("*Role") — all
        case-insensitive. False otherwise (the original implicitly
        returned None; now explicit).
    """
    for exclusion in exclusions_list:
        # Skip empty items
        if exclusion == "":
            continue
        if exclusion.lower() == name.lower():
            print(f"\tExcluded: {exclusion}")
            return True
        # ThePerfectManDoesntExi*
        if exclusion.endswith("*"):
            # Everything before the first '*' is the required prefix.
            prefix = exclusion[: exclusion.index("*")]
            if name.lower().startswith(prefix.lower()):
                print(f"\tExcluded prefix: {exclusion}")
                return True
        if exclusion.startswith("*"):
            # Everything after the last '*' is the required suffix.
            suffix = exclusion.split("*")[-1]
            if name.lower().endswith(suffix.lower()):
                print(f"\tExcluded suffix: {exclusion}")
                return True
    return False
|
def preprocess_location(location):
    """
    Strip surrounding whitespace from each comma-separated part of a farm
    location string, keeping the comma layout intact.
    """
    return ",".join(part.strip() for part in location.split(','))
|
def insertion_sort(arr):
    """
    Sort a list of integers in ascending order, in place, using insertion
    sort, and return the same list.

    Each element is shifted left past larger predecessors until it sits in
    its correct position within the sorted prefix.
    """
    for index in range(1, len(arr)):
        current = arr[index]
        pos = index
        while pos > 0 and arr[pos - 1] > current:
            arr[pos] = arr[pos - 1]
            pos -= 1
        arr[pos] = current
    return arr
|
def _next_break(primitive_boundaries, pos, expects):
"""(internal)
"""
for i in range(pos, len(primitive_boundaries)):
sb = primitive_boundaries[i][1]
if sb in expects:
return sb
return None
|
def _parse_qstat_state(qstat_out, job_id):
    """Parse "state" column from `qstat` output for given job_id
    Returns state for the *first* job matching job_id. Returns 'u' if
    `qstat` output is empty or job_id is not found.
    """
    if qstat_out.strip() == '':
        return 'u'
    lines = qstat_out.split('\n')
    # skip past header
    # NOTE(review): assumes a '---' separator line always follows the header
    # in non-empty output; if it is missing, pop() raises IndexError.
    while not lines.pop(0).startswith('---'):
        pass
    for line in lines:
        if line:
            # First five whitespace-separated columns: job-ID, prior, name,
            # user, state.
            job, prior, name, user, state = line.strip().split()[0:5]
            if int(job) == int(job_id):
                return state
    return 'u'
|
def strip_some_punct(s):
    """
    Return a string stripped from some leading and trailing punctuations.
    Falsy input (None, '') is returned unchanged.
    """
    if not s:
        return s
    # Symmetric strip of quotes/braces/dashes, then one-sided strips of
    # bracket/angle characters that only count on one end.
    stripped = s.strip(''','"}{-_:;&''')
    stripped = stripped.lstrip('.>)]')
    return stripped.rstrip('<([')
|
def CentralDiff(fx, x, h=0.001):
    """
    Approximate the derivative of ``fx`` at ``x`` using the central
    difference (fx(x+h) - fx(x-h)) / (2h).

    Parameters
    ----------
    fx : function
        A function defined as fx(x)
    x : float, list, numpy.ndarray
        The point(s) of function fx to compute the derivatives
    h : float, optional
        The step size

    Returns
    -------
    float, list, numpy.ndarray: The numerical derivatives of fx at x with
        the same size as x and the type determined by fx()
    """
    forward = fx(x + h)
    backward = fx(x - h)
    return (forward - backward) / h * 0.5
|
def MatchesAnyRegexp(flag, list_of_regexps):
    """Returns true if the first argument matches any regular expression in the
    given list.

    Uses any() with an `is not None` check — the original compared
    `search(...) != None`, which is unidiomatic (`is not None` is correct
    for None checks) and used a manual loop.
    """
    return any(regexp.search(flag) is not None for regexp in list_of_regexps)
|
def dark_id_to_date(dark_id):
    """
    Convert ``dark_id`` (YYYYDOY) to the corresponding DateTime 'date' format.
    :param dark_id: dark id (YYYYDOY)
    :returns: str in DateTime 'date' format ("YYYY:DOY")
    """
    year, doy = dark_id[:4], dark_id[4:]
    return '{}:{}'.format(year, doy)
|
def z_score(val, v_mean, v_std):
    """Calculate a z-score given a value, mean, and standard deviation.
    NOTE: The z_score() of a constant vector is 0
    """
    if v_std == 0:
        # Constant data has no spread; define its z-score as 0.
        return 0
    return (val - v_mean) / v_std
|
def lookupToString(lookup_dict):
    """
    Convert the lookup dict into a string.
    e.g.:
        {"field1": "a", "field2": "b"} -> "field1=a,field2=b"
    """
    pairs = ["%s=%s" % item for item in lookup_dict.items()]
    return ",".join(pairs)
|
def verts_equal(v0, v1, epsilon=1e-8):
    """
    Test whether two vertices are component-wise equal within ``epsilon``.
    WARNING: This is slower than ==, but it allows for a tolerance level of
    equality.
    """
    assert epsilon >= 0.0
    if len(v0) != len(v1):
        return False
    return all(abs(a - b) <= epsilon for a, b in zip(v0, v1))
|
def ris_DependentField_get(risTag, dependent_field_map):
    """
    Look up a RIS tag in the dependent-field map.

    params:
        risTag, str.
        dependent_field_map, dict mapping tags to their dependent-field data.
    return: a single-entry dict {risTag: data} when present, else {}.
    """
    if risTag in dependent_field_map:
        return {risTag: dependent_field_map[risTag]}
    return {}
|
def ensure_path(path):
    """ Check if a path exists. If not, create the necessary directories,
    but if the path includes a file, don't create the file"""
    import os, errno
    dir_path = os.path.dirname(path)
    if dir_path:  # a bare filename has no directory component to create
        try:
            os.makedirs(dir_path)
        except OSError as exception:
            # An already-existing directory is fine; re-raise anything else.
            if exception.errno != errno.EEXIST:
                raise
    return path
|
def summerE(n: int) -> int:
    """
    Sum of all multiples of 3 or 5 below ``n`` using the arithmetic series
    formula — no multiples are ever generated, thanks to math.
    """
    def series(divisor: int) -> int:
        # Sum of divisor * (1 + 2 + ... + terms) for multiples below n.
        terms = (n - 1) // divisor
        return divisor * (terms * (terms + 1)) // 2
    # Inclusion-exclusion: multiples of 15 were counted by both 3 and 5.
    return series(3) + series(5) - series(15)
|
def intToDigit(s):
    """
    intToDigit :: int -> str
    Convert an Int in the range 0..15 to the corresponding single digit Char.
    This function fails on other inputs, and generates lower-case hexadecimal
    digits.
    """
    if s > 15 or s < 0:
        raise ValueError("not a digit %s" % s)
    if s < 10:
        return str(s)
    return "abcdef"[s - 10]
|
def maybe_hparams_to_dict(hparams):
    """If :attr:`hparams` is an instance of :class:`~texar.HParams`,
    converts it to a `dict` and returns. If :attr:`hparams` is a `dict`
    (or None), returns as is.
    """
    if hparams is None or isinstance(hparams, dict):
        return hparams
    return hparams.todict()
|
def label_to_range(label):
    """
    Convert label to range
    Parameters:
    -----------
    label: list of integers
        must be in the form of [1, 1, ..., 1, 2, 2, ..., 2, ..., C, C, ..., C]
        i.e. nondecreasing numbers starting from 1, each element is greater
        than the previous element by at most 1
    Returns:
    --------
    a list of integers with C + 1 elements, starting with 0; consecutive
    pairs delimit the index range occupied by each label value
    """
    assert label[0] == 1, 'label must start with 1'
    res = [0]
    for i in range(1, len(label)):
        prev, cur = label[i - 1], label[i]
        if cur == prev:
            continue
        # Any jump other than +1 violates the required label layout.
        assert cur == prev + 1, \
            ('label[{}] and label[{}] must be equal or two consecutive '
             'integers, got {} and {}').format(i - 1, i, prev, cur)
        res.append(i)
    res.append(len(label))
    return res
|
def num_time_to_str_time(time, dp=2):
    """Convert a number of seconds to a string time (e.g. 1:36:50).

    Larger units are included only when non-zero; minutes and seconds are
    zero-padded once a larger unit is present. ``dp`` controls the rounding
    of the seconds component.
    """
    str_time = ''
    str_hour = ''
    str_mins = ''
    str_secs = ''
    # Get number of whole hours
    hours = int(time // 3600)
    # Add to string time
    if hours > 0:
        str_time += str(hours)
        str_hour = str(hours)
    # Remove hours from time, for minutes calculation
    time -= hours * 3600
    # Get number of whole minutes
    mins = int(time // 60)
    # Add to string time (always include minutes once hours are present)
    if (mins > 0) or (len(str_time) > 0):
        if len(str_time) > 0:
            str_time += ':'
        # Zero-pad minutes below 10.
        if mins < 10:
            str_time += '0'
            str_mins += '0'
        str_time += str(mins)
        str_mins += str(mins)
    # Remove minutes from time, for seconds calculation
    time -= mins * 60
    # Deal with 60 edge case (rounding pushed minutes to 60: carry to hours)
    if str_mins == '60':
        str_hour = str(int(str_hour) + 1)
        str_mins = '00'
    # Get number of seconds to 2 dp (or input dp)
    secs = round(time, dp)
    # Add to string time
    if (secs > 0) or (len(str_time) > 0):
        if len(str_time) > 0:
            str_time += ':'
        # Zero-pad seconds below 10.
        if secs < 10:
            str_time += '0'
            str_secs += '0'
        # Drop a trailing '.0' so whole seconds print without a decimal.
        str_time += str(secs) if str(secs)[-2:] != '.0' else str(secs)[:-2]
        str_secs += str(secs) if str(secs)[-2:] != '.0' else str(secs)[:-2]
    # Deal with 60 edge case (rounding pushed seconds to 60: carry to minutes)
    if str_secs == '60':
        str_mins = ('0' if (mins < 9) and (hours > 0) else '') + str(mins+1)
        str_secs = '00'
    # Return string time, rebuilt from the per-unit strings.
    # NOTE(review): the separators use hours>1 / mins>1 (not >0 or string
    # emptiness), so e.g. exactly 1 hour yields no ':' after the hour —
    # confirm whether this is intended.
    return str_hour + (':' if hours>1 else '') + str_mins + (':' if mins>1 else '') + str_secs
|
def sec2time(seconds):
    """
    Converts seconds to time format
    :param float|int seconds:
    :return str: 'h:m:s"
    """
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return "%d:%02d:%02d" % (hours, minutes, secs)
|
def filter_by_threshold(line):
    """
    Return True when the third comma-separated field of ``line`` parses to
    a value greater than 50.
    """
    # line.split(',') is the idiomatic spelling of str.split(line, ',').
    temp = float(line.split(',')[2])
    return temp > 50
|
def is_palindrome(num):
    """ Returns true if a non-negative integer is a palindrome.

    Builds the digit-reversed value and compares it to the original.
    """
    reverse, forward = 0, num
    while num > 0:
        reverse = reverse * 10 + num % 10
        # Floor division: the original `num /= 10` is true division on
        # Python 3, turning num into a float and breaking the loop logic.
        num //= 10
    return forward == reverse
|
def get_value(obj, key, reference, default=None):
    """
    Return the actual value described by obj[key].
    If obj[key] is not an integer, return it directly.
    Otherwise, consider obj[key] as an index within the reference list.
    If obj[key] is unreachable, return default.
    If default is None, raise the encountered exception (either IndexError or KeyError).
    """
    try:
        val = obj[key]
        return reference[val] if isinstance(val, int) else val
    except (KeyError, IndexError):
        if default is None:
            raise
        return default
|
def get_average(metrics):
    """
    Average of the non-None values in ``metrics``; None when every value
    is None (or the dict is empty).
    """
    values = [v for v in metrics.values() if v is not None]
    if not values:
        return None
    return sum(values) / len(values)
|
def normalized_in_degree(in_degree_dict):
    """ dict -> dict
    Takes a dictionary of nodes with their indegree value, and returns a
    new dictionary where each indegree is divided by the number of nodes.
    """
    size = len(in_degree_dict)
    return {node: float(deg) / size for node, deg in in_degree_dict.items()}
|
def Julian_Epoch_to_MJD(jepoch):
    """
    Convert Julian Epoch to Modified Julian Date (MJD).

    The J2000.0 epoch corresponds to MJD 51544.5; a Julian year is
    365.25 days.
    """
    MJD_J2000 = 51544.5
    DAYS_PER_JULIAN_YEAR = 365.25
    return MJD_J2000 + (jepoch - 2000.0) * DAYS_PER_JULIAN_YEAR
|
def ensure_all_group(inventory):
    """Ensure that inventory has group "all" with both "hosts" and "children"."""
    all_group = inventory.setdefault("all", {})
    all_group.setdefault("children", {})
    all_group.setdefault("hosts", {})
    return all_group
|
def tet_clean(s):
    """ Checks that a passed string contains only unambiguous IUPAC nucleotide
    symbols (A, C, G, T). We are assuming that a low frequency of IUPAC
    ambiguity symbols doesn't affect our calculation.
    """
    return set(s) <= set('ACGT')
|
def convert_period_into_seconds(period_str):
    """converts a given period of time (in string format) into seconds
    Args:
        period_str (string): period of time represented by a number followed by either
            of the letters 'm', 'h' or 'd', corresponding to minutes, hour and day, respectively.
    Returns:
        int: period of time in seconds
    Raises:
        ValueError: raised if the period is represented in minutes and it's not a multiple of 5.
    """
    number = int(''.join(filter(str.isdigit, period_str)))
    letter = ''.join(filter(str.isalpha, period_str))
    # The original compared with `is`/`is not`, which tests object identity:
    # ''.join(...) builds a new string object, so `letter is 'm'` was never
    # reliably True and the minutes branch silently fell through to days.
    # `==`/`!=` compare by value.
    if letter == 'm' and number % 5 != 0:
        raise ValueError('If you are using "minutes", the period must be multiple of 5.')
    if letter == 'm':
        return number * 60
    if letter == 'h':
        return number * 60 * 60
    # if period is given in days (d)
    return number * 60 * 60 * 24
|
def get_machines_by_vnet_interface_name(config, ifname):
    """
    Returns a list of machine that use a particular VNet interface
    :param dict config: The config generated by get_config()
    :param str ifname: The interface to check for
    :return: list of VNet machines using that interface
    """
    machines = []
    for m_name, m_data in config["machines"].items():
        for int_data in m_data["interfaces"].values():
            # NOTE(review): matches on int(ifname[-1]) — only the LAST
            # character of the interface name — so this assumes single-digit
            # interface suffixes ("vnet0".."vnet9"); "vnet12" would compare
            # as 2. Confirm this limitation is intended.
            if int(int_data["bridge"]) == int(ifname[-1]):
                machines.append(m_name)
    return machines
|
def searchReserved(listofips):
    """Search for iplist and return only DHCP_RESERVED addresses

    Each input address is expected to be a dict with 'id', 'type', 'name'
    and a 'properties' string of '|'-separated 'key=value' pairs; the
    property *values* are appended to the output row.
    """
    searchlist=[]
    for address in listofips:
        # Normalise missing names so the output row always holds a string.
        if address["name"] == None:
            address["name"] = ""
        ipitem = [address['id'],address['type'],address['name']]
        # NOTE(review): addressprops is never used — dead variable.
        addressprops=""
        for aprop in address['properties'].split("|"):
            proplist = aprop.split("=")
            # Only well-formed 'key=value' pairs contribute their value.
            if len(proplist) > 1:
                ipitem.append(proplist[1])
        # NOTE(review): drops the element at index 5 — presumably an
        # unwanted property value. This assumes a fixed property order and
        # at least six elements per row; confirm against the data source.
        del ipitem[5]
        if "DHCP_RESERVED" in ipitem:
            searchlist.append(ipitem)
    return searchlist
|
def create_tasks(job_id, years, finished_year):
    """
    Creates tasks to be run against the NasApi
    It creates tasks for each year in the input years."

    ``years`` is a comma-separated string; ``finished_year`` is the most
    recently completed year (falsy when nothing has finished yet). Three
    tasks (mean, 90th and 10th quantile) are created for the next
    unfinished year, or none when all years are done.
    """
    # Pick the year to process next: the first listed year when nothing is
    # finished yet, otherwise the year immediately after finished_year.
    next_year = finished_year
    for year in years.split(','):
        if not next_year:
            next_year = year
            break
        if year == next_year:
            # Clear so the next iteration's year is selected (or, if this
            # was the last year, no task is created).
            next_year = None
    # NOTE(review): if finished_year is not present in ``years`` at all,
    # next_year keeps that value and tasks are created for it anyway —
    # confirm this fall-through is intended.
    tasks = []
    if next_year:
        tasks.append({'job_id': job_id, 'years': years, 'year': next_year, 'calc_type': 'mean', 'quantile': ''})
        tasks.append({'job_id': job_id, 'years': years, 'year': next_year, 'calc_type': 'quantile', 'quantile': '90'})
        tasks.append({'job_id': job_id, 'years': years, 'year': next_year, 'calc_type': 'quantile', 'quantile': '10'})
    return tasks
|
def filter_fn(x):
    """Filter bad samples: unlabeled examples and empty/None text fields."""
    if x["label"] == -1:
        return False
    for field in ("premise", "hypothesis"):
        if field in x and (x[field] is None or x[field] == ""):
            return False
    return True
|
def strip_unnamed(string: str) -> str:
    """When fusing headers coming from excel, the lines with NA values are
    named as `Unnamed:...`. This function filters them out.
    """
    cleaned = string.strip()
    return "" if cleaned.startswith("Unnamed:") else cleaned
|
def same_snp_sites(h1, h2):
    """
    Find SNP sites present in both haplotype dicts with the same allele.

    Each value in h1/h2 is indexable, with element 0 the allele; matching
    sites map to (h1-extra, h2-extra, full-h1-entry) tuples.

    The original also computed same_p / same_p2 match ratios here, but they
    were never used or returned — removed as dead code.
    """
    same = {}
    for snp_id in h1:
        if snp_id in h2 and h2[snp_id][0] == h1[snp_id][0]:
            same[snp_id] = (h1[snp_id][1], h2[snp_id][1], h1[snp_id])
    return same
|
def parse_image_id(image_ref):
    """Return the image id (the final path segment) from a given image ref."""
    return image_ref.rpartition('/')[2]
|
def timeslips_decoder(x: list, t: int) -> float:
    """
    The inverse function of to_timeslips: decode a list of timeslip ints
    into the total milliseconds they represent (``t`` ms per slip unit).
    """
    total_slips = sum(x)
    return total_slips * t
|
def tap(fun, value):
    """Call ``fun(value)`` for its side effect, then return ``value`` unchanged.

    Complexity: O(k), where k is the complexity of ``fun``.

    params:
        fun: the function to invoke
        value: the value to pass through
    returns: the value, untouched
    """
    fun(value)
    return value
|
def zeros(number: int) -> int:
    """
    Count the trailing zeros contributed by factors of 5 — one zero per
    factor of 5 in 1..number (Legendre-style repeated division).
    """
    count = 0
    remaining = number
    while remaining:
        remaining //= 5
        count += remaining
    return count
|
def get_int(row, key, ignore_start=0, ignore_end=None):
    """ row is the csv row to get an integer from,
    key is that row's column that we want to cast as int,
    start/end is the number of leading/trailing characters to ignore.
    return 0 if could not convert
    """
    raw = row[key][ignore_start:(-ignore_end if ignore_end else None)]
    try:
        return int(raw.replace(',', ''))  # "1,000" -> 1000
    except ValueError:  # raised by int('') or int('---')
        return 0
|
def find_missing(source, target):
    """Return items in source list but not in target list.
    Find items that are in the source list but not in the target list. Source
    and target should each be a list with a number of items.
    Args:
        source (list): Students in the sd_df data.
        target (list): Students in the lsd_tags_s data.
    Returns:
        missing (list): Students that are missing from target list.
    """
    return [item for item in source if item not in target]
|
def task4(m: int, n: int) -> str:
    """
    Concatenate the decimal representations of two ints (e.g. 3,4 -> "34").
    Input: two integers
    Output: string
    """
    return '{}{}'.format(m, n)
|
def update_context_google_tag_manager(context, page_name='NOPAGE_NAME', page_id='NOPAGE_ID', plugin_id='NOPLUGIN_ID',
                                      name='NONAME'):
    """
    Compose the Google Tag Manager form identifier and store it in the
    context under 'form_name'.

    The id has the shape "<page_name><page_id>_plugin<plugin_id>_<name>"
    (e.g. "Kontakt88_plugin7451_SupportView"); spaces become dashes.
    For a Button Link inside a static placeholder the caller passes the
    placeholder slot and id instead of page infos.
    """
    raw = '{}{}_plugin{}_{}'.format(page_name, page_id, plugin_id, name)
    context.update({'form_name': raw.replace(' ', '-')})
    return context
|
def cash_coins(cents):
    """
    Count the coins needed to make ``cents`` of change using quarters,
    dimes, nickels and pennies (greedy, largest denomination first).
    """
    coins = 0
    for denomination in (25, 10, 5, 1):
        # Consume as many of this denomination as the remainder allows.
        while cents >= denomination:
            cents -= denomination
            coins += 1
    return coins
|
def get_follower_ratio_min(selectpicker_id: str) -> int:
    """
    Map a selectpicker option id to its minimum follower-ratio value.

    :param selectpicker_id: option id from the UI select widget
    :return: the minimum ratio for that option; 0 for unknown ids
    """
    thresholds = {
        "1": 0, "2": 1, "3": 2, "4": 3,
        "5": 4, "6": 5, "7": 6, "8": 7,
        "9": 8, "10": 9, "11": 10, "12": 15,
        "13": 20, "14": 30, "15": 40, "16": 50,
    }
    return thresholds.get(selectpicker_id, 0)
|
def convert_indices_to_vertices(index_list, mesh):
    """
    Convert given flattened index list to vertices list
    :param index_list: list<str>, list of flattened index to convert
    :param mesh: str: mesh vertices belong to
    :return: list<str>
    """
    return ['{0}.vtx[{1}]'.format(mesh, index) for index in list(index_list)]
|
def sfloat(x, num_chars=10):
    """Stringify a float so the result has exactly ``num_chars`` characters.

    The integer part (rendered as a float, e.g. "3.0") determines how many
    characters are already taken; the remainder sets the decimal precision.
    Raises when the integer part alone needs more than ``num_chars``.
    """
    x = float(x)
    num_chars = int(num_chars)
    integer_part = str(x).split('.')[0]
    used = len(str(float(integer_part)))
    if used > num_chars:
        raise Exception('Try num_chars = {}'.format(used))
    fmt = '{:' + str(num_chars) + '.' + str(num_chars - used + 1) + 'f}'
    return fmt.format(x)
|
def bool_or_none(b):
    """Return bool(b), but preserve None."""
    return None if b is None else bool(b)
|
def convert_to_csv(records):
    """Convert records in list type to csv string.
    Args:
        records: A list of power data records each of which contains timestamp, power value,
            and channel. e.g. [[12345678, 80, "SYSTEM"],[23456789, 60, "SENSOR"]].
    Returns:
        A string that contains all CSV records; the empty string when the
        given list is empty.
    """
    if not records:
        return ''
    return '\n'.join(
        ','.join(str(field) for field in record) for record in records
    )
|
def does_not_exist_msg(title):
    """
    Build the standard "does not exist" message for *title*.
    Parameters
    ----------
    title : str
        Title to prepend to the message.
    Returns
    -------
    str
        The formatted message.
    """
    template = '%s matching query does not exist.'
    return template % title
|
def longest_common_token_prefix(strings, delimiter='_'):
    """
    Find the longest common token-wise prefix of 2 or more strings.
    Args:
        strings (collection): at least 2 strings.
        delimiter (character): the character to split tokens on.
    Returns:
        string: The longest token-aligned string all inputs start with.
    >>> longest_common_token_prefix(["xr_abc_123", "xr_abc_567"])
    'xr_abc_'
    "1" is in the per-character longest common prefix, but 123 != 135,
    so it's not in the per-token prefix.
    >>> longest_common_token_prefix(["xr_abc_123", "xr_abc_135"])
    'xr_abc_'
    Here, the prefix is the entirety of one string, so no trailing delimiter.
    >>> longest_common_token_prefix(["xr_abc_123", "xr_abc"])
    'xr_abc'
    No common prefix here, because comparison is per-token:
    >>> longest_common_token_prefix(["abc_123", "ab_123"])
    ''
    """
    assert(len(strings) > 1)
    # The lexicographic min and max bound the whole collection, so their
    # common prefix is the common prefix of every string.
    lo_tokens = min(strings).split(delimiter)
    hi_tokens = max(strings).split(delimiter)
    shared = []
    for lo_tok, hi_tok in zip(lo_tokens, hi_tokens):
        if lo_tok != hi_tok:
            break
        shared.append(lo_tok)
    if not shared:
        return ''
    prefix = delimiter.join(shared)
    if len(shared) < min(len(lo_tokens), len(hi_tokens)):
        # Strictly shorter than every input, so restore the delimiter
        # that splitting consumed.
        prefix += delimiter
    return prefix
|
def _different_coordinates(row_i, row_j):
"""
Return True if the coordinates values are different.
Genomic coordinates values are only compared when end and start phases
are not -1. Phase -1 can be present because of UTR regions, which could
have different genomic coordinates, but still have the same protein
sequence.
"""
if row_i['StartPhase'] != -1 and row_j['StartPhase'] != -1:
if row_i['GenomicCodingStart'] != row_j['GenomicCodingStart']:
return True
if row_i['EndPhase'] != -1 and row_j['EndPhase'] != -1:
if row_i['GenomicCodingEnd'] != row_j['GenomicCodingEnd']:
return True
return False
|
def get_function_input(function):
    """
    Return a one-element parameter list for the input documented on
    *function* via api.decorators.input, or an empty list when the
    function carries no ``doc_input`` attribute.
    """
    if not hasattr(function, 'doc_input'):
        return []
    description, return_type, required = function.doc_input
    return [{
        'name': 'body',
        'description': description,
        'type': return_type,
        'paramType': 'body',
        'required': bool(required),
    }]
|
def _parse_connection(connection):
"""
Parse a string of the form "{id1} {id2} {ts}" for
example: "1 2 12345678"
"""
return tuple(int(field) for field in connection.split(' '))
|
def prefix_average_3(array: list) -> list:
    """
    Compute the running (prefix) averages of *array*.
    BIG-O Notation: O(n) — one pass with a running sum.
    :param array: list of integers
    :return: list of averages
    """
    averages = []
    running_sum = 0
    for count, value in enumerate(array, start=1):
        running_sum += value
        averages.append(running_sum / count)
    return averages
|
def difference(first, other, epsilon=0.005):
    """
    Calculate the difference between two dictionaries: all keys present in
    both ``first`` and ``other`` whose values differ. Numeric values are
    considered different only when they differ by at least ``epsilon``;
    non-numeric values fall back to plain inequality.
    :param dict first: The first dictionary to compare.
    :param dict other: The other dictionary to compare.
    :param float epsilon: The smallest difference between two floating point \
    numbers that we differentiate between
    :return: A dictionary mapping each differing key to the value from \
    ``other``.
    :rtype: dict
    """
    changed = {}
    for key in set(first) & set(other):
        mine, theirs = first[key], other[key]
        try:
            differs = abs(mine - theirs) >= epsilon
        except TypeError:
            # Values aren't numeric (subtraction unsupported): compare directly.
            differs = mine != theirs
        if differs:
            changed[key] = theirs
    return changed
|
def metrics_dict(data_list):
    """Organize calculated ECG data into a dictionary.

    The ECG test characteristics of concern are pulled positionally from
    the metrics list and keyed so the data is easier to read and navigate.
    :param data_list: list of ECG metrics, ordered as duration,
        voltage_extremes, num_beats, mean_hr_bpm, beats
    :returns: dictionary of metrics with the keywords 'duration',
        'voltage_extremes', 'num_beats', 'mean_hr_bpm', and 'beats'
    """
    keys = ('duration', 'voltage_extremes', 'num_beats', 'mean_hr_bpm', 'beats')
    return {key: data_list[position] for position, key in enumerate(keys)}
|
def is_zypper_error(code):
    """
    Tell whether *code* is an error exit status according to zypper:
    any non-zero code below 100 (0 is success; >= 100 is informational).
    """
    return code < 100 and code != 0
|
def fib(n):
    """Return the nth Fibonacci number.

    Iterative O(n) implementation; the previous naive double recursion
    (fib(n-2) + fib(n-1)) took exponential time. Results are unchanged,
    including n <= 0 -> 0 and n == 1 -> 1.

    :param n: index of the Fibonacci number to compute
    :returns: the nth Fibonacci number
    """
    if n <= 0:
        return 0
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
|
def calculate_MOA_or_DCC(variables, classes, noc):
    """Calculate the MOA or DCC metric value.

    Counts how many entries of *variables* also appear in *classes* and
    normalizes by *noc*. (Presumably variables are attribute type names and
    classes are the project's class names, per the MOA/DCC metric
    definitions — TODO confirm against the callers.)

    Args:
        variables (list): candidate names to test for class membership.
        classes (list): known class names to match against.
        noc (int): normalization denominator (e.g. number of classes).

    Returns:
        float: count of matches divided by *noc*.

    Raises:
        ZeroDivisionError: if *noc* is 0 (unchanged from the original).
    """
    matches = sum(1 for variable in variables if variable in classes)
    return matches / noc
|
def attr_in_object(object):
    """
    Return the non-callable, non-dunder attributes of an object as a list.
    :param object: object to inspect
    """
    found = []
    for name in dir(object):
        if name.startswith("__"):
            continue
        if callable(getattr(object, name)):
            continue
        found.append(name)
    return found
|
def _get_id(obj, key):
"""
Get the ID of an object by extracting it from the resource_uri field.
"""
resource_uri = obj.pop(key, '')
if resource_uri:
# i.e. /foo/1/ -> /foo/1 -> ('/foo', '/', '1') -> '1'
return resource_uri.rstrip('/').rpartition('/')[-1]
|
def get_solubility(molecular_weight, density):
    """
    Estimate the solubility (mol/L) of each oil pseudo-component.

    Uses the Huibers and Lehr correlation from the huibers_lehr.py module
    of py_gnome (gnome/utilities/weathering/), originally from Huibers &
    Katrisky in a 2012 EPA report and later modified by Lehr to better
    match measured values. The form here is adapted to return mol/L.

    Parameters
    ----------
    molecular_weight : np.array
        Molecular weights of each pseudo-component as recorded in the NOAA
        Oil Library (g/mol)
    density : np.array
        Density of each pseudo-component as recorded in the NOAA Oil
        Library (kg/m^3)

    Returns
    -------
    solubility : np.array
        Solubilities (mol/L) for each pseudo-component of the oil.
    """
    exponent = -36.7 * molecular_weight / density
    return 46.4 * 10. ** exponent
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.