content
stringlengths 42
6.51k
|
---|
def is_valid_int_greater_zero_param(param, required=True):
    """Validate that ``param`` is an integer value greater than zero.

    @param param: Value to be validated.
    @param required: When False, a None param counts as valid.
    @return True if the parameter has a valid integer value, or False otherwise.
    """
    if param is None:
        return not required
    try:
        return int(param) > 0
    except (TypeError, ValueError):
        return False
|
def _pitch2m(res):
    """Convert a pitch string such as "-600-" to meters.

    The digits between the dashes are read as hundredths of an inch.

    >>> _pitch2m("-600-")
    4.233333333333333e-05
    >>> _pitch2m("-1200-")
    2.1166666666666665e-05
    """
    per_inch = int(res[1:-1])
    return 0.0254 / per_inch
|
def vector_in_01(x):
    """Return True when every element of ``x`` lies in the closed interval [0, 1]."""
    return all(0.0 <= element <= 1.0 for element in x)
|
def handle_mask(mask, tree):
    """Expand a scalar boolean mask to one entry per element of ``tree``.

    :param mask: boolean mask (single bool or already a sequence)
    :param tree: tree structure (only its length is used)
    :return: boolean mask matching the tree length
    """
    return [mask] * len(tree) if isinstance(mask, bool) else mask
|
def powerlaw_u_alpha(x, p, z_ref=100):
    """Power-law profile; ``p`` holds (alpha, u_ref)."""
    alpha, u_ref = p[0], p[1]
    return u_ref * (x / z_ref) ** alpha
|
def distance_pronoun_antecedent(pair):
    """Sentence distance between pronoun and antecedent; 0 when they share a sentence."""
    return pair[3] - pair[2]
|
def time_format(seconds):
    """Convert a duration in seconds into a 'DD H M S' style string.

    :param seconds: numeric duration in seconds, or None
    :return: e.g. '01D 02H 03m 04s'; '-' when None or not positive
    """
    # Bug fix: the original skipped the assignments when seconds was None
    # and then hit a NameError on ``d``; bail out early instead.
    if seconds is None:
        return '-'
    seconds = int(seconds)
    d = seconds // (3600 * 24)
    h = seconds // 3600 % 24
    m = seconds % 3600 // 60
    s = seconds % 3600 % 60
    if d > 0:
        return '{:02d}D {:02d}H {:02d}m {:02d}s'.format(d, h, m, s)
    if h > 0:
        return '{:02d}H {:02d}m {:02d}s'.format(h, m, s)
    if m > 0:
        return '{:02d}m {:02d}s'.format(m, s)
    if s > 0:
        return '{:02d}s'.format(s)
    return '-'
|
def comma_filter(value):
    """Render ``value`` with thousands separators, e.g. 1234567 -> '1,234,567'."""
    return format(value, ',')
|
def interval_intersect(a, b, c, d):
    """True when the closed intervals [a, b] and [c, d] overlap."""
    return a <= d and c <= b
|
def compare_ltiv_data(expected, actual):
    """
    Helper to test the LENGTH|TYPE|ID|VALUE data. It is packed in a
    dictionary like {ID: (VALUE, TYPE)}.

    Consumes matched entries from ``actual`` (which is mutated) and returns
    True only when both dicts hold exactly the same IDs with equal VALUE
    and TYPE fields.
    """
    for k, val in expected.items():
        # Bug fix: a missing ID previously raised KeyError from pop();
        # treat it as a mismatch instead.
        actual_v = actual.pop(k, None)
        if actual_v is None:
            return False
        if actual_v[0] != val[0] or actual_v[1] != val[1]:
            return False
    # Any leftover entries in ``actual`` mean the dicts differ.
    return actual == {}
|
def get_name(header, splitchar="_", items=2):
    """Build a shortened name from a FASTA-style header.

    Own function vs. import from match_contigs_to_probes - we don't want
    lowercase.

    :param header: raw header, possibly starting with '>'
    :param splitchar: delimiter used both to split and to rejoin
    :param items: number of leading parts to keep
    :return: joined leading parts with any leading '>' stripped
    """
    if splitchar:
        # Bug fix: rejoin with ``splitchar`` rather than a hard-coded "_",
        # so non-underscore delimiters round-trip correctly.
        return splitchar.join(header.split(splitchar)[:items]).lstrip(">")
    return header.lstrip(">")
|
def Int2AP(num):
    """Encode ``abs(num)`` in base 16 using the byte alphabet 'A'..'P'.

    Returns an empty bytes object for zero.
    """
    alphabet = b'ABCDEFGHIJKLMNOP'
    num = int(abs(num))
    digits = []
    while num:
        num, mod = divmod(num, 16)
        digits.append(alphabet[mod:mod + 1])
    return b''.join(reversed(digits))
|
def lca_main(root, v1, v2):
    """Return the lowest common ancestor node of values ``v1`` and ``v2``.

    Both values are assumed to exist in the tree; a node whose own value
    matches is the LCA of everything below it.

    :param root: node exposing ``info``, ``left`` and ``right``
    :param v1: first value
    :param v2: second value
    :return: the LCA node, or None when neither value occurs here
    """
    if root is None:
        return None
    if root.info == v1 or root.info == v2:
        return root
    left_hit = lca_main(root.left, v1, v2)
    right_hit = lca_main(root.right, v1, v2)
    if left_hit and right_hit:
        # Bug fix: return the node itself, not ``root.info`` — every other
        # branch returns a node, and callers expect one uniformly.
        return root
    return left_hit or right_hit
|
def _job_id_from_worker_name(name):
    """Utility to parse the job ID from the worker name.

    Template: 'prefix--jobid--suffix'.

    Uses maxsplit=2 so a suffix that itself contains '--' no longer
    breaks the three-way unpack.
    """
    _, job_id, _ = name.split("--", 2)
    return job_id
|
def summarize_mutation(mutation_name, event, inputs, outputs, isAsync=False):
    """
    Standard dictionary representation of a mutation, used when services
    announce themselves.
    """
    return {
        'name': mutation_name,
        'event': event,
        'isAsync': isAsync,
        'inputs': inputs,
        'outputs': outputs,
    }
|
def define_token(token):
    """Return the mandatory #define needed before ``token`` may be used
    within nstl where nstl expects a valid token (e.g. as a method name
    in an nstl type).

    :raises TypeError: when ``token`` is not a string
    """
    if not isinstance(token, str):
        raise TypeError("{} is not of type {}".format(token, str))
    spelled_out = " ".join(token)
    return "#define NSTL_TOKEN_{} ({})".format(token, spelled_out)
|
def cleanup_watch(watch):
    """Return the 'watched' field of a watch dict, stringified for JSON
    output.

    This is not the standard cleanup function, because we know who the
    author is.
    """
    watched = watch["watched"]
    return str(watched)
|
def rgb_to_hex(color):
    """Convert an RGB triple of 0-255 ints to a '#rrggbb' hex string.

    An RGBA 4-tuple is also accepted and yields '#rrggbbaa'.
    """
    # Bug fix: the original format string always demanded four components,
    # so a plain RGB triple raised TypeError.
    if len(color) == 3:
        return "#%02x%02x%02x" % tuple(color)
    return "#%02x%02x%02x%02x" % tuple(color)
|
def r1_p_r2(R1, R2):
    """
    Resistance of a parallel connection: R1*R2 / (R1 + R2).
    """
    product = R1 * R2
    total = R1 + R2
    return product / total
|
def _to_yaml_defs(wrapped, instance, args, kwargs):
    """
    New in v17.

    Public decorator hook for the yaml generator: forwards the call to the
    wrapped function unchanged.
    """
    return wrapped(*args, **kwargs)
|
def num_configs(n: int) -> int:
    """Count tilings of a 2 x n board with dominoes and L-trominoes.

    Dynamic programming over the fill-state of the last column, encoded as
    a 2-bit mask (bit 0 = top row filled, bit 1 = bottom row filled):
      * 0b00 nothing sticking out (reached by finishing a column, or by a
        vertical domino / closing off a full column)
      * 0b01 / 0b10 one row sticking out (a tromino from empty, or a
        horizontal domino extending the other half-filled state)
      * 0b11 both rows sticking out (two horizontal dominoes from empty, or
        a tromino extending either half-filled state)

    :param n: length of the 2 x n board
    :return: number of possible configurations
    """
    if n <= 0:
        return 0
    # Exactly one way to reach the clean starting state.
    counts = [1, 0, 0, 0]
    for _ in range(n):
        empty, top, bottom, full = counts
        counts = [
            empty + full,          # column completed cleanly
            empty + bottom,        # top row now protrudes
            empty + top,           # bottom row now protrudes
            empty + top + bottom,  # both rows protrude
        ]
    return counts[0]
|
def _FixInt(value: int) -> int:
    """Map negative integer values returned by TSK onto their unsigned
    32-bit form."""
    return value & 0xFFFFFFFF if value < 0 else value
|
def xml_get_attrs(xml_element, attrs):
    """Collect the requested attributes of an XML element.

    Parameters:
        xml_element: xml element exposing ``getAttribute``
        attrs: tuple of attribute names
    Returns:
        dict mapping each attribute name to its value
    """
    return {attr: xml_element.getAttribute(attr) for attr in attrs}
|
def normalize_from_minmax(v, min_v, max_v):
    """Min-max normalize ``v`` given the extremes of its list.

    :param v: value to normalize
    :param min_v: minimum value of the list
    :param max_v: maximum value of the list
    :return: (v - min_v) / (max_v - min_v), or 0 for a degenerate range
    """
    # Bug fix: guard every degenerate range (max == min), not only the
    # all-zero case, to avoid ZeroDivisionError.
    if max_v == min_v:
        return 0
    return (v - min_v) / float(max_v - min_v)
|
def int_to_roman(num):
    """Convert an integer in (0, 4000) to its Roman numeral string.

    Roman numerals use the symbols I, V, X, L, C, D, M, written largest to
    smallest left to right, with subtractive notation for 4, 9, 40, 90,
    400 and 900 (IV, IX, XL, XC, CD, CM).

    :type num: int
    :rtype: str, or None for out-of-range input
    """
    # Bug fix: the original guard ``0 >= num >= 4000`` can never be true;
    # explicitly reject non-positive and >= 4000 values.
    if num <= 0 or num >= 4000:
        return None
    # Value/symbol table ordered largest-first; greedy subtraction is
    # correct for Roman numerals.
    table = (
        (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
        (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
        (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"),
    )
    roman = ""
    for value, symbol in table:
        while num >= value:
            roman += symbol
            num -= value
    # The stray debug print of the result was removed.
    return roman
|
def is_valid_option(parser, arg, checklist):
    """
    Check that a string arg is a valid option according to a checklist.

    Parameters
    ----------
    parser : argparse object
    arg : str
    checklist : container of valid values

    Returns
    -------
    arg, unchanged, when valid; otherwise ``parser.error`` is invoked
    """
    if arg in checklist:
        return arg
    parser.error("'{}' parametised mission type does not exist. Valid options are {}".format(arg, checklist))
|
def handle_input(arguments):
    """
    Validate command-line arguments and return the settings dictionary.

    Manages defaults: config file if present, otherwise internal value,
    unless overridden from the command line. Upload and download URLs are
    not specified here; they are determined when the Job is created.

    :param arguments: docopt-style dict of parsed command-line flags
    :return: settings dictionary
    """
    settings = {}
    if arguments['--all'] == True:
        settings['mode'] = 'all'
        settings['type'] = arguments['--type']
        settings['name'] = arguments['--name']
        settings['ids-file'] = arguments['--ids-file']
        settings['results-file'] = arguments['--results-file']
    if arguments['--create'] == True:
        settings['mode'] = 'create'
        settings['type'] = arguments['--type']
        settings['name'] = arguments['--name']
    if arguments['--list'] == True:
        settings['mode'] = 'list'
        # Optional filters are copied over only when supplied.
        for key in ('name', 'id', 'type', 'status'):
            if arguments['--' + key] is not None:
                settings[key] = arguments['--' + key]
    if arguments['--upload'] == True:
        settings['mode'] = 'upload'
        settings['type'] = arguments['--type']
        settings['ids-file'] = arguments['--ids-file']
        if arguments['--id'] is not None:
            settings['id'] = arguments['--id']
        elif arguments['--name'] is not None:
            settings['name'] = arguments['--name']
    if arguments['--download'] == True:
        settings['mode'] = 'download'
        settings['type'] = arguments['--type']
        settings['results-file'] = arguments['--results-file']
        if arguments['--id'] is not None:
            settings['id'] = arguments['--id']
        elif arguments['--name'] is not None:
            settings['name'] = arguments['--name']
    return settings
|
def to_int_or_zero(value):
    """
    Converts given value to an integer or returns 0 if it fails.

    :param value: Arbitrary data type.
    :rtype: int

    .. rubric:: Example

    >>> to_int_or_zero("12")
    12
    >>> to_int_or_zero("x")
    0
    """
    try:
        return int(value)
    except (TypeError, ValueError):
        # TypeError covers non-stringlike input such as None or a list,
        # which the "arbitrary data type" contract promises to tolerate.
        return 0
|
def find_idx_nonsol(list_kappa_idx, solution_idx):
    """
    Find the scalar products for nonsolutions.

    Parameters:
        list_kappa_idx -- list
        solution_idx -- list of Decimal
    Return:
        nonsol_idx -- sublists whose first five entries all differ from the
        corresponding entries of ``solution_idx``
    """
    return [
        sublist
        for sublist in list_kappa_idx
        if all(sublist[i] != solution_idx[i] for i in range(5))
    ]
|
def interrogate_age_string(age_string):
    """
    Extract the lower and upper limits from an age-group string.

    Args:
        age_string: string of the form '_age<lower>to<upper>' or '_age<lower>up'
    Returns:
        limits: list of the lower and upper limits
        dict_limits: dictionary mapping the original string to the limits
    """
    # The string must start with the age marker.
    assert age_string[:4] == '_age', 'Age string does not begin with "_age".'
    ages = age_string[4:]
    # Leading digits form the lower limit.
    digits = ''
    for ch in ages:
        if not ch.isdigit():
            break
        digits += ch
    remainder = ages[len(digits):]
    lower_age_limit = float(int(digits)) if digits else ''
    # 'up' means unbounded above; 'toNN' gives an explicit ceiling.
    if remainder == 'up':
        upper_age_limit = float('inf')
    elif remainder.startswith('to'):
        upper_age_limit = float(remainder[2:])
    else:
        raise NameError('Age string incorrectly specified')
    limits = [lower_age_limit, upper_age_limit]
    return limits, {age_string: limits}
|
def get_component_key(out_chan: str, in_chan: str) -> str:
    """
    Key identifying an (output channel, input channel) pair in the solution.

    Parameters
    ----------
    out_chan : str
        The output channel
    in_chan : str
        The input channel

    Returns
    -------
    str
        The concatenated component key

    Examples
    --------
    >>> get_component_key("Ex", "Hy")
    'ExHy'
    """
    return out_chan + in_chan
|
def _MakeSplitDimension(value, enabled):
    """Build a BundleConfig splitDimension entry; 'negate' is the inverse of enabled."""
    negate = not enabled
    return {'value': value, 'negate': negate}
|
def _x2n(x):
    """
    Convert a base-26 letter label ('A'..'Z', 'AA', ...) into decimal.

    :param x: string of uppercase letters (A=1 ... Z=26)
    :return: decimal value
    """
    numerals = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    total = 0
    # Horner's scheme: equivalent to summing digit * 26**position.
    for letter in x:
        total = total * 26 + numerals.index(letter) + 1
    return total
|
def _resolve_nodata(src, band, fallback=None, override=None):
    """Decide which nodata value to use for a band.

    Priority: explicit ``override``; then the file's recorded nodata for
    the (first) band; then ``fallback``.

    :param src: Rasterio file
    :param band: 1-based band index, or a sequence of indices
    """
    if override is not None:
        return override
    first_band = band if isinstance(band, int) else band[0]
    recorded = src.nodatavals[first_band - 1]
    return fallback if recorded is None else recorded
|
def vari(blue, green, red):
    """
    VARI: Visible Atmospherically Resistant Index.

    Designed to introduce an atmospheric self-correction.
    Gitelson A.A., Kaufman Y.J., Stark R., Rundquist D., 2002.
    Novel algorithms for estimation of vegetation fraction.
    Remote Sensing of Environment (80), pp 76-87.
    """
    numerator = green - red
    denominator = green + red - blue
    return numerator / denominator
|
def is_os_tool(path_to_exe):
    """Test whether ``path_to_exe`` was installed as part of the OS."""
    prefix = '/usr/bin'
    return path_to_exe.startswith(prefix)
|
def YEAR(expression):
    """
    Returns the year portion of a date.

    See https://docs.mongodb.com/manual/reference/operator/aggregation/year/
    for more details.

    :param expression: expression or variable of a Date, a Timestamp, or an ObjectID
    :return: Aggregation operator
    """
    return dict([('$year', expression)])
|
def contains_static_content(content: str) -> bool:
    """Check whether the HTML contains links to local images."""
    marker = "<img alt="
    return marker in content
|
def select_user(user):
    """Build the SQL that selects a specific mysql user (row exists iff the
    user does).

    NOTE(security): string-built SQL is injectable; callers with untrusted
    input should prefer a parameterized query. As a stopgap, embedded
    single quotes are doubled so the name cannot break out of the literal.
    """
    safe_user = user.replace("'", "''")
    return "SELECT user FROM mysql.user WHERE user = '" + safe_user + "'"
|
def mclag_session_timeout_valid(session_tmout):
    """Check that the MCLAG session timeout lies in the valid range [3, 3600].

    Returns a (bool, message) tuple; the message is empty on success.
    """
    if 3 <= session_tmout <= 3600:
        return True, ""
    return False, "Session timeout %s not in valid range[3-3600]" % session_tmout
|
def CSourceForElfSymbolListMacro(variable_prefix, names, name_offsets,
                                 base_address=0x10000, symbol_size=16,
                                 spacing_size=16):
    """Generate C source definition for a macro listing ELF symbols.

    Args:
      variable_prefix: Suffix appended to the LIST_ELF_SYMBOLS_ macro name.
      names: List of symbol names.
      name_offsets: List of symbol offsets.
      base_address: Base starting address for symbols.
      symbol_size: Symbol size in bytes (all have the same size).
      spacing_size: Additional bytes between symbols.
    Returns:
      String containing C source fragment.
    """
    out = (
        r'''// Auto-generated macro used to list all symbols
// XX must be a macro that takes the following parameters:
// name: symbol name (quoted).
// str_offset: symbol name offset in string table
// address: virtual address.
// size: size in bytes
''')
    out += '#define LIST_ELF_SYMBOLS_%s(XX) \\\n' % variable_prefix
    # Symbols are laid out back to back with a fixed gap between them.
    for index, (sym, offset) in enumerate(zip(names, name_offsets)):
        address = base_address + index * (symbol_size + spacing_size)
        out += ' XX("%s", %d, 0x%x, %d) \\\n' % (sym, offset, address, symbol_size)
    out += ' // END OF LIST\n'
    return out
|
def shorten_url(youtube_id: str) -> str:
    """Return the shortened youtu.be url for a YouTube video id."""
    return 'https://youtu.be/' + youtube_id
|
def _saferound(value, decimal_places):
    """
    Round a value off to the desired precision, returning '' when the
    value cannot be interpreted as a float.
    """
    try:
        f = float(value)
    except (TypeError, ValueError):
        # TypeError covers None and other non-numeric types, which the
        # original let escape.
        return ''
    # Named ``spec`` to avoid shadowing the ``format`` builtin.
    spec = '%%.%df' % decimal_places
    return spec % f
|
def strorblank(s):
    """Return ``s`` when it is already a string, otherwise the empty string."""
    if isinstance(s, str):
        return s
    return ""
|
def split_filename(name):
    """Split a file name into (stem, extension); extension is '' when there
    is no dot."""
    stem, sep, ext = name.rpartition('.')
    if not sep:
        return name, ''
    return stem, ext
|
def to_iso_date(value):
    """
    Pad an incomplete ISO date so <year> or <year-month> become a full
    <year-month-day>; missing parts default to '01'.
    """
    missing = 2 - value.count('-')
    if missing > 0:
        value += '-01' * missing
    return value
|
def threshold(number: int, minNumber: int = 20) -> int:
    """Zero out values whose magnitude is below ``minNumber``."""
    if abs(number) >= minNumber:
        return number
    return 0
|
def tsym(name):
    """
    Return the raw unicode string for a common symbol name.

    Input
    -----
    name    Symbol name, case insensitive

    Output
    ------
    Raw unicode string, or None (with a printed message) for unknown names

    Restrictions
    ------------
    Only knows a very limited number of names.

    Known symbols
    -------------
    deg / degree                              Degree symbol
    degc / degreec / degree c                 Degree Celsius
    mu                                        Lowercase greek mu
    peclet                                    Peclet symbol
    permil / permille / per mil / per mille   Permille sign

    Examples
    --------
    >>> print(tsym('deg'))
    \u00B0
    >>> print(tsym('degC'))
    \u2103
    >>> print(tsym('mu'))
    \u00B5
    >>> print(tsym('per mille'))
    \u2030

    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License; the JAMS package originates from the former UFZ Python library,
    Department of Computational Hydrosystems, Helmholtz Centre for
    Environmental Research - UFZ, Leipzig, Germany.
    Copyright (c) 2011-2013 Matthias Cuntz - mc (at) macu (dot) de

    History
    -------
    Written,  MC, Jun 2011
    Modified, MC, Feb 2013 - ported to Python 3
              MC, Mar 2013 - removed raw
    """
    # Symbol lookup table, keyed by lower-cased name.
    symdict = {
        'deg':       '\u00B0',
        'degree':    '\u00B0',
        'degc':      '\u2103',
        'degreec':   '\u2103',
        'degree c':  '\u2103',
        'mu':        '\u00B5',
        'peclet':    '\u2118',
        'permil':    '\u2030',
        'permille':  '\u2030',
        'per mil':   '\u2030',
        'per mille': '\u2030',
    }
    key = name.lower()
    if key not in symdict:
        print("TSYM: Symbol not known: %s" % name)
        return None
    return symdict[key]
|
def str2bool(text, test=True):
    """Test whether a string 'looks' like a boolean value.

    Args:
        text: Input text
        test (default = True): Which boolean value to look for

    Returns:
        True if the text looks like the selected boolean value
    """
    truthy = ['1', 'y', 'yes', 't', 'true', 'ok', 'on', ]
    falsy = ['0', 'n', 'no', 'none', 'f', 'false', 'off', ]
    candidates = truthy if test else falsy
    return str(text).lower() in candidates
|
def detectEmptyTFR(frame):
    """Detect 'empty' NOTAM-TFRs.

    These are previously sent, still-active NOTAM-TFRs that every other
    cycle arrive with only their number, an empty text field, and an
    indication that they are still active.

    Args:
        frame (dict): Frame with product id of 8.
    Returns:
        Empty string if this cannot be an empty TFR. Otherwise, a
        frame-string indicating that it is an EMPTY TFR.
    """
    records = frame['contents'].get('records', [])
    for record in records:
        # Cancelled messages are never empty TFRs.
        if record.get('report_status') == 0:
            continue
        if record.get('text') == '':
            return ' [EMPTY TFR {}-{}]'.format(record['report_year'],
                                               record['report_number'])
    return ''
|
def accuracy_score(actual, predicted):
    """Percentage of positions where ``actual`` and ``predicted`` agree.

    :param actual: sequence of true labels
    :param predicted: sequence of predicted labels (same length)
    :return: accuracy as a percentage in [0, 100]; 0.0 for empty input
    """
    if not actual:
        # Guard the empty case, which previously raised ZeroDivisionError.
        return 0.0
    correct = sum(1 for a, p in zip(actual, predicted) if a == p)
    return correct / float(len(actual)) * 100.0
|
def is_valid_mojang_uuid(uuid):
    """Validate a 32-character hex Mojang UUID (case-insensitive).

    https://minecraft-de.gamepedia.com/UUID
    """
    allowed_chars = '0123456789abcdef'
    allowed_len = 32
    uuid = uuid.lower()
    if len(uuid) != allowed_len:
        return False
    return all(char in allowed_chars for char in uuid)
|
def date_trigger_dict(ts_epoch):
    """A date trigger as a dictionary (fires once at ``ts_epoch``, UTC)."""
    return {'run_date': ts_epoch, 'timezone': 'utc'}
|
def normalize(value, dict_type=dict):
    """Normalize values: recursively lower-case dict keys, strip surrounding
    whitespace, and replace spaces with underscores; lists and tuples are
    walked element-wise."""
    if isinstance(value, dict):
        result = dict_type()
        for raw_key, item in value.items():
            clean_key = raw_key.strip().lower().replace(' ', '_')
            result[clean_key] = normalize(item, dict_type)
        return result
    if isinstance(value, list):
        return [normalize(item, dict_type) for item in value]
    if isinstance(value, tuple):
        return tuple(normalize(item, dict_type) for item in value)
    return value
|
def normalizeAddress(address):
    """Wrap an email address in angle brackets unless it already starts with one.

    We store addresses in this '<...>' form internally in the black- and
    whitelists.
    """
    return address if address.startswith("<") else "<" + address + ">"
|
def get_index_name(create_index_statement, unique_constraint):
    """
    Extract the index name from a create index statement.

    :param create_index_statement: The create index statement
    :param unique_constraint: True when the statement contains UNIQUE,
        which shifts the name one token to the right
    :return: The name of the index, with any 'schema.' qualifier stripped
    """
    tokens = create_index_statement.split(' ', 6)
    position = 3 if unique_constraint else 2
    candidate = tokens[position]
    # Drop a leading schema qualifier when present.
    if "." in candidate:
        return candidate.split('.')[1]
    return candidate
|
def _as_set(spec):
    """Uniform representation for args which may be a name or list of names."""
    if spec is None:
        return []
    if isinstance(spec, str):
        return [spec]
    # Mappings contribute their values; any other iterable is used directly.
    if hasattr(spec, 'values'):
        return set(spec.values())
    return set(spec)
|
def create_bbox(boundries):
    """Reorder boundary values into the BBox tuple used to size plot figures."""
    bbox = (boundries[3], boundries[2], boundries[1], boundries[0])
    return bbox
|
def make_poly(bit_length, msb=False):
    """Make an `int` "degree polynomial" in which each bit represents a
    degree whose coefficient is 1.

    :param int bit_length: The amount of bits to play with
    :param bool msb: `True` sets only the MSBit to 1; `False` sets all bits.
    """
    n_bytes = int(bit_length / 8)
    if msb:
        return 1 << (8 * n_bytes - 1)
    result = 0
    # OR together one 0xff byte per whole byte of the requested length.
    for byte_index in range(n_bytes):
        result |= 0xff << (8 * byte_index)
    return result
|
def __ior__(self, other):  # supports syntax S |= T
    """Update this set in place to be the union of itself and ``other``."""
    for element in other:
        self.add(element)
    # In-place operators are required to return self.
    return self
|
def ok_for_raw_triple_quoted_string(s: str, quote: str) -> bool:
    """
    Is this string representable inside a raw triple-quoted string?

    Because backslashes are always treated literally in raw strings, some
    strings cannot be represented: any containing the triple quote, or
    ending in the quote character or a backslash.

    >>> ok_for_raw_triple_quoted_string("blah", quote="'")
    True
    >>> ok_for_raw_triple_quoted_string("'", quote="'")
    False
    >>> ok_for_raw_triple_quoted_string("a ''' b", quote="'")
    False
    """
    if quote * 3 in s:
        return False
    if s and s[-1] in (quote, '\\'):
        return False
    return True
|
def trim_spaces_from_args(args):
    """
    Strip surrounding whitespace from every string value of ``args``.

    :param args: Dict to trim spaces from (modified in place)
    :type args: dict
    :return: the same dict, for convenience
    """
    for key in args:
        value = args[key]
        if isinstance(value, str):
            args[key] = value.strip()
    return args
|
def _delist(val):
    """Return the single element of a one-element list or set; otherwise
    return ``val`` unchanged."""
    if isinstance(val, (list, set)) and len(val) == 1:
        # Bug fix: sets don't support indexing, so ``val[0]`` raised
        # TypeError for one-element sets; take the sole element by iteration.
        return next(iter(val))
    return val
|
def non_repeating(given_string):
    """
    Return the first character that appears exactly once in the string, or
    None when every character repeats.

    Counts every character in one pass, then rescans the string in order —
    a plain dict scan would not guarantee the *first* unique character, so
    the second pass over the original string restores the ordering.
    """
    counts = {}
    for ch in given_string:
        counts[ch] = counts.get(ch, 0) + 1
    for ch in given_string:
        if counts[ch] == 1:
            return ch
    return None
|
def remove_numbers(s):
    """Remove any digit characters from a string.

    Args:
        s (str): A string that needs numbers removed
    Returns:
        The formatted string with no digits
    """
    return ''.join(ch for ch in s if not ch.isdigit())
|
def rev_comp_motif(motif):
    """
    Return the reverse complement of the input motif (IUPAC-aware).
    """
    COMP = {"A": "T", "T": "A", "C": "G", "G": "C",
            "W": "S", "S": "W", "M": "K", "K": "M",
            "R": "Y", "Y": "R", "B": "V", "D": "H",
            "H": "D", "V": "B", "N": "N", "X": "X",
            "*": "*"}
    return "".join(COMP[base] for base in reversed(motif))
|
def _parse_id(func):
    """Return (module basename, function name) for ``func`` as a 2-tuple."""
    module_name = func.__module__.split(".")[-1]
    return (module_name, func.__name__)
|
def split(ds, partition):
    """Split the dataset into consecutive chunks according to a partition.

    Parameters
    ----------
    ds : sliceable dataset
    partition : sequence of relative sizes; each chunk receives
        ``int(len(ds) * p / sum(partition))`` items

    Returns
    -------
    list of dataset slices, in order
    """
    n_data = len(ds)
    total = sum(partition)
    # Convert relative partition weights into absolute chunk sizes.
    sizes = [int(n_data * p / total) for p in partition]
    chunks = []
    start = 0
    for size in sizes:
        chunks.append(ds[start:start + size])
        start += size
    return chunks
|
def cell(data, label, spec):
    """
    Format the cell of a latex table, wrapping content containing '^' or
    '_' in math mode unless it already contains '$'.

    Parameters
    ----------
    data : string
        string representation of cell content
    label : string
        optional cell label, used for tooltips
    spec : dict
        options for the formatters

    Returns
    -------
    string
    """
    needs_math = ("^" in data or "_" in data) and "$" not in data
    if needs_math:
        return "$" + data + "$"
    return data
|
def xgboost_problem_type(hyperparameters: dict) -> str:
    """XGBoost problem type finder.

    Inspect the ``objective`` hyperparameter to decide whether the model
    solves a regression or a classification problem.

    :param dict hyperparameters: hyperparameters for the selected training
        method.
    :return: - problem_to_solve: 'regression' or 'classification'.
    """
    try:
        # The default objective is reg:squarederror; the user may omit it.
        objective = hyperparameters["objective"]
        problem_to_solve = "regression" if objective.startswith("reg") else "classification"
    except (KeyError, AttributeError) as e:
        # Narrowed from a bare ``except Exception``: only a missing key or a
        # non-string objective should trigger the regression fallback.
        print(f"The objective is not defined. The default value is reg:squarederror. Error: {e}")
        problem_to_solve = "regression"
    return problem_to_solve
|
def setup_permissions(bot):
    """Use this decorator to return a dictionary of required permissions."""
    permissions = {}
    permissions['read_messages'] = "This is a dummy additional permission."
    permissions['change_nickname'] = "This allows the bot to change its own nickname."
    return permissions
|
def rotate_list(l):
    """
    Transpose a nested list of (x, y) dimensions into an (y, x) list.

    :param l: a 2-nested list, assumed row-first; the result is column-first
    """
    n_rows = len(l)
    n_cols = len(l[0])
    result = [[None] * n_rows for _ in range(n_cols)]
    for row_idx, row in enumerate(l):
        for col_idx, value in enumerate(row):
            result[col_idx][row_idx] = value
    return result
|
def span(array):
    """
    Span of the array: the difference between the last and first elements,
    i.e. array[len(array) - 1] - array[0].
    """
    first, last = array[0], array[-1]
    return last - first
|
def rev_word(s):
    """
    Reverse the order of the space-separated words in ``s``.

    Splitting on single spaces and dropping the empty pieces collapses runs
    of spaces, which matches a manual scan for word boundaries.
    """
    words = [word for word in s.split(' ') if word]
    words.reverse()
    return " ".join(words)
|
def desanitise(dic):
    """Return a copy of the dictionary with the leading underscore removed
    from every key."""
    # Dropping the first character of each key strips the underscore prefix.
    return {key[1:]: value for key, value in dic.items()}
|
def size_color_unique_id(unique_id=999999999):
    """Return an interpolated (r, g, b) color for a given id.

    The id's three low bytes are extracted and each is rescaled as if it
    were a value out of 1000.
    """
    red = (unique_id >> 16) & 0xff
    green = (unique_id >> 8) & 0xff
    blue = unique_id & 0xff
    try:
        scaled = tuple(int(float(channel) / 1000 * 255)
                       for channel in (red, green, blue))
        return scaled
    except:
        # Fall back to white on any arithmetic surprise.
        return (255, 255, 255)
|
def gini_index(groups: list, class_values: list) -> float:
    """
    Calculate the Gini index for a split dataset (0 is a perfect split).

    :param groups: groups of rows; each row's last item is its class value
    :param class_values: class values present in the groups
    :return: gini index
    """
    gini = 0.0
    for class_value in class_values:
        for group in groups:
            if not group:
                continue  # empty groups contribute nothing
            labels = [row[-1] for row in group]
            proportion = labels.count(class_value) / float(len(group))
            gini += proportion * (1.0 - proportion)
    return gini
|
def transDataRow(origins, pep, protDict):
    """
    Called by writeToFasta(), this function takes the trans origin data for
    a given peptide and formats it for writing to the csv file.

    :param origins: where the peptide was found within the input proteins.
        Each entry is either the literal True (peptide formed only by
        cleavages under max length) or a [protName, startIndex, endIndex]
        triple locating an individual cleavage.
    :param pep: the peptide which the origin data relates to.
    :param protDict: dict mapping protein names to sequences; needed to
        return slight differences in the peptide and origin due to the
        program not treating I/J differently.
    :return dataRows: list of rows to write; either
        [pep, explanation] for a True marker, or
        [protName, pep, pepInProt, [start1Based, endInclusive]].
    """
    dataRows = []
    for location in origins:
        # Fix: identity check for the boolean marker instead of "== True",
        # which would also match the integer 1 (a plausible start index).
        if location is True:
            dataRows.append([pep, "Formed only by cleavages under max length."])
        else:
            protName = location[0]
            startRef = location[1]
            endRef = location[2] + 1  # exclusive end for slicing
            pepInProt = protDict[protName][startRef:endRef]
            # Location is reported 1-based for human readers.
            dataRows.append([protName, pep, pepInProt, [startRef + 1, endRef]])
    return dataRows
|
def apply_prot_to_nuc(aligned_prot, nuc):
    """
    Use an aligned protein sequence to update the corresponding nucleotide
    sequence: every amino-acid gap becomes a codon gap '---'.

    :param aligned_prot: str, aligned amino acid sequence
    :param nuc: str, original nucleotide sequence
    :return: str, nucleotide sequence with codon gaps
    """
    codons = []
    pos = 0
    for residue in aligned_prot:
        if residue == '-':
            codons.append('---')
        else:
            # Consume the next codon of the original sequence.
            codons.append(nuc[pos:pos + 3])
            pos += 3
    return ''.join(codons)
|
def make_pair_table(ss, base=0, chars=('.',)):
    """Return a secondary structure in form of pair table.

    Args:
        ss (str): secondary structure in dot-bracket format
        base (int, optional): choose between a pair-table with base 0 or 1
        chars (iterable, optional): characters to be ignored, default: ('.',)

    **Example:**
       base=0: ((..)). => [5,4,-1,-1,1,0,-1]
          i.e. start counting from 0, unpaired = -1
       base=1: ((..)). => [7,6,5,0,0,2,1,0]
          i.e. start counting from 1, unpaired = 0, pt[0]=len(ss)

    Returns:
        [list]: A pair-table

    Raises:
        ValueError: on an unsupported *base* or an unexpected character.
        RuntimeError: on unbalanced brackets.
    """
    stack = []
    # Fix: was "base is 0" -- identity comparison with an int literal is a
    # SyntaxWarning on Python >= 3.8 and only works via CPython int caching.
    if base == 0:
        pt = [-1] * len(ss)
    elif base == 1:
        pt = [0] * (len(ss) + base)
        pt[0] = len(ss)
    else:
        raise ValueError(
            "unexpected value in make_pair_table: (base = {})".format(base))
    ignored = set(chars)  # build the ignore-set once, not per character
    for i, char in enumerate(ss, base):
        if char == '(':
            stack.append(i)
        elif char == ')':
            try:
                j = stack.pop()
            except IndexError:
                raise RuntimeError(
                    "Too many closing brackets in secondary structure")
            pt[i] = j
            pt[j] = i
        elif char not in ignored:
            raise ValueError(
                "unexpected character in sequence: '" + char + "'")
    if stack:
        raise RuntimeError("Too many opening brackets in secondary structure")
    return pt
|
def create_network_config_kubernetes(config):
    """Create a BIG-IP Network configuration from the Kubernetes config.

    Args:
        config: Kubernetes BigIP config which contains openshift-sdn defs
    """
    network = {}
    # EAFP: copy the openshift-sdn definitions under 'fdb' when present.
    try:
        network['fdb'] = config['openshift-sdn']
    except KeyError:
        pass
    return network
|
def get_class_labels(labels_map):
    """Returns the list of class labels from the given labels map.

    Labels are returned for indexes sequentially from
    `min(1, min(labels_map))` to `max(labels_map)`; any index missing
    from the map is given the label "class <index>".

    Args:
        labels_map: a dictionary mapping indexes to label strings
    Returns:
        a list of class labels
    """
    lo = min(1, min(labels_map))
    hi = max(labels_map)
    labels = []
    for idx in range(lo, hi + 1):
        labels.append(labels_map.get(idx, "class %d" % idx))
    return labels
|
def layer(height: int) -> int:
    """
    Max number of nodes in the lowest layer of a tree of given height,
    i.e. 2 ** (height - 1).
    """
    assert height > 0
    return 1 << (height - 1)
|
def set_size(width, fraction=1, subplots=(1, 1)):
    """
    Set figure dimensions to avoid scaling in LaTeX.

    Height is derived from the golden ratio, scaled by the subplot grid
    aspect (rows / cols). *fraction* is accepted for interface
    compatibility but not used.
    """
    GOLDEN_RATIO = 1.618
    nrows, ncols = subplots
    fig_height = (width / GOLDEN_RATIO) * (nrows / ncols)
    return width, fig_height
|
def calc_max_quant_value(bits):
    """Calculate the maximum symmetric quantized value for *bits* bits,
    i.e. 2**(bits - 1) - 1."""
    half_range = 2 ** (bits - 1)
    return half_range - 1
|
def get_cluster_label(cluster_id):
    """
    It assigns a cluster label according to the cluster id that is
    supplied.
    It follows the criterion from below:
    Cluster id | Cluster label
    0 --> A
    1 --> B
    2 --> C
    25 --> Z
    26 --> AA
    27 --> AB
    28 --> AC
    702 --> AAA

    Parameters
    ----------
    cluster_id : int
        The id of the cluster that will be used to generate the label

    Returns
    -------
    cluster_label : str
        The cluster label according to the supplied id and the criterion
        mentioned above
    """
    from string import ascii_uppercase

    # Fix: the previous hand-rolled carry logic never reset a 'Z' digit to
    # 'A' when carrying, so ids >= 702 produced wrong labels (702 -> 'AZA'
    # instead of 'AAA') or raised IndexError (e.g. 728). This is standard
    # bijective base-26: take the digit, then shift the quotient down by 1
    # because 'A' means 0 in the last position but 1 in higher positions.
    cluster_label = ''
    current_index = cluster_id
    while current_index >= 0:
        cluster_label = ascii_uppercase[current_index % 26] + cluster_label
        current_index = current_index // 26 - 1
    return cluster_label
|
def is_utf8(bs):
    """Return True if the byte string *bs* decodes cleanly as UTF-8."""
    try:
        bs.decode('utf-8')
    except UnicodeDecodeError:
        return False
    return True
|
def get_matrix_diff_coords(indices):
    """Return (row, col) coordinates for all off-diagonal elements of the
    square grid spanned by *indices*."""
    coords = []
    for row in indices:
        for col in indices:
            if row != col:
                coords.append((row, col))
    return coords
|
def public_notification(title=None, alert=None, summary=None):
    """Android L public notification payload builder.

    :keyword title: Optional string. The notification title.
    :keyword alert: Optional string. The notification alert.
    :keyword summary: Optional string. The notification summary.
    """
    result = {}
    # Keep only the fields the caller actually supplied.
    for key, val in (("title", title), ("alert", alert), ("summary", summary)):
        if val is not None:
            result[key] = val
    return result
|
def _build_tree_string(root, curr_index, index=False, delimiter='-'):
    """
    Recursively walk down the binary tree and build a pretty-print string.
    Source: https://github.com/joowani/binarytree

    Returns a 4-tuple:
      * box: list of strings, one per rendered output line
      * width: width of the box (length of its first line)
      * start: column where this node's repr begins in the first line
      * end: column where this node's repr ends in the first line

    NOTE(review): nodes are assumed to expose .left, .right, .value, .color
    and a .leaf flag marking sentinel leaves -- confirm against the node
    class used elsewhere in this project.
    """
    # Empty subtree (or sentinel leaf) renders as an empty zero-width box.
    if root is None or root.leaf:
        return [], 0, 0, 0
    line1 = []
    line2 = []
    # Node representation: "index-value" when index mode is on, otherwise
    # the value followed by the node's color tag.
    if index:
        node_repr = '{}{}{}'.format(curr_index, delimiter, root.value)
    else:
        node_repr = str(root.value) + root.color
    new_root_width = gap_size = len(node_repr)
    # Get the left and right sub-boxes, their widths, and root repr positions
    l_box, l_box_width, l_root_start, l_root_end = \
        _build_tree_string(root.left, 2 * curr_index + 1, index, delimiter)
    r_box, r_box_width, r_root_start, r_root_end = \
        _build_tree_string(root.right, 2 * curr_index + 2, index, delimiter)
    # Draw the branch connecting the current root node to the left sub-box
    # Pad the line with whitespaces where necessary
    if l_box_width > 0:
        l_root = (l_root_start + l_root_end) // 2 + 1
        line1.append(' ' * (l_root + 1))
        line1.append('_' * (l_box_width - l_root))
        line2.append(' ' * l_root + '/')
        line2.append(' ' * (l_box_width - l_root))
        new_root_start = l_box_width + 1
        gap_size += 1
    else:
        new_root_start = 0
    # Draw the representation of the current root node
    line1.append(node_repr)
    line2.append(' ' * new_root_width)
    # Draw the branch connecting the current root node to the right sub-box
    # Pad the line with whitespaces where necessary
    if r_box_width > 0:
        r_root = (r_root_start + r_root_end) // 2
        line1.append('_' * r_root)
        line1.append(' ' * (r_box_width - r_root + 1))
        line2.append(' ' * r_root + '\\')
        line2.append(' ' * (r_box_width - r_root))
        gap_size += 1
    new_root_end = new_root_start + new_root_width - 1
    # Combine the left and right sub-boxes with the branches drawn above
    gap = ' ' * gap_size
    new_box = [''.join(line1), ''.join(line2)]
    for i in range(max(len(l_box), len(r_box))):
        # Shorter sub-box is padded with blank lines of its own width.
        l_line = l_box[i] if i < len(l_box) else ' ' * l_box_width
        r_line = r_box[i] if i < len(r_box) else ' ' * r_box_width
        new_box.append(l_line + gap + r_line)
    # Return the new box, its width and its root repr positions
    return new_box, len(new_box[0]), new_root_start, new_root_end
|
def tag_to_regex(tag_name, tags):
    """Convert a decomposition rule tag into regex notation.

    Parameters
    ----------
    tag_name : str
        Tag to convert to regex notation.
    tags : dict
        Tags to consider when converting to regex.

    Returns
    -------
    str
        Tag converted to regex notation (a word-bounded alternation,
        e.g. r'\\b(x|y|z)\\b'). Empty if `tag_name` is not in `tags`.
    """
    if tag_name not in tags:
        return ''
    alternation = '|'.join(tags[tag_name])
    return r'\b(' + alternation + r')\b'
|
def get_most_visible_camera_annotation(camera_data_dict: dict) -> dict:
    """
    Get the most visible camera's annotation.

    Each camera view carries a 'visibility_token' (numeric string) and a
    'bbox_area'. The annotation with the highest visibility wins; ties are
    broken by the larger bounding-box area.

    :param camera_data_dict: Dictionary mapping camera tokens (e.g.
        'CAM_BACK') to annotation dicts that include at least
        'visibility_token' and 'bbox_area'.
    :return: The camera annotation with highest visibility.
    """
    # Fix: visibility tokens were compared as strings, which orders
    # lexicographically (e.g. '4' > '10'). Compare numerically instead.
    # NOTE(review): assumes every visibility_token parses as an int
    # (nuScenes uses '1'..'4') -- confirm against the data source.
    best_visibility = -1
    largest_area = -1
    best_camera_token = None
    for camera_token, annotation in camera_data_dict.items():
        visibility = int(annotation['visibility_token'])
        bbox_area = annotation['bbox_area']
        if visibility > best_visibility or (
                visibility == best_visibility and bbox_area > largest_area):
            best_camera_token = camera_token
            largest_area = bbox_area
            best_visibility = visibility
    if best_camera_token is None:
        print('Unable to find any good views for camera data dict: {}'.format(
            camera_data_dict))
    return camera_data_dict[best_camera_token]
|
def maybe_coerce_with(converter, obj, **kwargs):
"""Apply converter if str, pass through otherwise."""
obj = getattr(obj, "original_object", obj)
return converter(obj, **kwargs) if isinstance(obj, str) else obj
|
def getUniqueValuesFromList(inlist):
    """
    Return the unique items of *inlist*, preserving the order in which
    each item first appears (items need not be hashable).

    Parameters
    ----------
    inlist (list): List containing items to get unique values from

    Returns
    -------
    values(list): list containing unique values in order of first appearance
    """
    unique_items = []
    for item in inlist:
        # Linear membership test keeps this working for unhashable items.
        if item not in unique_items:
            unique_items.append(item)
    return unique_items
|
def store_error_star(params, substep, state_history, state, policy_input):
    """
    State-update function: store the error_star state (the error between
    the target and market price), read straight from the policy input.
    """
    return "error_star", policy_input["error_star"]
|
def R13(FMzul, P, d2, DKm, mu_Gmin, mu_Kmin,
        MU=0, MKZu=0):
    """
    R13 Determining the tightening torque MA
    (Sec 5.4.3)

    Returns MA, or MAS = MA + MU + MKZu when a non-zero MU is given.
    """
    # Thread contribution plus under-head friction contribution.  (R13/1)
    thread_term = 0.16 * P + 0.58 * d2 * mu_Gmin
    head_term = mu_Kmin * (DKm / 2.0)
    MA = FMzul * (thread_term + head_term)
    if MU != 0:
        # (R13/1)
        return MA + MU + MKZu
    return MA
#
|
def skip_add(n):
    """Return n + (n-2) + (n-4) + ... + 0, counting down by twos.

    Implemented recursively -- no while/for loops.

    >>> skip_add(5)   # 5 + 3 + 1 + 0
    9
    >>> skip_add(10)  # 10 + 8 + 6 + 4 + 2 + 0
    30
    """
    if n <= 0:
        return 0
    return n + skip_add(n - 2)
|
def cid_with_annotation2(cid, expected_acc=None):
    """Given a cluster id, return cluster id with human readable annotation.
    e.g., c0 --> c0 isoform=c0
    c0/89/3888 -> c0/89/3888 isoform=c0;full_length_coverage=89;isoform_length=3888;expected_accuracy=0.99
    c0/f89p190/3888 -> c0/f89p190/3888 isoform=c0;full_length_coverage=89;non_full_length_coverage=190;isoform_length=3888;expected_accuracy=0.99
    """
    fields = cid.split('/')
    if len(fields) not in (1, 3):
        raise ValueError("Not able to process isoform id: {cid}".format(cid=cid))
    short_id = fields[0]
    fl_coverage = None
    nfl_coverage = None
    seq_len = None
    if len(fields) == 3:
        seq_len = fields[2]
        coverage_field = fields[1]
        if "f" not in coverage_field:
            # bare coverage number, e.g. "89"
            fl_coverage = coverage_field
        elif "p" in coverage_field:
            # e.g. "f89p190": FL coverage before 'p', non-FL after
            pieces = coverage_field.split('p')
            fl_coverage = pieces[0][1:]
            nfl_coverage = pieces[1]
        else:
            # e.g. "f89"
            fl_coverage = coverage_field[1:]
    annotations = ["isoform={short_id}".format(short_id=short_id)]
    if fl_coverage is not None:
        annotations.append("full_length_coverage={fl}".format(fl=fl_coverage))
    if nfl_coverage is not None:
        annotations.append("non_full_length_coverage={nfl}".format(nfl=nfl_coverage))
    if seq_len is not None:
        annotations.append("isoform_length={l}".format(l=seq_len))
    if expected_acc is not None:
        annotations.append("expected_accuracy={0:.3f}".format(expected_acc))
    return "{cid} {annotation}".format(cid=cid, annotation=";".join(annotations))
|
def cow_line_splitter(cow_line):
    """Turn one line of the cow data file into a [cow, weight] list."""
    trimmed = cow_line.strip()
    return trimmed.split(",")
|
def parse_version_req(version):
    """
    Convert a version string into a {'key': ..., 'value': ...} dict:
      * starts with ':' (or a 'channel:' prefix) -> it is a channel
      * starts with 'sha256:'                    -> it is a digest
      * otherwise                                -> it is a release version
    None is treated as the literal version "default".
    """
    if version is None:
        version = "default"
    if version[0] == ':' or version.startswith('channel:'):
        key, value = 'channel', version.split(':')[1]
    elif version.startswith('sha256:'):
        key, value = 'digest', version.split('sha256:')[1]
    else:
        key, value = 'version', version
    return {'key': key, 'value': value}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.