def iso2sec(iso: str) -> int:
"""
:param iso: e.g. 1:01:02
:return: sec in int
"""
arr = iso.split(':')
len_arr = len(arr)
if len_arr <= 3:
arr = ['0'] * (3 - len_arr) + arr
else:
raise Exception('len_arr > 3, arr: {}'.format(arr))
return int(arr[0]) * 60 * 60 + int(arr[1]) * 60 + int(arr[2])
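# Usage sketch (illustrative values, not part of the original snippet):
# shorter forms are zero-padded on the left before conversion.
assert iso2sec('1:01:02') == 3662
assert iso2sec('5:30') == 330
assert iso2sec('42') == 42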
|
def is_prime(n):
"""https://stackoverflow.com/questions/15285534/isprime-function-for-python-language"""
if n == 2 or n == 3: return True
if n < 2 or n%2 == 0: return False
if n < 9: return True
if n%3 == 0: return False
r = int(n**0.5)
f = 5
while f <= r:
if n%f == 0: return False
if n%(f+2) == 0: return False
        f += 6
return True
|
def createRasterPreset(steps, step_size, step_rest):
"""Creates a raster for one axis each
Parameters:
steps (int): amount of parts the axis shall be split into
steps_size (int): average pixel size for each step
step_rest (int): amount of pixels which are added in last iteration
Returns:
list: [step_start_coordinate, step_end_coordinate]
"""
output = []
counter = 0
for i in range(steps):
buffer = [counter]
counter += step_size
output.append(buffer + [counter])
output[-1][1] += step_rest
return output
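# Usage sketch (illustrative numbers, not part of the original snippet):
# a 305-pixel axis split into 3 steps of 100 px each, with the 5 leftover
# pixels folded into the last step.
assert createRasterPreset(3, 100, 5) == [[0, 100], [100, 200], [200, 305]]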
|
def COLOR1(obj):
"""Format an object into string of highlight color 1 (blue) in console.
Args:
obj: the object to be formatted.
Returns:
None
"""
return '\x1b[1;34m' + str(obj) + '\x1b[0m'
|
def country(country_code):
"""Extract the country from the given variable."""
return country_code[0:country_code.find('-')]
|
def email_is_valid(email):
"""
Check if email is a valid one
"""
at_pos = email.find("@")
dot_pos = email.find(".")
if at_pos == -1 or dot_pos == -1 or dot_pos == len(email) - 1 or dot_pos == at_pos + 1: #Various email format checks
return False
return True
|
def check_config_inputs(arg):
"""
Checks that all the data that should be numerical from that config
can be represented as a float.
Parameters
    ----------
    arg: unknown
        any argument can be passed.
    Returns
    -------
    is_number: Boolean
        Value is True if the arg is a number, False if not.
    """
    try:
        float(arg)
        return True
    except (TypeError, ValueError):
        return False
|
def ShortBranchName(branch):
"""Convert a name like 'refs/heads/foo' to just 'foo'."""
return branch.replace('refs/heads/', '')
|
def sample_lines():
"""Get some sample lines."""
return [
"This is some lines.",
"yeah quite a few.",
"In real life this would be real messages.",
"From real discord users.",
"But for privacy reasons.",
"And testing purposes.",
"We must instead provide some dummy messages.",
"In this list.",
"I am honestly not sure.",
"What other messages should go here.",
"But whatever.",
"This should be good enough.",
"Or so I thought.",
"For if there isn't enough message diversity,",
"There just arent any good results it seems?",
]
|
def drop_mid_name(name):
    """Get only the first and last name of a student."""
    names = name.split(" ")
    if len(names) < 2:
        return name
    return names[0] + ' ' + names[-1]
|
def capwords(s, sep=None):
"""capwords(s [,sep]) -> string
Split the argument into words using split, capitalize each
word using capitalize, and join the capitalized words using
join. If the optional second argument sep is absent or None,
runs of whitespace characters are replaced by a single space
and leading and trailing whitespace are removed, otherwise
sep is used to split and join the words.
"""
return (sep or ' ').join(x.capitalize() for x in s.split(sep))
|
def RPL_ENDOFINFO(sender, recipient, message):
    """ Reply Code 374 """
    return "<" + sender + ">: " + message
|
def calculate_config_dist(tpa, tpb):
"""Trivially calculate the distance of two configs"""
ca, cb = tpa[0], tpb[0]
num_diff_field = 0
for k in ca:
if ca[k] != cb[k]:
num_diff_field += 1
return num_diff_field
|
def is_list_of_str(value):
"""
Check if an object is a list of strings
:param value:
:return:
"""
return bool(value) and isinstance(value, list) and all(isinstance(elem, str) for elem in value)
|
def parse_row(row, fields):
"""Returns a well-formed JSON row containing interesting fields."""
output = {}
for index, field_name in fields.items():
output[field_name] = row[index]
return output
|
def numericrange_to_tuple(r):
"""Helper method to normalize NumericRange into a tuple."""
if r is None:
return (None, None)
lower = r.lower
upper = r.upper
    if lower is not None and not r.lower_inc:
        lower += 1
    if upper is not None and not r.upper_inc:
        upper -= 1
return lower, upper
|
def doubleclick(x, y, bombs, cal, flags):
"""
Shortcut for double click
:param x: horizontal position of the click
:param y: vertical position of the click
:param bombs: list of bomb positions
:param cal: number of the position
:param flags: list of flags
:return: status after the double click (0 for wrong judgment, 1 for right judgment, -1 for invalid double click),
list of positions to expand (empty list for status 0 or -1)
"""
tests = [(m, n) for m in (x - 1, x, x + 1) for n in (y - 1, y, y + 1) if (m, n) not in flags]
if cal == 9 - len(tests):
for t in tests:
if t in bombs:
return 0, []
return 1, tests
return -1, []
|
def sort_012(input_arr):
"""
    Given an input array consisting of only 0s, 1s, and 2s, sort the array in a single traversal.
Time Complexity O(n)
Space Complexity O(n)
Where n is the array size.
Args:
input_arr(array): Array to be sorted
Returns:
sorted_arr(array): Sorted array
"""
    # Verify that input_arr consists only of values between 0 and 2
for element in input_arr:
if element < 0 or element > 2:
return (-1, -1)
bin_zeros = []
bin_ones = []
bin_twos = []
for element in input_arr:
if element == 0:
bin_zeros.append(element)
elif element == 1:
bin_ones.append(element)
elif element == 2:
bin_twos.append(element)
sorted_arr = bin_zeros + bin_ones + bin_twos
return sorted_arr
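# Usage sketch (illustrative inputs, not part of the original snippet):
# values outside {0, 1, 2} trigger the (-1, -1) error marker.
assert sort_012([2, 0, 1, 2, 0]) == [0, 0, 1, 2, 2]
assert sort_012([0, 3, 1]) == (-1, -1)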
|
def parse_service_uri(uri):
"""Parses a service uri into separate provider, service, and catalog_id strings.
Examples:
usgs-nwis:dv/0800345522
gebco-bathymetry
usgs-ned:1-arc-second
Args:
uri (string): A string that is a uri.
    Returns:
        tuple: (provider, service, catalog_id); service and catalog_id may be None.
    """
svc, catalog_id = (uri.split('://')[-1].split('/', 1) + [None])[:2]
provider, service = (svc.split(':') + [None])[:2]
return provider, service, catalog_id
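# Usage sketch (URIs taken from the docstring examples; the expected tuples
# are my own illustration):
assert parse_service_uri('usgs-nwis:dv/0800345522') == ('usgs-nwis', 'dv', '0800345522')
assert parse_service_uri('gebco-bathymetry') == ('gebco-bathymetry', None, None)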
|
def get_underlying_address_objects(address_group_name, name_to_address_groups, name_to_addresses):
"""
    address_group_name: Name of the AddressGroup to resolve
    name_to_address_groups: Dict of name -> AddressGroup objects
    name_to_addresses: Dict of name -> Address objects
    Return: A flat list of member address names, with nested groups resolved recursively
"""
addresses = []
for member_name in name_to_address_groups[address_group_name]:
if member_name in name_to_address_groups:
addresses += get_underlying_address_objects(member_name, name_to_address_groups, name_to_addresses)
elif member_name in name_to_addresses:
addresses += [member_name]
else:
raise Exception(f"Unresolved member name '{member_name}'. This shouldn't be possible!")
return addresses
|
def drop_empty_lists(X):
"""
:param X: a data matrix: a list wrapping a list of strings, with each sublist being a sentence.
    :return: the same matrix with empty sublists removed.
>>> drop_empty_lists([['Catullus'], [], [], ['I.', 'ad', 'Cornelium'], ['Cui', 'dono', 'lepidum', 'novum', 'libellum', 'arida', 'modo', 'pumice', 'expolitum', '?']])
[['Catullus'], ['I.', 'ad', 'Cornelium'], ['Cui', 'dono', 'lepidum', 'novum', 'libellum', 'arida', 'modo', 'pumice', 'expolitum', '?']]
"""
return [sentence for sentence in X if sentence]
|
def flatten(lst):
"""
    Flattens a list of lists by one level.
:param lst: list of lists
:return: list
"""
return [subelem for elem in lst for subelem in elem]
|
def find_b(p, a):
    """Calculates b for a p and a combo using the derived formula
    b = (p**2 - 2*a*p) / (2*(p - a)), kept in integer arithmetic.
    Returns False if b is not an integer."""
    num = p * p - 2 * a * p
    den = 2 * (p - a)
    if num % den:
        return False
    return num // den
|
def as_request_params(**kwargs):
"""Coerce kwargs into a tuple of param=value strings."""
return tuple('{0}={1}'.format(k, v) for k, v in kwargs.items())
|
def parse_response_browse_node(browse_nodes_response_list):
"""
The function parses Browse Nodes Response and creates a dict of BrowseNodeID to AmazonBrowseNode object
    :param browse_nodes_response_list: List of BrowseNodes in GetBrowseNodes response
    :return: Dict of BrowseNodeID to AmazonBrowseNode object
"""
mapped_response = {}
for browse_node in browse_nodes_response_list:
mapped_response[browse_node.id] = browse_node
return mapped_response
|
def syncsafe(num, size):
""" Given a number, sync safe it """
result = ''
for i in range(0,size):
x = (num >> (i*7)) & 0x7f
result = chr(x) + result
return result
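# Usage sketch (illustrative, not part of the original snippet): 255 split
# into 7-bit groups is (1, 127), most significant group first.
assert [ord(c) for c in syncsafe(255, 2)] == [1, 127]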
|
def dedup_and_title_case_names(names):
"""Should return a list of names, each name appears only once"""
return list(set(name.title() for name in names))
|
def token_accuracy(references, hypotheses, level="word"):
"""
Compute the accuracy of hypothesis tokens: correct tokens / all tokens
Tokens are correct if they appear in the same position in the reference.
    :param references: list of references (strings)
    :param hypotheses: list of hypotheses (strings)
    :param level: segmentation level, either "word", "bpe", or "char"
    :return: token accuracy in percent
    """
    correct_tokens = 0
    all_tokens = 0
    assert len(hypotheses) == len(references)
    for hyp, ref in zip(hypotheses, references):
        # tokenize: "char" level treats every character as a token,
        # since str.split("") is not allowed
        hyp_tokens = list(hyp) if level == "char" else hyp.split(" ")
        ref_tokens = list(ref) if level == "char" else ref.split(" ")
        all_tokens += len(hyp_tokens)
        # min(len(hyp_tokens), len(ref_tokens)) tokens considered
        for h_i, r_i in zip(hyp_tokens, ref_tokens):
            if h_i == r_i:
                correct_tokens += 1
    return (correct_tokens / all_tokens) * 100 if all_tokens > 0 else 0.0
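# Usage sketch (illustrative strings, not part of the original snippet):
# two of the three whitespace-separated tokens match positionally.
assert round(token_accuracy(["a b d"], ["a b c"]), 2) == 66.67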
|
def linha_para_inteiro(l):
"""
Devolve um inteiro correspondente a linha da posicao inserida.
:param l: string, linha da posicao.
:return: int, valor correspondente da linha.
"""
l_int = {'1': 0, '2': 1, '3': 2}
return l_int[l]
|
def gmof(res, sigma):
"""
Geman-McClure error function
- residual
- sigma scaling factor
"""
x_squared = res ** 2
sigma_squared = sigma ** 2
return (sigma_squared * x_squared) / (sigma_squared + x_squared)
|
def get_by_path(dic, path):
"""
Gets a key (can be specified via dot notation) `path` in a nested `dic`
"""
parts = path.split('.')
loc = dic
for part in parts:
if part not in loc:
raise KeyError(path)
loc = loc[part]
return loc
|
def is_country(feat_code: str):
"""Test a feature code"""
if not feat_code: return False
return feat_code.startswith("PCL")
|
def get_page_filename(page_path: str) -> str:
"""Generate a filename for a rendered page."""
return f'{page_path}.j2'
|
def get_first_key(dictionary, value):
"""Get first key in a dict for a given value.
:param dict dictionary:
:param string value:
"""
for key, val in dictionary.items():
if val == value:
return key
return None
|
def elapsed_time(seconds, suffixes=['y','w','d','h','m','s'], add_s=False, separator=' '):
"""
Takes an amount of seconds and turns it into a human-readable amount of time.
"""
# the formatted time string to be returned
time = []
# the pieces of time to iterate over (days, hours, minutes, etc)
    # - the first piece in each tuple is the suffix (y, w, d, h, m, s)
# - the second piece is the length in seconds (a day is 60s * 60m * 24h)
parts = [(suffixes[0], 60 * 60 * 24 * 7 * 52),
(suffixes[1], 60 * 60 * 24 * 7),
(suffixes[2], 60 * 60 * 24),
(suffixes[3], 60 * 60),
(suffixes[4], 60),
(suffixes[5], 1)]
# for each time piece, grab the value and remaining seconds, and add it to
# the time string
for suffix, length in parts:
value = seconds / length
floored = int(value)
if floored > 0:
seconds = seconds % length
time.append('%d%s' % (floored,
(suffix, (suffix, suffix + 's')[floored > 1])[add_s]))
if seconds < 1:
break
return separator.join(time)
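# Usage sketch (illustrative, not part of the original snippet): 90061
# seconds is one day, one hour, one minute and one second.
assert elapsed_time(90061) == '1d 1h 1m 1s'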
|
def f(z):
"""
A function of metallicity Z, which we will set as the yield of Sr from
core-collapse supernovae.
"""
return 3.5e-8 * (z / 0.014)
|
def calc_D_lin(c, D_params):
"""
Estimates diffusivity [m^2/s] as a function of the concentration c [kg/m^3]
assuming a linear model (parameters obtained from lin_fit_D_c()).
"""
a, b = D_params
return a*c + b
|
def encrypt_this(text: str) -> str:
"""
Encrypts each word in the message using the following rules:
* The first letter needs to be converted to its ASCII code.
* The second letter needs to be switched with the last letter
Keepin' it simple: There are no special characters in input.
:param text: a string containing space separated words
:return: secret messages which can be deciphered by the "Decipher this!" kata
"""
if not text:
return ""
results = list()
for word in text.split(' '):
if len(word) == 1:
results.append("{}".format(ord(word[0])))
elif len(word) == 2:
results.append("{}{}".format(ord(word[0]), word[-1]))
else:
results.append("{}{}{}{}".format(
ord(word[0]), word[-1], word[2:-1], word[1]))
return ' '.join(results)
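# Usage sketch (input style borrowed from the kata this snippet implements;
# the expected outputs are my own checks):
assert encrypt_this("Hello") == "72olle"
assert encrypt_this("A wise old owl") == "65 119esi 111dl 111lw"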
|
def _fmt(key, value, template='s'):
""" Helper SQL formatting to feed parametrized queries
Examples:
>>> _fmt('name', 'yogi')
"name = %(name)s"
>>> _fmt('id >=', 10)
"id >= %(id)s"
"""
ctypes = {tuple: 'in'}
if ' ' in key:
key, compare = key.split()
else:
compare = ctypes.get(type(value), '=')
fmtd_string = '{k} {c} %({k}){t}'.format(k=key, c=compare, t=template)
return fmtd_string
|
def valid_book_definition(name, author, year, pages):
"""Does some simple checks on whether the provided arguments are sufficient to defien a book"""
if len(name) <= 1:
return False
if len(author) <= 1:
return False
try:
int(pages)
int(year)
except ValueError:
return False
return True
|
def _parseTCP(factory, port, interface="", backlog=50):
"""
Internal parser function for L{_parseServer} to convert the string
arguments for a TCP(IPv4) stream endpoint into the structured arguments.
@param factory: the protocol factory being parsed, or C{None}. (This was a
leftover argument from when this code was in C{strports}, and is now
mostly None and unused.)
@type factory: L{IProtocolFactory} or C{NoneType}
@param port: the integer port number to bind
@type port: C{str}
@param interface: the interface IP to listen on
@param backlog: the length of the listen queue
@type backlog: C{str}
    @return: a 2-tuple of (args, kwargs), describing the parameters to
        L{IReactorTCP.listenTCP} (or, modulo argument 2, the factory,
        arguments to L{TCP4ServerEndpoint}).
"""
return (int(port), factory), {'interface': interface,
'backlog': int(backlog)}
|
def valid_if_true(value, should_raise=True):
"""Validator that passes if the input is True."""
if value is True:
return True
if should_raise:
raise TypeError
return False
|
def fib_recursion(n):
"""Many duplicated calculation"""
if n < 3:
return 1
else:
print("{0} = {1} + {2}".format(n, n - 1, n - 2))
return fib_recursion(n - 1) + fib_recursion(n - 2)
|
def underscored2camel_case(v):
"""Converts ott_id to ottId."""
vlist = v.split('_')
c = []
for n, el in enumerate(vlist):
if el:
if n == 0:
c.append(el)
else:
c.extend([el[0].upper(), el[1:]])
return ''.join(c)
|
def fspath(path):
"""Return the path representation of a path-like object.
If str or bytes is passed in, it is returned unchanged. Otherwise the
os.PathLike interface is used to get the path representation. If the
path representation is not str or bytes, TypeError is raised. If the
provided path is not str, bytes, or os.PathLike, TypeError is raised.
"""
if isinstance(path, (str, bytes)):
return path
# Work from the object's type to match method resolution of other magic
# methods.
path_type = type(path)
try:
path_repr = path_type.__fspath__(path)
except AttributeError:
if hasattr(path_type, '__fspath__'):
raise
else:
raise TypeError("expected str, bytes or os.PathLike object, "
"not " + path_type.__name__)
if isinstance(path_repr, (str, bytes)):
return path_repr
else:
raise TypeError("expected {}.__fspath__() to return str or bytes, "
"not {}".format(path_type.__name__,
type(path_repr).__name__))
|
def poly3(x,C0,C1,C2,C3):
"""
Calculate a polynomial function of degree 3 with a single variable 'x'.
Parameters
----------
x : numeric
Input variable.
C0, C1, C2, C3 : numeric
Polynomial coefficients
Returns
-------
numeric
Result of the polynomial function.
"""
return C0+C1*x+C2*x**2+C3*x**3
|
def foldr(function, lst, initial):
    """
    given a function, a list, and an initial accumulator,
    fold (reduce) each item into the accumulator from the right using function(item, accumulator)
    """
    for item in lst[::-1]:
        initial = function(item, initial)
    return initial
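# Usage sketch (illustrative, not part of the original snippet): a right
# fold of subtraction evaluates 1 - (2 - (3 - 0)).
assert foldr(lambda item, acc: item - acc, [1, 2, 3], 0) == 2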
|
def create_mapnik_config(layer_name, file_path, layer_srs):
""" Creates a mapnik config file
file_path is the absolute path to
the geotiff file """
return """
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE Map[]>
<Map srs="+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0.0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +over" font-directory="./fonts">
<Style name="raster-style">
<Rule>
<RasterSymbolizer>
<RasterColorizer default-mode="linear" default-color="white" epsilon="0.001">
<stop color="#a6611a" value = "0" />
<stop color="#dfc27d" value = "25" />
<stop color="#f5f5f5" value = "100" />
<stop color="#80cdc1" value = "175"/>
<stop color="#018571" value = "250"/>
</RasterColorizer>
</RasterSymbolizer>
</Rule>
</Style>
<Layer name="{}" status="on" srs="{}">
<StyleName>raster-style</StyleName>
<Datasource>
<Parameter name="type">gdal</Parameter>
<Parameter name="file">{}</Parameter>
<Parameter name="format">tiff</Parameter>
<Parameter name="band">1</Parameter>
</Datasource>
</Layer>
</Map>
""".format(layer_name, layer_srs, file_path)
|
def flipXYZ(oldXYZ): # This is an example of a nice Modular function.
"""Flip XY coords of an XYZ coordinate string
This function inverts the xy coordinates generated by afni's
3dclust command. It is presently acting as a sort of Helper
function to getClusterStats function defined below.
Params:
oldXYZ -- a string containing the original xyz coordinates
oldXYZ == '58 42 8'
Returns:
xyz -- a new string with the inverted xy + z coordinates
xyz == '-58 -42 8'
"""
coordList = oldXYZ.split()
x = int(coordList[0]) * -1
y = int(coordList[1]) * -1
xyz = ' '.join([str(x), str(y), coordList[2]])
return xyz
|
def friendly_display(port, begin=8000):
    """For when you want some 'web' ports: show port as an offset from begin."""
    ret = port - begin
    if ret < 0 or port < 0:
        raise ValueError('negative values are not allowed')
    return ret
|
def getcomplementaryUnitsPairs(refined):
"""This function ensures that two units do not share the
same functionality and planarity. At the next stage,
further complementarity based on complementarity of
building units and directionality needs to be encoded."""
complementarypairs = []
    for i, unit1 in enumerate(refined):
        for unit2 in refined[i+1:]:
if unit1['Binding_Functionality'] != unit2['Binding_Functionality'] \
and unit1['Planarity_Type'] != unit2['Planarity_Type']:
complementarypairs.append([unit1, unit2])
return complementarypairs
|
def _ring_port(ring_path, node):
"""Determine correct port from relation settings for a given ring file."""
for name in ['account', 'object', 'container']:
if name in ring_path:
            return node['{}_port'.format(name)]
|
def __get_average_kor__(qars: list, department):
"""
    Get the average kor of the department passed as parameter in the Benin Republic area.
    """
    _all = [c for c in qars if c.department == department]
    count = len(_all) or 1  # avoid division by zero for empty departments
    total = sum(x.kor for x in _all)
    result = total / count
return "{:.2f}".format(result) if result != 0 else "NA"
|
def wrapLines ( docin, maxlen = 100 ):
"""
Break line length to a max while retaining the leading spaces...
"""
    import textwrap
import string
docout = []
for line in docin:
prefix=0
for c in line:
if c in string.whitespace:
prefix += 1
else:
break
for l in textwrap.wrap( line, maxlen ):
line = ' '*prefix + l.lstrip()
docout.append ( line )
return docout
|
def feature_vectorizer(features, term_voc):
"""
Produces vector of features
term_voc : core.TermVocabulary
returns: dict
vector {index1: value1, ..., indexN: valueN}
"""
vector = {}
for feature_name in features.keys():
if not term_voc.contains(feature_name):
term_voc.insert_term(feature_name)
index = term_voc.get_term_index(feature_name)
vector[index] = features[feature_name]
return vector
|
def mergeDictionaries(inputDict, separator=';'):
"""This function merges two or more dictionaries whereas values from different sources for the same key are combined by indicating the provenance.
    For example sourceA = {'a': 'val1'} and sourceB = {'a': 'val2'} will be merged into {'a': 'val1 (sourceA);val2 (sourceB)'} using the default separator.
The given dictionary contains the two dictionaries with their respective names as keys (which will be used to indicate provenance)
>>> mergeDictionaries({'sourceA': {'a': 'val1'}, 'sourceB': {'a': 'val2'} })
{'a': 'val1 (sourceA);val2 (sourceB)'}
"""
keyValues = {}
for sourceName in inputDict:
for key in inputDict[sourceName]:
value = inputDict[sourceName][key]
valueString = f'{value} ({sourceName})'
if key in keyValues:
keyValues[key].append(valueString)
else:
keyValues[key] = [valueString]
outputDict = {}
for k in keyValues:
outputDict[k] = separator.join(keyValues[k])
return outputDict
|
def _apply_directives(stream, directives, ctxt, vars):
"""Apply the given directives to the stream.
:param stream: the stream the directives should be applied to
:param directives: the list of directives to apply
:param ctxt: the `Context`
:param vars: additional variables that should be available when Python
code is executed
:return: the stream with the given directives applied
"""
if directives:
stream = directives[0](iter(stream), directives[1:], ctxt, **vars)
return stream
|
def cp(container, destination, tarname):
"""
The Docker people said they would not implement "cp" at the API level.
So, we live with what we have.
"""
try:
with open(tarname, 'rb') as tarhandle:
container.put_archive(destination, tarhandle)
            return True
    except Exception as e:
        print(e)
        return False
|
def params_for(prefix, kwargs):
"""Extract parameters that belong to a given sklearn module prefix from
``kwargs``. This is useful to obtain parameters that belong to a
submodule.
Example usage
-------------
>>> kwargs = {'encoder__a': 3, 'encoder__b': 4, 'decoder__a': 5}
>>> params_for('encoder', kwargs)
{'a': 3, 'b': 4}
"""
if not prefix.endswith('__'):
prefix += '__'
return {key[len(prefix):]: val for key, val in kwargs.items()
if key.startswith(prefix)}
|
def symop_string_from_symop_matrix_tr(matrix, tr=(0, 0, 0), eps=0):
"""
Construct a CIF representation of symmetry operator plus translation.
See International Tables for Crystallography Vol. A. (2002) for
definition.
:param matrix: 3x3 matrix, representing the symmetry operator
:param tr: translation vector of length 3 (default 0)
:param eps: epsilon parameter for fuzzy comparison x == 0
:return: CIF representation of symmetry operator
"""
import re
axes = ["x", "y", "z"]
parts = ["", "", ""]
for i in range(0, 3):
for j in range(0, 3):
sign = None
if matrix[i][j] > eps:
sign = "+"
elif matrix[i][j] < -eps:
sign = "-"
if sign:
                parts[i] = "{}{}{}".format(parts[i], sign, axes[j])
        if tr[i] < -eps or tr[i] > eps:
            sign = "+"
            if tr[i] < -eps:
                sign = "-"
            parts[i] = "{}{}{}".format(parts[i], sign, abs(tr[i]))
        parts[i] = re.sub(r'^\+', '', parts[i])
return ",".join(parts)
|
def isCCW(ring):
"""
Determines if a LinearRing is oriented counter-clockwise or not
"""
area = 0.0
for i in range(0,len(ring)-1):
p1 = ring[i]
p2 = ring[i+1]
area += (p1[1] * p2[0]) - (p1[0] * p2[1])
    # a non-positive shoelace sum here means counter-clockwise orientation
    return area <= 0
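# Usage sketch (illustrative rings, not part of the original snippet): a
# closed unit triangle, wound both ways.
assert isCCW([(0, 0), (1, 0), (0, 1), (0, 0)]) is True
assert isCCW([(0, 0), (0, 1), (1, 0), (0, 0)]) is False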
|
def is_int(INTEGER_MAYBE):
""" Checks if a string could be expressed as an integer. '"""
try:
int(INTEGER_MAYBE)
return True
except ValueError:
return False
|
def convert_bytes(tot_bytes):
"""Convert bytes to human-readable GB, MB, KB.
Parameters
----------
tot_bytes : int
Total number of bytes.
Returns
-------
[GB, MB, KB, rem] : list
Bytes divided into Giga, Mega, Kilo bytes.
"""
GB, rem = divmod(tot_bytes, 1024 * 1024 * 1024)
MB, rem = divmod(rem, 1024 * 1024)
KB, rem = divmod(rem, 1024)
return [GB, MB, KB, rem]
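# Usage sketch (illustrative, not part of the original snippet):
# 1 GiB + 2 MiB + 3 KiB + 4 bytes decomposes back into its parts.
assert convert_bytes(1 * 1024**3 + 2 * 1024**2 + 3 * 1024 + 4) == [1, 2, 3, 4]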
|
def _f(fpath, dir_):
"""Helper function for contains_parent_dir function."""
return fpath.startswith(dir_)
|
def process_results_datasud(results, nb_results):
"""
Input: results: results from datasud
nb_results: number of results to use
Output: List of results formatted to reranking API
"""
formatted_results = []
results = results["result"]["results"][0:nb_results]
for result in results:
tags_list = [x["display_name"] for x in result["tags"]]
groups_list = [
{"name": x["display_name"], "description": x["description"]}
for x in result["groups"]
]
formatted_results.append(
{
"title": result["title"].replace('"', "'"),
"url": result["name"],
"description": result["notes"].replace('"', "'"),
"portal": "datasud",
"owner_org": result["organization"]["title"],
"owner_org_description": result["organization"]["description"].replace(
'"', "'"
),
"maintainer": result["maintainer"],
"dataset_publication_date": result["dataset_publication_date"],
"dataset_modification_date": result["dataset_modification_date"],
"metadata_creation_date": result["metadata_created"],
"metadata_modification_date": result["metadata_modified"],
"tags": tags_list,
"groups": groups_list,
}
)
return formatted_results
|
def vanderpol(y,t,mu):
""" Return the derivative vector for the van der Pol equations."""
    y1 = y[0]
    y2 = y[1]
    dy1 = y2
    dy2 = mu * (1 - y1**2) * y2 - y1
return [dy1, dy2]
|
def color565(r, g, b):
"""Convert red, green, blue components to a 16-bit 565 RGB value. Components
should be values 0 to 255.
"""
return ((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3)
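# Usage sketch (illustrative, not part of the original snippet): pure red
# fills the top five bits; full white saturates all sixteen.
assert color565(255, 0, 0) == 0xF800
assert color565(255, 255, 255) == 0xFFFF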
|
def _evaluate_params(params_name: list, params_index: list, config_params_available: bool):
"""
This ensures that (1) if parameter names are provided in the config file, one and only one of
the arguments, `params_name` or `params_index`, might be given, and (2) if parameter names are
NOT provided in the config file, the arg `params_name` and only that MUST be given.
:param params_name: See the same argument in `do_extraction`.
:param params_index: See the same argument in `do_extraction`.
:param config_params_available: `True` if the key `MVTS_PARAMETERS` in the config file, is
associated with a list of parameters, and `False` otherwise.
:return: True, if no exception was raised.
"""
if params_index is None: params_index = []
if params_name is None: params_name = []
given_by_list, given_by_index = False, False
if len(params_name) > 0:
given_by_list = True
if len(params_index) > 0:
given_by_index = True
if not config_params_available: # if parameters in config file are not provided
if given_by_list and not given_by_index:
return True
else:
# if (1) both args are given, or (2) if none of them are provided, or (3) if
# params_index is given
raise ValueError(
"""
If a list of parameter names is not provided by the config file,
the arg `params_name` and only that MUST be given.
"""
)
else:
if given_by_list + given_by_index > 1: # if both args are provided
raise ValueError(
"""
Both of the arguments, `params_name` and `params_index`, cannot be given at the
same time.
"""
)
return True
|
def _to_shape(shape):
"""convert shape into tuple."""
return tuple(int(sh) for sh in shape)
|
def quadratic_easeout(pos):
"""
Easing function for animations: Quadratic Ease Out.
"""
return -(pos * (pos - 2))
|
def mkdown_blockquote(text):
"""
Generates the markdown syntax for a blockquote.
"""
return '\n'.join([f'> {line.strip()}' for line in text.splitlines()])
|
def bin2hex(bin_bytes):
"""
Converts a binary string to a string of space-separated hexadecimal bytes.
"""
return ' '.join('%02x' % ord(c) for c in bin_bytes)
|
def pad_sentences(sentences, padding_word="<PAD/>", maxlen=0):
"""
    Pads all the sentences to the same length. The length is defined by the longest sentence unless a positive maxlen is given.
Returns padded sentences.
"""
if maxlen > 0:
sequence_length = maxlen
else:
sequence_length = max(len(s) for s in sentences)
padded_sentences = []
for i in range(len(sentences)):
sentence = sentences[i]
num_padding = sequence_length - len(sentence)
replaced_newline_sentence = []
for char in list(sentence):
if char == "\n":
replaced_newline_sentence.append("<NEWLINE/>")
elif char == " ":
replaced_newline_sentence.append("<SPACE/>")
else:
replaced_newline_sentence.append(char)
new_sentence = replaced_newline_sentence + [padding_word] * num_padding
# new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences
|
def dump_version(command):
"""
Dump version for GCC/Clang compilers
Parameters
----------
command : str
Compiler command.
Returns
-------
float or None: version if found else None
"""
from subprocess import run, PIPE, CalledProcessError
try:
if (
command
not in run(
[command, "--version"],
stdout=PIPE,
stderr=PIPE,
universal_newlines=True,
check=True,
).stdout
):
# Command is linked to another compiler
return
except (FileNotFoundError, CalledProcessError):
return
for method in ("-dumpversion", "-dumpfullversion"):
process = run(
[command, method], stdout=PIPE, stderr=PIPE, universal_newlines=True
)
if not process.returncode and "." in process.stdout:
return float(".".join(process.stdout.split(".", 2)[:2]))
|
def is_ssl(socket):
"""True if socket is an active SSLSocket."""
return bool(getattr(socket, '_sslobj', False))
|
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
"""Test if a and b are close with given relative and absolute precision."""
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
|
def oct2bin(x):
"""
Convert octal string to binary string.
For instance: '11' -> '1001'
"""
return bin(int(x, 8))[2:]
|
def EnvsKwargs(task, env_kwargs):
    """
    Create environment kwargs for one task.
    :param task: the task we need the omnirobot to perform
    :param env_kwargs: the original env_kwargs from a previously pre-trained model
    :return: a copy of env_kwargs with the flag for the given task enabled
    """
    t = task
    tmp = env_kwargs.copy()
tmp['simple_continual_target'] = False
tmp['circular_continual_move'] = False
tmp['square_continual_move'] = False
tmp['eight_continual_move'] = False
    if t == 'sc':
        tmp['simple_continual_target'] = True
    elif t == 'cc':
        tmp['circular_continual_move'] = True
    elif t == 'sqc':
        tmp['square_continual_move'] = True
    elif t == 'ec':
        tmp['eight_continual_move'] = True
return tmp
|
def play_recursive(p1, p2):
"""Get the winning deck of the recursive combat game."""
    states_seen = set()  # set of all states seen; if a state repeats, p1 wins
while p1 and p2:
# Check for duplicate play state
state = str(p1) + str(p2)
if state in states_seen:
return 1, p1
states_seen.add(state)
a = p1.pop(0)
b = p2.pop(0)
if a > len(p1) or b > len(p2):
# A player does not have enough cards to recurse, high wins
winner = 1 if a > b else 2
else:
# had misread the problem and been using p1[:], p2[:] here
# that makes it run for hours
winner, _ = play_recursive(p1[:a], p2[:b])
if winner == 1:
p1 += [a,b]
else:
p2 += [b,a]
return (1, p1) if p1 else (2, p2)
|
def normalize_grid_frame(grid_frame):
"""
    Given a range of acceptable abbreviations and spellings, returns the exact frame name that we need.
Parameters
----------
grid_frame : str
The name of the grid frame that we're trying to match to.
Returns
-------
str - normalized frame name if a match was found
"""
if grid_frame.lower() in ['ak', 'alaska']:
return 'Alaska'
elif grid_frame.lower() in ['ca', 'can', 'canada']:
return 'Canada'
elif grid_frame.lower() in ['conus', 'us', 'usa', 'united states']:
return 'Conus'
elif grid_frame.lower() in ['hi', 'hawaii']:
return 'Hawaii'
elif grid_frame.lower() in ['mex', 'mx', 'mexico']:
return 'Mexico'
elif grid_frame.lower() in ['pr', 'puerto rico', 'puertorico']:
return 'PuertoRico'
else:
raise Exception("The specified grid frame name {grid_frame} is not one of 'Alaska', 'Canada', 'Conus', 'Hawaii', 'Mexico', or 'PuertoRico")
|
def filter_article(event):
"""
Optionally filters original article text from response.
Expects "original_text" and optional "filter_article"
fields in event.
"""
if event.get("filter_article", True):
del event["original_text"]
return event
|
def RemoveIntervallsContained(intervalls):
"""
    remove intervals that are fully contained in another.
[(10, 100), (20, 50), (70, 120), (130, 200), (10, 50), (140, 210), (150, 200)]
results:
[(10, 100), (70, 120), (130, 200), (140, 210)]
"""
if not intervalls:
return []
new_intervalls = []
intervalls.sort()
last_from, last_to = intervalls[0]
for this_from, this_to in intervalls[1:]:
# this is larger:
if this_from <= last_from and this_to >= last_to:
last_from, last_to = this_from, this_to
continue
# last is larger
if last_from <= this_from and last_to >= this_to:
continue
# no complete overlap
new_intervalls.append((last_from, last_to))
last_from, last_to = this_from, this_to
new_intervalls.append((last_from, last_to))
return new_intervalls
|
def list_of_dicts_to_dict_of_lists(list_of_dicts, key_order):
"""
Transform a list of dictionaries into a dictionary of lists.
"""
keys = list(set().union(*(list(d.keys()) for d in list_of_dicts)))
columns = {}
for key in keys:
columns[key] = [d[key] for d in list_of_dicts if key in d]
return {k: columns[k] for k in key_order}
|
def _code(function):
"""Return the byte code for a function.
If this Python interpreter does not supply the byte code for functions,
then this function returns NaN so that all functions compare unequal.
"""
return (function.__code__.co_code
if hasattr(function, '__code__') else float('nan'))
|
def create_positional_array(cigar_tuples):
"""
# ========================================================================
CREATE POSITIONAL ARRAY
PURPOSE
-------
Create a positional array that maps positions in a
CIGAR tuple to a list.
Ex. CIGAR tuple is: [(0, 4), (2, 1) (1, 2)]
Positional Array is Initialized to Empty.
position (an int) starts at 0.
We look at each item in the CIGAR tuple where
the first item is the operation (ex. match, delete, insert)
and the second item is number of bases involved in the operation.
    The returned array maps positions in the read (as list indices)
    to relative positions in the reference. This returned list of
    relative positions starts at 0.
    If we have a match we append the current relative position
    of the reference to the positional array (which represents
    positions in the read) and then we will increase the relative
    position in the reference. This process is repeated for the
    length of the match.
    If the operation is an insertion we append to the positional array
    the left-anchored relative position of the insert in
    the reference. This process is repeated for the length of the insert.
This means the same relative position is appended multiple times.
If the operation is a deletion we will increase the relative position
in the reference by the length of the operation.
This means no value gets appended to the positional array.
So for the CIGAR tuple list above we would get a positional
array that looks as follows:
1. Looking at first tuple in the list:
The tuple's operation is 0 (i.e a match).
positional_array = [0, 1, 2, 3]
position: 4
2. Looking at second tuple in the list:
The tuple's operation is 2 (i.e a delete)
positional_array: [0, 1, 2, 3] (didn't change)
position: 5
3. Looking at the third tuple in the list:
The tuple's operation is 1 (i.e an insert)
positional_array = [0, 1, 2, 3, 4,4]
position: 5
INPUT
-----
[[CIGAR] TUPLE] [cigar_tuples]
- A list containing the CIGAR tuples. (operation, length).
Return
------
[[LIST] [INT]]
- A positional array that maps CIGAR tuples to the read.
# ========================================================================
"""
positional_array = []
OPERATION = 0
LENGTH = 1
position = 0 # 0-based
MATCH = 0
INSERT = 1
DELETE = 2
for tup in cigar_tuples:
if tup[OPERATION] == MATCH:
for i in range(tup[LENGTH]):
positional_array.append(position) # consume read
position = position + 1 # consume reference
if tup[OPERATION] == INSERT:
for i in range(tup[LENGTH]):
positional_array.append(position - 1) # consume read
if tup[OPERATION] == DELETE:
position += tup[LENGTH] # consume reference
return positional_array
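# Usage sketch: the CIGAR tuple list from the docstring walkthrough above.
assert create_positional_array([(0, 4), (2, 1), (1, 2)]) == [0, 1, 2, 3, 4, 4]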
|
def kill_kth_bit(n, k):
    """
    Clear the k-th bit (1-indexed from the least significant bit) using
    bitwise AND:
        0 & 0 = 0
        1 & 0 = 0
    To clear the bit, AND the number with a mask whose k-th bit is reset:
        n = n & ~(1 << (k - 1))   or, in place:   n &= ~(1 << (k - 1))
    """
    return n & ~(1 << (k - 1))
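# Usage sketch (illustrative, not part of the original snippet): clearing
# the 3rd bit (1-indexed) of 0b100101 yields 0b100001.
assert kill_kth_bit(0b100101, 3) == 0b100001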
|
def month_by_name(name):
""" Return the number of a month by (locale-independently) English name """
ENGLISH_NAMES = [
'January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
try:
return ENGLISH_NAMES.index(name) + 1
except ValueError:
return None
|
def _depgrep_node_label_use_action(_s, _l, tokens):
"""
Returns the node label used to begin a tgrep_expr_labeled. See
`_depgrep_segmented_pattern_action`.
Called for expressions like (`tgrep_node_label_use`)::
=s
when they appear as the first element of a `tgrep_expr_labeled`
expression (see `_depgrep_segmented_pattern_action`).
It returns the node label.
"""
assert len(tokens) == 1
assert tokens[0].startswith("=")
return tokens[0][1:]
|
def intToStr(i):
"""
What we know.
1. Only have to look at the loop as there are no function calls
2. Within while loop, constant number of steps
3. How many times through loop?
i. how many times can one divide i by 10
ii. O(log(i)), log base 10 for the size of i
    Nugget: it is linear in the number of digits in n, but logarithmic in the size of n; since we decided to measure complexity in the size of the input, it is logarithmic.
>>> intToStr(12)
'12'
>>> intToStr(1)
'1'
>>> intToStr(9)
'9'
"""
digits = '0123456789'
if i == 0:
return '0'
res = ''
while i > 0:
res = digits[i % 10] + res
i = i // 10
return res
|
def munge_time(t):
"""Take a time from nvprof and convert it into a chrome://tracing time."""
# For strict correctness, divide by 1000, but this reduces accuracy.
return t
|
def _replace_from_end(s, a, b, times=-1):
    """Replace the last `times` occurrences of a with b in s, working from the end."""
    # reverse a and b as well, so multi-character patterns still match,
    # and default times to -1 (str.replace rejects None)
    return s[::-1].replace(a[::-1], b[::-1], times)[::-1]
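# Usage sketch (illustrative, not part of the original snippet): only the
# last comma is replaced.
assert _replace_from_end("a,b,c", ",", ";", 1) == "a,b;c"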
|
def count_non_zero_length(aaa):
"""
aaa = [[116, 105, 114, 97, 115],
[ 97, 110, 110, 0, 0],
[101, 108, 0, 0, 0]]
ans -> [5, 3, 2]
"""
bbb = []
for item in aaa:
counting = 0
for element in item:
if element != 0:
counting += 1
bbb.append(counting)
return bbb
|
def arrangeFlatsByExpId(exposureList, exposureIdList):
"""Arrange exposures by exposure ID.
There is no guarantee that this will properly group exposures, but
allows a sequence of flats that have different illumination
(despite having the same exposure time) to be processed.
Parameters
----------
exposureList : `list`[`lsst.afw.image.exposure.exposure.ExposureF`]
Input list of exposures.
exposureIdList : `list`[`int`]
List of exposure ids as obtained by dataId[`exposure`].
Returns
    -------
    flatsAtExpId : `dict` [`int`,
`list`[(`lsst.afw.image.ExposureF`, `int`)]]
Dictionary that groups flat-field exposures (and their IDs)
sequentially by their exposure id.
Notes
-----
This algorithm sorts the input exposures by their exposure id, and
then assigns each pair of exposures (exp_j, exp_{j+1}) to pair k,
such that 2*k = j, where j is the python index of one of the
exposures (starting from zero). By checking for the IndexError
while appending, we can ensure that there will only ever be fully
populated pairs.
"""
flatsAtExpId = {}
# sortedExposures = sorted(exposureList,
# key=lambda exp: exp.getInfo().getVisitInfo().getExposureId())
assert len(exposureList) == len(exposureIdList), "Different lengths for exp. list and exp. ID lists"
# Sort exposures by expIds, which are in the second list `exposureIdList`.
sortedExposures = sorted(zip(exposureList, exposureIdList), key=lambda pair: pair[1])
for jPair, expTuple in enumerate(sortedExposures):
if (jPair + 1) % 2:
kPair = jPair // 2
listAtExpId = flatsAtExpId.setdefault(kPair, [])
try:
listAtExpId.append(expTuple)
listAtExpId.append(sortedExposures[jPair + 1])
except IndexError:
pass
return flatsAtExpId
|
def convert_seq_to_codons(seq):
"""Convert a string (sequence) into a list of 3-letter strings (triplets)."""
seq_codons = [seq[i : i + 3] for i in range(0, len(seq), 3)]
return seq_codons
|
def value_right(self, right):
"""
Returns the value of the type instance calling an to use in an
operator method, namely when the method's instance is on the
right side of the expression.
"""
return self if isinstance(right, self.__class__) else self.value
|
def is_sequence(arg):
"""
Checks if arg is a sequence
"""
if isinstance(arg, str):
return False
elif hasattr(arg, '__iter__'):
return True
elif not hasattr(arg, 'strip') and hasattr(arg, '__getitem__'):
return True
else:
return False
|
def count_digit(n, digit):
"""Return how many times digit appears in n.
>>> count_digit(55055, 5)
4
"""
if n == 0:
return 0
else:
if n%10 == digit:
return count_digit(n//10, digit) + 1
else:
return count_digit(n//10, digit)
|
def enough_objects(dictionary):
"""
Checks whether the json (converted to a dict) has enough objects (Monday-Sunday)
Returns True if it does, False otherwise.
"""
keys = list(dictionary.keys())
# still have no idea why there's 8 keys instead of 7
reference = ["0", "1", "2", "3", "4", "5", "6", "7"]
return keys == reference
|
def test_accuracy(reference_src, reference_tar, aligned_src, aligned_tar, penalty_points=None):
"""
Tests aligned lists of strings against reference lists, typically hand aligned.
Args:
reference_src: list of reference source strings
reference_tar: list of reference target strings
aligned_src: list of auto-aligned source strings
aligned_tar: list of auto-aligned target strings
penalty_points: dict of error types and penalty points. Default is {'bad': 1, 'noise': 1, 'missed': 1}
Returns: dict
"""
if penalty_points is None:
penalty_points = {'bad': 1, 'noise': 1, 'missed': 1}
if not (isinstance(reference_src, list) and
isinstance(reference_tar, list) and
isinstance(aligned_src, list) and
isinstance(aligned_tar, list)):
raise Exception("Expecting reference_src, reference_tar, aligned_src, and aligned_tar to be of type list.")
if len(reference_src) != len(reference_tar):
raise Exception(
"Expecting reference_src and reference_tar to have the same length")
if len(aligned_src) != len(aligned_tar):
raise Exception(
"Expecting aligned_src and aligned_tar to have the same length")
reference_src = [item.lower().strip() for item in reference_src]
reference_tar = [item.lower().strip() for item in reference_tar]
aligned_src = [item.lower().strip() for item in aligned_src]
aligned_tar = [item.lower().strip() for item in aligned_tar]
# find mismatches. Penalize by 1 point per mismatch
bad = []
missed = []
missed_points = 0
bad_points = 0
correct_count = 0
for src_index, src in enumerate(reference_src):
tar = reference_tar[src_index]
if src not in aligned_src: # no match here between src lists
missed_points += 1
missed.append(src)
continue
tar_index = aligned_src.index(src)
if aligned_tar[tar_index] != tar:
bad_points += 1
bad.append(src)
else:
correct_count += 1
# find noise. Penalize by 1 point per noisy item
noise = []
noise_points = 0
for src_index, src in enumerate(aligned_src):
if src not in reference_src:
noise_points += 1
noise.append(src)
# apply weights to penalty factors
bad_points = bad_points * penalty_points['bad']
noise_points = noise_points * penalty_points['noise']
missed_points = missed_points * penalty_points['missed']
# find score
# score = (len(reference_src) - bad_points - noise_points - missed_points) / len(reference_src)
error_rate = (bad_points + noise_points) / len(reference_src)
return {'correct_count': "{}/{}".format(correct_count, len(reference_src)), 'error_rate': error_rate,
'correct_rate': correct_count / len(reference_src), 'bad_points': bad_points,
'noise_points': noise_points, 'missed_points': missed_points, 'bad': bad, 'noise': noise, 'missed': missed}
|
def nextIter(it, default=None):
"""
Returns the next element of the iterator,
returning the default value if it's empty,
rather than throwing an error.
"""
try:
return next(iter(it))
except StopIteration:
return default
|