content
stringlengths 42
6.51k
|
---|
def bezier_quadratic(p0, p1, p2, t):
    """Evaluate a quadratic Bezier curve at parameter t.

    Uses the form anchored at the middle control point:
    B(t) = p1 + (1-t)^2 * (p0 - p1) + t^2 * (p2 - p1).
    """
    u = 1 - t
    return p1 + u * u * (p0 - p1) + t * t * (p2 - p1)
|
def trade(first, second):
    """Exchange the smallest prefixes of first and second that have equal sum.

    Both lists are mutated in place when a deal is made.

    >>> a = [1, 1, 3, 2, 1, 1, 4]
    >>> b = [4, 3, 2, 7]
    >>> trade(a, b)  # Trades 1+1+3+2=7 for 4+3=7
    'Deal!'
    >>> a
    [4, 3, 1, 1, 4]
    >>> b
    [1, 1, 3, 2, 2, 7]
    """
    m, n = 1, 1
    # Grow whichever prefix has the smaller sum until the sums match or
    # one prefix would run past the end of its list.
    while m < len(first) and n < len(second):
        left_sum = sum(first[:m])
        right_sum = sum(second[:n])
        if left_sum == right_sum:
            break
        if left_sum < right_sum:
            m += 1
        else:
            n += 1
    if sum(first[:m]) == sum(second[:n]):
        first[:m], second[:n] = second[:n], first[:m]
        return 'Deal!'
    return 'No deal!'
|
def clean_state_dict(state_dict):
    """Return a copy of a model state dict without the DataParallel prefix.

    Keys gain a ``module.`` prefix when a model is wrapped in
    ``torch.nn.DataParallel``; stripping it lets the weights load into
    a plain, unwrapped model.

    Arguments:
        state_dict {collections.OrderedDict} -- possibly prefixed state dict
    Returns:
        collections.OrderedDict -- the input itself when no key starts with
        'module', otherwise a new OrderedDict with the first 7 characters
        ("module.") removed from every key.
    """
    from collections import OrderedDict
    # Bug fix: the original assigned `clean_model = state_dict` twice,
    # both assignments dead code immediately overwritten.
    if not any(key.startswith('module') for key in state_dict):
        return state_dict
    clean_model = OrderedDict()
    for k, v in state_dict.items():
        clean_model[k[7:]] = v  # drop the leading "module."
    return clean_model
|
def make_file_string(year):
    """
    Create a file name string from the year, e.g. 'laetoli_csho_1998.xls'
    :param year: four digit integer value
    :return: string representation of file name
    """
    return 'laetoli_csho_{}.xls'.format(year)
|
def is_backward_locking(locked_keys, key):
    """
    Check if the operation of locking `key` is a backward-locking.

    Naive deadlock detection: locks must be acquired in one fixed order
    (e.g. alphabetic, left to right). Acquiring a lock >= the right-most
    already-held lock is a forward locking; anything else is backward.
    If every process only ever locks forward there can be no deadlock,
    since a circular wait requires at least one backward locking. A
    process that fails to acquire a lock in a backward locking should
    release all of its locks and redo the entire transaction.

    Example: X holds a and c, Y holds b. If X tries to acquire b
    (backward) while Y tries to acquire c (forward), they deadlock and
    X must release. If both try backward (X wants b, Y wants a), both
    must release.

    :param locked_keys: collection supporting `in` with the already-locked keys.
    :param key: the key to lock.
    :return: a `bool` indicating whether locking `key` is a backward-locking.
    """
    held = sorted(locked_keys)
    assert key not in held, 'must not re-lock a key'
    # Backward iff the new key sorts before the right-most held lock;
    # with nothing held, any first lock is forward.
    return bool(held) and key < held[-1]
|
def generate_doc_text_replace_options(use_case, model_name, precision, mode):
    """
    Return the default text-replace dictionary used when generating
    documentation, mapping placeholder tokens to formatted values.
    """
    # Best-effort capitalization of common model-name fragments; the
    # final, authoritative name lives in the spec yaml.
    pretty_name = model_name.title().replace("-", " ")
    for raw, pretty in (("Ssd ", "SSD-"), ("Resnet", "ResNet"),
                        ("Mobilenet", "MobileNet"), ("Densenet", "DenseNet"),
                        ("Bert", "BERT"), ("Rfcn", "RFCN"), ("Gnmt", "GNMT")):
        pretty_name = pretty_name.replace(raw, pretty)
    pretty_precision = precision
    for raw, pretty in (("fp32", "FP32"), ("int8", "Int8"),
                        ("bfloat16", "BFloat16")):
        pretty_precision = pretty_precision.replace(raw, pretty)
    package_base = "{}-{}-{}".format(model_name, precision, mode)
    return {
        "<model name>": pretty_name,
        "<precision>": pretty_precision,
        "<mode>": mode,
        "<package url>": "",
        "<package name>": package_base + ".tar.gz",
        "<package dir>": package_base,
        "<docker image>": "",
        "<use case>": use_case
    }
|
def select_files(files, search):
    """Select files based on a search term of interest.

    Parameters
    ----------
    files : list of str
        File list.
    search : str
        Substring a file name must contain to be kept.

    Returns
    -------
    list of str
        The matching file names, in their original order.
    """
    selected = []
    for name in files:
        if search in name:
            selected.append(name)
    return selected
|
def convert_to_list(input, _type=int, _default_output=None):
    """
    Coerce ``input`` to a list, validating element types.

    Args:
        input: value to normalize into a list.
        _type (optional): expected element type; ``None`` disables type
            checking. Defaults to int.
        _default_output (optional): returned when ``input`` does not meet
            the criteria. Defaults to None.
    """
    if type(input) is list:
        if _type is None:
            return input
        # Keep the list only when every element matches the expected type.
        if all(type(element) is _type for element in input):
            return input
        return _default_output
    # A bare value of the right type becomes a one-element list; with no
    # type specified, any non-None value qualifies.
    if _type is not None:
        if type(input) is _type:
            return [input]
    elif input is not None:
        return [input]
    return _default_output
|
def metaslice(alen, nmeta, trim=0, thickness=False):
    """
    Compute boundary indices for *nmeta* centered metaslices.

    Returns *[imin, imax, istp]* so that ``range(imin, imax+1, istp)``
    gives the boundary indices; element *imax* itself is *not* part of
    the last metaslice. When *thickness* is true, *nmeta* is interpreted
    as the per-slice thickness instead of the slice count. A non-null
    *trim* ignores that many elements at each edge of the array.

    >>> metaslice(15, 3, trim=2)
    [3, 12, 3]
    """
    if alen <= 0 or nmeta <= 0 or trim < 0:
        raise ValueError("Invalid input (alen=%d>0, nmeta=%d>0, trim=%d>=0)" %
                         (alen, nmeta, trim))
    if alen <= 2*trim:
        raise ValueError("Trimmed array would be empty")
    usable = alen - 2*trim
    if thickness:
        istep = nmeta              # nmeta is the slice thickness here
        nmeta = usable // istep    # derive the number of slices
        if nmeta == 0:
            raise ValueError("Metaslice thickness is too big")
    else:
        istep = usable // nmeta    # derive the slice thickness
        if istep <= 0:
            raise ValueError("Null-thickness metaslices")
    # Center the slices on the trimmed array.
    imin = trim + (usable % nmeta) // 2   # first pixel of the first slice
    imax = imin + nmeta*istep - 1         # last pixel of the last slice
    return [imin, imax + 1, istep]        # list form expected by pySNIFS
|
def file_parts(filename):
    """
    Split a file name at its first dot.

    prefix is everything before the first dot; extension is everything
    after it (inner dots kept, any leading dots stripped).
    """
    prefix, _, remainder = filename.partition(".")
    return prefix, remainder.lstrip(".")
|
def _join_prodigy_tokens(text):
"""Return all prodigy tokens in a single string
"""
return "\n".join([str(i) for i in text])
|
def TOTAL_DEGREE_FREEDOM(N_DOFSNODE, N_NODES):
    """
    This function determines the quantity and ID values of the structure's global degrees of freedom.
    Input:
    N_DOFSNODE  | Number of degress of freedom per node | Integer
    N_NODES     | Number of nodes                       | Integer
    Output:
    DOF_GLOBAL   | ID global degree of freedom   | Py list[N_DOFSGLOBAL]
    N_DOFSGLOBAL | Total of degrees of freedom   | Integer
    """
    N_DOFSGLOBAL = N_NODES * N_DOFSNODE
    # Global DOF IDs are simply the consecutive integers 0..N_DOFSGLOBAL-1.
    DOF_GLOBAL = list(range(N_DOFSGLOBAL))
    return DOF_GLOBAL, N_DOFSGLOBAL
|
def middlePoint(A, B):
    """ Finds the point halfway between two points.
    @parameter A: point #1
    @parameter B: point #2
    @returns: tuple (x, y) of the midpoint, truncated to ints"""
    mid_x = int((A[0] + B[0]) / 2)
    mid_y = int((A[1] + B[1]) / 2)
    return (mid_x, mid_y)
|
def check_all_there(the_list):
    """Verify that every element of every row is truthy.

    Keywords:
        (list) the_list: list of rows, each row an iterable of items.
    Returns:
        (bool) True: if every item in every row is truthy.
    Raises:
        ValueError: if any item is falsy (None, empty list, 0, '', ...).

    Bug fix: the original returned True after inspecting only the FIRST
    item of the first row (the `else: return True` sat inside the inner
    loop); all rows and items are now checked before returning.
    """
    for row in the_list:
        for item in row:
            if not item:
                raise ValueError('Ehh, empty list, sir!')
    return True
|
def dict_of(cls):
    """Decorator that converts a class into a dict of its public members."""
    public = {}
    for name, member in cls.__dict__.items():
        if not name.startswith('_'):
            public[name] = member
    return public
|
def identify_ossim_kwl(ossim_kwl_file):
    """
    Parse a geom keyword-list file and report whether it is an ossim model.

    :param ossim_kwl_file : ossim keyword list file
    :type ossim_kwl_file : str
    :return ossim kwl info : the ossim model type string, or None when the
        file is missing, malformed, or not an ossim kwl file
    :rtype str
    """
    try:
        geom = {}
        with open(ossim_kwl_file, encoding="utf-8") as handle:
            for raw_line in handle:
                # Malformed lines (no ": " separator) raise and fall
                # through to the broad except below, yielding None.
                key, value = raw_line.split(": ")
                geom[key] = value.rstrip()
        model_type = geom.get("type", "").strip()
        if model_type.startswith("ossim"):
            return model_type
        return None
    except Exception:  # pylint: disable=broad-except
        return None
|
def bin2str(binary):
    """Decode a bytes object of ASCII '0'/'1' digits, 16 bits per
    character, into a UTF-8 encoded bytes result."""
    assert isinstance(binary, bytes), 'must give a byte obj'
    assert len(binary) % 16 == 0, 'must have 16 bit valuess'
    chars = [chr(int(binary[offset:offset + 16], 2))
             for offset in range(0, len(binary), 16)]
    return ''.join(chars).encode()
|
def getURL(key):
    """Build the public JSON feed URL for a Google Spreadsheet.
    key -- Google Spreadsheet Key
    """
    base = "https://spreadsheets.google.com/feeds/list/"
    return base + str(key) + "/od6/public/basic?alt=json"
|
def _corner_to_coco_bbox(bbox):
"""
from corner format
to (tlx, tly, w, h)
:param bbox:
:return:
"""
return [(bbox[0]), (bbox[1]), (bbox[2] - bbox[0]), (bbox[3] - bbox[1])]
|
def _process_project_id(issues, project_id):
"""Set the project id of given issues"""
for task in issues:
task["project id"] = project_id
return issues
|
def get_args_string(cli_args):
    """Render a dict of cli arguments as a string of -var flags.

    Args:
        cli_args: dict of cli arguments; falsy values are skipped, list
            values are rendered as a bracketed, quoted sequence.
    Returns:
        str: space-joined ``-var 'key=value'`` fragments.
    """
    fragments = []
    for name, value in cli_args.items():
        if not value:
            continue  # skip None/empty/zero values entirely
        if type(value) == list:
            quoted = ', '.join('"{}"'.format(item) for item in value)
            value = '[{}]'.format(quoted)
        fragments.append("-var '{0}={1}'".format(name, value))
    return ' '.join(fragments)
|
def compare_lines(expected_path, to_test_path, num_lines, reverse=False):
    """Compares the first (or last) n lines of two files.

    Args:
        expected_path: Path to the file with the expected content, as a string.
        to_test_path: Path to the file to compare with the expected file,
            as a string.
        num_lines: Number of lines to compare, as an int.
        reverse: Whether to compare the last n lines, as a boolean.
    Returns:
        True, if the two files have the same content. False, otherwise.
    """
    with open(expected_path, 'rb') as expected, \
            open(to_test_path, 'rb') as to_test:
        if reverse:
            expected_lines = expected.readlines()[-num_lines:]
            lines_to_test = to_test.readlines()[-num_lines:]
        else:
            expected_lines = expected.readlines()[:num_lines]
            lines_to_test = to_test.readlines()[:num_lines]
    if len(lines_to_test) != len(expected_lines):
        # Bug fix: the two counts in this message were swapped.
        print(f'Expected to test {len(expected_lines)} lines but '
              f'got {len(lines_to_test)} lines')
        return False
    # Lists are equal-length here, so zip covers every compared line.
    for expected_line, line_to_test in zip(expected_lines, lines_to_test):
        if expected_line != line_to_test:
            print('WANT:', expected_line)
            print('GOT:', line_to_test)
            return False
    return True
|
def split_list(l, size):
    """Split a list into evenly sized chunks.

    Parameters
    ----------
    l : list
        The list to split
    size : n
        The size of each chunk

    Returns
    -------
    A list of sub-lists

    Examples
    --------
    >>> split_list([1, 2, 3, 4, 5, 6], 2)
    [[1, 2], [3, 4], [5, 6]]
    """
    chunks = []
    for start in range(0, len(l), size):
        chunks.append(l[start:start + size])
    return chunks
|
def LineModel(line):
    """
    Decide whether a string looks like a "model" line, tolerating typos.

    Parameters : comparison string
    Return     : Boolean -- False for non-strings and lines containing
        'default:'; True when 'model' appears, or when at least four of
        its five letters are present somewhere in the line.
    """
    try:
        line = line.lower()
    except:
        return False
    if 'default:' in line:
        return False
    if 'model' in line:
        return True
    # Typo handling: count how many of the letters m, o, d, e, l occur.
    present = sum(1 for letter in 'model' if letter in line)
    return present >= 4
|
def merge(left, right):
    """Merge two sorted lists into one sorted list.

    If either side is empty, the other side is already the merged
    result. Bug fix: the original returned the *empty* side in these
    guards (``if not len(left): return left``), silently discarding
    every element of the non-empty list.
    """
    if not len(left):
        return right
    if not len(right):
        return left
    result = []
    leftIndex = 0
    rightIndex = 0
    totalLen = len(left) + len(right)
    while len(result) < totalLen:
        # Take the smaller head element; ties favour the right side
        # (strict < comparison, as in the original).
        if left[leftIndex] < right[rightIndex]:
            result.append(left[leftIndex])
            leftIndex += 1
        else:
            result.append(right[rightIndex])
            rightIndex += 1
        if leftIndex == len(left) or rightIndex == len(right):
            # One side exhausted: append the remainder of the other.
            result.extend(left[leftIndex:] or right[rightIndex:])
            break
    return result
|
def collide(Aleft, Aright, Bleft, Bright):
    """Test two 1-D intervals [Aleft, Aright] and [Bleft, Bright] for overlap.

    Optimised for speed: cheapest rejections first.

    Returns:
        False when the intervals are disjoint;
        1 when any pair of endpoints coincide exactly (kept as 1 on
        purpose -- a 0 distance would evaluate as False);
        otherwise the smallest absolute distance between an endpoint of
        A and an endpoint of B, a truthy "closeness" measure.

    Defect fixed: the original duplicated the closest-distance
    computation verbatim in four overlap branches; all four computed
    the same min, so it is done once here.
    """
    # Quickest rejections first.
    if Aright < Bleft or Aleft > Bright:
        return False
    # Exact endpoint matches return 1 so the result stays truthy.
    if Aleft == Bleft or Aleft == Bright or Aright == Bleft or Aright == Bright:
        return 1
    # Rejections failed, so the intervals overlap: report the closest
    # endpoint-to-endpoint distance (identical in every overlap case).
    return min(abs(Aleft - Bright), abs(Aright - Bright),
               abs(Aleft - Bleft), abs(Aright - Bleft))
|
def get_dice_coefficient(a_license, b_license):
    """Sorensen-Dice similarity of two normalized license texts.

    Defined as 2*nt / (na + nb), where nt is the number of character
    bigrams found in both strings and na/nb are the bigram counts of
    each string.

    Arguments:
        a_license {string} -- Normalized license text a.
        b_license {string} -- Normalized license text b.
    Return:
        float -- A statistic used to gauge the similarity of two license texts.
    """
    # Degenerate cases: empty text, exact duplicates, single characters.
    if not a_license or not b_license:
        return 0.0
    if a_license == b_license:
        return 1.0
    if len(a_license) == 1 or len(b_license) == 1:
        return 0.0
    bigrams_a = sorted(a_license[i:i + 2] for i in range(len(a_license) - 1))
    bigrams_b = sorted(b_license[i:i + 2] for i in range(len(b_license) - 1))
    # Count multiset matches with a two-pointer sweep over the sorted lists.
    matches = 0
    i = j = 0
    while i < len(bigrams_a) and j < len(bigrams_b):
        if bigrams_a[i] == bigrams_b[j]:
            matches += 1
            i += 1
            j += 1
        elif bigrams_a[i] < bigrams_b[j]:
            i += 1
        else:
            j += 1
    return float(2 * matches) / float(len(bigrams_a) + len(bigrams_b))
|
def find_sum(num_digits, nums):
    """ Sum the first *num_digits* entries of *nums*. """
    return sum(nums[index] for index in range(num_digits))
|
def printable(s):
    """make a string printable. Converts all non-printable ascii characters and all
    non-space whitespace to periods. This keeps a string to a fixed width when
    printing it. This is not meant for canonicalization -- it is far more
    restrictive. It is appropriate for generating debug output of binary
    strings that might contain ascii substrings, like peer-id's. Quotes and
    double quotes are also excluded so the result can be enclosed in quotes.
    """
    def _keep(ch):
        return 0x20 <= ord(ch) < 0x7F and ch != '"' and ch != "'"
    return "".join(ch if _keep(ch) else '.' for ch in s)
|
def secant(func, min_guess, max_guess, err_tolerance, max_try=10000):
    """
    Find a root of a function using the secant method.

    arguments:
        func: f(x)
        min_guess: minimum x as guess
        max_guess: maximum x as guess
        err_tolerance: value where abs(f(root)) must be <= err_tolerance
        max_try: recursion budget before giving up

    Raises:
        ValueError: if no root is found within max_try iterations.
        (Bug fix: the original *returned* the ValueError instance
        instead of raising it; the per-call debug print was removed.)
    """
    y_min = func(min_guess)
    y_max = func(max_guess)
    # Secant step: intersect the chord through both guesses with y = 0.
    x_new = max_guess - (y_max * (min_guess - max_guess) / (y_min - y_max))
    y_new = func(x_new)
    if abs(y_new) <= err_tolerance:
        return x_new
    if max_try == 0:
        raise ValueError('Unable to find root after try')
    # Replace whichever guess shares the sign of the new residual
    # (assumes func increases from min_guess toward max_guess).
    min_guess = x_new if y_new <= 0 else min_guess
    max_guess = x_new if y_new > 0 else max_guess
    return secant(func, min_guess, max_guess, err_tolerance, max_try - 1)
|
def get_digit(number, position, base=10):
    """Return the digit of *number* at *position* (0 = least significant).

    Shifts the number right by *position* base-*base* places with floor
    division, then isolates the last digit with a modulo. For example,
    with number=1234 and position=3, 1234 // 10**3 == 1 and 1 % 10 == 1.
    Positions beyond the most significant digit yield 0.
    """
    shifted = number // (base ** position)
    return shifted % base
|
def product(cdc):
    """
    Given a string containing a number, return the product of its digits.
    """
    result = 1
    for digit_char in cdc:
        result *= int(digit_char)
    return result
|
def unique(iterable):
    """Return unique values from iterable, preserving first-seen order."""
    seen = set()
    result = []
    for value in iterable:
        if value not in seen:
            seen.add(value)
            result.append(value)
    return result
|
def cursor(s):
    """Split a caret-marked string: 'ab|c' -> (2, 'abc').

    Raises ValueError when no '|' is present (str.index semantics).
    """
    offset = s.index("|")
    return offset, s[:offset] + s[offset + 1:]
|
def _list_subclasses(cls):
"""
Recursively lists all subclasses of `cls`.
"""
subclasses = cls.__subclasses__()
for subclass in cls.__subclasses__():
subclasses += _list_subclasses(subclass)
return subclasses
|
def get_pairs(X):
    """
    Given a list X, return the len(X) choose 2 pairs (X[i], X[j]) with i < j.
    """
    n = len(X)
    return [(X[i], X[j]) for i in range(n) for j in range(i + 1, n)]
|
def _format_meter_reference(counter_name, counter_type, counter_unit):
"""Format reference to meter data.
"""
return "%s!%s!%s" % (counter_name, counter_type, counter_unit)
|
def evaluateUDGoogleSpecial(tokensArrayGT, tokensArrayProvider):
    """See evaluateUD, except we account for Google's service not being able
    to recognize the POS tags 'INTJ', 'SCONJ', and 'SYM': ground-truth tokens
    carrying those tags are excluded from the ground-truth total.

    Arguments:
        tokensArrayGT {List} -- List of ground-truth POS tokens
        tokensArrayProvider {List} -- List of provider POS tokens to be tested
    Returns:
        Dictionary -- counts of ground-truth and provider tokens (for recall)
        and of correct provider tokens (for precision); empty dict on an
        impossible correct-count (sanity check).
    """
    unrecognized = ('INTJ', 'SCONJ', 'SYM')
    gtTokenCount = sum(
        1 for token in tokensArrayGT if token['pos_tag'] not in unrecognized)
    providerTokenCount = len(tokensArrayProvider)
    providerTokenCorrectCount = 0
    for gt_token in tokensArrayGT:
        text = gt_token['token_text']
        tag = gt_token['pos_tag']
        # First provider token with matching text AND tag counts as correct.
        # Matching by text (not offset) deliberately ignores noun-verb
        # ambiguities, as in evaluateUD.
        for provider_token in tokensArrayProvider:
            if provider_token['token_text'] == text and provider_token['pos_tag'] == tag:
                providerTokenCorrectCount += 1
                break
    if providerTokenCorrectCount > len(tokensArrayProvider) or providerTokenCorrectCount > len(tokensArrayGT):
        print("Error evaluateUD GoogleSpecial")
        print(tokensArrayGT)
        return {}
    return({
        "gt_Tokens": gtTokenCount,
        "provider_Tokens": providerTokenCount,
        "provider_Tokens_Correct": providerTokenCorrectCount
    })
|
def update_global_map(global_map, map_update):
    """
    Merge an article copy into the global article map.

    If an article with the same source_id already exists, the first copy
    from map_update is appended to its copies; otherwise map_update is
    added as a new source article.

    :param global_map: List of articles with associated copies
    :param map_update: Dictionary of data to update the global map
    :return: Updated (mutated) list of articles
    """
    for existing in global_map:
        if existing['source_id'] == map_update['source_id']:
            existing['copies'].append(map_update['copies'][0])
            return global_map
    global_map.append(map_update)  # new source article
    return global_map
|
def consistency_zero_division(line):
    """
    Evaluate the Consistency dimension for one row of the rdd.

    Args:
        line(Row): row of the rdd, shaped like (key, (count, hits)).

    Returns (key, hits/count), falling back to (key, 0.0) when the ratio
    cannot be computed (e.g. a zero count or a malformed row).
    """
    try:
        ratio = float(line[1][1]) / line[1][0]
    except Exception:
        ratio = 0.0
    return (line[0], ratio)
|
def ensure_uid(doc_or_uid):
    """
    Accept a uid or a dict with a 'uid' key. Return the uid.
    """
    try:
        uid = doc_or_uid['uid']
    except TypeError:
        # Not subscriptable by a string key: already a bare uid.
        return doc_or_uid
    return uid
|
def GetTryJobCommand(change_list, extra_change_lists, options, builder):
    """Constructs the 'cros tryjob' command.

    Args:
        change_list: The CL obtained from updating the packages.
        extra_change_lists: Extra change lists to run alongside the change
            list of updating the packages.
        options: Options to be passed into the tryjob command.
        builder: The builder to be passed into the tryjob command.

    Returns:
        The 'tryjob' command, as a list of argv tokens, with the change
        list of updating the packages and any extra information passed
        on the command line.
    """
    cmd = ['cros', 'tryjob', '--yes', '--json', '-g', '%d' % change_list]
    for extra_cl in (extra_change_lists or []):
        cmd += ['-g', '%d' % extra_cl]
    cmd.append(builder)
    for option in (options or []):
        cmd.append('--%s' % option)
    return cmd
|
def countKey(theDict, name):
    """ Count occurrences of key *name* anywhere in this dict-tree.

    The same key name may be used in different sections/sub-sections, so
    nested dicts are recursed into and every level contributes to the
    total. A key whose value is itself a dict is recursed, not counted.
    """
    total = 0
    for key, val in theDict.items():
        if isinstance(val, dict):
            total += countKey(val, name)  # recurse into the sub-section
        elif key == name:
            # Keep scanning: siblings won't match, but child dicts at
            # other keys may hold further hits.
            total += 1
    return total
|
def format_height_imperial(height):
    """Formats a height in decimeters as F'I" (feet and decimal inches)."""
    feet_float = height * 0.32808399   # decimeters -> feet
    whole_feet = int(feet_float)
    inches = (feet_float % 1) * 12     # fractional feet -> inches
    return "%d'%.1f\"" % (whole_feet, inches)
|
def development_overhead_cost_discount(hybrid_plant_size_MW, technology_size_MW):
    """
    Overhead discount from hybridization, as a fraction.

    Overhead is 2% of total cost (before management cost) at 100 MW and
    12% of TIC at 5 MW (NREL curve fit):
    https://www.nrel.gov/docs/fy19osti/72399.pdf
    """
    if technology_size_MW == 0:
        return 0.0
    # Same power-law fit evaluated at both sizes; the difference is the
    # discount in percent, converted to a fraction on return.
    discount_pct = (31.422 * (technology_size_MW ** (-0.598))) - \
                   (31.422 * (hybrid_plant_size_MW ** (-0.598)))
    return discount_pct / 100
|
def upper_power_of_two(value) -> int:
    """Return the smallest power of two that is >= *value* (minimum 1)."""
    power = 1
    while power < value:
        power *= 2  # keep doubling until we reach or pass value
    return power
|
def getSQLT(timestamp):
    """Convert a Python timestamp to an SQLite-compatible UNIX epoch INTEGER.

    :param timestamp: POSIX timestamp (possibly fractional seconds)
    :return: SQLite compatible timestamp in the form of a UNIX epoch INTEGER
    """
    # Deliberately tiny: it names the SQL-facing conversion explicitly.
    return int(timestamp)
|
def Sphere(individual):
    """Sphere test objective function: F(x) = sum_{i=1}^d xi^2.

    Works for any dimension d >= 1; typical range [-100, 100] per
    coordinate, global minimum 0 at the origin.
    """
    total = 0
    for coordinate in individual:
        total += coordinate ** 2
    return total
|
def throttling_mod_func(d: list, e: int):
    """Perform the modular function from the throttling array functions.

    Mirrors the javascript expression
        e = (e % d.length + d.length) % d.length
    which forces the result into [0, len(d)). (Python's % is already
    non-negative for a positive modulus; the double mod keeps the
    translation literal.)
    """
    length = len(d)
    return (e % length + length) % length
|
def _flatten_dependency_maps(all_dependency_maps):
"""Flatten a list of dependency maps into one dictionary.
Dependency maps have the following structure:
```python
DEPENDENCIES_MAP = {
# The first key in the map is a Bazel package
# name of the workspace this file is defined in.
"workspace_member_package": {
# Not all dependnecies are supported for all platforms.
# the condition key is the condition required to be true
# on the host platform.
"condition": {
# An alias to a crate target. # The label of the crate target the
# Aliases are only crate names. # package name refers to.
"package_name": "@full//:label",
}
}
}
```
Args:
all_dependency_maps (list): A list of dicts as described above
Returns:
dict: A dictionary as described above
"""
dependencies = {}
for workspace_deps_map in all_dependency_maps:
for pkg_name, conditional_deps_map in workspace_deps_map.items():
if pkg_name not in dependencies:
non_frozen_map = dict()
for key, values in conditional_deps_map.items():
non_frozen_map.update({key: dict(values.items())})
dependencies.setdefault(pkg_name, non_frozen_map)
continue
for condition, deps_map in conditional_deps_map.items():
# If the condition has not been recorded, do so and continue
if condition not in dependencies[pkg_name]:
dependencies[pkg_name].setdefault(condition, dict(deps_map.items()))
continue
# Alert on any miss-matched dependencies
inconsistent_entries = []
for crate_name, crate_label in deps_map.items():
existing = dependencies[pkg_name][condition].get(crate_name)
if existing and existing != crate_label:
inconsistent_entries.append((crate_name, existing, crate_label))
dependencies[pkg_name][condition].update({crate_name: crate_label})
return dependencies
|
def diam_fractal(DIM_FRACTAL, DiamInitial, NumCol):
    """Return the diameter of a floc after NumCol doubling collisions.

    Each collision multiplies the diameter by 2**(1/DIM_FRACTAL).
    """
    growth = 2 ** (NumCol / DIM_FRACTAL)
    return DiamInitial * growth
|
def touchdown(situation, new_situation, **kwargs):
    """Apply a touchdown: +7 points (assumes successful XP and no 2PC --
    revisit this for 2015?) and reset field position to the 25."""
    new_situation['yfog'] = 25
    new_situation['score_diff'] = situation['score_diff'] + 7
    return new_situation
|
def newton_raphson(func, funcd, initial_guess=1, acc=0.000001, max_iter=100000):
    """
    Find a root of ``func`` by Newton-Raphson iteration.

    func -> Function to find root of
    funcd -> Derivative of function
    initial_guess -> starting point for the iteration
    acc -> stop once abs(func(guess)) <= acc
    max_iter -> iteration budget. Bug fix: the original *incremented*
        the counter each step (``max_iter += 1``), so the budget was
        never consumed and a non-converging input looped forever.

    It does not work all the time!! The returned guess may not be a root
    if the method failed to converge within the budget.
    """
    guess = initial_guess
    # CAREFUL: Dont forget abs()!! Negative residuals must not look converged.
    while abs(func(guess)) > acc and max_iter >= 0:
        guess -= func(guess) / funcd(guess)
        max_iter -= 1
    return guess
|
def get_wait_time(retries, args):
    """ Calculates how long we should wait before re-executing a failed
    task, based on how many times it has failed in the past.

    Args:
        retries: An int that indicates how many times this task has failed.
        args: A dict with 'min_backoff_sec', 'max_doublings' and
            'max_backoff_sec' retry settings (string or numeric).
    Returns:
        The amount of time, in seconds, to wait before executing this
        task again.
    """
    min_backoff = float(args['min_backoff_sec'])
    max_backoff = float(args['max_backoff_sec'])
    # Double at most max_doublings times, and at most once per past failure.
    doublings = min(int(args['max_doublings']), retries)
    return min(2 ** (doublings - 1) * min_backoff, max_backoff)
|
def _MilestoneLabel(alerts):
"""Returns a milestone label string, or None."""
revisions = [a.start_revision for a in alerts if hasattr(a, 'start_revision')]
if not revisions:
return None
start_revision = min(revisions)
try:
milestone = _GetMilestoneForRevision(start_revision)
except KeyError:
logging.error('List of versions not in the expected format')
if not milestone:
return None
logging.info('Matched rev %s to milestone %s.', start_revision, milestone)
return 'M-%d' % milestone
|
def is_valid_config(config):
    """
    Checks if a data structure can be considered as a valid binoas
    configuration: a mapping whose single top-level key is 'binoas'.
    Returns true or false.
    """
    return list(config.keys()) == ['binoas']
|
def number_to_cells(num, num_cells):
    """Format *num* as *num_cells* space-separated 32-bit hex cells,
    most-significant cell first."""
    cells = [(num >> (32 * shift)) & 0xFFFFFFFF
             for shift in range(num_cells - 1, -1, -1)]
    return " ".join("0x%x" % cell for cell in cells)
|
def reverse_complement(sequence):
    """ Return the reverse complement of a nucleotide sequence.

    Handles upper/lower case, N, gaps ('-') and IUPAC ambiguity codes;
    unexpected characters are reported and replaced by 'X'.
    """
    complement_bases = {
        'g':'c', 'c':'g', 'a':'t', 't':'a', 'n':'n',
        'G':'C', 'C':'G', 'A':'T', 'T':'A', 'N':'N', "-":"-",
        "R":"Y", "Y":"R", "S":"W", "W":"S", "K":"M", "M":"K",
        "B":"V", "V":"B", "D": "H", "H": "D",
        "r":"y", "y":"r", "s":"w", "w":"s", "k":"m", "m":"k",
        "b":"v", "v":"b", "d": "h", "h": "d"
    }
    revcomp = []
    for base in reversed(sequence):
        if base in complement_bases:
            revcomp.append(complement_bases[base])
        else:
            print("Unexpected base encountered: ", base, " returned as X!!!")
            revcomp.append("X")
    return "".join(revcomp)
|
def _uniquify(seq, sep='-'):
"""Uniquify a list of strings.
Adding unique numbers to duplicate values.
Parameters
----------
seq : `list` or `array-like`
A list of values
sep : `str`
Separator
Returns
-------
seq: `list` or `array-like`
A list of updated values
"""
dups = {}
for i, val in enumerate(seq):
if val not in dups:
# Store index of first occurrence and occurrence value
dups[val] = [i, 1]
else:
# Increment occurrence value, index value doesn't matter anymore
dups[val][1] += 1
# Use stored occurrence value
seq[i] += (sep+str(dups[val][1]))
return(seq)
|
def to_list(str_rep_list: str) -> list:
    """
    Convenience function to change a string representation of a Python list
    into an actual list object.

    :param str str_rep_list: String that represents a Python list. e.g. "['0.5', '2.0']"
    :return: The parsed representative string.
    :rtype: list
    """
    body = str_rep_list.strip("[").strip("]")
    elements = []
    for raw in body.split(','):
        if raw == '' or raw == ' ':
            continue  # artifacts of empty lists / trailing commas
        elements.append(raw.strip(' ').strip("'").strip('"'))
    return elements
|
def replace_tup(tupl, old, new, count=-1):
    """
    Creates a copy of ``tupl`` with all occurrences of value ``old`` replaced by ``new``.
    Objects are replaced by value equality, not id equality (i.e. ``==`` not ``is``).
    If the optional argument ``count`` is given, only the first count occurrences are
    replaced.
    :param tupl: The tuple to copy
    :type tupl: ``tuple``
    :param old: The old value to replace
    :type old: ``any``
    :param new: The new value to replace with
    :type new: ``any``
    :param count: The number of occurrences to replace (-1 means all)
    :type count: ``int``
    :return: A copy of ``tupl`` with occurrences of ``old`` replaced by ``new``.
    :rtype: ``tuple``
    """
    # FIX: use isinstance instead of `type(...) == tuple` so tuple
    # subclasses (e.g. namedtuples) are accepted; still raises
    # AssertionError for non-tuples, as before.
    assert isinstance(tupl, tuple), '%s is not a tuple' % tupl
    result = []
    count = len(tupl) if count == -1 else count
    match = 0
    for item in tupl:
        if item == old and match < count:
            result.append(new)
            match += 1
        else:
            result.append(item)
    return tuple(result)
|
def freeze(o):
    """
    Recursively turn a structure of dicts/lists (with hashable leaves,
    including strings and numbers) into a hashable value: dicts become
    frozensets of (key, frozen-value) pairs, lists become tuples, and
    leaves are reduced to their hash.
    """
    if isinstance(o, dict):
        return frozenset((key, freeze(value)) for key, value in o.items())
    if isinstance(o, list):
        return tuple(freeze(item) for item in o)
    # Leaf: collapse to its hash so the overall result is hashable.
    return hash(o)
|
def make_symmetric(dict):
    """Make *dict* symmetric in place by adding a value -> key entry for
    every key -> value pair. Values are assumed to be unique (and
    hashable). Returns the same dict.

    NOTE: the parameter name shadows the builtin ``dict``; it is kept for
    interface compatibility with existing callers.
    """
    # Build the inverse mapping first (snapshot), then merge it in, so we
    # never mutate the dict while iterating it.
    dict.update({value: key for key, value in dict.items()})
    return dict
|
def shortest_row_F(F1):
    """
    Get the minimum and maximum length of the elements of F1.
    Parameters
    ----------
    F1 : list of numpy 1D arrays.
    Returns
    -------
    shortest_row
        length of the shortest element in F1 (-1 if F1 is empty).
    longest_row
        length of the longest element in F1 (-1 if F1 is empty).
    """
    shortest_row = -1
    longest_row = -1
    for row in F1:
        n = len(row)
        if shortest_row < 0 or n < shortest_row:
            shortest_row = n
        # FIX: the original tested `shortest_row < 0` here; it only worked
        # by accident because longest_row starts at -1.
        if longest_row < 0 or n > longest_row:
            longest_row = n
    return [shortest_row, longest_row]
|
def _mat_vec_dot_fp(x, y):
"""Matrix (list of list) times vector (list)."""
return [sum(a * b for a, b in zip(row_x, y)) for row_x in x]
|
def parse_address(json) -> list:
    """
    Parse a Google Maps reverse-geocoding response.
    @param json: Google's reverse lookup json object (dict)
    @returns: [address, zipcode, neighborhood, locality]; components that
              are absent from the first result are returned as None
    """
    first_result = json['results'][0]
    address = first_result['formatted_address']
    zipcode = neighborhood = locality = None
    # Scan the address components once, picking out the three we care about.
    for component in first_result['address_components']:
        types = component['types']
        name = component['long_name']
        if 'postal_code' in types:
            zipcode = name
        elif 'neighborhood' in types:
            neighborhood = name
        elif 'locality' in types:
            locality = name
    return [address, zipcode, neighborhood, locality]
|
def _kappamstar(kappa, m, xi):
"""
Computes maximized cumulant of order m
Parameters
-----
kappa : list
The first two cumulants of the data
xi : int
The :math:`\\xi` for which is computed the p value of :math:`H_0`
m : float
The order of the cumulant
Returns
-----
k_out : list
The maximized cumulant of order m
"""
if xi == 1:
kappa_out = kappa[1]
else:
kappa_out = \
(kappa[1] * (xi ** (m - 1) - 1) -
kappa[0] * (xi ** (m - 1) - xi)) / (xi - 1)
return kappa_out
|
def remove_tokens_at_indices(tokens, indices):
    """Return *tokens* with the items at the given positions removed.

    ``indices`` is assumed to be sorted in increasing order; removing from
    the highest index first keeps the lower indices valid.

    :type tokens: list[object]
    :type indices: list[int]
    """
    remaining = tokens
    for index in reversed(indices):
        # Rebuild without the element at `index` (does not mutate the input).
        remaining = remaining[:index] + remaining[index + 1:]
    return remaining
|
def format_sec(s):
    """
    Format seconds in a more human readable way, scaling down through
    milli/micro/nano units as needed.
    :param s: Float of seconds to format
    :return: String representation, like "12.4 us"
    """
    # Try each unit in turn; stop once the value is >= 1 or we run out
    # of prefixes (nanoseconds is the floor).
    for prefix in ("", "m", "u", "n"):
        if s >= 1 or prefix == "n":
            return "{:.1f} {}s".format(s, prefix)
        s *= 1000
|
def BrickNameStrip(s, level=0):
    """Progressively strip a part description for space-limited output.

    Each ``level`` applies a different set of string substitutions
    (canonical renames, abbreviations, colour shortening, etc.).  Callers
    can retry with increasing levels until the result fits.  Useful for
    labelling or BOM part lists where space is limited.

    NOTE(review): substitution ORDER matters within each level (longer
    phrases are replaced before their substrings); do not reorder.
    """
    sn = s
    if level == 0:
        # Level 0: canonical part renames and fraction normalisation.
        sn = sn.replace(" ", " ")
        sn = sn.replace(
            "Plate 1 x 2 with Groove with 1 Centre Stud, without Understud",
            "Plate 1 x 2 Jumper",
        )
        sn = sn.replace(
            "Plate 1 x 2 without Groove with 1 Centre Stud", "Plate 1 x 2 Jumper"
        )
        sn = sn.replace(
            "Plate 1 x 2 with Groove with 1 Centre Stud", "Plate 1 x 2 Jumper"
        )
        sn = sn.replace("Brick 1 x 1 with Headlight", "Brick 1 x 1 Erling")
        sn = sn.replace("with Groove", "")
        sn = sn.replace("Bluish ", "Bl ")
        sn = sn.replace("Slope Brick", "Slope")
        sn = sn.replace("0.667", "2/3")
        sn = sn.replace("1.667", "1-2/3")
        sn = sn.replace("1 And 1/3", "1-1/3")
        sn = sn.replace("1 and 1/3", "1-1/3")
        sn = sn.replace("1 & 1/3", "1-1/3")
        sn = sn.replace("with Headlight", "Erling")
    elif level == 1:
        # Level 1: abbreviate connective words, number words and colours.
        sn = sn.replace("with ", "w/")
        sn = sn.replace("With ", "w/")
        sn = sn.replace("without ", "wo/")
        sn = sn.replace("Without ", "wo/")
        sn = sn.replace("One", "1")
        sn = sn.replace("Two", "2")
        sn = sn.replace("Three", "3 ")
        sn = sn.replace("Four", "4")
        sn = sn.replace(" and ", " & ")
        sn = sn.replace(" And ", " & ")
        sn = sn.replace("Dark", "Dk")
        sn = sn.replace("Light", "Lt")
        sn = sn.replace("Bright", "Br")
        sn = sn.replace("Reddish Brown", "Red Brown")
        sn = sn.replace("Reddish", "Red")
        sn = sn.replace("Yellowish", "Ylwish")
        sn = sn.replace("Medium", "Med")
        sn = sn.replace("Offset", "offs")
        sn = sn.replace("Adjacent", "adj")
    elif level == 2:
        # Level 2: compact dimensions and drop filler tokens.
        sn = sn.replace("Trans", "Tr")
        sn = sn.replace(" x ", "x")
        sn = sn.replace("Bl ", " ")
    elif level == 3:
        # Level 3: abbreviate colour names and common shape words.
        sn = sn.replace("Orange", "Org")
        sn = sn.replace("Yellow", "Ylw")
        sn = sn.replace("Black", "Blk")
        sn = sn.replace("White", "Wht")
        sn = sn.replace("Green", "Grn")
        sn = sn.replace("Brown", "Brn")
        sn = sn.replace("Purple", "Prpl")
        sn = sn.replace("Violet", "Vlt")
        sn = sn.replace("Gray", "Gry")
        sn = sn.replace("Grey", "Gry")
        sn = sn.replace("Axlehole", "axle")
        sn = sn.replace("Cylinder", "cyl")
        sn = sn.replace("Inverted", "inv")
        sn = sn.replace("Centre", "Ctr")
        sn = sn.replace("Center", "Ctr")
        sn = sn.replace("Rounded", "round")
        sn = sn.replace("Underside", "under")
        sn = sn.replace("Vertical", "vert")
        sn = sn.replace("Horizontal", "horz")
        sn = sn.replace("Flex-System", "Flex")
        sn = sn.replace("Flanges", "Flange")
        sn = sn.replace("Type 1", "")
        sn = sn.replace("Type 2", "")
    elif level == 4:
        # Level 4: shorten structural descriptors.
        sn = sn.replace("Technic", "")
        sn = sn.replace("Single", "1")
        sn = sn.replace("Dual", "2")
        sn = sn.replace("Double", "Dbl")
        sn = sn.replace("Stud on", "stud")
        sn = sn.replace("Studs on Sides", "stud sides")
        sn = sn.replace("Studs on Side", "side studs")
        sn = sn.replace("Hinge Plate", "Hinge")
    elif level == 5:
        # Level 5: more aggressive abbreviation of feature words.
        sn = sn.replace(" on ", " ")
        sn = sn.replace(" On ", " ")
        sn = sn.replace("Rounded", "Rnd")
        sn = sn.replace("Round", "Rnd")
        sn = sn.replace("Side", "Sd")
        sn = sn.replace("Groove", "Grv")
        sn = sn.replace("Minifig", "")
        sn = sn.replace("Curved", "curv")
        sn = sn.replace("Notched", "notch")
        sn = sn.replace("Friction", "fric")
        sn = sn.replace("(Complete)", "")
        sn = sn.replace("Cross", "X")
        sn = sn.replace("Embossed", "Emb")
        sn = sn.replace("Extension", "Ext")
        sn = sn.replace("Bottom", "Bot")
        sn = sn.replace("Inside", "Insd")
        sn = sn.replace("Locking", "click")
        sn = sn.replace("Axleholder", "axle")
    elif level == 6:
        # Level 6: last resort — crush the most common remaining words.
        sn = sn.replace("Studs", "St")
        sn = sn.replace("Stud", "St")
        sn = sn.replace("Corners", "edge")
        sn = sn.replace("w/Curv Top", "curved")
        sn = sn.replace("Domed", "dome")
        sn = sn.replace("Clips", "clip")
        sn = sn.replace("Convex", "cvx")
    return sn
|
def _get_type_and_value(entry):
"""Parse dmidecode entry and return key/value pair"""
r = {}
for l in entry.split('\n'):
s = l.split(':')
if len(s) != 2:
continue
r[s[0].strip()] = s[1].strip()
return r
|
def filter_and_rename_dict(indict, filterdict):
    """
    Rename the keys of *indict* using *filterdict* as the old->new map.
    Keys not present in *filterdict*, and entries whose value is None,
    are dropped.
    Example:
    >>> import pprint
    >>> outdict = filter_and_rename_dict({'a': 1, 'b': [2, 2]}, {'a': 'c', 'b': 'd', 'e': 'f'})
    >>> pprint.pprint(outdict)
    {'c': 1, 'd': [2, 2]}
    """
    outdict = {}
    for key, value in indict.items():
        if value is None or key not in filterdict:
            continue
        outdict[filterdict[key]] = value
    return outdict
|
def make_message(msg_format, coll_id="testcoll", type_id="testtype", **kwargs):
    """
    Build a message by %-interpolating the keyword parameters (plus
    coll_id and type_id) into the supplied format string.
    """
    # kwargs cannot contain coll_id/type_id (they bind the named params),
    # so merging them in afterwards is safe.
    values = {**kwargs, 'coll_id': coll_id, 'type_id': type_id}
    return msg_format % values
|
def httpDelete(
    url,
    contentType=None,
    connectTimeout=10000,
    readTimeout=60000,
    username=None,
    password=None,
    headerValues=None,
    bypassCertValidation=True,
):
    """Performs an HTTP DELETE to the given URL.

    NOTE: this is a stub — it only echoes its arguments to stdout and
    returns the ``object`` builtin; no request is performed.

    Args:
        url (str): The URL to send the request to.
        contentType (str): MIME type for the HTTP 'Content-type' header.
            Optional.
        connectTimeout (int): Connect timeout in milliseconds. Default
            10,000. Optional.
        readTimeout (int): Read timeout in milliseconds. Default 60,000.
            Optional.
        username (str): If given, use basic HTTP authentication. Optional.
        password (str): Password for basic HTTP authentication, used with
            username. Optional.
        headerValues (dict): Name/value pairs to set in the HTTP header.
            Optional.
        bypassCertValidation (bool): If the target is HTTPS and this is
            True, skip SSL certificate validation (not recommended;
            sometimes needed for self-signed certificates). Optional.

    Returns:
        object: The content returned for the DELETE operation.
    """
    request_params = (
        url,
        contentType,
        connectTimeout,
        readTimeout,
        username,
        password,
        headerValues,
        bypassCertValidation,
    )
    print(*request_params)
    return object
|
def unchunk(body: bytes):
    """
    Unchunk a Transfer-Encoding: chunked HTTP response body.
    :param body: The bytes of the chunked response
    :return: The unchunked response bytes
    """
    new_body = b""
    while True:
        # Each chunk starts with its hex size followed by CRLF.
        header_end = body.find(b"\r\n")
        chunk_size = int(body[:header_end], 16)
        body = body[header_end + 2:]
        if chunk_size == 0:
            # Zero-length chunk terminates the stream.
            break
        # Append the chunk payload and skip its trailing CRLF.
        new_body += body[:chunk_size]
        body = body[chunk_size + 2:]
    return new_body
|
def mu_prime(eta, eta_0, Kappa=0.4):
    """
    Return (1/Kappa) * 1/(eta + eta_0).

    eta = k z, vertical coordinate [Adi.]
    Kappa, Von Karman constant (typically 0.4)
    """
    # Keep the same operation order as the analytic form to preserve
    # bit-exact float results.
    inv_kappa = 1 / Kappa
    return inv_kappa * (1 / (eta + eta_0))
|
def g(n):
    """Return G(n), computed recursively (no loops, per the assignment).

    G(n) = n for n < 4, otherwise
    G(n) = G(n-1) + 2*G(n-2) + 3*G(n-3).

    >>> g(1)
    1
    >>> g(4)
    10
    >>> g(5)
    22
    """
    # Base case guard, then the three-term recurrence.
    if n < 4:
        return n
    return g(n - 1) + 2 * g(n - 2) + 3 * g(n - 3)
|
def suggestDType(x):
    """Return a suitable dtype for x.

    For a list/tuple, only the FIRST element is inspected. Objects with a
    ``dtype`` attribute report it directly; plain floats/ints map to the
    builtin types; everything else (including strings, whose length we
    deliberately don't guess) maps to ``object``.
    """
    if isinstance(x, (list, tuple)):
        if not x:
            raise Exception('can not determine dtype for empty list')
        x = x[0]
    if hasattr(x, 'dtype'):
        return x.dtype
    if isinstance(x, float):
        return float
    if isinstance(x, int):
        return int
    return object
|
def averageJCoup_on(models, my_data):
    """Average the J-couplings over the selected model indices.

    Returns {residue: value} where each value is the sum over the models
    whose index appears in *models*, divided by len(models)."""
    totals = {}
    for model_num, model in enumerate(my_data):
        # Only accumulate models that were asked for.
        if model_num not in models:
            continue
        for resnum, value in model.items():
            if resnum in totals:
                totals[resnum] += value
            else:
                totals[resnum] = value
    n_models = len(models)
    return {resnum: total / n_models for resnum, total in totals.items()}
|
def computeStrideLength(window_length, overlap_ratio):
    """
    Stride length (samples between window starts) for a given window
    length (in samples) and overlap ratio. Assumes uniform sampling.
    """
    # Same float expression as the analytic form, rounded to whole samples.
    retained_fraction = 1 - overlap_ratio
    return round(window_length * retained_fraction)
|
def split_sentence(s):
    """
    Split string [s] on line breaks, returning the non-empty lines.

    (Despite the name, this splits on newlines, not sentence boundaries.)

    # Returns
    A list of strings. Each is stripped of surrounding whitespace, so
    every returned string is non-empty and starts/ends on non-whitespace.
    """
    stripped_lines = (line.strip() for line in s.splitlines())
    return [line for line in stripped_lines if line]
|
def ends_in_regular(string):
    """ According to the specification, the font fullname should not end in
    'Regular' for plain fonts. However, some fonts don't obey this rule. We
    keep the style info, to minimize the diff.

    Returns True iff the last whitespace-separated word is 'Regular'. """
    # FIX: the original compared with `is` (object identity), which is
    # unreliable for strings (depends on interning); use `==`.
    last_word = string.strip().split()[-1]
    return last_word == 'Regular'
|
def objective(data):
    """
    Objective function: the negated sum of the array's elements.
    Parameters:
        data : np.array (any summable sequence works)
    """
    total = sum(data)
    return -total
|
def check_allowed_references(
    get_allows_none, get_allowed_types, request, ref_dict
):
    """Check a reference dict against request-specific rules.

    A ``None`` ref_dict is allowed iff ``get_allows_none(request)`` says
    so. Otherwise ``ref_dict`` must have exactly one entry (shape
    ``{"TYPE": "ID"}``) and its TYPE key must be in
    ``get_allowed_types(request)``.
    """
    if ref_dict is None:
        return get_allows_none(request)
    if len(ref_dict) != 1:
        # Anything other than a single {"TYPE": "ID"} pair is malformed.
        return False
    (ref_type,) = ref_dict
    return ref_type in get_allowed_types(request)
|
def uniq_add(my_list=()):
    """Add all unique integers in a list (once for each integer).

    The default is an immutable empty tuple rather than ``[]`` to avoid
    the shared-mutable-default pitfall; behavior is unchanged since the
    argument is never mutated.
    """
    return sum(set(my_list))
|
def get_id(fastq_seq):
    """
    Retrieve the sequence ID of a FASTQ read.
    :param fastq_seq: (list) a list containing the four lines of a FASTQ record
    :return: (string) the sequence id (first header token, without the leading '@')
    :raises ValueError: if the header line does not start with '@'
    """
    header = fastq_seq[0].strip()
    # FIX: per the FASTQ format the header must START with '@' (the
    # original accepted '@' anywhere in the line), and only the leading
    # '@' is stripped so IDs that legitimately contain '@' are preserved.
    if header.startswith("@"):
        return header.split()[0][1:]
    raise ValueError("Wrong fastq format.")
|
def _merge_bounds(b1, b2):
"""Merge bounds coordenates."""
mins1, maxs1, sums1, n1 = b1
mins2, maxs2, sums2, n2 = b2
if n1 > 0:
if n2 > 0:
min_lat = min([mins1[0], mins2[0]])
min_lon = min([mins1[1], mins2[1]])
max_lat = max([maxs1[0], maxs2[0]])
max_lon = max([maxs1[1], maxs2[1]])
sums = [sums1[0]+sums2[0], sums1[1]+sums2[1]]
n = n1+n2
else:
min_lat = mins1[0]
min_lon = mins1[1]
max_lat = maxs1[0]
max_lon = maxs1[1]
sums = sums1
n = n1
else:
min_lat = mins2[0]
min_lon = mins2[1]
max_lat = maxs2[0]
max_lon = maxs2[1]
sums = sums2
n = n2
mins = [min_lat, min_lon]
maxs = [max_lat, max_lon]
return [mins, maxs, sums, n]
|
def rm_whitespace(data):
    """Function: rm_whitespace

    Description: Remove all space characters (only ' ', not tabs or
    newlines) from a data string.

    Arguments:
        (input) data -> Data string.
        (output) Data string minus any spaces.
    """
    # Splitting on ' ' and rejoining is equivalent to replace(" ", "").
    return "".join(data.split(" "))
|
def lchloc(b5, b7):
    """
    Leaf Chlorophyll Content (Wulf and Stuhler, 2015).

    .. math:: LChloC = b7/b5

    :param b5: Red-edge 1.
    :type b5: numpy.ndarray or float
    :param b7: Red-edge 3.
    :type b7: numpy.ndarray or float
    :returns LChloC: Index value

    .. Tip::
        Wulf, H.; Stuhler, S. 2015. Sentinel-2: Land Cover, \
        Preliminary User Feedback on Sentinel-2A Data. \
        in: Proceedings of the Sentinel-2A Expert Users \
        Technical Meeting, Frascati, Italy, 29-30 September 2015.
    """
    return b7 / b5
|
def text2ascii(text):
    """Encode *text* as space-separated decimal character codes."""
    codes = [str(ord(ch)) for ch in text]
    return " ".join(codes)
|
def create(event, context):
    """No-op handler: ignores its arguments and returns a fixed
    (handler-name, empty-dict) pair."""
    handler_name = "MNPJobDefinitionCleanupHandler"
    return handler_name, {}
|
def _autotuple(a):
"""Automatically convert the supplied iterable to a scalar or tuple as appropriate."""
if not hasattr(a, "__iter__"):
return a
if len(a) == 1:
return a[0]
return tuple(a)
|
def make_line_points(y1, y2, line):
    """
    Convert a line given as (slope, intercept) into two integer pixel
    endpoints at heights y1 and y2; returns None when no line is given.
    """
    if line is None:
        return None
    # see https://github.com/paramaggarwal/CarND-LaneLines-P1/blob/master/P1.ipynb
    # for averaging / moving lines
    slope, intercept = line
    # cv2.line requires integer coordinates, so truncate everything.
    point1 = (int((y1 - intercept) / slope), int(y1))
    point2 = (int((y2 - intercept) / slope), int(y2))
    return (point1, point2)
|
def length(requests_mean):
    """
    Bucket a mean request count into a length label:
    <= 5 -> 'short', <= 10 -> 'medium', otherwise 'long'.
    (Arbitrary thresholds.)
    """
    if requests_mean <= 5.:
        return 'short'
    if requests_mean <= 10:
        return 'medium'
    return 'long'
|
def fuse_slice_pair(slice0, slice1, length):
    """Compute a slice `s` such that ``array[s] == array[slice0][slice1]``
    for an array of the given *length*.

    The inner slice is resolved against the length of the outer
    selection, then composed back into the outer coordinate system.
    """
    outer_start, outer_stop, outer_step = slice0.indices(length)
    inner_len = len(range(outer_start, outer_stop, outer_step))
    inner_start, inner_stop, inner_step = slice1.indices(inner_len)
    fused_start = outer_start + inner_start * outer_step
    fused_stop = outer_start + inner_stop * outer_step
    # A negative stop with a negative step would wrap around; use None
    # ("to the end") instead.
    return slice(fused_start,
                 None if fused_stop < 0 else fused_stop,
                 outer_step * inner_step)
|
def getval(obj, getter):
    """Get a value from *obj* using *getter*: a callable, an attribute
    name, or a dot-separated attribute path. For attribute paths, a
    callable end result is invoked with no arguments. True maps to
    'yes', False to '', and any falsy value to ''."""
    if callable(getter):
        val = getter(obj)
    else:
        # Follow the dotted attribute path, then call the result if it
        # turned out to be a method/function.
        val = obj
        for attr in getter.split('.'):
            val = getattr(val, attr)
        if callable(val):
            val = val()
    if val is True:
        return 'yes'
    if val is False:
        return ''
    return str(val or '')
|
def crop(w, h, bw, bh):
    """Crop

    Scale (w, h) so that one side exactly fits the (bw, bh) boundary and
    the other side overflows it (ready to be cropped).

    Arguments:
        w (int): The current width
        h (int): The current height
        bw (int): The boundary width
        bh (int): The boundary height

    Returns:
        dict with keys 'w' and 'h'
    """
    # Floats make the ratio arithmetic straightforward.
    w = float(w)
    h = float(h)
    result = {}
    if w < bw or h < bh:
        # Image smaller than the boundary: grow by the LARGER ratio so
        # both sides cover the boundary.
        if (bw / w) > (bh / h):
            result['w'] = bw
            result['h'] = int(round(bw * (h / w)))
        else:
            result['w'] = int(round(bh * (w / h)))
            result['h'] = bh
    else:
        # Image larger: shrink by the SMALLER ratio so both sides still
        # cover the boundary.
        if (w / bw) > (h / bh):
            result['w'] = int(round(bh * (w / h)))
            result['h'] = bh
        else:
            result['w'] = bw
            result['h'] = int(round(bw * (h / w)))
    return result
|
def translate(x, lowerIn, upperIn, lowerOut, upperOut):
    """Linearly map *x* from the range [lowerIn, upperIn] onto
    [lowerOut, upperOut]."""
    # Normalize x into [0, 1] within the input range, then rescale.
    fraction = (x - lowerIn) / (upperIn - lowerIn)
    return fraction * (upperOut - lowerOut) + lowerOut
|
def get_5d_string( number ):
    """
    Takes a non-negative integer, returns its last 5 decimal digits as a
    5-character string. Numbers < 10000 are zero-padded at the front;
    numbers >= 100000 are truncated to their last 5 digits.
    """
    digits = ""
    for i in range(5):
        # FIX: use integer floor division (//) instead of int(number/...):
        # float division loses precision for very large integers.
        digits += str((number // (10 ** (4 - i))) % 10)
    return digits
|
def strip_characters(text: str, *args: str) -> str:
    """Return *text* with every occurrence of each given character
    (or substring) removed."""
    result = text
    for unwanted in args:
        result = result.replace(unwanted, "")
    return result
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.