content
stringlengths 42
6.51k
|
---|
def calculate_integration_error(
    max_forth_derivative: float, low: float, high: float, length: int) -> float:
    """Approximate the error bound of Simpson's-rule integration.

    Args:
        max_forth_derivative: precomputed maximum of |f''''| on the range.
        low: left range boundary.
        high: right range boundary.
        length: number of values in range.

    Returns:
        float: approximate integration error.
    """
    width = high - low
    step = width / length
    return max_forth_derivative * width * step ** 4 / 180
|
def leafage(elts, fun_deps):
    """Collect all leaf nodes of a DAG reachable from elts.

    >>> graph = {2: [1]}
    >>> get_children = lambda n: graph.get(n, [])
    >>> list(leafage([], get_children))
    []
    >>> list(leafage([2, 1], get_children))
    [1]
    >>> sorted(leafage([2, 3], get_children))
    [1, 3]
    """
    visited = set()
    frontier = set(elts)
    leaves = set()
    while frontier:
        node = frontier.pop()
        visited.add(node)
        children = fun_deps(node)
        if children:
            # Only schedule unvisited children, so cycles/diamonds terminate.
            frontier.update(c for c in children if c not in visited)
        else:
            leaves.add(node)
    return leaves
|
def verify_turn_data(last_turn_data, new_turn_data):
    """Check that a new turn starts exactly where the previous turn ended.

    Returns True when there is no previous final state to compare against,
    or when the serialized states match; False on any mismatch (cheating).
    The stray unconditional `return True` that made the comparison
    unreachable has been removed.
    """
    if 'final_state' not in last_turn_data:
        return True
    end_of_last_turn = last_turn_data['final_state']
    start_of_new_turn = new_turn_data['initial_state']
    # Serialize both states so nested structures compare reliably.
    end_data = json.dumps(end_of_last_turn)
    start_data = json.dumps(start_of_new_turn)
    return end_data == start_data
|
def replace_null_values(dictionary, replacement_value):
    """
    Replace null or empty values in a dict with a replacement value.

    :param dictionary: A dict of values (may be None)
    :param replacement_value: substitute for nulls/empties, such as "--"
    :return: a new dict with null/empty values replaced
    """
    # `not dictionary` covers both None and {} (the original tested
    # `is None and not dictionary`, which only matched None).
    if not dictionary:
        return {}
    # '' added so genuinely empty strings are replaced too, as the
    # docstring's "null or empty" contract promises.
    null_markers = ('null', 'UNKNOWN', 'unknown', None, 'None', '')
    return {
        key: replacement_value if value in null_markers else value
        for key, value in dictionary.items()
    }
|
def shift(lis, ind):
    """Rotate the element at position ``ind`` to the end of the list.

    Successive adjacent swaps bubble lis[ind] rightwards; the list is
    modified in place and also returned.
    """
    for pos in range(ind, len(lis) - 1):
        lis[pos], lis[pos + 1] = lis[pos + 1], lis[pos]
    return lis
|
def path_vars_in(d):
    """
    Extract all (and only) the path vars in a dictionary.

    :param d: a .paths.json data structure
    :return: all (key, value) path var pairs, excluding special entries
        like '__ENV'
    """
    return [(key, value) for key, value in d.items() if key != '__ENV']
|
def is_binary_file(file_path, block_size=512):
    """
    Heuristically decide whether a file is binary.

    A file is considered binary when its first ``block_size`` bytes contain
    a NULL (b'\\x00') byte or cannot be decoded as ASCII. An empty file is
    text; an unreadable file is reported as non-binary.

    :param file_path: path to the file to inspect
    :param block_size: number of leading bytes to sample
    :return: True when the sample looks binary, False otherwise
    """
    # isinstance is the idiomatic type check (type(x) == str rejects subclasses).
    assert isinstance(file_path, str)
    try:
        with open(file_path, 'rb') as f:
            block = f.read(block_size)
        if b'\x00' in block:
            return True   # Files containing NULL bytes are binary.
        if not block:
            return False  # An empty file counts as text.
        try:
            block.decode('ascii')
            return False
        except UnicodeDecodeError:
            return True
    except IOError:
        # Unreadable/missing files are treated as non-binary (best effort).
        return False
|
def get_config_value(key_path, config):
    """Look up a dotted key path in a nested dict-like config.

    Args:
        key_path: dot-separated path, e.g. "section.option".
        config: nested mapping, or None.

    Returns:
        The value at the path, or None when config is None or any
        path segment is missing.
    """
    if config is None:
        return None
    node = config
    for segment in key_path.split("."):
        if segment not in node:
            return None
        node = node[segment]
    return node
|
def reverse(s: str) -> str:
    """Return the characters of ``s`` in reverse order.

    Example:
        "Abcd" returns "dcbA".
    """
    return "".join(reversed(s))
|
def fib_iterativa(number):
    """Iterative Fibonacci: return the ``number``-th Fibonacci value (F(0)=0)."""
    previous, nxt = 0, 1
    for _ in range(number):
        previous, nxt = nxt, previous + nxt
    return previous
|
def unit_mapping(unit, kern):
    """Given a unit and kernel, gives flat index into convolution. (starts at 1)

    :param unit: unit integer (1-{3,4,6}, depending on block.)
    :param kern: kernel string: "shortcut,conv1,conv2,conv3"
    :returns: flat index into unit within the block.
    """
    if unit == 1:
        # Only the first unit carries a shortcut convolution.
        mapping = {"shortcut": 0, "conv1": 1, "conv2": 2, "conv3": 3}
    else:
        # 4 kernels in the first unit, then 3 per subsequent unit.
        base = 4 + 3 * (unit - 2)
        mapping = {"conv1": base, "conv2": base + 1, "conv3": base + 2}
    return mapping[kern] + 1
|
def parse_number(key, string) -> float:
    """Parse a numeric pixel size out of a font string.

    Args:
        key: type of html (currently ignored)
        string: a string that represents the font, e.g. "12px, serif"

    Returns:
        float
    """
    first_field = string.split(", ")[0]
    return float(first_field.replace("px", ""))
|
def _get_browser_url(is_livecd):
    """Get default browser URI for autostart and desktop icon.

    This default URI can be different in Live CD and installed system;
    currently both environments use the same address.
    """
    # NOTE(review): both branches of the original returned the same URL, so
    # they are collapsed here. Confirm no distinct Live CD URL was intended.
    return 'http://localhost/'
|
def hp_testing_savage_dickey_ratio(models):
    """Compute the Savage-Dickey Bayes factor of the "null" for each model.

    Args:
        - models: a dict, keys are model names while values are dictionaries
          reporting 'prior' and 'posterior' probabilities for the "null".

    Returns:
        - bfs: a dictionary, posterior/prior ratio of the null per model.
    """
    return {
        name: spec['posterior'] / spec['prior']
        for name, spec in models.items()
    }
|
def PostOrderTraversal(root):
    """Post-order (left, right, node) traversal of a binary tree.

    :type root: TreeNode
    :rtype List[object]
    """
    if not root:
        return []
    out = []

    def visit(node):
        # Recurse into children first, then record the node itself.
        if node:
            visit(node.left)
            visit(node.right)
            out.append(node.val)

    visit(root)
    return out
|
def _parse_members(argument):
    """Convert the :members: option of a class directive.

    Returns True when no argument was given (document everything),
    otherwise the list of stripped member names.
    """
    if argument is None:
        return True
    return [name.strip() for name in argument.split(",")]
|
def replace_font_name(font_name, replacement_dict):
    """Apply every key -> value substitution from replacement_dict to font_name."""
    for old, new in replacement_dict.items():
        font_name = font_name.replace(old, new)
    return font_name
|
def post_summary(value, arg):
    """
    Return the first N characters of a block of text, where N is the only
    argument; an ellipsis is appended when the text was truncated.

    The original condition was inverted (it returned the full text when it
    was *longer* than the limit) and compared against the raw `arg` instead
    of the parsed integer.
    """
    limit = int(arg)
    if len(value) <= limit:
        # Short enough: return unchanged, no ellipsis needed.
        return value
    return value[:limit] + '...'
|
def _pin_name(pin):
    """Returns the proper representation for a pin.

    Strips whitespace; returns None for empty input or the 'NO_PIN'
    sentinel, an int for purely numeric pins, and the string itself for
    port-letter pins such as 'A7'. Raises ValueError for anything else.
    """
    pin = pin.strip()
    if not pin:
        return None
    elif pin.isdigit():
        return int(pin)
    elif pin == 'NO_PIN':
        return None
    elif len(pin) > 1 and pin[0] in 'ABCDEFGHIJK' and pin[1].isdigit():
        # The length guard prevents an IndexError (instead of the intended
        # ValueError) on a bare port letter such as 'A'.
        return pin
    raise ValueError(f'Invalid pin: {pin}')
|
def _is_camel_case_ab(s, index):
    """Return True when s[index] starts a camel token, i.e. the pair
    s[index-1:index+1] looks like 'aB' (e.g. 'kA' inside 'workAt')."""
    if index < 1:
        return False
    return s[index - 1].islower() and s[index].isupper()
|
def check_move(x, y, possible_moves):
    """Return True when the (x-1, y-1) movement is in the allowed set."""
    return (x - 1, y - 1) in possible_moves
|
def minutiae_at(pixels, i, j, kernel_size):
    """
    Classify pixel (i, j) as a ridge "ending", "bifurcation" or "none"
    using the crossing-number method.

    https://airccj.org/CSCP/vol7/csit76809.pdf pg93
    Crossing number methods is a really simple way to detect ridge endings and ridge bifurcations.
    Then the crossing number algorithm will look at 3x3 pixel blocks:
    if middle pixel is black (represents ridge):
    if pixel on boundary are crossed with the ridge once, then it is a possible ridge ending
    if pixel on boundary are crossed with the ridge three times, then it is a ridge bifurcation
    :param pixels: 2-D binary image indexed as pixels[row][col]; 1 marks a ridge pixel
    :param i: row index of the centre pixel
    :param j: column index of the centre pixel
    :param kernel_size: 3 selects the 3x3 boundary ring; any other value selects the 5x5 ring
    :return: "ending", "bifurcation" or "none"
    """
    # if middle pixel is black (represents ridge)
    if pixels[i][j] == 1:
        if kernel_size == 3:
            # Offsets walk the 3x3 boundary in order; the first cell is
            # repeated at the end so the crossing count wraps around the ring.
            cells = [(-1, -1), (-1, 0), (-1, 1),   # p1 p2 p3
                     (0, 1), (1, 1), (1, 0),       # p8    p4
                     (1, -1), (0, -1), (-1, -1)]   # p7 p6 p5
        else:
            # 5x5 boundary ring, likewise closed by repeating the first cell.
            cells = [(-2, -2), (-2, -1), (-2, 0), (-2, 1), (-2, 2),           # p1 p2 p3
                     (-1, 2), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0),         # p8    p4
                     (2, -1), (2, -2), (1, -2), (0, -2), (-1, -2), (-2, -2)]  # p7 p6 p5
        # NOTE(review): each (k, l) offset is applied as pixels[i + l][j + k] —
        # confirm this (col, row) ordering matches the callers' convention.
        values = [pixels[i + l][j + k] for k, l in cells]
        # count crossing how many times it goes from 0 to 1
        crossings = 0
        for k in range(0, len(values)-1):
            crossings += abs(values[k] - values[k + 1])
        crossings //= 2
        # if pixel on boundary are crossed with the ridge once, then it is a possible ridge ending
        # if pixel on boundary are crossed with the ridge three times, then it is a ridge bifurcation
        if crossings == 1:
            return "ending"
        if crossings == 3:
            return "bifurcation"
    return "none"
|
def Pct_Converter(val, default_val=0.70):
    """Normalise a percentage to a decimal fraction.

    Values in (1.0, 100.0] are divided by 100; values above 100 are
    replaced by the (similarly normalised) default.

    Args:
        val (float): The value to test / convert to a percentage
        default_val (float): Fallback used when an invalid amount is passed

    Returns:
        float: the decimal-fraction percentage
    """
    # Normalise the fallback first so it is always a decimal fraction.
    if 1.0 < default_val <= 100.0:
        default_val = default_val / 100.0
    if default_val > 100.0:
        default_val = 0.7
    if val > 100:  # The maximum expected is 100%
        return default_val
    if 1.0 < val <= 100.0:
        val = val / 100.00
    return val
|
def reformat_filter(fields_to_filter_by):
    """Build the API filter string from a dict of field filters.

    Args:
        fields_to_filter_by (dict): fields to filter by; falsy values are skipped.

    Returns:
        string. Filter expression of the form "key+eq+'value'&..." to send
        in the API request (trailing '&' retained when non-empty).
    """
    clauses = [
        f"{field_key}+eq+'{field_value}'&"
        for field_key, field_value in fields_to_filter_by.items()
        if field_value
    ]
    return ''.join(clauses)
|
def find_root(tower):
    """
    Every program in the tower lists its dependents. The root is the one
    program that no other program lists as a dependent.
    """
    held = {
        dep
        for weight, holding in tower.values()
        for dep in holding
    }
    for prog in tower:
        if prog not in held:
            return prog
|
def average_prfacc(*evals, ndigits=4):
    """Average for multiple evaluations.

    Args:
        evals [tuple]: several evals without limitation
        ndigits [int]: decimal number of float

    Returns:
        avg [dict]: dict of the average values of all the evaluation metrics
    """
    avg = {}
    for key in evals[0]:
        values = [e[key] for e in evals]
        if isinstance(values[0], dict):
            # Propagate ndigits so nested metrics round consistently
            # (the original recursion silently fell back to the default).
            avg[key] = average_prfacc(*values, ndigits=ndigits)
        else:
            avg[key] = round(sum(values) / len(values), ndigits)
    return avg
|
def appendToFile(fileName: str, content: str):
    """Appends content to the end of the given file."""
    with open(fileName, "a") as handle:
        handle.write(content)
    return None
|
def has_dhcp_option_perm(user_level, obj, ctnr, action):
    """
    Permissions for dhcp-related options.
    DHCP options are global like SOAs, related to subnets and ranges.
    Admins and users always have access; guests may only 'view';
    unknown user levels get nothing.
    """
    permissions = {
        'cyder_admin': True,
        'ctnr_admin': True,
        'user': True,
        'guest': action == 'view',
    }
    return permissions.get(user_level, False)
|
def complement(sequence, reverse=False):
    """
    Compute the complement of a DNA sequence.
    If reverse is True, reverse it too.
    """
    flip = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
    complemented = [flip[base] for base in sequence]
    if reverse:
        complemented.reverse()
    return ''.join(complemented)
|
def OVER(n):
    """
    Returns control codes to set or unset XOR mode text. This has interesting
    results when text or graphics is overwritten. Notably, writing text OVER
    the same text will erase the text. See the ZX Spectrum manual for details.
    Use this in a ``PRINT`` or ``SET`` command. Example:
    ``PRINT(AT(0,0),"over and",AT(0,0),OVER(1),"over again we go")``

    Args:
    - n - integer - XOR mode or not (0-1)
    """
    return chr(21) + chr(int(n))
|
def precision_ranges(result2rank, total_terms):
    """Computes precision at standard cutoff ranks: [5, 10, 15, 20, 30, 100, 200, 500, 1000]

    Args:
        result2rank: A dict of source to ranks of good translation candidates.
        total_terms: The expected term count.

    Returns:
        A dict containing a precision value for each cutoff rank
    """
    cutoffs = [5, 10, 15, 20, 30, 100, 200, 500, 1000]
    prec = {}
    for cutoff in cutoffs:
        # A source counts as a hit when any of its ranks is within the cutoff.
        hits = sum(
            1.0 for ranks in result2rank.values()
            if any(r <= cutoff for r in ranks)
        )
        prec[cutoff] = hits / total_terms
    return prec
|
def swap_parts(line):
    """Return the (key, value) pair with its two elements swapped."""
    first, second = line[0], line[1]
    return (second, first)
|
def get_resource_type_unit(type):
    """Returns the Boto3 resource type & unit to the caller.

    Unknown or missing types fall back to Amazon EC2 instances.

    :param type: resource type string (e.g. "s3"), or a falsy value
    :return: (resource_type, unit) tuple
    """
    # Map each known type to its (resource_type, unit) pair instead of a
    # long if/elif chain.
    mapping = {
        "codecommit": ("codecommit", "repositories"),
        "codepipeline": ("codepipeline", "pipelines"),
        "dynamodb": ("dynamodb", "tables"),
        "ebs": ("ebs", "volumes"),
        "ec2": ("ec2", "instances"),
        "ecr": ("ecr", "ecrrepositories"),
        "eks": ("eks", "clusters"),
        "lambda": ("lambda", "functions"),
        "rds": ("rds", "rdsclusters"),
        "rdsInstances": ("rdsInstances", "rdsInstances"),
        "redshift": ("redshift", "redshiftclusters"),
        "s3": ("s3", "buckets"),
    }
    # If no resource type specified, default to Amazon EC2. The original
    # skipped all assignments for a falsy `type` and then raised
    # UnboundLocalError at the return statement.
    return mapping.get(type, ("ec2", "instances"))
|
def renaming_modelGW(name):
    """Translate double-adder-model variable names into LaTeX plot labels.

    Parameters
    ----------
    name : list of strings
        list of names to translate (non-string entries pass through)

    Returns
    -------
    name : list of strings
        translated list of names
    """
    # Order matters: e.g. 'Lig2_fit' must be replaced before 'Lig_fit',
    # and 'Tbg' before 'Tg'.
    substitutions = [
        ('tau_g', '$\\alpha$'),
        ('Lig2_fit', '$\\Lambda_f$'),
        ('Lb_fit', '$\\Lambda_b$'),
        ('Lig_fit', '$\\Lambda_i$'),
        ('DeltaLgi', '$d\\Lambda_{if}$'),
        ('DeltaLigb', '$d\\Lambda_{ib}$'),
        ('Tbg', '$T_{ib}$'),
        ('Tg', '$T_{if}$'),
        ('rLig', '$R_{if}$'),
    ]
    for old, new in substitutions:
        name = [x.replace(old, new) if type(x) == str else x for x in name]
    return name
|
def number_to_digits(num):
    """Return sorted list of digits in number."""
    return sorted(map(int, str(num)))
|
def make_edge_id(source_node_index, first_char):
    """Build the edge identifier "<index>::<char>" from the given params."""
    return f"{source_node_index}::{first_char}"
|
def getRasterBands(dataset, bandIndices):
    """
    Retrieves the specified raster bands from the supplied dataset.
    """
    return [dataset.GetRasterBand(index) for index in bandIndices]
|
def prepare_html(payload):
    """
    Replace newlines in the user input with HTML line breaks so the data
    can be submitted to Jama in the expected format.

    @params:
    payload -> user input
    """
    return payload.replace('\n', '<br>')
|
def canonical_base_path(base_path):
    """
    Make the given "basePath" canonical by stripping trailing slashes,
    so it can be prepended to paths starting with "/".
    """
    return base_path.rstrip('/')
|
def insert_options(command, options):
    """Insert additional command line options just before the first '--'
    argument of the given command and return the resulting list."""
    result = []
    remaining = options
    for arg in command:
        if remaining and arg.startswith('--'):
            result.extend(remaining)
            remaining = None  # insert only once
        result.append(arg)
    return result
|
def _get_queue_arguments(rabbit_ha_queues, rabbit_queue_ttl):
    r"""Construct the arguments for declaring a queue.

    (Raw docstring: the ``\.`` in the rabbitmqctl regex below is not a valid
    Python string escape.)

    If the rabbit_ha_queues option is set, we try to declare a mirrored queue
    as described here:
    http://www.rabbitmq.com/ha.html
    Setting x-ha-policy to all means that the queue will be mirrored
    to all nodes in the cluster. In RabbitMQ 3.0, queue mirroring is
    no longer controlled by the x-ha-policy argument when declaring a
    queue. If you just want to make sure that all queues (except those
    with auto-generated names) are mirrored across all nodes, run:
    rabbitmqctl set_policy HA '^(?!amq\.).*' '{"ha-mode": "all"}'
    If the rabbit_queue_ttl option is > 0, then the queue is
    declared with the "Queue TTL" value as described here:
    https://www.rabbitmq.com/ttl.html
    Setting a queue TTL causes the queue to be automatically deleted
    if it is unused for the TTL duration. This is a helpful safeguard
    to prevent queues with zero consumers from growing without bound.
    """
    args = {}
    if rabbit_ha_queues:
        args['x-ha-policy'] = 'all'
    if rabbit_queue_ttl > 0:
        # RabbitMQ expects the TTL in milliseconds.
        args['x-expires'] = rabbit_queue_ttl * 1000
    return args
|
def sets_compare(sets_predict, sets_label):
    """
    Compare predicted vs. labelled sets pairwise.
    input: [data [tuple list [ tuple ...] ... ] ...]
    output: (num_correct, num_pred, num_label)
    NOTE: basic set should be tuple
    """
    num_correct = num_pred = num_label = 0
    for predicted, labelled in zip(sets_predict, sets_label):
        predicted = set(predicted)
        labelled = set(labelled)
        num_pred += len(predicted)
        num_label += len(labelled)
        num_correct += len(predicted & labelled)
    return (num_correct, num_pred, num_label)
|
def isFalse(str):
    """Checks for no, false, f and 0

    @param str: The string containing a bool expression
    @return: A bool object
    @since 0.0.1-beta
    """
    falsy_words = ("no", "false", "f", "0")
    return str in falsy_words
|
def dict_to_opts(d):
    """Turn a dict into a CLI option list.

    A value of None marks a bare flag (no value emitted); a list value
    emits one argument per element; anything else is stringified after
    the flag name.
    """
    opts = []
    for key, value in d.items():
        opts.append(f"--{key}")
        if type(value) is list:
            opts.extend(str(item) for item in value)
        elif value is not None:
            opts.append(str(value))
    return opts
|
def generateReplNumsList(count_of_unknown_numbers):
    """
    Generate every zero-padded digit permutation for the unknown positions.

    There are n^r permutations, where n = 10 digit options (0-9) and
    r = count_of_unknown_numbers; each is returned as a string padded to
    exactly r characters.
    """
    width = count_of_unknown_numbers
    # zfill to the full width. The original padded to (padding + 1) chars,
    # which under-padded any number longer than one digit — e.g. for
    # width 3, 45 became "45" instead of "045".
    return [str(i).zfill(width) for i in range(10 ** width)]
|
def _process_custom_formatters(formatters, columns):
    """Re-keys a dict of custom formatters to only use column indices.

    Args:
        formatters: A dict of formatters, keyed by column index or name.
        columns: The list of column names.

    Returns:
        A dict of formatters keyed only by column index. When an index key
        and a name key refer to the same column, the index entry wins and a
        warning is printed.
    """
    if not formatters:
        return {}
    # Check that all keys provided are valid column names or indices.
    # Warn if something doesn't check out.
    column_set = set(columns)
    for col in formatters:
        if isinstance(col, int) and col >= len(columns):
            print(('Warning: Custom formatter column index %d exceeds total number '
                   'of columns (%d)') % (col, len(columns)))
        if not isinstance(col, int) and col not in column_set:
            print(('Warning: Custom formatter column name %s not present in column '
                   'list') % col)
    # Separate out the custom formatters that use indices.
    output_formatters = {
        k: v for k, v in formatters.items() if isinstance(k, int)
    }
    for i, name in enumerate(columns):
        # Attempt to find a formatter based on column name.
        if name in formatters:
            if i in output_formatters:
                # Index-keyed formatters take precedence over name-keyed ones.
                print(('Warning: Custom formatter for column index %d present, '
                       'ignoring formatter for column name %s') % (i, name))
            else:
                output_formatters[i] = formatters[name]
    return output_formatters
|
def join(*parts):
    """
    Join path name components, inserting ``/`` as needed.

    If any component looks like an absolute path (i.e., it starts with
    ``hdfs:`` or ``file:``), all previous components will be discarded.
    Note that this is *not* the reverse of :func:`split`, but rather a
    specialized version of os.path.join. No check is made to determine
    whether the returned string is a valid HDFS path.
    """
    if not parts:
        raise TypeError("need at least one argument")
    path = [parts[0].rstrip("/")]
    for component in parts[1:]:
        component = component.strip("/")
        if component.startswith(('hdfs:', 'file:')):
            path = [component]  # absolute component resets the path
        else:
            path.append(component)
    return "/".join(path)
|
def separate_punctuation(x, punctuations):
    """Surround each of the given punctuation marks in x with spaces."""
    for mark in punctuations:
        x = x.replace(mark, f" {mark} ")
    return x
|
def get_greetings(name):
    """
    Used to test custom mimetypes
    """
    return {'greetings': f'Hello {name}'}
|
def get_trader_fcas_availability_max_quantity_condition(params) -> bool:
    """At least one quantity band must have positive value"""
    # True when any quantity band for the given service is greater than 0.
    quantities = params['quantity_bands'].values()
    return max(quantities) > 0
|
def combine_images(center, left, right, measurement, correction):
    """
    Combine the image paths from `center`, `left` and `right`, adjusting the
    steering measurements for the side cameras by `correction`.

    Returns ([image_paths], [measurements])
    """
    image_paths = [*center, *left, *right]
    measurements = (
        list(measurement)
        + [m + correction for m in measurement]   # left camera: steer right
        + [m - correction for m in measurement]   # right camera: steer left
    )
    return (image_paths, measurements)
|
def getindexlist(inputlist, acceptedvaluelist):
    """returns a list of index values whose elements match accepted values

    acceptedvaluelist should be entered as a list.
    [1,-1] is suggested for breathing analysis
    """
    return [index for index, value in enumerate(inputlist)
            if value in acceptedvaluelist]
|
def contains_repeated_pairs(string):
    """
    True when some two-letter pair appears at least twice in the string
    without overlapping, like xyxy (xy) or aabcdefgaa (aa), but not
    aaa (aa overlaps itself).
    """
    import re
    # Capture any pair, then look for the same pair again later on.
    return re.search(r"(..).*\1", string) is not None
|
def first_in_set(seq, items):
    """Returns first occurrence of any of items in seq, or None."""
    return next((element for element in seq if element in items), None)
|
def is_valid_binary(binary):
    """Check whether a binary exists at the given path and is executable.

    :param binary: The full path to the binary that needs to be verified.
    :return True: If the binary was found
        False: If the binary was not found.
    """
    import os
    exists = os.path.isfile(binary)
    return exists and os.access(binary, os.X_OK)
|
def mean(lyst):
    """Returns the mean of a list of numbers, or 0 for an empty list."""
    # Guard the empty case up front and use the builtin sum (the original
    # shadowed `sum` with a local accumulator and only checked emptiness
    # after looping).
    if not lyst:
        return 0
    return sum(lyst) / len(lyst)
|
def generate_jobdf_index(old_df, jobid_mask, keys, values):
    """Generates index for jobdf from mask for job_id naming.

    With a mask, each row of ``values`` is interpolated into the mask;
    otherwise a numeric range continuing from the old dataframe is used.
    """
    if jobid_mask:
        return [jobid_mask % dict(zip(keys, row)) for row in values]
    existing = len(old_df.index) if old_df is not None else 0
    first = existing - 1 if existing > 0 else 0
    return range(first, first + values.shape[0])
|
def _multiplot_interval(from_date, to_date, points):
    """
    Computes the size of the interval between points in a multiplot.

    :return: the multiplot interval size (0.0 when fewer than 2 points).
    :rtype: ``float``
    """
    return 0.0 if points < 2 else (to_date - from_date) / (points - 1)
|
def update_add(x, increment):
    """Update the value of `x` by adding `increment`.

    # Arguments
        x: A `Variable`.
        increment: A tensor of same shape as `x`.

    # Returns
        The tuple (x, x + increment).
    """
    updated = x + increment
    return (x, updated)
|
def _CodepointString(unicode_text):
    """Converts a unicode string to space-separated uppercase-hex codepoints.

    Args:
        unicode_text: unicode string

    Returns:
        string, e.g. "0041 0042" for "AB"
    """
    return " ".join(format(ord(ch), "04X") for ch in unicode_text)
|
def encode_kw11(to_encode):
    """
    Encode kwargs by dropping keys that parse as integers.

    :param: to_encode: kwargs to encode.
    :return: encoded kwargs: dict (empty when the input is not a dict)
    """
    if not isinstance(to_encode, dict):
        return {}
    encoded = {}
    for key, value in to_encode.items():
        try:
            int(key)
        except ValueError:
            # Only keys that are NOT numeric survive.
            encoded[key] = value
    return encoded
|
def nfp_rectangle(A, B):
    """
    No-fit polygon of rectangle B placed inside rectangle A.

    :param A: list of corner points of the outer rectangle, e.g.
        [{'x': 0, 'y': 0}, {'x': 10, 'y': 0}, ...]
    :param B: list of corner points of the inner rectangle
    :return: a single rectangular polygon (list of 4 points) describing
        where B[0] may be placed so B stays inside A, or None when B is
        wider or taller than A.
    """
    # Builtin min/max over generator expressions replace the four manual
    # scan loops of the original.
    min_ax = min(p['x'] for p in A)
    max_ax = max(p['x'] for p in A)
    min_ay = min(p['y'] for p in A)
    max_ay = max(p['y'] for p in A)
    min_bx = min(p['x'] for p in B)
    max_bx = max(p['x'] for p in B)
    min_by = min(p['y'] for p in B)
    max_by = max(p['y'] for p in B)
    # B wider or taller than A: it cannot fit.
    if max_bx - min_bx > max_ax - min_ax:
        return None
    if max_by - min_by > max_ay - min_ay:
        return None
    ref_x, ref_y = B[0]['x'], B[0]['y']
    return [[
        {'x': min_ax - min_bx + ref_x, 'y': min_ay - min_by + ref_y},
        {'x': max_ax - max_bx + ref_x, 'y': min_ay - min_by + ref_y},
        {'x': max_ax - max_bx + ref_x, 'y': max_ay - max_by + ref_y},
        {'x': min_ax - min_bx + ref_x, 'y': max_ay - max_by + ref_y}
    ]]
|
def calc_z_shifts(zphase, zsubstrate, margin):
    """Calculate shifts from z = 0 for each phase.

    Layers are stacked bottom-up: margin, bottom substrate, margin, phase,
    margin, top substrate, margin; 'box_z' is the resulting total height.
    """
    z = margin
    shifts = {'sub_bottom': z}
    z += zsubstrate + margin
    shifts['phase'] = z
    z += zphase + margin
    shifts['sub_top'] = z
    z += zsubstrate + margin
    shifts['box_z'] = z
    return shifts
|
def get_featuretype_name(geolevel_name, subject_name=None):
    """
    A uniform mechanism for generating featuretype names.
    """
    suffix = 'none' if subject_name is None else subject_name
    return 'demo_%s_%s' % (geolevel_name, suffix)
|
def greedy(items, maxCost, keyFunction):
    """Greedy knapsack. Assumes items a list, maxCost >= 0,
    keyFunction maps elements of items to numbers."""
    ordered = sorted(items, key=keyFunction, reverse=True)
    result = []
    totalValue = 0.0
    totalCost = 0.0
    for item in ordered:
        # Take the item only while the budget allows it.
        if totalCost + item.getCost() <= maxCost:
            result.append(item)
            totalCost += item.getCost()
            totalValue += item.getValue()
    return (result, totalValue)
|
def make_subsequences(x, y, step=1, max_len=2 ** 31):
    """
    Creates views to all prefixes (subsequences) of the sequence x.
    For example if x = [1,2,3,4], y = [1,1,0,0], step = 1, the result is
    a = [[1], [1,2], [1,2,3], [1,2,3,4]] and b = [1,1,0,0].
    Note that only a view into x is created, but not a copy of elements of x.

    Parameters
    ----------
    x : array [seq_length, n_features]
    y : numpy array of shape [n_samples]
        Target values. Can be string, float, int etc.
    step : int
        Step with which to subsample the sequence.
    max_len : int, default 2 ** 31
        Maximum length of the produced subsequences.

    Returns
    -------
    a, b : all subsequences of x taken with some step, and the labels
        assigned to these sequences.
    """
    X, Y = [], []
    for end in range(step - 1, len(x), step):
        begin = max(0, end - max_len)
        X.append(x[begin:end + 1])
        Y.append(y[end])
    return X, Y
|
def main(stdin):
    """
    Print each non-empty line from standard in, tab-joined with a count of 1.
    """
    for raw_line in stdin:
        line = raw_line.rstrip()  # drop trailing newline/whitespace
        if line:
            print("{line}\t{num}".format(line=line, num=1))
    return None
|
def merge_map(a, b):
    """Recursively merge elements of argument b into argument a.

    Two lists are concatenated; two dicts are merged key-by-key (b wins on
    conflicts, mutating and returning a); any other combination returns b.
    """
    if isinstance(a, list) and isinstance(b, list):
        return a + b
    if isinstance(a, dict) and isinstance(b, dict):
        for key, value in b.items():
            a[key] = merge_map(a[key], value) if key in a else value
        return a
    return b
|
def merger_mass_loss(q, a_1, a_2):
    """Compute mass lost during a GW merger between two BHs. Expression from
    Tichy & Marronetti 2008.

    Parameters
    ----------
    q : `float/array`
        Mass ratio of binary
    a_1 : `float/array`
        Spin of primary BH
    a_2 : `float/array`
        Spin of secondary BH

    Returns
    -------
    loss : `float/array`
        Fraction of total binary mass lost during merger
    """
    eta = q / (1 + q) ** 2  # symmetric mass ratio
    return 0.2 * eta + 0.208 * eta ** 2 * (a_1 + a_2)
|
def optionalFlagParse(raw: str) -> bool:
    """
    Parse the allowed values for the optional JSON and TIMESTAMP
    configuration flags (numeric strings or "FALSE"/"TRUE") into a boolean.
    """
    try:
        return bool(int(raw))
    except ValueError:
        pass
    if raw == "TRUE":
        return True
    if raw == "FALSE":
        return False
    raise ValueError("Invalid value: %s" % raw)
|
def create_name_tags(name):
    """Build EC2-style tags: a 'Name' tag plus a fixed 'ncluster' marker tag.

    Returns [{'Key': 'Name', 'Value': name},
             {'Key': 'ncluster', 'Value': 'ncluster'}].
    """
    return [{'Key': 'Name', 'Value': name}, {'Key': 'ncluster', 'Value': 'ncluster'}]
|
def create_empty_gid_matrix(width, height):
    """Creates a height x width matrix initialized with all zeroes.

    Each row is a distinct list, so mutating one row never affects another.
    """
    return [[0 for _ in range(width)] for _ in range(height)]
|
def attachedcase(var):  # samevariable
    """
    Attached case convention. Join all elements together.

    :param var: Variable to transform
    :type var: :py:class:`list`
    :returns: **transformed**: (:py:class:`str`) - Transformed input in ``attachedcase`` convention.
    .. warning:: The original element boundaries are lost, so the output
        cannot be analysed or transformed further.
    """
    return "".join(var)
|
def is_ports_valid(ports: list) -> list:
    """
    Determines whether the supplied list of ports is within the valid range.

    :param ports: A list of ports to check
    :type: hostname: list
    :return: the original list when every port is valid
    :raises ValueError: when a port is not an integer or any number of the
        provided ports is outside the valid range
    """
    out_of_range = []
    for port in ports:
        try:
            numeric = int(port)
        except ValueError:
            raise ValueError(f"{port} is not a valid integer!")
        if not 1 <= numeric <= 65535:
            out_of_range.append(str(port))
    if out_of_range:
        raise ValueError(
            f"Port(s) '{', '.join(out_of_range)}' is not in a valid range of (1-65535)")
    return ports
|
def bubble_sort(lst):
    """Sort the list in place using BubbleSort and return it."""
    n = len(lst)
    for pass_num in range(n):
        # After each pass the largest remaining element has bubbled
        # to the end, so the inner sweep shrinks by one.
        for j in range(n - pass_num - 1):
            if lst[j] > lst[j + 1]:
                lst[j], lst[j + 1] = lst[j + 1], lst[j]
    return lst
|
def sizeof_fmt(num, suffix='B'):
    """Transform a byte count into a human-readable unit string."""
    units = ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']
    for unit in units:
        if abs(num) < 1024.0:
            return "%3.1f %s%s" % (num, unit, suffix)
        num /= 1024.0
    # Anything past zettabytes is reported in yobibytes.
    return "%.1f %s%s" % (num, 'Yi', suffix)
|
def get_list_item(list, index):
    """Get item of list without the risk of an error being thrown."""
    in_bounds = 0 <= index < len(list)
    return list[index] if in_bounds else None
|
def format_seconds(seconds):
    """Convert seconds to a formatted "   H:MM:SS" string.

    Convert seconds: 3661
    To formatted: "   1:01:01"
    Negative inputs keep their sign, e.g. -61 -> "  -0:01:01".
    """
    sign = ""
    if seconds < 0:
        sign = "-"
        seconds = -seconds
    hours = seconds // 3600
    minutes = seconds % 3600 // 60
    seconds = seconds % 60
    # Build the hour field as text so the sign survives even when the hour
    # count is zero (the original multiplied 0 by -1 and lost the sign for
    # negative durations under one hour).
    return "{:>4}:{:02d}:{:02d}".format(sign + str(hours), minutes, seconds)
|
def find_index_of_ts_op_tuple(ts_op_tuples, op_id):
    """Return the index of the tuple whose op_id matches, or -1 when absent."""
    for index, candidate in enumerate(ts_op_tuples):
        if candidate.op_id == op_id:
            return index
    return -1
|
def mean(data):
    """Return the sample arithmetic mean of data."""
    total = sum(data)
    return total / float(len(data))
|
def create_file_name(num: int, length: int) -> str:
    """Create a dump file name, zero-padded to the width of ``length``.

    Args:
        num: number of iteration
        length: max string length

    Returns:
        File name string, e.g. "dump-007.jpg"
    """
    width = len(str(length))
    return f"dump-{str(num).zfill(width)}.jpg"
|
def _return_default(default):
    """
    Resolve a default: call it when callable, otherwise return it as-is.
    """
    if callable(default):
        return default()
    return default
|
def other_options(options):
    """
    Return a shallow copy of the plotting options, or an empty dict for None.
    """
    if options is None:
        return {}
    return options.copy()
|
def make_dict(first, rest, data):
    """Build a nested dict along the key path first -> rest, storing ``data``
    at the deepest key (an empty rest gives simply {first: data})."""
    result = {first: data}
    node, key = result, first
    for part in rest:
        # Descend one level: the previous key now holds a sub-dict.
        node[key] = {}
        node = node[key]
        node[part] = data
        key = part
    return result
|
def rightview(root):
    """
    Right side view of a binary tree via level-order traversal.
    time: O(n); space: O(logn)
    """
    view = []
    level = [root] if root else []
    while level:
        view.append(level[-1].val)  # rightmost node of this level
        level = [child
                 for node in level
                 for child in (node.left, node.right)
                 if child]
    return view
|
def ellipsize(msg, max_size=80):
    """This function will ellipsize the string.

    :param msg: Text to ellipsize.
    :param max_size: The maximum size before ellipsizing,
        default is 80.
    :return: The ellipsized string if len > max_size, otherwise
        the original string.
    """
    # Strict comparison: a string of exactly max_size already fits and must
    # not be truncated (the original used >=, contradicting the docstring).
    if len(msg) > max_size:
        return "%s (...)" % msg[0: max_size - 6]
    return msg
|
def get_quant_NAs(quantdata, quantheader):
    """Takes quantdata in a dict and header with quantkeys
    (eg iTRAQ isotopes). Returns dict of quant intensities
    with missing keys set to 'NA'."""
    return {qkey: quantdata.get(qkey, 'NA') for qkey in quantheader}
|
def _default_output(result):
    """The default output filter: stringify the result, passing None through."""
    if result is None:
        return None
    return str(result)
|
def get_formatted_place(city, country):
    """Return the place name "<city> <country>" in title case."""
    return f"{city} {country}".title()
|
def __get_leading_zeros(fl):
    """Return the number of leading zeros in a float's decimal part.

    E.g. 0.005 -> 2, 0.1 -> 0. Values greater than 1.0 return 0.

    Args:
        fl: the number to inspect.

    Returns:
        int: count of zeros between the decimal point and the first
        non-zero digit.
    """
    from decimal import Decimal  # local import keeps the helper self-contained

    if fl > 1.0:
        return 0
    # format(Decimal(...), "f") always produces plain fixed-point notation.
    # The original used str(fl), which switches to scientific notation for
    # values below 1e-4 (str(1e-7) == '1e-07') and miscounted the zeros.
    decimals = format(Decimal(repr(fl)), "f").split(".")[-1]
    return len(decimals) - len(decimals.lstrip("0"))
|
def is_valid_selection(ext):
    """Return True when *ext* is one of the recognised selection group
    names (e.g. 'Protein', 'Backbone', 'Water_and_ions')."""
    valid_groups = {
        'System', 'Protein', 'Protein-H', 'C-alpha', 'Backbone',
        'MainChain', 'MainChain+Cb', 'MainChain+H', 'SideChain',
        'SideChain-H', 'Prot-Masses', 'non-Protein', 'Water', 'SOL',
        'non-Water', 'Ion', 'NA', 'CL', 'Water_and_ions',
    }
    return ext in valid_groups
|
def write_partition_str(partitions) -> str:
    """
    Render the partition function as an OCaml-style match expression,
    mapping each node name (suffixed with 'n') to its partition index.
    """
    parts = ["let partition node = match node with\n"]
    for index, nodes in enumerate(partitions):
        arms = "\n".join(f"  | {node}n -> {index}" for node in nodes)
        parts.append(arms + "\n")
    return "".join(parts)
|
def restart_function(func):
    """Call *func* repeatedly until it returns a non-None value.

    :param func: a function that returns None if it needs to be rerun
    :return: the first non-None value returned by func
    """
    result = func()
    while result is None:
        result = func()
    return result
|
def generate_moku_manager_params(drop_mode,
                                 moku_size,
                                 board_size,
                                 gpu_id,
                                 num_res_layers,
                                 num_channels,
                                 ckpt_path=None,
                                 replace_unloaded_resnet_by_naivenet=True):
    """Build the configuration tuple for a Moku game manager.

    Args:
        drop_mode: boolean: indicating if the game is played in drop mode.
        moku_size: int: the number of same pieces in a line that are
            required to win the game.
        board_size: tuple: two values indicating number of rows and number
            of columns of the board.
        gpu_id: int: id of the GPU to use, or -1 to use CPU.
        num_res_layers: int: number of residual blocks in the CNN.
        num_channels: int: number of channels in the CNN layers.
        ckpt_path: string or None: path to the checkpoint file to be loaded.
        replace_unloaded_resnet_by_naivenet: boolean: if True, when
            ckpt_path is None, a NaiveNet is used instead of a ResNet with
            random weights.

    Returns:
        Tuple of (game_config_string, game_manager_module,
        game_manager_kwargs, game_manager_io_module, game_manager_io_kwargs).
    """
    # Any negative id selects the CPU device.
    tf_device = '/cpu:0' if gpu_id < 0 else '/gpu:' + str(gpu_id)

    game_config_string = 'moku_%d_%dx%d' % (
        moku_size, board_size[0], board_size[1])
    if drop_mode:
        game_config_string += '_drop'

    game_manager_module = ['moku_manager', 'MokuManager']
    game_manager_kwargs = {
        'drop_mode': drop_mode,
        'moku_size': moku_size,
        'board_size': board_size,
        'tf_device': tf_device,
        'num_res_layers': num_res_layers,
        'num_channels': num_channels,
        'ckpt_path': ckpt_path,
        'replace_unloaded_resnet_by_naivenet':
            replace_unloaded_resnet_by_naivenet,
    }

    game_manager_io_module = ['moku_manager_io', 'MokuManagerIO']
    game_manager_io_kwargs = {
        'drop_mode': drop_mode,
        'board_size': board_size,
    }

    return (game_config_string, game_manager_module, game_manager_kwargs,
            game_manager_io_module, game_manager_io_kwargs)
|
def binary_string_to_hex(s):
    """
    Convert a binary string to hexadecimal.
    If any non 0/1 character is present (such as 'x'), that single
    character is returned instead as a representation.
    :param s: the string to be converted
    :type s: str
    """
    non_binary = next((ch for ch in s if ch not in '01'), None)
    if non_binary is not None:
        return non_binary
    # format(..., 'x') gives the hex digits without the '0x' prefix.
    return format(int(s, 2), 'x')
|
def get_rs_chosen_from_bs(bs_chosen, b_to_r_master_dict_1, b_to_r_master_dict_2):
    """
    Return the list of R-groups represented by the chosen B-groups.

    The leading character of a B-group name is the ligand number ('1' or
    '2'), which selects the lookup dictionary. Names with any other
    leading character are silently skipped, matching the original
    if/elif behaviour. (The original also computed an unused B_number
    from B[2]; that dead code — which could raise IndexError on short
    names — has been removed.)

    Inputs:
    :param list bs_chosen: A list of the chosen B-groups. ie) ['1B1', '1B2',
        '2B3']
    :param dict b_to_r_master_dict_1: a Dictionary to reference B and
        R-groups from mol_1. keys are names of B-groups; items are R-groups
        that a B-group represents.
        ie) {'1B1':['1R1'],'1B2':['1R2','1R3','1R4'],'1B3': ['1R5']}
    :param dict b_to_r_master_dict_2: a Dictionary to reference B and
        R-groups from mol_2. keys are names of B-groups; items are R-groups
        that a B-group represents.
        ie) {'2B1':['2R1'],'2B2':['2R2','2R3','2R4'],'2B3': ['2R5','2R6']}
    Returns:
    :returns: list rs_chosen: a list containing all the R-groups represented
        by the chosen B-groups. ie) ['1R1', '1R2', '1R3', '1R4', '2R5', '2R6']
    """
    rs_chosen = []
    for b_name in bs_chosen:
        lig_number = b_name[0]
        if lig_number == "1":
            rs_chosen.extend(b_to_r_master_dict_1[b_name])
        elif lig_number == "2":
            rs_chosen.extend(b_to_r_master_dict_2[b_name])
    # rs_chosen looks like ['1R1', '1R5', '2R2']
    return rs_chosen
|
def serializable_key(*args, **kwargs):
    """Return JSON-serializable key for given arguments.

    A single positional argument with no keywords is returned unchanged
    (trivial case); anything else is collapsed into a string built from
    the args tuple and kwargs dict.
    """
    single_positional = len(args) == 1 and not kwargs
    return args[0] if single_positional else str(args) + str(kwargs)
|
def get_distances(vectors, distance_func):
    """
    Pre-compute the pairwise distance matrix for *vectors*.

    Off-diagonal entries hold distance_func(v_i, v_j) (stored
    symmetrically); diagonal entries hold the distance of each vector to
    the zero vector, as in the original implementation.
    """
    n = len(vectors)
    zero_vec = [0] * len(vectors[0])
    distances = [[0] * n for _ in range(n)]
    for i in range(n):
        distances[i][i] = distance_func(vectors[i], zero_vec)
        for j in range(i + 1, n):
            d = distance_func(vectors[i], vectors[j])
            distances[i][j] = d
            distances[j][i] = d
    return distances
|
def tobool(x):
    """Convert an object to boolean.

    String-like values matching true/yes/y or false/no/n
    (case-insensitively) map directly; anything else falls back to
    ``bool(int(x))``.

    Examples:
    >>> print(tobool('TRUE'))
    True
    >>> print(tobool(True))
    True
    >>> print(tobool(1))
    True
    >>> print(tobool('FALSE'))
    False
    >>> print(tobool(False))
    False
    >>> print(tobool(0))
    False
    >>> print(tobool('Foo'))
    Traceback (most recent call last):
    ValueError: Unknown boolean specifier: 'Foo'.
    >>> print(tobool(None))
    Traceback (most recent call last):
    ValueError: Unknown boolean specifier: 'None'.
    """
    try:
        lowered = x.lower()
    except AttributeError:
        # Not string-like; fall through to the numeric conversion.
        pass
    else:
        if lowered in ('true', 'yes', 'y'):
            return True
        if lowered in ('false', 'no', 'n'):
            return False
    try:
        return bool(int(x))
    except (TypeError, ValueError):
        pass
    raise ValueError(f"Unknown boolean specifier: '{x}'.")
|
def is_json(fname):
    """
    Return True when str(fname) is non-empty and enclosed in '[' ']',
    False otherwise.
    """
    text = str(fname)
    return bool(text) and text.startswith("[") and text.endswith("]")
|
# NOTE(review): the lines below are non-code residue (dataset-viewer web-page
# text) that would break parsing; preserved here as comments.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.