def map_sentinel_data(mappings, data, logger, data_type, subtype):
    """Filter the raw data and return the filtered data, which will be further pushed to Azure Sentinel.
    :param mappings: List of fields to be pushed to Azure Sentinel (read from mapping string)
    :param data: Data to be mapped (retrieved from Netskope)
    :param logger: Logger object for logging purposes
    :param data_type: The type of data being mapped (alerts/events)
    :param subtype: The subtype of data being mapped (for example DLP is a subtype of the alerts data type)
    :return: Mapped data based on fields given in mapping file
    """
    mapped_dict = {}
    ignored_fields = []
    for key in mappings:
        if key in data:
            mapped_dict[key] = data[key]
        else:
            ignored_fields.append(key)
    if ignored_fields:
        # Surface the mapped fields that were absent from this record.
        logger.debug(
            "Fields {} not found in {} ({}) record and were skipped.".format(
                ignored_fields, data_type, subtype
            )
        )
    return mapped_dict
|
def round_to_closest(x, y):
    """
    Round x up to the nearest multiple of y.
    """
    return int(y * ((x // y) + (x % y > 0)))
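A couple of illustrative calls (values invented for this note): the result is rounded up to the next multiple of y, not to the nearest one.
>>> round_to_closest(7, 5)
10
>>> round_to_closest(10, 5)
10
>>> round_to_closest(11, 5)
15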
|
def transition(old, new, jugs):
"""
returns a string explaining the transition from old state/node to new state/node
old: a list representing old state/node
new: a list representing new state/node
jugs: a list of two integers representing volumes of the jugs
"""
a = old[0]
b = old[1]
a_prime = new[0]
b_prime = new[1]
a_max = jugs[0]
b_max = jugs[1]
if a > a_prime:
if b == b_prime:
return "Clear {0}-liter jug:\t\t\t".format(a_max)
else:
return "Pour {0}-liter jug into {1}-liter jug:\t".format(a_max, b_max)
else:
if b > b_prime:
if a == a_prime:
return "Clear {0}-liter jug:\t\t\t".format(b_max)
else:
return "Pour {0}-liter jug into {1}-liter jug:\t".format(b_max, a_max)
else:
if a == a_prime:
return "Fill {0}-liter jug:\t\t\t".format(b_max)
else:
return "Fill {0}-liter jug:\t\t\t".format(a_max)
|
def gridsquare(coords):
"""
Generate list of coordinates for gridsquare
coords -- 4-tuple of grid centre coords, dlongitude, dlatitude
returns list of 4 (lon, lat) coords for grid corners
"""
(lon, lat, dlon, dlat) = coords
gs = [(lon - dlon/2., lat - dlat/2.), (lon - dlon/2., lat + dlat/2.), (lon + dlon/2., lat + dlat/2.), (lon + dlon/2., lat - dlat/2.)]
return gs
|
def call(callable_, *args, **kwargs):
""":yaql:call
Evaluates function with specified args and kwargs and returns the
result.
This function is used to transform expressions like '$foo(args, kwargs)'
to '#call($foo, args, kwargs)'.
Note that to use this functionality 'delegate' mode has to be enabled.
:signature: call(callable, args, kwargs)
:arg callable: callable function
:argType callable: python type
:arg args: sequence of items to be used for calling
:argType args: sequence
:arg kwargs: dictionary with kwargs to be used for calling
:argType kwargs: mapping
:returnType: any (callable return type)
"""
return callable_(*args, **kwargs)
|
def is_left(p0, p1, p2):
"""
is_left(): tests if a point is Left|On|Right of an infinite line.
Input: three points P0, P1, and P2
Return: >0 for P2 left of the line through P0 and P1
=0 for P2 on the line
<0 for P2 right of the line
See: Algorithm 1 "Area of Triangles and Polygons"
http://geomalgorithms.com/a03-_inclusion.html
:param p0: point [x,y] array
:param p1: point [x,y] array
:param p2: point [x,y] array
:return:
"""
v = (p1[0] - p0[0]) * (p2[1] - p0[1]) - (p2[0] - p0[0]) * (p1[1] - p0[1])
return v
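For intuition, a small hand-checked sketch (points chosen here): with p0=(0,0) and p1=(1,0) defining the x-axis, a point above the line yields a positive value, a point on it zero, and a point below it a negative value.
>>> is_left([0, 0], [1, 0], [0, 1])
1
>>> is_left([0, 0], [1, 0], [2, 0])
0
>>> is_left([0, 0], [1, 0], [0, -1])
-1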
|
def get_rename(add_command):
"""
Checks add_command for "RENAME: <old_name>".
Returns the value of old_name if there is a match.
Otherwise returns an empty string.
"""
if add_command is None or len(add_command) == 0:
return ""
s = add_command.strip()
if s.lower().startswith("rename:"):
        old_name = s.split(":", 1)[1].strip().strip('"').strip("'")
assert 0 < len(old_name)
return old_name
else:
return ""
|
def GetParentUriPath(parent_name, parent_id):
"""Return the URI path of a GCP parent resource."""
return '/'.join([parent_name, parent_id])
|
def is_numeric(value):
    """
    Check whether the given value is numeric. Numbers with a decimal point or
    thousands separators (commas) are recognised as well.
    :param value: the given value to be verified
    :return: True if the given value is numeric
    """
    value_str = str(value).replace(',', '')
    try:
        float(value_str)
        return True
    except (TypeError, ValueError):
        return False
|
def calculate_code_symbols_count(code: str) -> int:
""" Calculate number of symbols in code. """
if isinstance(code, str):
return len(code)
return 0
|
def remove_unwanted_keys(data, keys_to_remove):
"""Removes unwanted keys
Method returns data after removing unwanted keys
"""
for key in keys_to_remove:
data.pop(key, None)
return data
|
def to_title_case(x):
""" underscore or dash to title case notation """
return x.replace('_', ' ').replace('-', ' ').title()
|
def hamming_distance(seq1, seq2):
"""Return the Hamming distance between equal-length sequences."""
if len(seq1) != len(seq2):
raise ValueError('Undefined for sequences of unequal length')
return sum(elem1 != elem2 for elem1, elem2 in zip(seq1, seq2))
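A short usage sketch (the classic textbook pair, chosen here only as an illustration); any equal-length sequences work, not just strings.
>>> hamming_distance("karolin", "kathrin")
3
>>> hamming_distance([1, 0, 1], [1, 1, 1])
1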
|
def append(x, y):
"""
Appends M{y} to M{x}
@type x: list
@type y: Any
@return: A new list created by appending y to x
@rtype: list
"""
    return x + [y] if y is not None else x
|
def strip_quotes(string):
""" strip first and last (double) quotes"""
if string.startswith('"') and string.endswith('"'):
return string[1:-1]
if string.startswith("'") and string.endswith("'"):
return string[1:-1]
return string
|
def fs_replace_badchars(payload):
    """replace characters that conflict with the flowsynth syntax"""
    badchars = ['"', "'", ';', ":", " "]
    for char in badchars:
        payload = payload.replace(char, "\\x%02x" % ord(char))
    payload = payload.replace("\r\n", '\\x0d\\x0a')
    return payload
|
def starts_with(full, part) -> bool:
""" return True if part is prefix of full
"""
if len(full) < len(part):
return False
return full[:len(part)] == part
|
def intersect(range_1, range_2):
"""Return intersection size."""
return min(range_1[1], range_2[1]) - max(range_1[0], range_2[0])
|
def _unflatten_lists(d: dict) -> dict:
"""
Note: modifies dict
In []: _unflatten_lists({'a': {'0': {'b': {'f': {'0': 1, '1': 2, '2': 3}}}, '1': {'c': 2}}, 'd': {'e': 1}})
Out[]: {'a': [{'b': {'f': [1, 2, 3]}}, {'c': 2}], 'd': {'e': 1}}
"""
for k, v in d.items():
try:
# Following line's purpose is just to trigger an error when needed:
# it only works if v is a dict whose keys are integer (all of them)
[int(kk) for kk in v]
d[k] = [
_unflatten_lists(d[k][kk]) if isinstance(d[k][kk], dict) else d[k][kk]
for kk in v
]
except Exception:
if isinstance(v, dict):
d[k] = _unflatten_lists(v)
return d
|
def average(values):
"""
Computes the arithmetic mean of a list of numbers.
>>> average([0.9, 0.9, 0.9, 1.0, 0.8, 0.9])
0.9
"""
return (1.0 * sum(values) / len(values)) if values else None
|
def generate_conf_suffix(d_params):
""" generating suffix strung according given params
:param dict d_params: dictionary
:return str:
>>> params = {'my_Param': 15}
>>> generate_conf_suffix(params)
'_my-Param=15'
>>> params.update({'new_Param': 'abc'})
>>> generate_conf_suffix(params)
'_my-Param=15_new-Param=abc'
"""
suffix = '_'
suffix += '_'.join('{}={}'.format(k.replace('_', '-'), d_params[k]) for k in sorted(d_params) if k != 'param_idx')
return suffix
|
def update_dataset_list(dataset_list, dataset_list_unique_series):
"""
Update the dataset_list with information that we found from the unique
series list. Since the unique_series_list does not contain all dataset
acquisitions, use the unique series ID (series_idx) to port information
over.
"""
for unique_dic in dataset_list_unique_series:
for data in dataset_list:
if data["series_idx"] == unique_dic["series_idx"]:
data["entities"] = unique_dic["entities"]
data["type"] = unique_dic["type"]
data["forType"] = unique_dic["forType"]
data["error"] = unique_dic["error"]
data["message"] = unique_dic["message"]
return dataset_list
|
def hikGetEffectorNodeName( character , effectorName ):
"""
This method returns the effector node name in the given HumanIK character
for the given generic HumanIK effector name.
"""
# FIXME: Find a way to get the effector from the node.
return character + '_Ctrl_' + effectorName
|
def ceil_div(a, b):
"""Return ceil(a / b) without performing any floating-point operations."""
if not isinstance(a, int) or not isinstance(b, int):
raise TypeError("unsupported operand type(s): %r and %r" % (type(a).__name__, type(b).__name__))
(q, r) = divmod(a, b)
if r:
return q + 1
else:
return q
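A few illustrative calls (values chosen here) showing that rounding is toward positive infinity, including for negative operands:
>>> ceil_div(7, 2)
4
>>> ceil_div(8, 2)
4
>>> ceil_div(-7, 2)
-3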
|
def _make_socket_path(host: str, display: int, screen: int) -> str:
"""
Attempt to create a path to a bspwm socket.
No attempts are made to ensure its actual existence.
The parameters are intentionally identical to the layout of an XDisplay,
so you can just unpack one.
Parameters:
host -- hostname
display -- display number
screen -- screen number
Example:
>>> _make_socket_path(*_parse_display(':0'))
'/tmp/bspwm_0_0-socket'
"""
return f'/tmp/bspwm{host}_{display}_{screen}-socket'
|
def is_dataarray(X, require_attrs=None):
""" Check whether an object is a DataArray.
Parameters
----------
X : anything
The object to be checked.
require_attrs : list of str, optional
The attributes the object has to have in order to pass as a DataArray.
Returns
-------
bool
Whether the object is a DataArray or not.
"""
if require_attrs is None:
require_attrs = ["values", "coords", "dims", "to_dataset"]
return all([hasattr(X, name) for name in require_attrs])
|
def calculate_accumulation_distribution(open, high, low, close, volume):
"""
Calculates changes in accumulation/distribution line.
A/D = ((Close - Low) - (High - Close))/(High - Low)
Args:
open: Float representing exchange rate at the beginning of an interval
high: Float representing the highest exchange rate during the interval
        low: Float representing the lowest exchange rate during the interval
close: Float representing the exchange rate at the end of an interval
volume: Float representing the number of trades during the interval
Returns:
Float representing the change in accumulation/distribution
"""
if high == low:
# Prevent x/0 undefined error
return 0
return ((2*close - low - high)/(high - low))*volume
|
def isGenericParamName(name):
"""Check if name is a generic parameter name."""
if name is None:
raise ValueError("parameter name is None")
return name.startswith('param')
|
def parse_envs(arg):
"""Parse environment configs as a dict.
Support format 'k1=v1,k2=v2,k3=v3..'. Note that comma is supported
in value field.
"""
envs = {}
if not arg:
return envs
i = 0
fields = arg.split("=")
if len(fields) < 2:
return envs
pre_key = ""
while i < len(fields):
if i == 0:
pre_key = fields[i]
elif i == len(fields) - 1:
envs[pre_key] = fields[i]
else:
r = fields[i].rfind(",")
envs[pre_key] = fields[i][:r]
pre_key = fields[i][r + 1 :] # noqa: E203
i += 1
return envs
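A worked example (key/value names invented for this sketch) showing how commas inside a value survive while '=' still separates the pairs:
>>> parse_envs("k1=v1,k2=v2a,v2b,k3=v3")
{'k1': 'v1', 'k2': 'v2a,v2b', 'k3': 'v3'}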
|
def str_to_int(item: str) -> int:
    """Convert a str to an int.
    Args:
        item (str): the string to convert
    Returns:
        int: the parsed integer
    """
    return int(item)
|
def incremental_mean(prev_mean, new_vals):
"""Calculate the mean based upon the previous mean and update incrementally.
See here: http://datagenetics.com/blog/november22017/index.html """
# use the previous mean to incrementally update the new mean
mean = prev_mean
n = len(new_vals)
for x in new_vals:
mean = mean + (x - mean)/n
return mean
|
def get_factors(n):
"""Return sorted list of prime factors of n, for n <= 120"""
assert 1 <= n < 121, "Number too large"
return [i for i in range(1, n+1) if n % i == 0]
|
def url_path_join(*pieces):
"""Join components of url into a relative url
Use to prevent double slash when joining subpath. This will leave the
initial and final / in place
"""
initial = pieces[0].startswith("/")
final = pieces[-1].endswith("/")
stripped = [s.strip("/") for s in pieces]
result = "/".join(s for s in stripped if s)
if initial:
result = "/" + result
if final:
result = result + "/"
if result == "//":
result = "/"
return result
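Illustrative joins (paths invented here): inner slashes are deduplicated while leading and trailing slashes are preserved.
>>> url_path_join("/api/", "/v1/", "users")
'/api/v1/users'
>>> url_path_join("a/", "/b/")
'a/b/'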
|
def has_methods(widget, methods_sets):
"""
    Check if the widget has methods from the given sets.
    `methods_sets` is a list of method sets. The function returns ``True`` iff the widget has at least one method from each of the sets.
"""
for ms in methods_sets:
if not any([hasattr(widget,m) for m in ms]):
return False
return True
|
def count_literals(term):
"""
Counts the number of literals in a term
Args:
term : A string containing literals
Returns:
The number of literals in term
"""
count = 0
for char in term:
if char != "_":
count+=1
return count
|
def ipv4_subnet_details(addr, mask):
"""
    Function that computes the subnet related details - Network, Broadcast, Host IP range and number of host addresses
:param addr: IP address
:param mask: subnet mask
:return: result dictionary containing the details
"""
network_address = []
broadcast_address = []
num_ips = 1 # to keep track of total no of ips in this subnet by multiplying (wcmask per octet+1) in the loop
wildcard_mask = {
"255": "0",
"254": "1",
"252": "3",
"248": "7",
"240": "15",
"224": "31",
"192": "63",
"128": "127",
"0": "255",
}
for _octet, _mask in zip(
addr.split("."), mask.split(".")
): # iterate over octet values and mask values simultaneously
network_address.append(
str(int(_octet) & int(_mask))
) # bitwise AND of the octet value and the mask--> gives the network ip
broadcast_address.append(
str(int(_octet) | int(wildcard_mask[_mask]))
) # bitwise OR of octet value and wc_mask--> gives the broadcast address
num_ips *= int(wildcard_mask[_mask]) + 1 # multiplies num hosts per octet
host_address_low = network_address[:3] + [
str(int(network_address[3]) + 1)
] # add 1 to last octet of network address to get the first usable host ip
host_address_high = broadcast_address[:3] + [
str(int(broadcast_address[3]) - 1)
] # subtract 1 from the last octet of the broadcast address to get the last usable host ip
host_ips = num_ips - 2 # subtract 2 that is network and bc address
result = dict()
result["nw_addr"] = ".".join(network_address)
result["bc_addr"] = ".".join(broadcast_address)
result[
"host_addr_range"
] = f"{'.'.join(host_address_low)} to {'.'.join(host_address_high)}"
result["usable_ips"] = host_ips
return result
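A hand-checked example for a /26 subnet (addresses chosen for this sketch):
>>> ipv4_subnet_details("192.168.1.130", "255.255.255.192")
{'nw_addr': '192.168.1.128', 'bc_addr': '192.168.1.191', 'host_addr_range': '192.168.1.129 to 192.168.1.190', 'usable_ips': 62}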
|
def category(id):
"""
Return a category from its id
"""
return "category! "+id
|
def head(iterable):
"""
Gets the first element of the iterable.
:param iterable:
A non-empty iterable. If it is empty a StopIteration error will be raised
    :type iterable:
iterable **(A)**
:returns:
A
"""
it = iter(iterable)
return next(it)
|
def spsp(n, a):
    """Rabin-Miller (strong probable prime) test.
    n should be an odd number and 1 < a < n-1.
    """
    if n % 2 == 0:
        return False
    # find s, d such that n-1 = 2^s * d with d odd
    n1 = n - 1
    d = n1
    s = 0
    while d % 2 == 0:
        d //= 2
        s += 1
    # start with p = a^d (mod n)
    p = pow(a, d, n)
    # when p=1 or p=n-1 at i=0, n is a probable prime.
    if (p == 1) or (p == n1):
        return True
    for i in range(1, s):
        p = pow(p, 2, n)
        if p == n1:
            return True
    # p != n-1 for every i < s, so n is composite.
    return False
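A quick sanity sketch (bases chosen here): a prime passes for every valid base, while this composite is rejected.
>>> all(spsp(101, a) for a in (2, 3, 5, 7))
True
>>> spsp(9, 2)
False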
|
def assign_state(recipient):
"""
Try to assign a state to the recipient. If not possible, return "NULL".
States have alphabetical precedence.
"""
states = [
"Alabama", "Alaska", "Arizona",
"Arkansas", "California", "Colorado",
"Connecticut", "Delaware", "Florida",
"Georgia", "Hawaii", "Idaho",
"Illinois", "Indiana", "Iowa",
"Kansas", "Kentucky", "Louisiana",
"Maine", "Maryland", "Massachusetts",
"Michigan", "Minnesota", "Mississippi",
"Missouri", "Montana", "Nebraska",
"Nevada", "New Hampshire", "New Jersey",
"New Mexico", "New York", "North Carolina",
"North Dakota", "Ohio", "Oklahoma",
"Oregon", "Pennsylvania", "Rhode Island",
"South Carolina", "South Dakota", "Tennessee",
"Texas", "Utah", "Vermont",
"Virginia", "Washington", "West Virginia",
"Wisconsin", "Wyoming",
]
for s in states:
if s in recipient:
return "'" + s + "'"
return "NULL"
|
def flow_cell_mode(info_reads):
"""Return flow cell sequencing mode."""
res = ''
if info_reads:
read_lens = [
a['num_cycles'] for a in info_reads if not a['is_indexed_read']]
if len(read_lens) == 1:
res = '1x{}'.format(read_lens[0])
elif len(set(read_lens)) == 1:
res = '2x{}'.format(read_lens[0])
else:
res = ' + '.join(map(str, read_lens))
index_lens = [
a['num_cycles'] for a in info_reads if a['is_indexed_read']]
if index_lens:
res += '/'
if len(set(index_lens)) == 1:
res += '{}x{}'.format(len(index_lens), index_lens[0])
else:
res += '+'.join(map(str, index_lens))
return res or '?x?'
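A sketch of the expected input shape (read lengths invented here) for a paired-end 2x151 run with a single 8-cycle index read:
>>> reads = [
...     {'num_cycles': 151, 'is_indexed_read': False},
...     {'num_cycles': 8, 'is_indexed_read': True},
...     {'num_cycles': 151, 'is_indexed_read': False},
... ]
>>> flow_cell_mode(reads)
'2x151/1x8'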
|
def inheritance(child, parent):
"""Aggregate into child what is missing from parent
"""
for v in child:
if (v in parent) and isinstance(child[v], dict) and isinstance(parent[v], dict):
parent[v] = inheritance(child[v], parent[v])
else:
parent[v] = child[v]
return parent
|
def calculate_r(vv,vh,G=1.0):
"""
Calculate anisotropy from vertical (vv), horizontal (vh), and empirical
G-factor (G).
"""
return (vv - G*vh)/(vv + 2*G*vh)
|
def get_n_steps_for_interpretability(
n_tokens: int, min_n_steps: int = 3, max_n_steps: int = 20, max_tokens: int = 500
) -> int:
"""
Get n_steps as a function of the number of tokens to speed up insight.
    n_steps scales linearly from max_n_steps (for very short inputs) down to min_n_steps (for inputs of max_tokens tokens or more)
"""
token_ratio = 1 - (min(n_tokens, max_tokens) / max_tokens)
return int(min_n_steps + round(token_ratio * (max_n_steps - min_n_steps), 0))
|
def best_fit_decreasing(last_n_vm_cpu, hosts_cpu, hosts_ram,
inactive_hosts_cpu, inactive_hosts_ram,
vms_cpu, vms_ram):
""" The Best Fit Decreasing (BFD) heuristic for placing VMs on hosts.
:param last_n_vm_cpu: The last n VM CPU usage values to average.
:type last_n_vm_cpu: int
:param hosts_cpu: A map of host names and their available CPU in MHz.
:type hosts_cpu: dict(str: int)
:param hosts_ram: A map of host names and their available RAM in MB.
:type hosts_ram: dict(str: int)
:param inactive_hosts_cpu: A map of inactive hosts and available CPU MHz.
:type inactive_hosts_cpu: dict(str: int)
:param inactive_hosts_ram: A map of inactive hosts and available RAM MB.
:type inactive_hosts_ram: dict(str: int)
:param vms_cpu: A map of VM UUID and their CPU utilization in MHz.
:type vms_cpu: dict(str: list(int))
:param vms_ram: A map of VM UUID and their RAM usage in MB.
:type vms_ram: dict(str: int)
:return: A map of VM UUIDs to host names, or {} if cannot be solved.
:rtype: dict(str: str)
"""
vms_tmp = []
for vm, cpu in vms_cpu.items():
last_n_cpu = cpu[-last_n_vm_cpu:]
vms_tmp.append((sum(last_n_cpu) / len(last_n_cpu),
vms_ram[vm],
vm))
vms = sorted(vms_tmp, reverse=True)
hosts = sorted(((v, hosts_ram[k], k)
for k, v in hosts_cpu.items()))
inactive_hosts = sorted(((v, inactive_hosts_ram[k], k)
for k, v in inactive_hosts_cpu.items()))
mapping = {}
for vm_cpu, vm_ram, vm_uuid in vms:
mapped = False
while not mapped:
for _, _, host in hosts:
if hosts_cpu[host] >= vm_cpu and \
hosts_ram[host] >= vm_ram:
mapping[vm_uuid] = host
hosts_cpu[host] -= vm_cpu
hosts_ram[host] -= vm_ram
mapped = True
break
else:
if inactive_hosts:
activated_host = inactive_hosts.pop(0)
hosts.append(activated_host)
hosts = sorted(hosts)
hosts_cpu[activated_host[2]] = activated_host[0]
hosts_ram[activated_host[2]] = activated_host[1]
else:
break
if len(vms) == len(mapping):
return mapping
return {}
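A minimal placement sketch (host and VM figures invented here): VMs are considered largest-first and each goes to the smallest active host that still fits it.
hosts_cpu = {'h1': 2000, 'h2': 3000}
hosts_ram = {'h1': 4096, 'h2': 8192}
vms_cpu = {'vm1': [500, 600], 'vm2': [1500, 1500]}
vms_ram = {'vm1': 1024, 'vm2': 2048}
placement = best_fit_decreasing(2, hosts_cpu, hosts_ram, {}, {}, vms_cpu, vms_ram)
# vm2 (avg 1500 MHz) is placed first on h1, the smallest host that fits it;
# vm1 (avg 550 MHz) then no longer fits on h1 and goes to h2:
# placement == {'vm2': 'h1', 'vm1': 'h2'}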
|
def header_case(in_string):
"""
Return in_string with each word longer than 2 letters capitalized
Example:
SCOTTIES TOURNAMENT OF HEARTS -> Scotties Tournament of Hearts
women's world cup of curling -> Women's World Cup of Curling
"""
parts = in_string.lower().split()
out_string = " ".join([
p.capitalize() if len(p) > 2 else p for p in parts])
return out_string
|
def ArgsToDict(argv):
""" Collect command-line arguments of the form '--key value' into a
dictionary. Fail if the arguments do not fit this format. """
dictionary = {}
PREFIX = '--'
# Expect the first arg to be the path to the script, which we don't want.
argv = argv[1:]
while argv:
if argv[0].startswith(PREFIX):
dictionary[argv[0][len(PREFIX):]] = argv[1]
argv = argv[2:]
else:
raise Exception('Malformed input: %s' % argv)
return dictionary
|
def get_archive_url(version, archive_name):
"""Craft an archive download URL from a given version and archive name.
Despite the name, the `version` is really what the GitHub API for releases calls the `tag_name`.
Reference: https://docs.github.com/en/rest/releases/releases#get-a-release-by-tag-name
"""
github_base_uri = "https://github.com/phylum-dev/cli/releases"
archive_url = f"{github_base_uri}/download/{version}/{archive_name}"
return archive_url
|
def compare_ids(mirbase_id, rfam_id):
"""
Compare ids like MIPF0000024__mir-103 and mir-103
"""
parts = mirbase_id.split('__')
if parts[-1].lower() == rfam_id.lower():
return 'Yes'
else:
return 'No'
|
def dec_hex(getal):
"""
    convert a decimal value (0-255, given as an int or numeric string) to its single-byte representation
"""
return bytes([int(getal)])
|
def str2dict(str_):
    """Change str to dict."""
    from collections import OrderedDict  # visible to eval() below
    if not isinstance(str_, str):
        raise TypeError('"str_" must be a string, not {}'.format(type(str_)))
    # to keep the keys order
    str_ = str_.replace('dict(', 'OrderedDict(')
    return eval(str_)
|
def _dof(mean_tau, sd_tau2):
"""
Returns the degrees of freedom for the chi-2 distribution from the mean and
variance of the uncertainty model, as reported in equation 5.5 of Al Atik
(2015)
"""
return (2.0 * mean_tau ** 4.) / (sd_tau2 ** 2.)
|
def aria2_format_trackers(text):
"""
:return: trackers.
"""
trackers = text.split("\n")
while "" in trackers:
trackers.remove("")
return ",".join(trackers)
|
def coerce(value: float, min_value: float, max_value: float) -> float:
"""
Forces a value to be within the given min and max value.
:param value: the value to coerce
:param min_value: minimum allowed value
:param max_value: maximum allowed value
:return: a value within the given range
"""
if value < min_value:
return min_value
if value > max_value:
return max_value
return value
|
def bonus_time(salary, bonus):
"""Check to see if bonus, and return salary accordingly."""
return "${}".format(salary * 10) if bonus else "${}".format(salary)
|
def get_image_directory(command_line_input, active_configuration):
"""
Provides path to image directory.
Arguments:
command_line_input (str | ``None``): A path that may optionally be submitted by user. A string
or ``None`` are expected types.
active_configuration (dict): Active configuration options.
Returns:
str: A path to the image directory. Default: ``images``
"""
if command_line_input is not None:
return command_line_input
elif 'image_directory' in active_configuration:
return active_configuration['image_directory']
return 'images'
|
def tab2sp(line, spaces):
""" convert an indent of tab to spaces
ex.) python main.py -tab2sp 4
--> an indenct of tab to 4 spaces
"""
cnt = 0
tabs = "\t"
while line.startswith(tabs):
cnt += 1
tabs += "\t"
return line.replace("\t", spaces, cnt)
|
def is_prime(n):
"""
is_prime returns True if N is a prime number, False otherwise
Parameters:
Input, integer N, the number to be checked.
Output, boolean value, True or False
"""
    if n != int(n) or n < 2:
return False
p = 2
while p < n:
if n % p == 0:
return False
p += 1
return True
|
def normalize_alias(alias) -> str:
"""
Returns alias with '@'
"""
return '@' + str(alias).replace('@', '')
|
def is_pandigital(a, b, c):
"""Returns whether identity is pandigital"""
digits = [int(d) for d in str(a) + str(b) + str(c)]
if len(digits) != 9:
return False
if 0 in digits:
return False
while digits:
d = digits.pop(0)
if d in digits:
return False
return True
|
def validateRefs(cmodel):
"""Validate references for models and layers.
Args:
cmodel (dict): Sub-dictionary from config for specific model.
Returns:
tuple: (modelrefs, longrefs, shortrefs) where:
            * modelrefs: dictionary of citation information for model
              keys='longref', 'shortref'
            * longrefs: dictionary containing full references for each
              input layer
            * shortrefs: dictionary containing short reference for each
              input layer
"""
longrefs = {}
shortrefs = {}
modelrefs = {}
for key in cmodel['layers'].keys():
if 'longref' in cmodel['layers'][key]:
longrefs[key] = cmodel['layers'][key]['longref']
else:
print('No longref provided for layer %s' % key)
longrefs[key] = 'unknown'
if 'shortref' in cmodel['layers'][key]:
shortrefs[key] = cmodel['layers'][key]['shortref']
else:
print('No shortref provided for layer %s' % key)
shortrefs[key] = 'unknown'
try:
modelrefs['longref'] = cmodel['longref']
except BaseException:
print('No model longref provided')
modelrefs['longref'] = 'unknown'
try:
modelrefs['shortref'] = cmodel['shortref']
except BaseException:
print('No model shortref provided')
modelrefs['shortref'] = 'unknown'
return modelrefs, longrefs, shortrefs
|
def _get_data(title, func, dest):
"""Populate dest with data from the given function.
Args:
title: The name of the data
func: The function which will return the data
dest: a dict which will store the data
Returns:
dest: The modified destination dict
"""
# Process data
values = func()
for key, value in values.items():
dest[key][title] = value
return dest
|
def get_indent(op):
"""Get indentation for given level."""
ret = ""
for ii in range(op):
# Would tab be better?
ret += " "
return ret
|
def key_none(v):
""" Check if a keyword argument is set to None.
Parameters
----------
v : str
value of a keyword argument
Returns
-------
v : None, str
A value used by the settings dictionary
"""
if v.lower() == "none":
v = None
return v
|
def get_new_job_dict():
"""returns empty result dictionary, used as data template"""
empty = {"job":None,"company":None,"location":None,
"rating":None,"salary_min":None,"salary_max":None,"date":None}
return empty.copy()
|
def gql_assets(fragment):
"""
Return the GraphQL datasetAssets query
"""
return f'''
query($where: DatasetAssetWhere!, $first: PageSize!, $skip: Int!) {{
data: datasetAssets(where: $where, skip: $skip, first: $first) {{
{fragment}
}}
}}
'''
|
def secDegInterpolate(x1, x2, x3, y1, y2, y3, xfit):
"""second degree interpolation used by findApex"""
x12 = x1-x2
x13 = x1-x3
x23 = x2-x3
xf1 = xfit-x1
xf2 = xfit-x2
xf3 = xfit-x3
yfit = (y1*x23*xf2*xf3-y2*x13*xf1*xf3+y3*x12*xf1*xf2)/(x12*x13*x23)
return yfit
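A quick check (points chosen here): fitting the parabola y = x^2 through x = 0, 1, 2 reproduces it exactly at an intermediate point.
>>> secDegInterpolate(0, 1, 2, 0, 1, 4, 1.5)
2.25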
|
def get_zero_mask(number: int, max_len: int = 3) -> str:
    """
    Returns string of numbers formatted with zero mask
    eg.
    >>> get_zero_mask(100, 4)
    '0100'
    >>> get_zero_mask(101, 4)
    '0101'
    """
    return f'%0.{max_len}d' % number
|
def check_port(port):
    """ Check if a port is valid. Return an error message indicating what is invalid if something isn't valid. """
    if isinstance(port, int):
        if port not in range(0, 65536):
            return 'Source port must be in range from 0 to 65535'
    else:
        return 'Source port must be an integer'
    return None
|
def scale(matrix, scale_x, scale_y):
"""Scale a matrix in list format."""
return [
[_item for _item in _row for _ in range(scale_x)]
for _row in matrix for _ in range(scale_y)
]
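An illustrative call (matrix invented here): each element is repeated scale_x times across and each row scale_y times down.
>>> scale([[1, 2], [3, 4]], 2, 2)
[[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4]]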
|
def _rewrite_machine_info(
current_machine_info_contents: str, new_pretty_hostname: str
) -> str:
"""
Return current_machine_info_contents - the full contents of
/etc/machine-info - with the PRETTY_HOSTNAME=... line rewritten to refer
to new_pretty_hostname.
"""
current_lines = current_machine_info_contents.splitlines()
preserved_lines = [
ln for ln in current_lines if not ln.startswith("PRETTY_HOSTNAME")
]
new_lines = preserved_lines + [f"PRETTY_HOSTNAME={new_pretty_hostname}"]
new_contents = "\n".join(new_lines) + "\n"
return new_contents
|
def linear_search(array, item):
"""
Given an array and an item, this function traverses the array to locate
the item.
Parameters: array, item
Returns: index if item is found, -1 if the item is not found
"""
for i in range(len(array)):
if array[i] == item:
return i
return -1
|
def get_node_file_output_str(nset, varstr, frequency=99999999):
""" Get the string to add to the input file.
:param nset: Name of node set from which output should be output
:type nset: str
:param varstr: comma separated list of variables to output
:type varstr: str
:param frequency: How often to write output (increments)
:type frequency: int
:returns: The string to add
:rtype: str
"""
output_str = ('*NODE FILE, NSET=' + nset +
', FREQUENCY=%0.0f \n'
+ varstr) % (frequency)
return output_str
|
def _get_num_to_fold(stretch: float, ngates: int) -> int:
"""Returns the number of gates to fold to achieve the desired (approximate)
stretch factor.
Args:
stretch: Floating point value to stretch the circuit by.
ngates: Number of gates in the circuit to stretch.
"""
return int(round(ngates * (stretch - 1.0) / 2.0))
|
def strip_right(s, pattern):
"""
Strips a string right (end) of string if found. Otherwise, returns the original string.
:param s: str
:param pattern: str
:rtype: str
"""
    if pattern and s.endswith(pattern):
return s[:-(len(pattern))]
else:
return s
|
def extract_version_from_filename(filename):
"""Extract the version from a filename with the following
format: `filename_1.1` or `filename_1.1.exe`
"""
components = filename.replace('.exe', '').split('_')
if len(components) == 2:
return components[1]
else:
return None
|
def string_permutation(string):
"""
:param string:string
:return: permutation list
"""
def recursion_core(pre_s, string):
"""
:param pre_s: n-1 sol
:param string: str waiting to add
:return: n sol
"""
if not string:
ans.append(pre_s)
return
for s in range(len(string)):
recursion_core(pre_s + string[s], string[:s]+string[s+1:])
ans = []
recursion_core('', string)
return ans
|
def sortbylength(src, ferts=None, maxlen=10000):
"""
:param data: List of tuples of source sentences and fertility values
:param maxlen: Maximum sentence length permitted
:return: Sorted data
"""
# src = [elem[0] for elem in data]
# tgt = [elem[1] for elem in data]
indexed_src = [(i,src[i]) for i in range(len(src))]
sorted_indexed_src = sorted(indexed_src, key=lambda x: -len(x[1]))
sorted_src = [item[1] for item in sorted_indexed_src if len(item[1])<maxlen]
sort_order = [item[0] for item in sorted_indexed_src if len(item[1])<maxlen]
if ferts:
sorted_tgt = [ferts[i] for i in sort_order]
else:
sorted_tgt = None
# sorted_data = [(src, tgt) for src, tgt in zip(sorted_src, sorted_tgt)]
return sorted_src, sorted_tgt
|
def _format_code(code):
"""
    Format departement code (add a 0 at the end if the length < 3).
:returns: formatted code
:rtype: str
"""
if len(code) < 3:
return code + '0'
return code
|
def rawLibraryLogic(data):
"""Returns a tuple of the data in a library logic file."""
versionString = data[0]
scheduleName = data[1]
architectureName = data[2]
deviceNames = data[3]
problemTypeState = data[4]
solutionStates = data[5]
indexOrder = data[6]
exactLogic = data[7]
rangeLogic = data[8]
otherFields = []
dataLength = len(data)
if dataLength > 9:
for idx in range(9, dataLength):
otherFields.append(data[idx])
return (versionString, scheduleName, architectureName, deviceNames,\
problemTypeState, solutionStates, indexOrder, exactLogic, rangeLogic, otherFields)
|
def clean_url(url):
""" Tacks an s3:// onto the beginning of a URL if necessary.
url: an S3 URL, or what should be one
Return value: S3 URL
"""
if url[:6] == 's3n://':
return ('s3://' + url[6:])
elif url[:5] != 's3://':
return ('s3://' + url)
else:
return url
|
def resolve_curie_to_identifiersorg(curie_string):
"""Take a CURIE and return the corresponding identifiers.org URI in the Nanopublication network
using the BioLink JSON-LD Context previously loaded
"""
# Quick fix to handle lowercase drugbank and omim
if curie_string.startswith('drugbank:'):
curie_string = curie_string.replace('drugbank:', 'DRUGBANK:')
if curie_string.startswith('omim:'):
curie_string = curie_string.replace('omim:', 'OMIM:')
return 'https://identifiers.org/' + curie_string
|
def round_partial(value, resolution):
"""Round to a fraction of a number, e.g. to closest 0.25
From https://stackoverflow.com/a/8118808
"""
return round(value/resolution) * resolution
|
def replace_comma_in_text(text):
"""
Parameters
----------
text: str of nominal values for a single mushroom species from primary_data_edited.csv
Returns
-------
replace commas outside of angular brackets with semicolons (but not inside of them)
Example
-------
text = "[a, b], [c, d]"
return: "[a, b]; [c, d]"
"""
result_text = ""
replace = True
for sign in text:
if sign == '[':
replace = False
if sign == ']':
replace = True
if sign == ',':
if replace:
result_text += ';'
else:
result_text += sign
else:
result_text += sign
return result_text
|
def GetTickText_deprecated(tick):
""" GetTickText(tick)
Obtain text from a tick. Convert to exponential notation
if necessary.
"""
# Correct -0: 0 has on some systems been reported to be shown as -0
if tick == -0:
tick = 0
# Get text
text = '%1.4g' % tick
iExp = text.find('e')
if iExp>0:
front = text[:iExp+2]
text = front + text[iExp+2:].lstrip('0')
return text
|
def c_is_fun(text):
"""
Return desired string for /c/<text> route, replace _ with space
"""
return "C {}".format(text.replace("_", " "))
|
def _tuple(thing):
"""Turn something into a tuple; everything but tuple and list is turned into a one-element tuple
containing that thing.
"""
if isinstance(thing, tuple) and not hasattr(thing, "_fields"): # exclude namedtuples
return thing
elif isinstance(thing, list):
return tuple(thing)
else:
return thing,
|
def trade(first, second):
"""Exchange the smallest prefixes of first and second that have equal sum.
>>> a = [1, 1, 3, 2, 1, 1, 4]
>>> b = [4, 3, 2, 7]
>>> trade(a, b) # Trades 1+1+3+2=7 for 4+3=7
'Deal!'
>>> a
[4, 3, 1, 1, 4]
>>> b
[1, 1, 3, 2, 2, 7]
>>> c = [3, 3, 2, 4, 1]
>>> trade(b, c)
'No deal!'
>>> b
[1, 1, 3, 2, 2, 7]
>>> c
[3, 3, 2, 4, 1]
>>> trade(a, c)
'Deal!'
>>> a
[3, 3, 2, 1, 4]
>>> b
[1, 1, 3, 2, 2, 7]
>>> c
[4, 3, 1, 4, 1]
"""
    def find_equal_sum_partitions(fir, sec):
        """
        Find the shortest prefixes of fir and sec with equal sums, starting from
        the beginning of both lists. Whichever running sum is smaller has its
        index incremented; if either index runs past the end of its list, no
        equal-sum pair of prefixes exists and (None, None) is returned.
        """
        fi, si = 1, 1  # prefix lengths into fir and sec
        while fi <= len(fir) and si <= len(sec):  # allow the full list as a prefix
sumfi, sumsi = sum(fir[:fi]), sum(sec[:si])
if sumfi < sumsi:
fi += 1
elif sumfi > sumsi:
si += 1
else:
return fi, si
return None, None
m, n = find_equal_sum_partitions(first, second)
    if None not in [m, n]:
first[:m], second[:n] = second[:n], first[:m]
return 'Deal!'
else:
return 'No deal!'
|
def get_ears_pos(e):
"""
Get position of the ears from the app slider
args:
e input from app slider
returns:
the angular position of the ear
"""
# define range of ears
ears_range = 50
# define center of ears
ears_offset = 100
# interpolate from slider range (0-100)
e_pos = (e-50)*(ears_range/50.0)+ears_offset
# print(e_pos)
return e_pos
|
def check_exam(arr1: list, arr2: list) -> int:
"""
    This function returns the score for arr2 (the submitted answers) checked
    against arr1 (the correct answers): +4 for each correct answer,
    -1 for each incorrect answer, and +0 for each blank (empty) answer.
    A negative total is clamped to 0.
"""
count = 0
for i in range(len(arr2)):
if arr2[i] == arr1[i]:
count += 4
elif arr2[i] != arr1[i] and not len(arr2[i]) == 0:
count -= 1
return count if count > 0 else 0
|
def normalize_name(nm: str):
"""
convenience method that ensures we have some consistency on normalization of name
:param nm:
:return:
"""
return nm.replace("\u2019", "'").replace("\xa0", " ").strip().strip("'")
|
def cycle_length(k: int) -> int:
"""
Computes the repeated cycle length of the decimal expansion of 1/k.
e.g.
1/6 = 0.1(6) -> 1
1/7 = 0.(142857) -> 6
    For k not equal to a multiple of 2 or 5,
    1/k has a cycle of d digits, where d is the smallest d such that 10^d ≡ 1 (mod k)
    """
while k % 2 == 0:
k //= 2 # remove factors of 2
while k % 5 == 0:
k //= 5 # remove factors of 5
if k == 1:
return 0 # this is not a repeating decimal
d = 1
x = 10 % k
while x != 1:
x = (x*10) % k
d += 1
return d
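A few illustrative values (matching the fractions in the docstring, plus a terminating one):
>>> cycle_length(6)
1
>>> cycle_length(7)
6
>>> cycle_length(8)
0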
|
def corrected_buckets(buckets, noise_probability=.05):
"""Returns a map of conversion bits --> corrected counts
buckets: A map from integer conversion metadata to conversion counts.
note, this needs to include buckets with 0 counts.
noise_probability: The probability the metadata was randomly selected
"""
total_records = sum(buckets.values())
num_conversion_buckets = len(buckets)
    # |noise_probability| of the reports are noised and uniformly distributed
    # among the conversion buckets, so we can estimate how many of each
    # bucket's reports came from noise.
noised_values_per_bucket = total_records * noise_probability / num_conversion_buckets
# Subtract the reports added to each bucket due to noise, and rescale to
# account for the reports that were shifted due to the initial noise.
corrected_buckets = {
bucket: (v - noised_values_per_bucket) / (1 - noise_probability)
for bucket, v in buckets.items()
}
return corrected_buckets
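A small worked sketch (counts invented here) with the default 5% noise: 100 reports spread over two buckets; the correction removes the uniform noise floor and rescales, so the corrected counts still sum to 100.
observed = {0: 60, 1: 40}
corrected = corrected_buckets(observed)  # noise_probability defaults to 0.05
# noised_values_per_bucket = 100 * 0.05 / 2 = 2.5
# corrected[0] = (60 - 2.5) / 0.95 ≈ 60.53
# corrected[1] = (40 - 2.5) / 0.95 ≈ 39.47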
|
def sign(x, y):
""" Returns the sign of x -y. """
if x == y: return 0
if x < y: return -1
return 1
|
def remove_space(s: str):
""" """
shorter_string = s
for space in " \n\t\r":
shorter_string = shorter_string.replace(space, "")
return shorter_string
|
def get_oaipmh_publications(event):
"""
Get all publications to an OAIPMH repository for an event.
:param event: The event
:type event: dict
:return: OAIPMH publications
:rtype: list
"""
return [(publication["id"], publication["url"]) for publication in event["publications"]
if "oaipmh" in publication["id"]]
|
def reindent_lines(new_leader, source):
"""Re-indent string lines."""
return new_leader + ('\n' + new_leader).join(source.split('\n'))
|
def assemble_multivalue_arguments(argument):
"""Assemble single string or list of strings into string."""
if isinstance(argument, (list, tuple)):
argument = ' '.join(argument)
return argument
|
def clean(s: str) -> str:
"""Remove accents."""
return s.strip().replace("'", "")
|
def update_grad_w(grad, grad_old, grad_new):
"""Update the global gradient for W
Parameters
----------
grad: theano tensor
The global gradient
grad_old: theano tensor
The previous value of the local gradient
grad_new: theano tensor
The new version of the local gradient
Returns
-------
grad: theano tensor
New value of the global gradient
"""
return grad - grad_old + grad_new
|