def find_unique_target_clicks_count(clicks):
"""Return the number of unique clicks in a click set."""
uniq_users = set()
for click in clicks:
uniq_users.add(click["user"])
return len(uniq_users)
|
def duration_from_toml_value(value):
"""converter for dataset split durations.
If value is -1, that value is returned -- specifies "use the remainder of the dataset".
Other values are converted to float when possible."""
if value == -1:
return value
else:
return float(value)
|
def pattern_replacer(pattern, iterable_of_olds, new):
"""
    Replaces each of a list of old terms in a given pattern with a new term.
    Used for switching file paths in the 'bioprov.src.main.from_json' module.
    :param pattern: pattern to replace in.
    :param iterable_of_olds: old terms.
    :param new: new term.
    :return: the pattern with all replacements applied.
"""
for old in iterable_of_olds:
pattern = pattern.replace(old, new)
return pattern
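# A minimal usage sketch; the example paths below are illustrative, not taken from bioprov:
old_path = "/old/home/user/data/sample.fasta"
print(pattern_replacer(old_path, ("/old/home", "/old/tmp"), "/new/home"))
# -> "/new/home/user/data/sample.fasta"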
|
def _build_workhour_lookup(schedule, lunch_hour):
"""Build a lookup dict to determine whether a given hour of a given day of
week is a work hour.
"""
res = {d: [False] * 24 for d in range(7)}
for dow in res:
if len(schedule[dow]) == 0: # off day
continue
start_h, end_h = schedule[dow][0], schedule[dow][1]
for wh in range(start_h, end_h):
res[dow][wh] = True
res[dow][lunch_hour] = False
return res
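# A hedged sketch of the expected inputs: a schedule mapping day-of-week (0-6) to
# (start, end) hour pairs, with empty entries for off days. The sample hours are illustrative.
schedule = {0: (9, 17), 1: (9, 17), 2: (9, 17), 3: (9, 17), 4: (9, 17), 5: (), 6: ()}
lookup = _build_workhour_lookup(schedule, lunch_hour=12)
print(lookup[0][10])  # True: within working hours
print(lookup[0][12])  # False: lunch hour
print(lookup[5][10])  # False: off day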
|
def get_opt_val(cmd, opt_name):
"""Given a command as a list, return the value of opt_name."""
return cmd[cmd.index(opt_name) + 1]
|
def process_abi_type(type_abi):
"""Converts `tuple` (i.e struct) types into the (type1,type2,type3) form"""
typ = type_abi['type']
if typ.startswith('tuple'):
type_str = '(' + ','.join(process_abi_type(component) for component in type_abi['components']) + ')'
if typ[-1] == ']':
type_str += typ[5:]
return type_str
return type_abi['type']
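# An illustrative sketch with a hand-written ABI fragment (not from a real contract):
abi_frag = {"type": "tuple[]", "components": [{"type": "uint256"}, {"type": "address"}]}
print(process_abi_type(abi_frag))  # -> "(uint256,address)[]"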
|
def fit_info(pressures, temp_dct, err_dct):
""" Write the string detailing the temperature ranges and fitting errors
associated with the rate-constant fits at each pressure.
:param pressures: pressures the k(T,P)s were calculated at
:type pressures: list(float)
:param temp_dct: temperature ranges (K) fits were done at each pressure
:type temp_dct: dict[pressure, [temp range]]
:param err_dct: errors associated with the fits at each pressure
:type err_dct: dict[pressure, [mean err, max err]]
:return inf_str: string containing all of the fitting info
:rtype: str
"""
# Make temp, err dcts empty if fxn receives None; add 'high' to pressures
temp_dct = temp_dct if temp_dct else {}
err_dct = err_dct if err_dct else {}
if 'high' in temp_dct or 'high' in err_dct:
pressures += ['high']
    # Check that the temp and err dcts have the same pressures as rate_dcts
    if temp_dct:
        assert set(pressures) == set(temp_dct.keys())
if err_dct:
assert set(pressures) == set(err_dct.keys())
# Write string showing the temp fit range and fit errors
inf_str = '! Info Regarding Rate Constant Fits\n'
for pressure in pressures:
if temp_dct:
[min_temp, max_temp] = temp_dct[pressure]
temps_str = '{0:.0f}-{1:.0f} K'.format(
min_temp, max_temp)
temp_range_str = 'Temps: {0:>12s}, '.format(
temps_str)
else:
temp_range_str = ''
if err_dct:
[mean_err, max_err] = err_dct[pressure]
err_str = '{0:11s} {1:>5.1f}%, {2:7s} {3:>5.1f}%'.format(
'MeanAbsErr:', mean_err, 'MaxErr:', max_err)
else:
err_str = ''
# Put together the whole info string
if pressure != 'high':
pstr = '{0:<9.3f}'.format(pressure)
else:
pstr = '{0:<9s}'.format('High')
inf_str += '! Pressure: {0} {1} {2}\n'.format(
pstr, temp_range_str, err_str)
return inf_str
|
def detect_listing(lines, index, limit):
"""
Detects the lines of a listing.
Parameters
----------
lines : `list` of `str`
The lines of the section.
index : `int`
The starting index of the listing.
limit : `int`
The last element's index.
Returns
-------
index : `int`
The index where the listing is over. If there was no listing, returns the initial index.
"""
get_back_index = index
dash_count = 0
while True:
if index == limit:
return index
line = lines[index]
if line.startswith('-'):
dash_count += 1
index += 1
continue
if dash_count == 0:
# No dash is detected
return get_back_index
if not line:
index += 1
continue
if line[0] in (' ', '\t'):
index += 1
continue
if dash_count == 1:
return get_back_index
break
return index
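# An illustrative call: a two-item listing followed by a paragraph.
lines = ['- first', '- second', '', 'next paragraph']
print(detect_listing(lines, 0, len(lines)))  # -> 3, the index where the listing ends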
|
def add_numbers(L):
"""
Add all numbers in a list
and return sum
"""
num_sum = 0
for i in L:
num_sum += i
return num_sum
|
def cap_feature(s):
"""
Capitalization feature:
0 = low caps
1 = all caps
2 = first letter caps
3 = one capital (not first letter)
"""
if s.lower() == s:
return 0
elif s.upper() == s:
return 1
elif s[0].upper() == s[0]:
return 2
else:
return 3
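# Quick illustrative checks of each feature value:
assert cap_feature("hello") == 0
assert cap_feature("NASA") == 1
assert cap_feature("Hello") == 2
assert cap_feature("heLLo") == 3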
|
def weight(s, e, speed):
"""
Returns the weight to be assigned to the edges of the graph.
    Parameters: (s, e, speed)
s - (lat, lon)
e - (lat, lon)
speed - int
"""
return ((s[0] - e[0]) ** 2 + (s[1] - e[1]) ** 2) ** 0.5 / speed
|
def get_user_search_table_name(num):
""" pass cache_no, returns user search table name"""
    return 'cache_' + str(num)
|
def nrGR(B, v):
"""
Calculate the nonrelativistic Larmor radius.
Parameters
----------
B: Magnetic field in Tesla
v: Transverse velocity in m/s
Returns
-------
r: Larmor radius in m
"""
me = 9.10938356e-31
p = me * v
e = 1.6e-19
r = p / (e * B)
return r
|
def naive_derivative(xs, ys):
""" naive forward or backward derivative """
return (ys[1]-ys[0])/(xs[1]-xs[0])
|
def ssh_cmd_docker_container_log(detail) -> str:
"""SSH command to access a docker instance logs."""
return f"TERM=xterm ssh {detail['ec2InstanceId']} docker logs -f --tail=100 {detail['runtimeId']}"
|
def isprime(n):
"""Returns True if n is prime.
It uses the fact that a prime (except 2 and 3)
is of form 6k - 1 or 6k + 1 and looks only at divisors of this form.
"""
    if n < 2:
        return False
    if n == 2 or n == 3:
        return True
if n % 2 == 0:
return False
if n % 3 == 0:
return False
i = 5
w = 2
while i * i <= n:
if n % i == 0:
return False
i += w
w = 6 - w
return True
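# Quick sanity checks (illustrative):
assert [n for n in range(2, 30) if isprime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
assert not isprime(1)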
|
def inverse_lagrange(x, y, ya):
"""Given two lists x and y, find the value of x = xa when y = ya, i.e., f(xa) = ya"""
assert(len(x) == len(y))
total = 0
for i in range(len(x)):
numer = 1
denom = 1
for j in range(len(x)):
if j != i:
numer *= (ya - y[j])
denom *= (y[i] - y[j])
total += numer * x[i] / denom
return total
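# A hedged sketch: inverse interpolation of x at y = 4 for samples of y = x**2.
xs = [1.0, 2.0, 3.0]
ys = [1.0, 4.0, 9.0]
print(inverse_lagrange(xs, ys, 4.0))  # -> 2.0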
|
def inverse_mapping(dct):
"""
Return reverse mapping:
>>> inverse_mapping({'x': 5})
{5: 'x'}
"""
return {v:k for k,v in dct.items()}
|
def get_similar_groups(target_group, inventory):
"""
Find group suggestions
:param target_group: Service to be disrupted
:param inventory: Parsed inventory file
"""
suggestions = []
for key in inventory.keys():
if target_group in key:
suggestions.append(key)
return suggestions
|
def list_dict(l):
"""
return a dictionary with all items of l being the keys of the dictionary
"""
    return dict.fromkeys(l)
|
def process_gpus(container, all_configs):
"""
    This function will expose two things regarding GPUs:
    1. The total number of GPUs that the Notebook has requested
    2. A message describing how many GPUs from each vendor it requested
This function will check the vendors that the admin has defined in the
app's config file.
"""
# get the GPU vendors from the app's config
if isinstance(all_configs, list):
config = all_configs[0]
else:
config = all_configs
cfg_vendors = config.get("gpus", {}).get("value", {}).get("vendors", [])
# create a dict mapping the limits key with the UI name.
# For example: "nvidia.com/gpu": "NVIDIA"
gpu_vendors = {v["limitsKey"]: v["uiName"] for v in cfg_vendors}
count = 0
gpus = []
resource_limits = container.get("resources", {}).get("limits", {})
for vendor in gpu_vendors.keys():
if vendor not in resource_limits:
continue
gpu_count = resource_limits[vendor]
count += int(gpu_count)
# final message will be like: 1 NVIDIA, 2 AMD
gpus.append("%s %s" % (gpu_count, gpu_vendors[vendor]))
return {"count": count, "message": ", ".join(gpus)}
|
def traverseGraph(graph, start, block=[]):
"""traverse graph, go not passed nodes in block.
"""
to_visit = [start, ]
visited = {}
while to_visit:
v = to_visit[0]
del to_visit[0]
visited[v] = 1
for n in graph[v]:
if n not in visited and n not in block:
to_visit.append(n)
return visited
|
def upper(text):
"""Returns the given text in upper case."""
return text.upper()
|
def argsort(a, reverse=False):
"""return index list to get `a` in order, ie
``a[argsort(a)[i]] == sorted(a)[i]``, which leads to unexpected
results with `np.nan` entries, because any comparison with `np.nan`
is `False`.
"""
return sorted(range(len(a)), key=a.__getitem__, reverse=reverse)
|
def iswest(bb1, bb2, north_vector=[0,1,0]):
""" Returns True if bb1 is west of bb2
For obj1 to be west of obj2 if we assume a north_vector of [0,1,0]
- The max X of bb1 is less than the min X of bb2
"""
#Currently a North Vector of 0,1,0 (North is in the positive Y direction)
#is assumed. At some point this should be updated to allow for non-traditional
#North to be taken and to allow for directions based on perspective.
if north_vector != [0,1,0]:
raise NotImplementedError
_,bb1_max = bb1
bb2_min,_ = bb2
x1,y1,z1 = bb1_max
x2,y2,z2 = bb2_min
return x1 < x2
|
def _check_hashes(x, y, datatype):
"""MPI reduction op to check if code hashes differ across ranks."""
if x == y:
return x
return False
|
def m_make_matrix(number_rows, number_columns, entry_fn):
"""
Returns a number_rows x number_columns matrix whose (i,j)th entry is entry_fn(i, j)
"""
return [[entry_fn(i, j) for j in range(number_columns)] for i in range(number_rows)]
|
def _get_text_baseline(angle, location, side, is_vertical, is_flipped_x,
is_flipped_y):
"""Return vertical alignment of a text.
Parameters
----------
angle : {0, 90, -90}
location : {'first', 'last', 'inner', 'outer'}
side : {'first', 'last'}
is_vertical : bool
is_flipped_x : bool
is_flipped_y : bool
    Returns
    -------
    {'alphabetic', 'top'}
"""
if is_vertical:
if angle == 0:
if is_flipped_y:
return "top"
else:
return "alphabetic"
elif angle == 90:
if is_flipped_x:
if (location == "last"
or (side == "first" and location == "inner")
or (side == "last" and location == "outer")):
return "alphabetic"
else:
return "top"
else:
if (location == "first"
or (side == "first" and location == "outer")
or (side == "last" and location == "inner")):
return "alphabetic"
else:
return "top"
elif angle == -90:
if is_flipped_x:
if (location == "first"
or (side == "first" and location == "outer")
or (side == "last" and location == "inner")):
return "alphabetic"
else:
return "top"
else:
if (location == "last"
or (side == "first" and location == "inner")
or (side == "last" and location == "outer")):
return "alphabetic"
else:
return "top"
else:
if angle == 0:
if is_flipped_y:
if (location == "first"
or (side == "first" and location == "outer")
or (side == "last" and location == "inner")):
return "alphabetic"
else:
return "top"
else:
if (location == "last"
or (side == "first" and location == "inner")
or (side == "last" and location == "outer")):
return "alphabetic"
else:
return "top"
elif angle == 90:
if is_flipped_x:
return "alphabetic"
else:
return "top"
elif angle == -90:
if is_flipped_x:
return "top"
else:
return "alphabetic"
|
def extract_processor_name_with_recipe_identifier(processor_name):
"""Returns a tuple of (processor_name, identifier), given a Processor
name. This is to handle a processor name that may include a recipe
identifier, in the format:
com.github.autopkg.recipes.somerecipe/ProcessorName
identifier will be None if one was not extracted."""
identifier, delim, processor_name = processor_name.partition("/")
if not delim:
# if no '/' was found, the first item in the tuple will be the
# full string, the processor name
processor_name = identifier
identifier = None
return (processor_name, identifier)
|
def add_to_list(str_to_add, dns_names):
"""
This will add a string to the dns_names array if it does not exist.
It will then return the index of the string within the Array
"""
if str_to_add not in dns_names:
dns_names.append(str_to_add)
return dns_names.index(str_to_add)
|
def escape(text):
"""
Normalizes whitespace and escapes HTML tags
"""
replacements = {
        '<': '&lt;',
        '>': '&gt;',
'\t': ' ',
'\f': '',
'\v': '',
'\xA0': '',
'\x85': ''}
for key in replacements:
text = text.replace(key, replacements[key])
return text
|
def bmes_to_index(tags):
"""
Args:
tags: [4, 4, 0, 0, ...] sequence of labels
Returns:
list of (chunk_type, chunk_start, chunk_end)
Example:
seq = [4, 5, 0, 3]
tags = {"B-PER": 4, "I-PER": 5, "B-LOC": 3}
result = [("PER", 0, 2), ("LOC", 3, 4)]
"""
result = []
if len(tags) == 0:
return result
word = (0, 0)
for i, t in enumerate(tags):
if i == 0:
word = (0, 0)
elif t == 'B' or t == 'S':
result.append(word)
word = (i, 0)
word = (word[0], word[1] + 1)
if word[1] != 0:
result.append(word)
return result
|
def construct_machine_config(m_services, service_exploits):
"""
    Construct a dict mapping each known service to whether the machine runs it
"""
cfg = {}
    for service in service_exploits.keys():
        cfg[service] = service in m_services
return cfg
|
def _clean_scores_entities(entity_scores, features):
"""
Clean score entities given a list of features
:param entity_scores: entities scores
:param features: features
:return: score entities cleaned
"""
entity_scores_aux = {}
for entity in entity_scores.keys():
if entity in features:
entity_scores_aux[entity] = entity_scores[entity]
entity_scores = entity_scores_aux
for feature in features:
if feature not in entity_scores:
entity_scores[feature] = 0.0
return entity_scores
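# An illustrative call with made-up scores: entities not in the feature list are
# dropped, and missing features default to 0.0.
scores = {"price": 0.8, "noise": 0.3}
print(_clean_scores_entities(scores, ["price", "rating"]))
# -> {'price': 0.8, 'rating': 0.0}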
|
def _get_global_entities(nest):
"""Returns list of string indices representing each global entity.
For example, if 'global_entities/0' and 'global_entities/1' exist in the
input nest, this function will return the list ['0', '1'].
Args:
nest: A tensor or spec nest generated from a falken observation spec.
Returns:
A list of strings, representing the indices / IDs of each global entity.
"""
entities = nest.get('global_entities', None)
if not entities:
return []
return list(entities.keys())
|
def to_dict(sequences, key_function=None) :
"""Turns a sequence iterator or list into a dictionary.
sequences - An iterator that returns SeqRecord objects,
or simply a list of SeqRecord objects.
key_function - Optional function which when given a SeqRecord
returns a unique string for the dictionary key.
e.g. key_function = lambda rec : rec.name
or, key_function = lambda rec : rec.description.split()[0]
    If key_function is omitted then record.id is used, on the
assumption that the records objects returned are SeqRecords
with a unique id field.
If there are duplicate keys, an error is raised.
Example usage:
from Bio import SeqIO
filename = "example.fasta"
    d = SeqIO.to_dict(SeqIO.parse(open(filename, "rU")),
key_function = lambda rec : rec.description.split()[0])
print len(d)
print d.keys()[0:10]
key = d.keys()[0]
print d[key]
"""
if key_function is None :
key_function = lambda rec : rec.id
d = dict()
for record in sequences :
key = key_function(record)
if key in d :
raise ValueError("Duplicate key '%s'" % key)
d[key] = record
return d
|
def tuple_compare_eq(left, right):
"""Compare two 'TupleOf' instances by comparing their individual elements."""
if len(left) != len(right):
return False
for i in range(len(left)):
if left[i] != right[i]:
return False
return True
|
def check_diagonal_down(board, num_rows, num_cols):
"""check if any 4 are connected diagonally down(top left to bottom right)
returns bool"""
won = False
for row in range(num_rows - 3):
for col in range(num_cols - 3):
start = board[row][col]
if start == " ":
continue
won = True
for i in range(1, 4):
if start != board[row + i][col + i]:
won = False
break
if won:
return won
return won
|
def _collect_set(x):
"""Generate a set of a group."""
return list(set(x))
|
def to_PyGetSetDef_entry(cpp_struct_name, py_name, doc):
"""Creates one entry for a PyGetSetDef array from the entries for one
property-struct (as returned by parse_file).
"""
return 'PROPERTY_FORWARDER(%s, "%s", %s)' % (
cpp_struct_name, py_name, doc)
|
def merge_configs(default_dict: dict, overriding_dict: dict) -> dict:
"""
Args:
default_dict: The 'dict' config with the default values to be overwritten.
overriding_dict: The 'dict' config with the new values. If it has some missing keys the default values
from the default_dict will be kept.
Returns:
The merged 'dict' config with the overwritten values.
"""
for overriding_key, overriding_value in overriding_dict.items():
if overriding_key not in default_dict:
default_dict[overriding_key] = overriding_value
elif not isinstance(overriding_value, dict):
default_dict[overriding_key] = overriding_value
else:
default_dict[overriding_key] = merge_configs(
default_dict[overriding_key],
overriding_dict[overriding_key]
)
return default_dict
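# An illustrative merge of hand-written configs; note that default_dict is updated in place.
defaults = {"lr": 0.1, "model": {"depth": 3, "width": 64}}
overrides = {"model": {"depth": 5}}
print(merge_configs(defaults, overrides))
# -> {'lr': 0.1, 'model': {'depth': 5, 'width': 64}}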
|
def chainMessages(send_fn, messages):
"""
Constructs a chain of messages each of which will be sent when
the previous message in the chain is finalized.
"""
for i in range(len(messages)-1):
#
# Need the x = i bit to capture the current value of i,
# otherwise all the messages have the same finalizer.
#
messages[i].finalizer = lambda x = i: send_fn(messages[x+1])
return messages[0]
|
def newton_sqrt1(x):
"""Return the square root of x using Newton's Method"""
val = x
while True:
last = val
val = (val + x / val) * 0.5
if abs(val - last) < 1e-9:
break
return val
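# Quick illustrative checks (results agree with math.sqrt to within the 1e-9 tolerance):
print(newton_sqrt1(2.0))   # -> 1.414213562...
print(newton_sqrt1(81.0))  # -> 9.0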
|
def getManifestArchitecture(manifest):
""" returns system architecture for manifest"""
return manifest["manifest"]["architecture"]
|
def serialise(instance):
"""
Serialises a real number to a sequence of bytes.
:param instance: The real number to serialise.
:return: A sequence of bytes representing the real number.
"""
return str(instance).encode("utf_8")
|
def _prepare_query_string_for_comparison(query_string: str) -> str:
"""To use cached data, we need to compare queries. Returns a query string in canonical form."""
# for now this is a simple complete strip, but it could grow into much more sophisticated
# query comparison data structures
query_string = "".join(query_string.split()).strip("()").lower()
query_string = query_string[:-1] if query_string.endswith(";") else query_string
return query_string
|
def find_min_y_point(list_of_points):
"""
    Returns the point of *list_of_points* having the minimal y-coordinate
    (ties broken by the minimal x-coordinate)
:return: tuple (x, y)
"""
min_y_pt = list_of_points[0]
for point in list_of_points[1:]:
if point[1] < min_y_pt[1] or (point[1] == min_y_pt[1] and point[0] < min_y_pt[0]):
min_y_pt = point
return min_y_pt
|
def filter_by_min_points(bboxes, min_points_dict):
"""Filter ground truths by number of points in the bbox."""
filtered_boxes = []
for box in bboxes:
if box.label_class in min_points_dict.keys():
if box.points_inside_box.shape[0] > min_points_dict[
box.label_class]:
filtered_boxes.append(box)
else:
filtered_boxes.append(box)
return filtered_boxes
|
def make_bigly(name: str) -> str:
"""A function returning a string, rather than a component."""
return f"BIGLY: {name.upper()}"
|
def get_latest_values(existing_content, scope=None):
"""Get the latest "registries", "py_versions" and "repository" values
Args:
existing_content (dict): Dictionary of complete framework image information.
scope (str): Type of the image, required if the target is DLC
framework (Default: None).
"""
if scope in existing_content:
existing_content = existing_content[scope]
else:
if "versions" not in existing_content:
raise ValueError(
"Invalid image scope: {}. Valid options: {}.".format(
scope, ", ".join(existing_content.key())
)
)
latest_version = list(existing_content["versions"].keys())[-1]
registries = existing_content["versions"][latest_version].get("registries", None)
py_versions = existing_content["versions"][latest_version].get("py_versions", None)
repository = existing_content["versions"][latest_version].get("repository", None)
return registries, py_versions, repository
|
def hp(h, *, rho = 1000, g = 9.81):
"""
Computes the hydrostatic pressure acting on a submerged object given:
- the height of fluid above the object, h
- the density of the fluid in which is it submerged, rho
- the acceleration due to gravity, g
"""
if h < 0:
raise ValueError("Height of fluid, h, must be greater than or equal to zero")
if rho < 0:
raise ValueError("Density of fluid, rho, must be greater than or equal to zero")
if g < 0:
raise ValueError("Acceleration due to gravity, g, must be greater than or equal to zero")
return rho * g * h
|
def getdifflist(inputlist):
"""returns a list of length-1 relative to the input list
list values are the differential of the inputlist [n+1]-[n]"""
difflist=[inputlist[i+1]-inputlist[i]
for i in range(len(inputlist)-1)]
return difflist
|
def GetTagValue(tags, tag_name):
"""Returns the content for a specific tag."""
tag_prefix = tag_name + ':'
content = None
for tag in tags:
if tag.startswith(tag_prefix):
content = tag[len(tag_prefix):]
break
return content
|
def bold(out_format, text):
"""Returns a string representing a bolded version of the given text
depending on the output format.
"""
if out_format == 'remarkup':
return "**%s**" % text
elif out_format == 'terminal':
return "\033[1m%s\033[0m" % text
else:
raise RuntimeError("Unknown output format: %s" % out_format)
|
def unify(value):
"""
Converts the value to a number from 0 to 127.
Bools: 0 and 127 for False and True respectively.
Strings: 127 for "on", 0 for anything else
"""
if type(value) == bool:
return 127 if value else 0
elif type(value) == str:
return 127 if value == "on" else 0
else:
return int(value)
|
def cov(x,y=None):
"""
Calculates the covariance between two arrays/vectors or of a single matrix
"""
import numpy as np
array1=np.array(x)
    if y is not None:
array2=np.array(y)
if array1.shape!=array2.shape:
print("Error: incompatible dimensions")
return None
covmat=np.cov(array1,array2)
result=covmat[0][1]
elif len(array1.shape)==1:
result=float(np.cov(array1))
else:
result=np.cov(array1)
return result
|
def Pier_correction(detector, XandYarr):
""" This function corrects the measured centroids for the average values.
    Args:
        detector -- Detector ID: 491 or 492
        XandYarr -- Measured [x, y] centroid values
    Returns:
        corr_XandYarr -- List, values corrected for Pier's offsets
    """
# Corrections for offsets in positions (see section 2.5 of Technical Notes in Documentation directory)
offset_491 = (-0.086, -0.077)
offset_492 = (0.086, 0.077)
corrected_x = XandYarr[0]
corrected_y = XandYarr[1]
if detector == 491:
corrected_x = XandYarr[0] + offset_491[0]
corrected_y = XandYarr[1] + offset_491[1]
elif detector == 492:
corrected_x = XandYarr[0] + offset_492[0]
corrected_y = XandYarr[1] + offset_492[1]
corr_XandYarr = [corrected_x, corrected_y]
return corr_XandYarr
|
def array_of_ordereddict_to_list_of_names(tags_ordereddict_array):
"""
Serializers have a funny organization that isn't helpful in making further queries
Here we take the list of ordered dictionaries (id: x, name: y) and pull out the name only
and put that in a names list to return
"""
names = []
    for entry in tags_ordereddict_array:
        names.append(entry["name"])
return names
|
def coo_index_to_data(index):
"""
    Converts a data index (row, col) to the 1-based pixel-centered (x, y) coordinates of the center of the pixel
    index: (int, int) or int
        (row, col) index of the pixel in the data table, or a single row or col index
"""
return (index[1] + 1.0, index[0] + 1.0)
|
def hop_many(lst, func, iterations):
"""
    Apply the given function to each element of the given list for the
    given number of iterations.
>>> lst = [1,2,3]
>>> hop_many(lst, square, 2)
[1, 16, 81]
>>> hop_many(lst, square, 3)
[1, 256, 6561]
>>> hop_many(lst, identity, 100)
[1, 2, 3]
>>> hop_many(lst, lambda x: x - 1, 4)
[-3, -2, -1]
"""
final = []
for i in lst:
for _ in range(iterations):
i = func(i)
final.append(i)
return final
|
def get_translation(row):
"""Generate a translation matrix from elements in dictionary ``row``.
Examples
---------
>>> sdict = {'_pdbx_struct_oper_list.vector[{}]'.format(i): i for i in range(1, 4)}
>>> get_translation(sdict)
[1.0, 2.0, 3.0]
"""
return [
float(row["_pdbx_struct_oper_list.vector[1]"]),
float(row["_pdbx_struct_oper_list.vector[2]"]),
float(row["_pdbx_struct_oper_list.vector[3]"]),
]
|
def si_to_kmh(vals):
"""Conversion from SI wind speed units to km/hr.
Note
----
Code was migrated from https://github.com/nguy/PyRadarMet.
Parameters
----------
vals : float
float or array of floats
Speed in SI units (m/s)
Returns
-------
output: float
float or array of floats
Speed in km/hr
Examples
--------
>>> from wradlib.trafo import si_to_kmh
>>> print(si_to_kmh(1.))
3.6
"""
return vals * 3600.0 / 1000.0
|
def _valvar(unk, vardict):
"""Determines if an unknown string is a value or a dict variable.
Parameters
----------
unk : float or str
The unknown value, either a float or a dictionary key.
vardict : dict
The dictionary to be searched if unk is not a float.
Returns
-------
float
The desired value for unk.
Raises
------
    KeyError
        When unk is not a float and not a key in vardict.
"""
try:
return float(unk)
except ValueError:
if unk in vardict:
return vardict[unk]
else:
raise KeyError('\'{}\' not found in variable list'.format(unk))
|
def expand_list(x, n, crop=False, default=None):
"""Expand ellipsis in a list by substituting it with the value
on its left, repeated as many times as necessary. By default,
a "virtual" ellipsis is present at the end of the list.
expand_list([1, 2, 3], 5) -> [1, 2, 3, 3, 3]
expand_list([1, 2, ..., 3], 5) -> [1, 2, 2, 2, 3]
expand_list([1, 2, 3, 4, 5], 3, crop=True) -> [1, 2, 3]
"""
x = list(x)
if Ellipsis not in x:
x.append(Ellipsis)
idx_ellipsis = x.index(Ellipsis)
if idx_ellipsis == 0:
fill_value = default
else:
fill_value = x[idx_ellipsis-1]
k = len(x) - 1
x = (x[:idx_ellipsis] +
[fill_value] * max(0, n-k) +
x[idx_ellipsis+1:])
if crop:
x = x[:n]
return x
|
def sleep_interval(interval, now, wake_up_second):
"""
Work out how long to sleep (so we don't drift over time)
Only works properly when interval is at least one minute and the
runtime between calls is less than interval (although it will
return a reasonable value in both these situations).
:param interval: number of seconds between cycles
:param now: the current time
    :param wake_up_second: modulo time to wake up at
:return: number of seconds to sleep
"""
next_wakeup = float(now + interval)
if wake_up_second > interval:
return next_wakeup - now
next_wakeup = next_wakeup - (next_wakeup % wake_up_second)
return next_wakeup - now
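# An illustrative call: at t=130 s with a 60 s interval, waking on 30 s boundaries.
print(sleep_interval(60, 130, 30))  # -> 50.0, so the next wake-up lands at t=180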
|
def get_package_breadcrumbs(package_tree, name, version):
"""
Takes a npm ls JSON tree and looks up the paths to the given
dependency (name and version).
Returns an array of paths. Where a path is an array of
dependencies leading to the given dependency in the tree.
>>> get_package_breadcrumbs(tree, 'minim', '1.0.0')
[
        ['<package@version>'],
        ['<package@version>'],
        ['<package@version>', '<package@version>']
]
"""
def traverse_dependencies(dependencies, path):
"""
Inline function to be called recursively to check for dependency and
pass down the path to further dependencies.
"""
results = []
for dependency_name in dependencies:
dependency = dependencies[dependency_name]
if dependency_name == name and dependency.get('version') == version:
# Found dependency in path
results.append(path)
continue
if 'dependencies' in dependency:
# Traverse dependency dependencies
sub_dependencies = dependency['dependencies']
path_component = '{}@{}'.format(dependency_name, dependency['version'])
results += traverse_dependencies(sub_dependencies, path + [path_component])
return results
return traverse_dependencies(package_tree['dependencies'], [])
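# A hedged sketch with a minimal hand-written `npm ls --json`-style tree; the
# package names below are illustrative.
tree = {"dependencies": {
    "minim": {"version": "1.0.0"},
    "fury": {"version": "2.3.0",
             "dependencies": {"minim": {"version": "1.0.0"}}}}}
print(get_package_breadcrumbs(tree, "minim", "1.0.0"))
# -> [[], ['fury@2.3.0']] (found at the top level and under fury@2.3.0)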
|
def pad_or_truncate(some_list, target_len):
"""
This function shortens or extends a list. When it is extended, extra 0 values are added. This can be helpful to
simulate what happens after a storm has passed.
:param some_list: The original list of values
:param target_len: the wanted length of the list
:return: a list of values with length of target_len
"""
return some_list[:target_len] + [0]*(target_len - len(some_list))
|
def _properties_are_equal(properties_a, properties_b) -> bool:
"""Test that the keys and values of these maps are equal."""
for k, v in properties_a.items():
if k not in properties_b.keys():
return False
if v != properties_b[k]:
return False
for k, _ in properties_b.items():
if k not in properties_a.keys():
return False
return True
|
def noll_to_zern(j):
"""
Convert linear Noll index to tuple of Zernike indices.
j is the linear Noll coordinate, n is the radial Zernike index and m is the azimuthal Zernike index.
@param [in] j Zernike mode Noll index
@return (n, m) tuple of Zernike indices
@see <https://oeis.org/A176988>.
"""
if (j == 0):
raise ValueError("Noll indices start at 1, 0 is invalid.")
n = 0
j1 = j-1
while (j1 > n):
n += 1
j1 -= n
m = (-1)**j * ((n % 2) + 2 * int((j1+((n+1)%2)) / 2.0 ))
return (n, m)
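# Illustrative checks against the standard Noll ordering:
assert noll_to_zern(1) == (0, 0)  # piston
assert noll_to_zern(4) == (2, 0)  # defocus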
|
def _splitList(data, n):
"""Split data list to n sized sub lists."""
return [data[i : i + n] for i in range(0, len(data), n)]
|
def numFormatConversion(seq, form='int', **kwds):
"""
    When the length keyword is not specified as an argument, the function
    returns a format-converted sequence of numbers. An exception is raised
    when the conversion fails.
**Parameters**\n
seq: 1D numeric array
The numeric array to be converted.
form: str | 'int'
The format to convert into.
**Return**\n
numseq: converted numeric type
The format-converted array.
"""
    import builtins
    lseq = len(seq)
    l = kwds.pop('length', lseq)
    if lseq == l:
        # Case of a numeric array of the right length, but possibly not
        # the right type; look the converter up by name instead of eval()
        numseq = list(map(getattr(builtins, form), seq))
        return numseq
    else:
        # Case of a numeric array of the right type but wrong length
        return seq
|
def DSER(results):
"""DA Segmentation Rate: number of segments of the
reference incorrectly segmented
over number of reference segments.
"""
assert len(results) == 2
CorrectSegs = results[0]
TotalSegs = results[1]
return ((TotalSegs-CorrectSegs)/TotalSegs) * 100
|
def split_optional_type(t):
"""Split optional from a type.
E.g.:
split_optional_type(Optional[int]) => True, int
split_optional_type(int) => False, int
"""
if isinstance(t, type):
return False, t
is_optional = hasattr(t, "__args__") and len(t.__args__) == 2 and t.__args__[-1] is type(None)
if is_optional:
return True, t.__args__[0]
else:
return False, t
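# An illustrative sketch using typing.Optional:
from typing import Optional
print(split_optional_type(Optional[int]))  # -> (True, <class 'int'>)
print(split_optional_type(int))            # -> (False, <class 'int'>)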
|
def _bounds_side(size, max_pixels, overlap, coord, axis):
"""Calculates the boundaries of a side based on where in the
ROI the current sub-ROI is.
    Attributes:
        size: Size in (z, y, x) order.
        max_pixels: Maximum size of a sub-ROI in (z, y, x) order.
        overlap: Overlap size between sub-ROIs.
        coord: Coordinates of the sub-ROI, in (z, y, x) order.
        axis: The axis to calculate.
Returns:
int, int: Boundary of sides for the given ``axis`` as ``start, end``.
"""
pixels = max_pixels[axis]
start = coord[axis] * pixels
end = start + pixels
if overlap is not None:
end += overlap[axis]
if end > size[axis]:
end = size[axis]
return int(start), int(end)
|
def factorial(n):
"""Helper function to calculate only factorial of 1-4(inclusive)"""
if n == 1 or n == 0:
return 1
else:
return n * factorial(n-1)
|
def average(*args):
"""
Finds arithmetic mean of an array input
>>> average(*[1,2,3]) # 2.0
"""
return sum(args, 0.0) / len(args)
|
def compose_truncate(table):
"""Compose truncate command string.
Arguments
---------
table : str
Real table name.
Returns
-------
str
Query string with real table name.
"""
query = 'TRUNCATE TABLE {}'.format(table)
return query
|
def mean(X):
"""
Return the mean of X
"""
if not X:
return None
    total = 0
    for x in X:
        total += x
    return total / len(X)
|
def enforceOneLineEnding(s):
"""Ensures that a string contains exactly 1 line ending character at the end."""
if len(s) == 0:
return "\n"
if s[-1] == "\n":
return s
return s + "\n"
|
def didItTouchTheInterface(x, y, _i, s):
"""
Funcao que determina se o raio ainda esta na camada atual
Recebe: (x, y) - ponto atual do raio
_i - proxima interface
s - sentido que o raio esta seguindo (para cima ou
para baixo)
Retorna: Caso o raio esteja descendo pelo meio, se a ultima coor-
denada y calculada para ele foi maior que o y da inter-
face calculado para o x do raio, entao ele ultrapassou a
interface.
Caso ele esteja subindo pelo meio e seu y for menor que
o y calculado para o x do raio na interface, entao o
raio ultrapassou a interface
"""
if s == 1:
if y > _i(x):
return 1
else:
return 0
else:
if y < _i(x):
return 1
else:
return 0
|
def convert_num_2_terminal(num):
"""
    convert the index number to a terminal letter used to name vehicles
"""
term_dict = {0:"A",1:"B",2:"C",3:"D",4:"E"}
return term_dict[num]
|
def host_to_device(request_type):
""" Check if the direction is host to device """
return (request_type & 0x80) == 0x00
|
def dijkstra_path(edges, distances, va, vb):
"""
Returns the best path between two vertices.
    Uses `Dijkstra <https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm>`_ algorithm.
@param edges list of edges.
@param distances list of distances
@param va first vertex
@param vb last vertex
@return list of edges
This function could be implemented based on
`shortest_path <https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.shortest_path.html>`_.
"""
dist = {va: 0}
prev = {va: None}
modif = 1
while modif > 0:
modif = 0
for (a, b), d in zip(edges, distances):
if a in dist:
d2 = dist[a] + d
if b not in dist or dist[b] > d2:
dist[b] = d2
prev[b] = a
modif += 1
if b in dist:
d2 = dist[b] + d
if a not in dist or dist[a] > d2:
dist[a] = d2
prev[a] = b
modif += 1
rev = {(a, b): i for i, (a, b) in enumerate(edges)}
rev.update({(b, a): i for i, (a, b) in enumerate(edges)})
path = []
v = vb
while v is not None:
path.append(v)
v = prev[v]
path.reverse()
return [rev[a, b] for a, b in zip(path[:-1], path[1:])]
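# An illustrative call on a tiny hand-written graph; the result is a list of
# indices into `edges`.
edges = [(0, 1), (1, 2), (0, 2)]
distances = [1.0, 1.0, 3.0]
print(dijkstra_path(edges, distances, 0, 2))  # -> [0, 1], the 0-1-2 route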
|
def compute_cutoff_threshold(matrix_c: list, threshold: float):
"""
Algorithm 1 of the paper "Automatic Discovery of Attributes in Relational Databases" from M. Zhang et al. [1]
This algorithm computes the threshold of a column that determines if any other column is to be considered
its neighbour.
Parameters
---------
matrix_c : list
A list containing dicts of EMD/ColumnName pairs
threshold : float
The conservative global EMD cutoff threshold described in [1]
Returns
-------
float
The cutoff threshold of the input column
"""
matrix_c.append({'e': threshold, 'c': 0})
matrix_c = sorted(matrix_c, key=lambda k: k['e'])
cutoff = 0.0
gap = 0.0
i = 0
while i < len(matrix_c) - 1 and matrix_c[i + 1]['e'] <= threshold:
if gap < (matrix_c[i + 1]['e'] - matrix_c[i]['e']):
gap = matrix_c[i + 1]['e'] - matrix_c[i]['e']
cutoff = matrix_c[i]['e']
i += 1
return cutoff
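# A hedged sketch with made-up EMD scores ('e') for candidate columns ('c'):
scores = [{'e': 0.10, 'c': 'col_a'}, {'e': 0.15, 'c': 'col_b'}, {'e': 0.90, 'c': 'col_d'}]
print(compute_cutoff_threshold(scores, threshold=0.5))
# -> 0.15, the value just before the largest gap below the threshold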
|
def _filter_none_elems_from_dict(dict_: dict):
""" Given a dict (call it m), returns a new dict which contains all the
non-null (non-none) elements of m.
Args:
dict_: The dict to return the non-null elements of.
Returns:
A new dict with all the non-null elements of <dict_>.
"""
return {k: v for k, v in dict_.items() if v is not None}
|
def stripSuffix(glyphName):
"""strip away unnecessary suffixes from a glyph name"""
if glyphName.find('.') != -1:
baseName = glyphName.split('.')[0]
if glyphName.find('.sc') != -1:
baseName = '.'.join([baseName, 'sc'])
return baseName
else:
return glyphName
|
def escapeHtmlNoBreaks(data):
"""
Escape &, <, and > (no line breaks) in a unicode string of data.
"""
# must do ampersand first
return data.replace(u"&", u"&").replace(u">", u">").\
replace(u"<", u"<")
|
def bbox_str_to_list(bbox: str):
""" Parse the bbox query param and return a list of floats """
bboxList = bbox.split(',')
return list(map(float, bboxList))
|
def correct_text(text):
"""Because Google gives English, not commands"""
text = text.lower()
text = text.replace("-", " ")
text = text.replace("aiden", "8 and")
text = text.split(" ")
conversions = [
[["one", "won"], "1"],
[["to", "too", "two"], "2"],
[["three", "free"], "3"],
[["four", "for", "ford"], "4"],
[["five"], "5"],
[["six", "stix"], "6"],
[["seven"], "7"],
[["eight", "ate", "hate"], "8"],
[["nine"], "9"],
[["ten"], "10"],
[["+"], "and"],
[["x"], "by"],
[["buy"], "by"],
[["criticize", "play"], "create a size"],
[["write"], "right"],
[["op"], "up"],
[["run"], "room"]
]
for i in range(len(text)):
for conversion in conversions:
if text[i] in conversion[0]:
text[i] = conversion[1]
return text
|
def build_person(first_name, last_name):
"""Return a dictionary information about a person."""
person = {'first': first_name, 'last': last_name}
print("The person you just added")
return person
|
def sign(x):
"""Returns the sign of `x` as `-1`, `0`, or `+1`."""
return 0 if x == 0 else +1 if x > 0 else -1
|
def set_new_exist_frac(msegs, aeo_years, bldg):
""" Determine cumulative new vs. existing structures by year.
Attributes:
msegs (dict): Data on new and existing homes (residential) and new and
existing square feet (commercial), broken down by building type.
aeo_years (list): Modeling time horizon.
bldg (string): Building type energy data is currently being retrieved
for.
Returns:
Fractions of the total building stock that are existing or newly
constructed since the first year in the modeling time horizon.
"""
# Initialize dict of supporting data for new/existing structure calcs.
new_constr = {
"annual new": {}, "annual total": {}}
# Initialize dict to store new vs. existing structure fractions
new_exist_frac = {"new": {}, "existing": {}}
# Determine annual and total new construction for each year (by new
# homes for the residential sector, by square feet for commercial)
if bldg in ["single family home", "mobile home",
"multi family home"]:
new_constr["annual new"] = {yr: msegs["new homes"][yr] for
yr in aeo_years}
new_constr["annual total"] = {yr: msegs["total homes"][yr] for
yr in aeo_years}
else:
new_constr["annual new"] = {yr: msegs["new square footage"][yr] for
yr in aeo_years}
new_constr["annual total"] = {yr: msegs["total square footage"][yr] for
yr in aeo_years}
# Find the cumulative fraction of new buildings constructed in all
# years since the beginning of the modeling time horizon
# Set cumulative new homes or square footage totals
for yr in aeo_years:
if yr == aeo_years[0]:
new_exist_frac["new"][yr] = new_constr["annual new"][yr]
else:
new_exist_frac["new"][yr] = new_constr["annual new"][yr] + \
new_exist_frac["new"][str(int(yr) - 1)]
# Divide cumulative new home or square footage totals by total
# new homes or square footage to arrive at cumulative new fraction
new_exist_frac["new"] = {
key: val / new_constr["annual total"][key] for key, val in
new_exist_frac["new"].items()}
# Cumulative existing fraction equals 1 - cumulative new fraction
new_exist_frac["existing"] = {key: (1 - val) for key, val in
new_exist_frac["new"].items()}
return new_exist_frac
|
def get_py_annot_method_names(line_data, annot, val):
""" Function checks for the annotation condition in python files
@parameters
line_data: File content in list format
annot: Annotation condition (Ex: @Test)
val: index pointer that helps in getting method name
@return
    This function returns function/method names that have the given annotation"""
data = []
for i, _ in enumerate(line_data):
if annot in line_data[i]:
if str(line_data[i]).strip().split(" ")[0] == "def": # pragma: no mutate
func_name = line_data[i + 1 + val].strip().split(" ")[1].split("(")[0] # pragma: no mutate
data.append(func_name)
else:
for j in range(i, len(line_data)):
if str(line_data[j]).strip().split(" ")[0] == "def": # pragma: no mutate
func_name = line_data[j].strip().split(" ")[1].split("(")[0] # pragma: no mutate
data.append(func_name)
break
return data
|
def iterate_json_keys_for_value(jsonObj, key, value):
"""Return True if *jsonObj* contains a toplevel *key* set to *value*."""
for i in jsonObj:
if i[key] == value:
return True
return False
|
def fore_num(num: int) -> bytes:
"""Set terminal background color to a numbered color (0-255)."""
return b"\x1b[38;5;%dm" % (num)
|
def uniq(lst):
"""
order-preserving unique
"""
seen = set()
seen_add = seen.add
return [x for x in lst if not (x in seen or seen_add(x))]
|
def toascii(what):
""" convert to ascii. """
return what.encode('ascii', 'replace')
|
def siteType_to_idx(sites):
"""
Convert site letters to indices (where "a" = 0, "b" = 1, etc.).
Args
----
sites: str or list
Site label "a", "b", etc. to convert into zero-indexing from "a".
"""
# If multiple sites passed as single string, remove all whitespace and
# iterate through string.
if type(sites) == str:
sites = sites.lower().replace(" ", "")
return [ord(s.lower())-ord("a") for s in sites]
|
def string_to_array(s: str) -> list:
"""
A function to split a string and
convert it into an array of words
    :param s: the string to split
    :return: the list of words
"""
return s.split(' ')
|
def dict_select_choose(input_dict, keys):
"""
Trim a dictionary down to selected keys, if they are in the dictionary.
Args:
input_dict (dict): the dictionary to trim
keys (List): list of keys desired in the final dict
Returns:
trimmed dictionary
"""
    return {k: input_dict[k] for k in keys if k in input_dict}
|