def sum_digit(i):
"""Die Funktion berechnet die Quersumme von i."""
summe = 0
for digit in str(i):
summe += int(digit)
return summe
|
def sort_key(buffer):
"""Returns a sort key such that "simpler" buffers are smaller than
"more complicated" ones.
We define sort_key so that x is simpler than y if x is shorter than y or if
they have the same length and x < y lexicographically. This is called the
shortlex order.
The reason for using the shortlex order is:
1. If x is shorter than y then that means we had to make fewer decisions
in constructing the test case when we ran x than we did when we ran y.
2. If x is the same length as y then replacing a byte with a lower byte
corresponds to reducing the value of an integer we drew with draw_bits
towards zero.
3. We want a total order, and given (2) the natural choices for things of
the same size are either the lexicographic or colexicographic orders
(the latter being the lexicographic order of the reverse of the string).
Because values drawn early in generation potentially get used in more
places they potentially have a more significant impact on the final
result, so it makes sense to prioritise reducing earlier values over
later ones. This makes the lexicographic order the more natural choice.
"""
return (len(buffer), buffer)
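# Illustrative usage (not part of the original snippet): shortlex puts shorter
# buffers first and orders equal-length buffers lexicographically.
assert sorted([b"zz", b"a", b"ba", b"ab"], key=sort_key) == [b"a", b"ab", b"ba", b"zz"]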
|
def simple_lang(pressure, n_total, k_const):
"""A simple Langmuir equation returning loading at a pressure."""
return (n_total * k_const * pressure / (1 + k_const * pressure))
|
def translate_lon_to_geos5_native(longitude):
"""See function above"""
return ((longitude + 180) / 0.625)
|
def graph_return(resp, keys):
"""Based on concepts of GraphQL, return specified subset of response.
Args:
resp: dictionary with values from function
keys: list of keynames from the resp dictionary
Returns:
        a list of the values from `resp` for the requested `keys`, in order (or
        the single value when only one key is requested)
Raises:
RuntimeError: if `keys` is not a list or tuple
"""
    if not (isinstance(keys, (list, tuple)) and len(keys)):
raise RuntimeError(f'Expected list of keys for: `{resp.items()}`, but received `{keys}`')
ordered_responses = [resp.get(key, None) for key in keys]
return ordered_responses if len(ordered_responses) > 1 else ordered_responses[0]
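# Illustrative usage (not part of the original snippet): multiple keys yield a
# list of values in key order; a single key yields the bare value.
assert graph_return({"a": 1, "b": 2, "c": 3}, ["a", "c"]) == [1, 3]
assert graph_return({"a": 1, "b": 2, "c": 3}, ["b"]) == 2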
|
def binary_search(array: list, target: int) -> bool:
""" searches through a sorted list to find a target integer """
mid = len(array) // 2
if len(array) < 1:
return False
if len(array) == 1:
return array[0] == target
if array[mid] < target:
return binary_search(array[mid:], target)
elif array[mid] > target:
return binary_search(array[:mid], target)
else:
return True
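# Illustrative usage (not part of the original snippet): the input list must
# already be sorted in ascending order.
assert binary_search([1, 3, 5, 7, 9], 7)
assert not binary_search([1, 3, 5, 7, 9], 4)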
|
def _find_calframe(calframes, calpaths, values, calname, verbose=False):
""" Find the calibration frame and path from given dicts for dict values.
"""
if calframes is None:
calpath = None
calframe = None
else:
try:
calframe = calframes[values]
calpath = calpaths[values]
except (KeyError, IndexError):
calpath = None
calframe = None
if verbose:
print(f"{calname} not found for the combination [{values}]: Skip process.")
return calframe, calpath
|
def merge_configs(driver: dict, client: dict) -> dict:
"""
Merge Driver and Client config. The Client configs will overwrite matching keys in the Driver config.
Args:
driver (dict): driver dictionary of configs
client (dict): client dictionary of configs
Returns:
Merged configs (dict)
"""
return {**driver, **client}
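# Illustrative usage (not part of the original snippet): client values win on
# key collisions.
assert merge_configs({"host": "a", "port": 80}, {"port": 8080}) == {"host": "a", "port": 8080}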
|
def partition(nums, lo, hi, idx):
"""
Partition using nums[idx] as value.
Note lo and hi are INCLUSIVE on both ends and idx must be valid index.
    Comparisons can be counted by populating nums with RecordedItem instances.
"""
if lo == hi:
return lo
nums[idx], nums[lo] = nums[lo], nums[idx]
i = lo
j = hi + 1
while True:
while True:
i += 1
if i == hi:
break
if nums[lo] < nums[i]:
break
while True:
j -= 1
if j == lo:
break
if nums[j] < nums[lo]:
break
# doesn't count as comparing two values
if i >= j:
break
nums[i], nums[j] = nums[j], nums[i]
nums[lo], nums[j] = nums[j], nums[lo]
return j
|
def batches_list(project='batch', n_batches=5):
"""
Create buttons corresponding to number of batches inside the given project.
Args:
project (str): name of the project, *default:* ``batch``
n_batches (int): number of batches inside this project, *default:* ``5``
Returns:
list: list of tuples ``(project, batch_id, batch_name)``
Example:
>>> batches_list()
[('batch', 1, 'Batch 1'), ('batch', 2, 'Batch 2'), ('batch', 3, 'Batch 3'), ('batch', 4, 'Batch 4'), ('batch', 5, 'Batch 5')]
>>> batches_list(project='test', n_batches=3)
[('test', 1, 'Batch 1'), ('test', 2, 'Batch 2'), ('test', 3, 'Batch 3')]
"""
batches_links = [(project, i+1, f"Batch {i+1}") for i in range(n_batches)]
return batches_links
|
def _get_tracklet(tracks: dict, idx: int) -> list:
"""Get a tracklet by the first object ID"""
target = [t for t in tracks.values() if t[0] == idx]
if target:
return target[0]
else:
raise ValueError("Object ID not found.")
|
def _format_spacing(details):
"""Formats spacing to build weather section."""
length = 19
spaces = ''
if len(details) < length:
for i in range(length - len(details) - 1):
spaces += " "
return spaces
|
def get_scene(videoname):
"""ActEV scene extractor from videoname."""
s = videoname.split("_S_")[-1]
s = s.split("_")[0]
return s[:4]
|
def get_resume_name(stage_name):
"""return a new name with resume and an increasing index"""
split_stage_name = stage_name.split("_")
if len(split_stage_name) > 2 and split_stage_name[-2] == "resume":
resume_index = int(split_stage_name[-1]) + 1
return "_".join(split_stage_name[:-2] + ["resume_{}".format(resume_index)])
else:
return stage_name + "_resume_0"
|
def greedy_cow_transport(cows, limit=10):
"""
Uses a greedy heuristic to determine an allocation of cows that attempts to
minimize the number of spaceship trips needed to transport all the cows. The
returned allocation of cows may or may not be optimal.
The greedy heuristic should follow the following method:
1. As long as the current trip can fit another cow, add the largest cow that will fit
to the trip
2. Once the trip is full, begin a new trip to transport the remaining cows
Does not mutate the given dictionary of cows.
Parameters:
cows - a dictionary of name (string), weight (int) pairs
limit - weight limit of the spaceship (an int)
Returns:
A list of lists, with each inner list containing the names of cows
transported on a particular trip and the overall list containing all the
trips
"""
import operator
totalTrips = []
sorted_cows = sorted(
[(name, weight)
for name, weight in cows.items()
if cows[name] <= limit],
key=operator.itemgetter(1),
reverse=True
)
repeat = True
while repeat:
currentTrip = []
currentTotal = 0
for name, weight in sorted_cows:
if currentTotal+weight <= limit:
currentTrip.append(name)
currentTotal += weight
for name in currentTrip:
cow = (name, cows[name])
sorted_cows.remove(cow)
totalTrips.append(currentTrip)
repeat = len(sorted_cows) > 0
return totalTrips
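# Illustrative usage (not part of the original snippet; names are made up):
# the heaviest cow that still fits is added first on every trip.
example_cows = {"Betsy": 9, "Henrietta": 7, "Maggie": 3, "Florence": 2}
assert greedy_cow_transport(example_cows, limit=10) == [["Betsy"], ["Henrietta", "Maggie"], ["Florence"]]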
|
def cmp(a, b):
"""replaces the missing built-in in Python3"""
if a is None and b is None:
return 0
elif a is None:
return -1
elif b is None:
return 1
else:
return (a > b) - (a < b)
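# Illustrative usage (not part of the original snippet): the replacement works
# with functools.cmp_to_key for Python 3 sorting, and sorts None first.
import functools
assert sorted([3, None, 1, 2], key=functools.cmp_to_key(cmp)) == [None, 1, 2, 3]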
|
def five_interp(x, a0, a1, a2, a3, a4):
"""``Approximation degree = 5``
"""
return a0 + a1 * x + a2 * (x ** 2) + a3 * (x ** 3) + a4 * (x ** 4)
|
def filter_invalid_flags(item):
"""Filter our all flags not needed for getting the compilestats."""
filter_list = ["-O1", "-O2", "-O3", "-Os", "-O4"]
prefix_list = ['-o', '-l', '-L']
result = item not in filter_list
result = result and not any([item.startswith(x) for x in prefix_list])
return result
|
def is_palindrome(number):
""" Returns True if the variable passed is a palindrome, False otherwise. """
numlist = list(str(number))
if len(numlist) <= 0:
return False
if len(numlist) == 1:
return True
i = 0
while i <= len(numlist) / 2:
if numlist[i] != numlist[-(i+1)]:
return False
i += 1
return True
|
def mscb(t):
"""
Find the index of the most significant change bit,
the bit that will change when t is incremented
aka the power of 2 boundary we are at
>>> mscb(0)
0
>>> mscb(1)
1
>>> mscb(7)
3
>>> mscb(8)
0
"""
return (t^(t+1)).bit_length()-1
|
def parse_tripos_bond(mol2):
"""
Parse Tripos MOL2 BOND records to a dictionary
:param mol2: Tripos MOL2 file as string
:type mol2: :py:str
:return: Tripos bond records
:rtype: :py:dict
"""
read = False
bond_dict = {}
headers = ('b_start', 'b_end', 'b_type')
for line in mol2.split('\n'):
if line.startswith('@<TRIPOS>BOND'):
read = True
continue
if line.startswith('@<TRIPOS>') and read:
read = False
break
if read:
line = line.split()
if line:
bond_dict[int(line[0])] = dict(zip(headers, line[1:]))
for value in bond_dict.values():
for key in ('b_start', 'b_end'):
value[key] = int(value[key])
return bond_dict
|
def keys_and_values(d):
"""Return the keys and values as separate lists.
Sorted by the key value.
"""
return [list(keys_values)
for keys_values
in list(zip(*sorted(list(d.items()))))]
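# Illustrative usage (not part of the original snippet): keys and values come
# back as two lists, ordered by key.
assert keys_and_values({"b": 2, "a": 1, "c": 3}) == [["a", "b", "c"], [1, 2, 3]]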
|
def min_list(lst):
"""
A helper function for finding the minimum of a list of integers where some of the entries might be None.
Args:
lst (list): The list.
Returns:
int: The entry with the minimal value.
"""
if len(lst) == 0:
return None
elif len(lst) == 1:
return lst[0]
elif all([entry is None for entry in lst]):
return None
return min([entry for entry in lst if entry is not None])
|
def format_seconds(seconds):
"""
Return ':ss' if seconds are < 60, 'm:ss' if minutes are less than 60,
'h:mm:ss' if more than an hour
:param seconds:
:return:
"""
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
if hours:
return '%d:%02d:%02d' % (hours, minutes, seconds)
elif minutes:
return '%d:%02d' % (minutes, seconds)
else:
return ':%02d' % seconds
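# Illustrative usage (not part of the original snippet):
assert format_seconds(9) == ":09"
assert format_seconds(75) == "1:15"
assert format_seconds(3661) == "1:01:01"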
|
def pressure_from_altitude(y):
"""
Calculate standard atmospheric pressure based on an altitude in m. The
basic formula can be found many places. For instance, Munson, Young, and
Okiishi, 'Fundamentals of Fluid Mechanics', p. 51.
Enter: y: altitude in m.
Exit: p: pressure in N/m/m.
"""
p0 = 101325 # Pa, standard pressure at sea level
L = 0.0065 # K/m, temperature lapse rate
T0 = 288.15 # K, reference temperature at sea level
    g = 9.80665  # m/s/s, standard gravity at sea level
# I've used the more authoritative values from CIPM-2007 for these constants
M = 0.02896546 # kg/mol, molar mass of dry air, from CIPM-2007
R = 8.314472 # J/(mol*K), universal gas constant, from CIPM-2007
# For exceptionally high altitudes, treat this as nearly zero.
if y >= T0 / L - 1:
y = T0 / L - 1
p = p0*(1-L*y/T0)**(g*M/(R*L))
return p
|
def rescale(value, orig_min, orig_max, new_min, new_max):
"""
Rescales a `value` in the old range defined by
`orig_min` and `orig_max`, to the new range
`new_min` and `new_max`. Assumes that
`orig_min` <= value <= `orig_max`.
Parameters
----------
value: float, default=None
The value to be rescaled.
orig_min: float, default=None
The minimum of the original range.
    orig_max: float, default=None
        The maximum of the original range.
new_min: float, default=None
The minimum of the new range.
    new_max: float, default=None
        The maximum of the new range.
Returns
----------
new_value: float
The rescaled value.
"""
orig_span = orig_max - orig_min
new_span = new_max - new_min
try:
scaled_value = float(value - orig_min) / float(orig_span)
except ZeroDivisionError:
orig_span += 1e-6
scaled_value = float(value - orig_min) / float(orig_span)
return new_min + (scaled_value * new_span)
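# Illustrative usage (not part of the original snippet): the midpoint of the
# original range maps to the midpoint of the new range.
assert rescale(5, 0, 10, 0, 100) == 50.0
assert rescale(0.5, 0, 1, -1, 1) == 0.0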
|
def gauss(A, b):
"""
@param A: matrix
@param b: vector
    @return: row echelon form of A|b (forward elimination only; back-substitution is not performed)
"""
n = len(A)
for i in range (0,n):
if A[i][i] != 0:
p = 1 / A[i][i]
for j in range (i,n):
A[i][j] *= p
            b[i] *= p
for k in range(i+1,n):
for j in range(i+1,n):
A[k][j] -= A[k][i]*A[i][j]
b[k] -= A[k][i]*b[i]
A[k][i]=0
return A,b
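# Illustrative usage (not part of the original snippet): forward elimination of
# the system 2x + 4y = 6, x + 3y = 4 (solution x = 1, y = 1). Back-substitution
# on the result gives y = 1, then x = 3 - 2*1 = 1.
A_example, b_example = gauss([[2.0, 4.0], [1.0, 3.0]], [6.0, 4.0])
assert A_example == [[1.0, 2.0], [0.0, 1.0]] and b_example == [3.0, 1.0]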
|
def get_first_non_empty(lst):
"""
lst = [[], [], 1, [2, 3, 4], [], []] -> 1
lst = [[], [], False, (), [2, 3, 4], [], []] -> [2, 3, 4]
"""
for element in lst:
if element:
return element
|
def _get_lua_base_module_parts(base_path, base_module):
"""
Get a base module from either provided data, or from the base path of the package
Args:
base_path: The package path
base_module: None, or a string representing the absence/presence of a base
module override
Returns:
Returns a list of parts of a base module based on base_path/base_module.
If base_module is None, a default one is created based on package name.
"""
# If base module is unset, prepare a default.
if base_module == None:
return ["fbcode"] + base_path.split("/")
# If base module is empty, return the empty list.
elif not base_module:
return []
    # Otherwise, split it on the module separator.
else:
return base_module.split(".")
|
def substitute_variables(model_params_variables, model_data_raw):
"""
    :param model_params_variables: mapping from parameter names to their substitution values
    :param model_data_raw: iterable of comma-separated argument strings
    :return: list of argument strings with variables substituted
"""
model_data_list = []
for argument_raw in model_data_raw:
argument_split_raw = argument_raw.split(",")
argument_split = []
for parameter_raw in filter(lambda x: x != "", argument_split_raw):
parameter = parameter_raw.strip()
if parameter in model_params_variables:
argument_split.append(model_params_variables[parameter])
else:
if "inst" in parameter:
last_time_str = model_data_list[-1][1]
last_time = float(last_time_str)
new_time = last_time + 1
new_time_str = str(new_time)
argument_split.append(new_time_str)
elif parameter.startswith("-e") and "_" in parameter:
flag = parameter.split("_")[0]
argument_split.append(flag)
else:
argument_split.append(parameter)
model_data_list.append(argument_split)
# generate clean model data
model_data = []
for argument in model_data_list:
model_data.append(",".join(argument))
return model_data
|
def is_category_suspicious(category, reputation_params):
"""
determine if category is suspicious in reputation_params
"""
return category and category.lower() in reputation_params['suspicious_categories']
|
def max_sequence(arr):
"""Find the largest sum of any contiguous subarray."""
best_sum = 0 # or: float('-inf')
current_sum = 0
for x in arr:
current_sum = max(0, current_sum + x)
best_sum = max(best_sum, current_sum)
return best_sum
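# Illustrative usage (not part of the original snippet): classic Kadane example;
# the best subarray is [4, -1, 2, 1] with sum 6.
assert max_sequence([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
assert max_sequence([-1, -2, -3]) == 0  # all-negative input returns 0 with this variant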
|
def trailing_zeros(n):
"""Count trailing zero bits in an integer."""
if n & 1: return 0
if not n: return 0
if n < 0: n = -n
t = 0
while not n & 0xffffffffffffffff: n >>= 64; t += 64
while not n & 0xff: n >>= 8; t += 8
while not n & 1: n >>= 1; t += 1
return t
|
def config_writer(proxies):
"""Converting proxies into a config string
"""
config = ""
def get_proxy(location, address):
config = (
"\n\tlocation "
+ location
+ "{\n\t\tproxy_set_header X-Real-IP $remote_addr;\n\t\tproxy_pass "
+ address
+ ";\n\t}"
)
return config
def get_static(location, address):
config = "\n\tlocation " + location + "{\n\t\troot " + address + ";\n\t}"
return config
last_proxy = ""
for proxy in proxies:
if last_proxy != proxy["nameserver"]:
if proxy["id"] != 1:
config += "\n}"
config += "\nserver {\n\tlisten 80"
if proxy["id"] == 1:
config += " default_server"
config += ";\n\tserver_name " + proxy["nameserver"] + ";"
if proxy["type"] == "proxy":
config += get_proxy(proxy["location"], proxy["address"])
elif proxy["type"] == "static":
config += get_static(proxy["location"], proxy["address"])
last_proxy = proxy["nameserver"]
config += "\n}"
return config
|
def parser_parental_rating_Descriptor(data,i,length,end):
"""\
parser_parental_rating_Descriptor(data,i,length,end) -> dict(parsed descriptor elements).
This descriptor is not parsed at the moment. The dict returned is:
{ "type": "parental_rating", "contents" : unparsed_descriptor_contents }
(Defined in ETSI EN 300 468 specification)
"""
return { "type" : "parental_rating", "contents" : data[i+2:end] }
|
def quintic_ease_out(p):
"""Modeled after the quintic y = (x - 1)^5 + 1"""
f = p - 1
return (f * f * f * f * f) + 1
|
def get_status(status):
"""Returns a numeric status value."""
status_map = {
'running': 1,
'stopped': 0}
return status_map[status]
|
def safe_format_amount(commodity, amount):
"""
Formats an amount with a commodity, or without it if the commodity is None
"""
if commodity is None:
return str(amount)
return commodity.format_amount(amount)
|
def select(headers, vectors):
"""
Select a vector whose .host attribute matches the Host header
"""
host_header = headers.get("Host", "")
if not host_header:
return None
for v in vectors:
if v['.host'] == host_header:
return v
return None
|
def vec_dotprod(vector1, vector2):
"""
Dot product of 2 vectors
Parameters
----------
vector1 : list
Input vector 1.
vector2 : list
Input vector 2.
Returns
-------
dotp_res : float
Resulting dot product.
"""
dotp_res = sum(v1_i * v2_i for v1_i, v2_i in zip(vector1, vector2))
return dotp_res
|
def strip_url_parameters (url):
"""Remove any client or user parameters from this url, and return
the string without them (it leaves urls w/o parameters as-is)"""
return url.split('?')[0]
|
def extract_uid_metadata(uid):
"""Extract metadata for a given UID.
Apply to all UIDs: see `extract_all_uid_metadata`.
Return extracted metadata with format
[uid, label, speaker, paired_image, production, frames].
"""
uid_parts = uid.split("_")
extracted = [
uid, uid_parts[0], uid_parts[1],
"{}_{}".format(uid_parts[2], uid_parts[3]), uid_parts[4], uid_parts[5]]
return extracted
|
def split_unit_quantity(string):
"""Given a physical quantity as a string, returns a tuple containing the quantity's magnitude and unit, both
as strings."""
quantity = ""
index = 0
quantity_characters = [str(num) for num in range(10)] + [".", "-", "+"]
for character in string:
if character in quantity_characters:
            quantity += character
index += 1
else:
break
symbol = string[index:]
return quantity, symbol
|
def sendController(msg, q):
"""Communicate with connected controller
Arguments:
msg {str} -- Message to be sent
q {socket.socket} -- Controller's socket
Returns:
int -- Error Code:
0) Error
1) Success
"""
    try:
        if isinstance(msg, str):
            msg = msg.encode()  # socket.send() requires bytes in Python 3
        q.send(msg)
        return 1  # success
except Exception as ex:
print('[SERVER] Error:', ex)
return 0
|
def check_list(listvar):
"""Turns single items into a list of 1."""
if not isinstance(listvar, list):
listvar = [listvar]
return listvar
|
def sentence(*args):
"""
SENTENCE thing1 thing2
SE thing1 thing2
(SENTENCE thing1 thing2 thing3 ...)
(SE thing1 thing2 thing3 ...)
outputs a list whose members are its inputs, if those inputs are
not lists, or the members of its inputs, if those inputs are lists.
"""
result = []
for arg in args:
if isinstance(arg, list):
result.extend(arg)
else:
result.append(arg)
return result
|
def build_counts(haplotypes):
"""
# ========================================================================
BUILD COUNTS
PURPOSE
-------
    Builds a list of the counts of each haplotype in a passed haplotype
list.
Example:
haplotype1.sequence = "AAA"
haplotype1.count = 3
haplotype1.sequence = "CGC"
haplotype1.count = 5
haplotype1.sequence = "TCC"
haplotype1.count = 1
haplotypes = [haplotype1, haplotype2, haplotype3]
build_counts(haplotypes) -> [3, 5, 1]
INPUT
-----
[HAPLOTYPE LIST] [haplotypes]
The list of Haplotypes.
RETURN
------
[INT LIST]
A list of the counts of each haplotype, in the same order as the
original haplotype list.
# ========================================================================
"""
counts = []
for haplotype in haplotypes:
count = haplotype.count
counts.append(count)
return counts
|
def cell_start(position, cell_width, wall_width):
"""Compute <row, col> indices of top-left pixel of cell at given position"""
row, col = position
row_start = wall_width + row * (cell_width + wall_width)
col_start = wall_width + col * (cell_width + wall_width)
return (row_start, col_start)
|
def find_distance_from_1_to(number):
"""Find Manhattan Distance between number and 1 in square."""
last_corner = 1
steps = 1
side = 0
while last_corner < number:
if side == 0:
steps += 2
last_corner += steps - 1
side += 1
if side == 4:
side = 0
to_middle = (steps - 1) // 2
middle = last_corner - to_middle
return to_middle + abs(number - middle)
|
def process_changes(results):
"""Split GCS change events into trivial acks and builds to further process."""
acks = [] # pubsub message ids to acknowledge
todo = [] # (id, job, build) of builds to grab
# process results, find finished builds to process
for ack_id, message in results:
if message.attributes['eventType'] != 'OBJECT_FINALIZE':
acks.append(ack_id)
continue
obj = message.attributes['objectId']
if not obj.endswith('/finished.json'):
acks.append(ack_id)
continue
job, build = obj[:-len('/finished.json')].rsplit('/', 1)
job = 'gs://%s/%s' % (message.attributes['bucketId'], job)
todo.append((ack_id, job, build))
return acks, todo
|
def get_pair_equality(row, column_1, column_2):
"""
Helper function used by pair_equality, to test values of two columns for
equality in a single row.
:param row:
Row from dataframe
:param column_1:
Name of first column
:param column_2:
Name of second column
:return:
1 if and only if the row has the same value in two given columns
"""
if row[column_1] == row[column_2]:
return 1
else:
return 0
|
def sum_word_len(tokens):
"""Given list of words, return sum of length of words (int)"""
return sum([len(token) for token in tokens])
|
def intersection(nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
nums1=set(nums1)
nums2=set(nums2)
return list(nums1.intersection(nums2))
|
def nexthop_is_local(next_hop):
"""
Check if next-hop points to the local interface.
Will be True for Connected and Local route strings on Cisco devices.
"""
interface_types = (
'Eth', 'Fast', 'Gig', 'Ten', 'Port',
'Serial', 'Vlan', 'Tunn', 'Loop', 'Null'
)
    for int_type in interface_types:
        if next_hop.startswith(int_type):
            return True
    return False
|
def part2(tree_line):
""" Product of the no. of trees on diff. slopes """
slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]
height = len(tree_line)
width = len(tree_line[0])
product_trees = 1
for slope in slopes:
x_coord, y_coord, trees = 0, 0, 0
while y_coord < height - 1:
x_coord = (x_coord + slope[0]) % width
y_coord = y_coord + slope[1]
if tree_line[y_coord][x_coord] == "#":
trees += 1
product_trees *= trees
return product_trees
|
def _is_typing_object(type_object):
"""Checks to see if a type belong to the typing module.
Parameters
----------
type_object: type or typing._GenericAlias
The type to check
Returns
-------
bool
True if `type_object` is a member of `typing`.
"""
return type_object.__module__ == 'typing'
|
def PoolVec2Array_to_list(array):
"""Return a copy of the array as a list of 2-element lists of floats.
This is not efficient."""
result = []
for elt in array:
v2 = [elt.x, elt.y]
result.append(v2)
return result
|
def _parse_tool_list(tl):
"""
A convenience method for parsing the output from an API call to a Galaxy
instance listing all the tools installed on the given instance and
formatting it for use by functions in this file. Sample API call:
`https://test.galaxyproject.org/api/tools?in_panel=true`
:type tl: list
:param tl: A list of dicts with info about the tools
:rtype: tuple of lists
:return: The returned tuple contains two lists: the first one being a list
of tools that were installed on the target Galaxy instance from
the Tool Shed and the second one being a list of custom-installed
tools. The ToolShed-list is YAML-formatted.
Note that this method is rather coarse and likely to need some handholding.
"""
ts_tools = []
custom_tools = []
for ts in tl:
# print "%s (%s): %s" % (ts['name'], ts['id'], len(ts.get('elems', [])))
for t in ts.get('elems', []):
tid = t['id'].split('/')
if len(tid) > 3:
tool_already_added = False
for added_tool in ts_tools:
if tid[3] in added_tool['name']:
tool_already_added = True
if not tool_already_added:
ts_tools.append({'tool_shed_url': "https://{0}".format(tid[0]),
'owner': tid[2],
'name': tid[3],
'tool_panel_section_id': ts['id']})
# print "\t%s, %s, %s" % (tid[0], tid[2], tid[3])
else:
# print "\t%s" % t['id']
custom_tools.append(t['id'])
return ts_tools, custom_tools
|
def fill_in_missing_output_path(output, output_name, tool_inputs):
"""
Creates a path template for outputs that are missing one
This is needed for the descriptor to be valid (path template is required)
"""
# Look for an input with the same name as the output and use its value key
found = False
for input in tool_inputs:
if input["name"] == output_name:
output["path-template"] = input["value-key"]
found = True
break
# If no input with the same name was found, use the output ID
if not found:
output["path-template"] = output["id"]
return output
|
def create_label_groups(groups):
"""Create info dictionaries for label groups."""
group_dicts = []
if groups:
for item in groups:
dic = {}
if len(item) == 1:
dic['label'] = item[0]
dic['name'] = item[0]
elif len(item) == 2:
dic['label'] = item[0]
dic['name'] = item[1]
else:
raise ValueError('Label group takes 1 or 2 arguments')
group_dicts.append(dic)
return group_dicts
|
def _dict_to_yaml(data, indent=""):
"""
Helper function for recursively turning the json data into yaml strings.
"""
yaml_data = ""
for (k, v) in data.items():
if isinstance(v, dict) and "lazyObject" in v.keys():
name = v["lazyObject"]
yaml_data += f"{indent}{k}: {name}\n"
yaml_data += _dict_to_yaml(v["kwargs"], indent + " " * 4)
else:
yaml_data += f"{indent}{k}: {v}\n"
return yaml_data
|
def axis_diff(direction, rect_start, rect_finish,
boundaries_start, boundaries_finish):
"""
    Detect whether a segment is outside its boundaries and return the
    difference between segment and boundary; if the segment is inside the
    boundaries, return 0.
"""
if direction < 0:
if rect_start < boundaries_start:
return rect_start - boundaries_start
elif direction > 0:
if rect_finish > boundaries_finish:
return rect_finish - boundaries_finish
return 0
|
def make_pipeline_image_name(pipeline_class_name):
""" Create the string for the image for this pipeline_class_name
'disdat-module[-submodule]...'
Args:
pipeline_class_name:
Returns:
str: The name of the image 'disdat-module[-submodule]...'
"""
return '-'.join(['disdat'] + pipeline_class_name.split('.')[:-1]).lower()
|
def _element_basis(string: str):
"""
Parse element and basis from string
Args: str
Returns: element, basis
"""
cut_list = string.split(".")
element = cut_list[0]
basis = " ".join(cut_list[1:])
return element, basis
|
def get_default_data():
"""Default data for Composer tests.
Returns:
dict: object with bands, product, url and output_dir.
"""
return {
"bands": [],
"product": "LC08_L1TP_221071_20170521_20170526_01_T1",
"url": "https://landsat-pds.s3.amazonaws.com/c1/L8/221/071",
"output_dir": "test_media/",
}
|
def differential_frequency(a, C, e, eref=1):
"""
    Differential frequency of events with energies (or another metric) greater than e,
    given a cumulative power-law fit of the form f = C*(e/eref)**-a to the flare energy
    distribution, where f is the cumulative frequency of flares with energies greater than e.
"""
return a*C*(e/eref)**(-a-1)
|
def find_span_binsearch(degree, knot_vector, num_ctrlpts, knot, **kwargs):
""" Finds the span of the knot over the input knot vector using binary search.
Implementation of Algorithm A2.1 from The NURBS Book by Piegl & Tiller.
The NURBS Book states that the knot span index always starts from zero, i.e. for a knot vector [0, 0, 1, 1];
if FindSpan returns 1, then the knot is between the interval [0, 1).
:param degree: degree
:type degree: int
:param knot_vector: knot vector
:type knot_vector: list, tuple
:param num_ctrlpts: number of control points
:type num_ctrlpts: int
:param knot: knot
:type knot: float
:return: span of the knot over the knot vector
:rtype: int
"""
# Get tolerance value
tol = kwargs.get('tol', 10e-6)
# In The NURBS Book; number of knots = m + 1, number of control points = n + 1, p = degree
# All knot vectors should follow the rule: m = p + n + 1
n = num_ctrlpts - 1
if abs(knot_vector[n + 1] - knot) <= tol:
return n
# Set max and min positions of the array to be searched
low = degree
high = num_ctrlpts
# The division could return a float value which makes it impossible to use as an array index
mid = (low + high) / 2
# Direct int casting would cause numerical errors due to discarding the significand figures (digits after the dot)
# The round function could return unexpected results, so we add the floating point with some small number
# This addition would solve the issues caused by the division operation and how Python stores float numbers.
# E.g. round(13/2) = 6 (expected to see 7)
mid = int(round(mid + tol))
# Search for the span
while (knot < knot_vector[mid]) or (knot >= knot_vector[mid + 1]):
if knot < knot_vector[mid]:
high = mid
else:
low = mid
mid = int((low + high) / 2)
return mid
|
def single_to_plural(string: str) -> str:
"""This function Convert singular to plural """
for char in string:
"""Checks weather it is word or not"""
if not char.isalpha():
raise ValueError('You did not input a word. ')
if string.endswith('y'):
if string.endswith(('ay', 'ey', 'iy', 'oy', 'uy')):
return string + 's'
else:
return string[:-1] + 'ies'
if string[-1:] in 'osxz':
return string + 'es'
elif string[-2:] in ['ch', 'sh']:
return string + 'es'
return string + 's'
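# Illustrative usage (not part of the original snippet):
assert single_to_plural("city") == "cities"
assert single_to_plural("day") == "days"
assert single_to_plural("bus") == "buses"
assert single_to_plural("church") == "churches"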
|
def convert_to_list (line):
"""
    Convert one row of the matrix, given as a whitespace-separated string of numbers, into a list of floats.
@type line: string
@param line: m columns of the matrix
@rtype: list
    @return: the row as a list of floats
"""
line_list = line.split()
i = 0
row = []
while i < len(line_list):
number = float(line_list[i])
        row.append(number)
i = i + 1
return row
|
def map_device(materials):
"""
Get (first) material, if available
:param materials: list with checked materials, can be empty
:return: device name, e.g. "trek-rad"
"""
if len(materials) == 0:
return ""
return materials[0]
|
def simple_name_from(long_name):
""" Extracts primary name from uniprot string containing all names.
    Additional names are given in brackets and parentheses
"""
out = []
buffer = []
in_bracket = 0
in_square_bracket = 0
for letter in long_name:
if letter == "(":
in_bracket += 1
buffer.append(letter)
elif letter == ")":
in_bracket -= 1
buffer.append(letter)
elif letter == "[":
in_square_bracket += 1
buffer.append(letter)
elif letter == "]":
in_square_bracket -= 1
buffer.append(letter)
else:
# If not in bracket
if in_bracket == 0 and in_square_bracket == 0:
if letter == " ":
buffer.append(letter)
elif buffer:
out.extend(buffer)
buffer = []
out.append(letter)
else:
out.append(letter)
else:
buffer.append(letter)
assert in_bracket == 0
assert in_square_bracket == 0
return "".join(out)
|
def extended_euclidean_gcd(a, b):
"""Copied from
http://en.wikibooks.org/wiki/Algorithm_Implementation/Mathematics/Extended_Euclidean_algorithm"""
x,y, u,v = 0,1, 1,0
while a != 0:
q,r = b//a,b%a; m,n = x-u*q,y-v*q
b,a, x,y, u,v = a,r, u,v, m,n
return b, x, y
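# Illustrative usage (not part of the original snippet): returns (g, x, y) with
# x*a + y*b == g.
g, x, y = extended_euclidean_gcd(5, 3)
assert (g, x, y) == (1, -1, 2) and x * 5 + y * 3 == g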
|
def _serialize_headers(headers):
"""Serialize CIMultiDictProxy to a pickle-able dict because proxy
objects forbid pickling:
https://github.com/aio-libs/multidict/issues/340
"""
# Mark strings as keys so 'istr' types don't show up in
# the cassettes as comments.
return {str(k): v for k, v in headers.items()}
|
def isprime_eratosteene(n):
"""Returns True if n is prime.
It uses the fact that a prime (except 2 and 3) is of form 6k - 1 or 6k + 1.
Function looks only at divisors of this form (after checking 2 & 3).
"""
if n == 2:
return True
if n == 3:
return True
if n % 2 == 0:
return False
if n % 3 == 0:
return False
i = 5
w = 2
while i * i <= n:
if n % i == 0:
return False
i += w
w = 6 - w
return True
|
def convert_array_to_string(array):
"""Returns a string containing the given array"""
string = '{'
for value in array:
string = string + str(value) + ','
if (len(string)!=1):
string = string[:-1]
return string + '}'
|
def workspace_command(cmd):
"""Simple command to always go to the workspace directory"""
return ' && '.join([
'cd {job.ws}',
cmd if not isinstance(cmd, list) else ' && '.join(cmd),
'cd ..',
])
|
def _numstages(ord, type='lowpass') -> int:
""" compute the number of stages based on the filter type. """
if type in ['lowpass', 'highpass', 'allpass']:
ncount = (ord-1)/2 + 1 if (ord % 2) else ord/2 # 6 coeffs/stage.
elif type in ['bandpass', 'bandstop']:
ncount = ord
else:
raise ValueError('filter type %s is not valid.' % type)
return int(ncount)
|
def IsFloat(s):
"""Is a string an floating point number?"""
try:
float(s)
return True
except ValueError:
return False
|
def get_darkvariance(camcol,band,run=None):
"""
data.sdss3.org/datamodel/files/BOSS_PHOTOOBJ/frames/RERUN/RUN/CAMCOL/frame.html
"""
DARK_VAR_CCD = {
0:{"u":9.61, "g":15.6025,"r":1.8225,
"i":7.84, "z":0.81},
1:{"u":12.6025,"g":1.44, "r":1.00,
"i":[5.76,6.25],"z":1.0},
2:{"u":8.7025, "g":1.3225, "r":1.3225,
"i":4.6225, "z":1.0},
3:{"u":12.6025,"g":1.96, "r":1.3225,
"i":[6.25,7.5625],"z":[9.61,12.6025]},
4:{"u":9.3025, "g":1.1025, "r":0.81,
"i":7.84, "z":[1.8225,2.1025]},
5:{"u":7.0225, "g":1.8225, "r":0.9025,
"i":5.0625, "z":1.21}
}
dark = DARK_VAR_CCD[camcol-1][band]
# ----------
# - output
if type(dark) == float:
return dark
if run is None:
raise ValueError("there is two dark-variance possibilites for "+\
" *camcol* %d, *band* %s "%(
camcol-1,band) + "Please, provide a *run*")
return dark[1] if run>1500 else dark[0]
|
def norm1(m):
"""
Return the L1-norm of the point m
"""
s = 0.0
for (name, value) in m.items():
s += abs(value)
return s
|
def direction(pos, prev):
"""
Determine the direction of travel given current and previous position, as
a unit vector and magnitude.
"""
dx, dy = pos[0] - prev[0], pos[1] - prev[1]
mag = 1
if abs(dx) > 1:
mag = 2
dx /= abs(dx)
if abs(dy) > 1:
mag = 2
dy /= abs(dy)
return int(dx), int(dy), mag
|
def elementwise_list_division(numerator, denominator, percentage=False):
"""
Simple method to element-wise divide a list by the values in another list of the same length.
"""
assert len(numerator) == len(denominator), 'Attempted to divide two lists of different lengths'
percentage_multiplier = 100. if percentage else 1.
return [n / d * percentage_multiplier for n, d in zip(numerator, denominator)]
|
def trim_by_part_and_type(response, type, part):
""" get specific data """
array = []
for item in response:
if item['type'] == type:
array.append(item[part])
return array
|
def get_summary(html_text):
"""Returns the summary part of the raw html text string of the wikipedia article.
:param html_text: The html content of an article.
:type html_text: str
:return: The summary of the input wikipedia article.
:rtype: str
"""
    # The summary ends before the first h tag.
    end_summary_index = html_text.find('<h')
    if end_summary_index == -1:
        # No heading found: the whole text is the summary.
        return html_text
    summary = html_text[:end_summary_index]
    return summary
|
def npy_cdouble_from_double_complex(var):
"""Cast a cython double complex to a numpy cdouble."""
res = "_complexstuff.npy_cdouble_from_double_complex({})".format(var)
return res
|
def splitlines(value):
"""
    Returns the value split into a list of its lines.
"""
return value.splitlines()
|
def post_add(db, usernick, message):
"""Add a new post to the database.
The date of the post will be the current time and date.
    Return the id of the newly created post or None if there was a problem"""
    if len(message) < 551:  # limit posts to 550 characters
cur = db.cursor()
cur.execute("INSERT INTO posts (usernick, content) VALUES(?,?)", (usernick, message,)) # add stuffs into the database
db.commit()
        # The newest post (the one just inserted) comes first, so row[0][0] is its id.
        row = list(cur.execute("SELECT id, timestamp, usernick, content, users.avatar FROM posts, users WHERE users.nick = posts.usernick ORDER BY timestamp DESC"))
        return row[0][0]
else:
return None
|
def dahua_brightness_to_hass_brightness(bri_str: str) -> int:
"""
Converts a dahua brightness (which is 0 to 100 inclusive) and converts it to what HASS
expects, which is 0 to 255 inclusive
"""
bri = 100
    if bri_str:
        bri = int(bri_str)
current = bri / 100
return int(current * 255)
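# Illustrative usage (not part of the original snippet): "50" maps to roughly
# half of the 0-255 HASS range; an empty string falls back to full brightness.
assert dahua_brightness_to_hass_brightness("50") == 127
assert dahua_brightness_to_hass_brightness("") == 255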
|
def bottom_up_fibonacci(n: int, return_ith: bool = False) -> object:
"""Returns the nth fibonacci number if return_ith=False, else it returns a
list containing all the ith fibonacci numbers, for i=0, ... , n.
For example, suppose return_ith == True and n == 5, then this function
returns [0, 1, 1, 2, 3, 5]. If return_ith == False, it returns simply 5.
Note: indices start from 0 (not from 1).
    This function uses a dynamic programming "bottom up" approach: we start by
finding the optimal solution to smaller sub-problems, and from there, we
build the optimal solution to the initial problem.
Time complexity: O(n)."""
if n == 0:
return n if not return_ith else [n]
if n == 1:
return n if not return_ith else [0, n]
fib = [0] * (n + 1)
fib[0] = 0
fib[1] = 1
for i in range(2, n + 1):
fib[i] = fib[i - 1] + fib[i - 2]
return fib[-1] if not return_ith else fib
|
def is_subtask(task):
"""Whether the task name represents a subtask."""
return ":" in task
|
def aggregate_count(items, col):
"""
    Aggregation function for Group By charts.
    Accepts a list and returns the count of the list's items (the `col`
    argument is not used).
return len(list(items))
|
def blur(blur_width=1.0, blur_height=1.0, sample_num_x=4, sample_num_y=4):
"""
Blur filter.
    blur_width = horizontal spacing between samples (scaled by 0.002 in the shader)
    blur_height = vertical spacing between samples (scaled by 0.002 in the shader)
sample_num_x = number of samples to apply on the X axis
sample_num_y = number of samples to apply on the Y axis
Author: SolarLune
"""
return ("""
// Name: Simple 16-Sample (Box?) Blur Effect
// Author: SolarLune
// Date Updated: 6/6/11
uniform sampler2D bgl_RenderedTexture;
void main(void)
{
float blur_width = 0.002 * """ + str(blur_width) + """;
float blur_height = 0.002 * """ + str(blur_height) + """;
int sample_num_x = """ + str(sample_num_x) + """;
int sample_num_y = """ + str(sample_num_y) + """;
vec4 color;
for (int i = -sample_num_x; i < sample_num_x; i++)
{
for (int j = -sample_num_y; j < sample_num_y; j++)
{
color += texture2D(bgl_RenderedTexture, vec2(gl_TexCoord[0].st) + vec2(i * blur_width, j * blur_height));
}
}
gl_FragColor = color / (sample_num_x*sample_num_y*4);
}
""")
|
def generate_winner_list(winners):
""" Takes a list of winners, and combines them into a string. """
return ', '.join([winner.username_raw for winner in winners])
|
def _string_hash(s):
"""String hash (djb2) with consistency between py2/py3 and persistency between runs (unlike `hash`)."""
h = 5381
for c in s:
h = h * 33 + ord(c)
return h
|
def subdivide(events: list, splits: list) -> list:
"""Split the given events to match the keys in the splits list"""
formatted_events = []
for i in range(len(splits)-1):
formatted_event = {}
formatted_event["temporalRange"] = [splits[i], splits[i + 1]]
        # Get the events of the enclosing event (there must always be exactly one)
for event in events:
tr = event["temporalRange"]
if tr[0] <= splits[i] and tr[1] >= splits[i+1]:
formatted_event["events"] = event["events"]
# Adding the formatted event to the return result
formatted_events.append(formatted_event)
return formatted_events
|
def calculate_alpha(alpha=0.5):
# type: (float)-> float
""" Calculates alpha for train_model (py27) """
alpha += 0.1
return alpha
|
def record_sets_fetcher(record):
"""Fetch a record's sets."""
return record.get('_oai', {}).get('sets', [])
|
def is_pon(item):
"""
:param item: array of tile 34 indices
:return: boolean
"""
if len(item) != 3:
return False
return item[0] == item[1] == item[2]
|
def getFromLongestMatchingValue(
objectList,
listOfValues,
keyToMatch,
caseInsensitive=True
):
"""
    Function to take a list of objects, a list of values and a key to match, and
    return the object with the longest matching value for that key, or an empty
    dict if no value matches for that key.
:param objectList: The list of objects.
:type objectList: list of dicts
:param listOfValues: A list of values to try to match
:type listOfValues: list of string values
:param keyToMatch: key in which to match the value
:type keyToMatch: str
:param caseInsensitive: Case insensitive value matching?
:type caseInsensitive: boolean
:returns: dict with longest matching value for specified key in object
"""
objectList = objectList.copy()
if caseInsensitive:
listOfValues = [k.lower() for k in listOfValues]
value = max(
[str(k) for k in listOfValues],
key=len
) if len(listOfValues) else None
if value and value in listOfValues:
listOfValues.remove(value)
for object in sorted(
objectList,
key=lambda i: len(i.get(keyToMatch, "")),
reverse=True
):
if (
object.get(keyToMatch, '').lower(
) if caseInsensitive else object.get(keyToMatch, '')
)==value:
return(object)
if len(listOfValues)>=1:
return(getFromLongestMatchingValue(
objectList,
listOfValues,
keyToMatch,
caseInsensitive
))
for object in sorted(
objectList,
key=lambda i: len(i.get(keyToMatch, "")),
reverse=False
):
generic = object.get(keyToMatch, '').lower(
) if caseInsensitive else object.get(keyToMatch, '')
generic = generic.split('-')[0] if '-' in generic else generic
if generic==value:
return(object)
return({})
|
def normal_log_deriv(x, mu, sig):
"""
Derivative of the log of the normal distribution with respect to x
    :param x: value to be tested
    :param mu: mean
    :param sig: standard deviation
    :return: d/dx of the log-density, i.e. (mu - x) / sig**2
    """
    return (mu - x) / (sig ** 2)
|