def fill_bin_content(ax, sens, energy_bin, gb, tb):
    """
    Parameters
    ----------

    Returns
    -------
    """
    for i in range(0, gb):
        for j in range(0, tb):
            theta2 = 0.005 + 0.005 / 2 + ((0.05 - 0.005) / tb) * j
            gammaness = 0.1 / 2 + (1 / gb) * i
            text = ax.text(theta2, gammaness, "%.2f %%" % sens[energy_bin][i][j],
                           ha="center", va="center", color="w", size=8)
    return ax
def int_or_none(val):
    """Return an int if we can, otherwise None"""
    try:
        return int(val)
    except (ValueError, TypeError):
        return None
def get_akey(d):
    """Get a key from a given dictionary.

    It returns the first key in d.keys().

    Parameters
    ----------
    d : dict
        Dictionary of objects.

    Returns
    -------
    obj : object
        First item of iter(d.keys()).
    """
    return next(iter(d.keys()))
def augment_with_ancient_samples(g, sampled_demes, sample_times):
    """
    Returns a demography object and new sampled demes where we add
    a branch event for the new sampled deme that is frozen.

    New sampled, frozen demes are labeled "{deme}_sampled_{sample_time}".
    Note that we cannot have multiple ancient sampling events at the same time
    for the same deme (for additional samples at the same time, increase the
    sample size).
    """
    frozen_demes = []
    for ii, (sd, st) in enumerate(zip(sampled_demes, sample_times)):
        if st > 0:
            sd_frozen = sd + f"_sampled_{st}"
            frozen_demes.append(sd_frozen)
            sampled_demes[ii] = sd_frozen
            g.deme(id=sd_frozen, start_time=st, end_time=0, initial_size=1)
            g.branch(parent=sd, child=sd_frozen, time=st)
    return g, sampled_demes, frozen_demes
def build_path(options):
    """
    Build the URI path needed to query the DNSDB API
    :param options: Dictionary
    :return: string
    """
    if options["name"]:
        if options["inverse"]:
            path = "/lookup/rdata/name/{}/{}".format(options["name"], options["type"])
            return path
        else:
            path = "/lookup/rrset/name/{}/{}".format(options["name"], options["type"])
            if options["bailiwick"]:
                path += "/{}".format(options["bailiwick"])
                return path
            return path
    elif options["ip"]:
        options["ip"] = options["ip"].replace("/", ",")
        path = "/lookup/rdata/ip/{}".format(options["ip"])
        return path
    elif options["hex"]:
        path = "/lookup/rdata/raw/{}".format(options["hex"])
        return path
    else:
        raise LookupError("name, ip, or hex was not specified")
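# A minimal usage sketch for build_path above. The option keys match the ones
# the function reads; the concrete values ("example.com", "A") are made-up
# illustrations, not real DNSDB queries.
options = {"name": "example.com", "type": "A", "inverse": False,
           "bailiwick": None, "ip": None, "hex": None}
print(build_path(options))  # /lookup/rrset/name/example.com/A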
def check_uniqueness_in_rows(board: list):
    """
    Check buildings of unique height in each row.

    Return True if buildings in a row have unique length, False otherwise.

    >>> check_uniqueness_in_rows(['***21**', '412453*', '423145*', '*543215', \
'*35214*', '*41532*', '*2*1***'])
    True
    >>> check_uniqueness_in_rows(['***21**', '452453*', '423145*', '*543215', \
'*35214*', '*41532*', '*2*1***'])
    False
    >>> check_uniqueness_in_rows(['***21**', '412453*', '423145*', '*553215', \
'*35214*', '*41532*', '*2*1***'])
    False
    """
    for row in board[1:-1]:
        row = list(row[1:-1])
        while '*' in row:
            row.remove('*')
        if len(set(row)) != len(row):
            return False
    return True
def to_json_type(v):
    """Convert string value to proper JSON type.
    """
    try:
        if v.lower() in ("false", "true"):
            v = (True if v.lower() == "true" else False)
        elif v.isdigit():
            v = int(v)
        elif v.replace(".", "").isdigit():
            v = float(v)
    except AttributeError:
        raise ValueError("Conversion to JSON failed for: %s" % v)
    return v
def _parse_domain_specific_input(input_str):
    """
    "input1, val1; input2, val2;" --> [{"name": "input1", "value": "val1"}, ...]

    Semicolon is the delimiter between param items.
    List values can be passed as extra comma-separated values: the first item
    will be the key, and the rest will be the list.

    :param input_str: "input1, val1; input2, val2; input 3, val3"
    :return:
    """
    def _get_comma_separated_params(key_value_pair_str):
        """
        parse, clean, and add to request object
        :param str key_value_pair_str: expected 'input1, val1'
        :return:
        """
        param_items = key_value_pair_str.split(",", 1)
        param_items = [s.strip() for s in param_items]
        return {"name": param_items[0], "value": ",".join(param_items[1:])}

    params_list = input_str.split(";")
    # The conditional accounts for a trailing semicolon, which results in an empty item from split
    params_list = [_get_comma_separated_params(item) for item in params_list if item]
    return params_list
def to_db_field_type(driver, dtype):
    """
    This method converts the dtype to a field type that the CREATE TABLE
    statement accepts.

    Args:
        driver (str): the DBMS driver type.
        dtype (str): the data type.

    Returns:
        A field type that the CREATE TABLE statement accepts.
    """
    if dtype in ["VARCHAR", "CHAR"]:
        if driver == "mysql":
            return dtype + "(255)"
        else:
            return "STRING"
    else:
        return dtype
def super_make_dirs(path, mode):
    """
    Make directories recursively with specific permissions
    :param path: path to be created
    :param mode: permissions for the directory
    :return:
    """
    import os
    if not path or os.path.exists(path):
        return []
    (head, tail) = os.path.split(path)
    res = super_make_dirs(head, mode)
    try:
        os.mkdir(path, mode=mode)
    except FileExistsError:
        # if file exists already, pass it
        pass
    res += [path]
    return res
def expand_box(box, img_shape, scale=None, padding=None):
    """Expand roi box

    Parameters
    ----------
    box : list
        [x, y, w, h] order.
    img_shape : list
        [width, height]
    scale : float, optional
        Expand roi by scale, by default None
    padding : int, optional
        Expand roi by padding, by default None

    Returns
    -------
    expanded roi : list
        [x, y, w, h] order.
    """
    x, y, w, h = box
    wmax, hmax = img_shape
    if scale is not None:
        xo = max([x - (scale - 1) * w / 2, 0])
        yo = max([y - (scale - 1) * h / 2, 0])
        wo = w * scale
        ho = h * scale
    elif padding is not None:
        xo = max(x - padding, 0)
        yo = max(y - padding, 0)
        wo = w + padding * 2
        ho = h + padding * 2
    else:
        xo, yo, wo, ho = x, y, w, h
    if xo + wo >= wmax:
        wo = wmax - xo - 1
    if yo + ho >= hmax:
        ho = hmax - yo - 1
    return [int(xo), int(yo), int(wo), int(ho)]
def get_insert_string(row):
    """
    Build SQL string for fieldnames and values.
    Parameter 'row' is a dictionary, so we must keep key/value combinations
    together when constructing the string.
    """
    fields = "("
    values = " VALUES("
    for key, val in row.items():
        fields = fields + key + ", "
        if type(val) == int:
            val = str(val)
        values = values + "'" + val + "'" + ", "
    fields = fields[:-2] + ")"
    values = values[:-2] + ")"
    return fields + values
def genLabn(n, s):
    """
    Generate nth string label from a char set s
    """
    l = len(s)
    m = n
    g = ""
    go = True
    while go:
        c = m % l
        g = s[c] + g
        m = m // l
        go = m > 0
        m -= 1
    return g
def counting_sort(A, unused_0=0, unused_1=0):
    """
    Unlike the book, we don't require the max value be passed in and just find
    it using "max". We also just create the output array B locally instead of
    passing it in.
    """
    k = max(A) + 1
    B = [0 for i in range(len(A))]
    C = [0 for i in range(k)]
    for j in range(len(A)):
        C[A[j]] = C[A[j]] + 1
    for i in range(1, k):
        C[i] = C[i] + C[i - 1]
    for j in reversed(range(len(A))):
        # Subtract 1 because we go from [0, length - 1] not [1, length]
        B[C[A[j]] - 1] = A[j]
        C[A[j]] = C[A[j]] - 1
    return B
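# A minimal usage sketch for counting_sort above; it assumes the input contains
# only small non-negative integers, which is what counting sort requires.
print(counting_sort([4, 1, 3, 4, 0, 2, 1]))  # [0, 1, 1, 2, 3, 4, 4]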
def isMetaLogName(filename):
    """Accept also incomplete name to zip file only"""
    return 'meta_' in filename or filename.endswith('.zip')
def _gen_eos_args(state_i, eos_pars):
    """
    Generate a dict that we can pass to EosInterface
    """
    pars = dict(list(state_i.items()) + list(eos_pars.items()))
    for key in ['u_s', 'u_2']:
        if key in pars:
            del pars[key]
    return pars
def hex_to_dec(value):
    """
    'ff' -> 255 ; 'af fe' -> [175, 254] ; ('af', 'fe') -> [175, 254]
    """
    if type(value) in (list, tuple):
        return [hex_to_dec(item) for item in value]
    else:
        value = value.split(' ')
        if len(value) == 1:
            return int(str(value[0]), 16)
        else:
            return hex_to_dec(value)
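# A quick illustration of the three accepted input forms of hex_to_dec above,
# matching the examples in its docstring.
print(hex_to_dec('ff'))          # 255
print(hex_to_dec('af fe'))       # [175, 254]
print(hex_to_dec(('af', 'fe')))  # [175, 254]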
def shuffle_datasets(dataset_dict, dataset_names, idx1, idx2):
    """
    Shuffles two elements, indicated by `idx1` and `idx2`, between two
    datasets in `dataset_dict`.
    """
    for name in dataset_names:
        dataset_dict[name][idx1], dataset_dict[name][idx2] = \
            dataset_dict[name][idx2], dataset_dict[name][idx1]
    return dataset_dict
def pythagorean_distance_equation(path1, path2):
    """Pythagorean Distance Equation.

    Function for counting distance, derived from the Pythagorean theorem.
    """
    # point path dot X1
    dotX1 = path1[0]
    # point path dot X2
    dotX2 = path2[0]
    # point path dot Y1
    dotY1 = path1[1]
    # point path dot Y2
    dotY2 = path2[1]
    # result distance --> revise
    result_distance = ((((dotX2 - dotX1) ** 2) + ((dotY2 - dotY1) ** 2)) ** 0.5)
    # return result
    return result_distance
def br(text):
    """
    {{ text | br }}
    Adds html '<br>' to all '\n' linebreaks.

    :param text: input text to modify
    :type text: str
    :return: modified text
    :rtype: str
    """
    return text.replace("\n", "<br>\n")
def find(pattern, path, directoriesOnly=False):
    """Find all files or directories that match a pattern"""
    import os, fnmatch
    result = []
    for root, dirs, files in os.walk(path):
        if directoriesOnly:
            for name in dirs:
                if fnmatch.fnmatch(name, pattern):
                    result.append(os.path.join(root, name))
        else:
            for name in files:
                if fnmatch.fnmatch(name, pattern):
                    result.append(os.path.join(root, name))
    return result
def get_valid_classes_phrase(input_classes):
    """Returns a string phrase describing what types are allowed
    """
    all_classes = list(input_classes)
    all_classes = sorted(all_classes, key=lambda cls: cls.__name__)
    all_class_names = [cls.__name__ for cls in all_classes]
    if len(all_class_names) == 1:
        return 'is {0}'.format(all_class_names[0])
    return "is one of [{0}]".format(", ".join(all_class_names))
def join(include, exclude):
    """
    Removes all items within list of excluded files from list of included files.

    Args:
        include: list containing directories of all files matching --add=GLOB.
        exclude: list containing directories of all files matching --rm=GLOB.

    Returns:
        include: a final list with all files to which the header will be prepended.
    """
    for excludefile in exclude:
        if excludefile in include:
            include.remove(excludefile)
    return include
def is_script(root, full_file_name):
    """
    Decides whether the current file is a script according to its root and full name
    :param root: path to the file
    :param full_file_name: name of file with extension (if present)
    :return: boolean value, if file is a script
    """
    is_script_result = False
    if '.' in full_file_name:
        file_name, extension = full_file_name.rsplit('.', 1)
        if extension in ["xlsx", "xls"] and file_name.startswith("RobotScenario_"):
            is_script_result = True
    return is_script_result
def add_binary_builtin(*args: str) -> str:
    """
    Implementation using Python's built-in functions
    """
    integer_sum = sum([int(x, 2) for x in args])
    return bin(integer_sum)[2:]
def const_float(value):
    """Create an expression representing the given floating point value."""
    return ['constant', 'float', ['{0:.6f}'.format(value)]]
def draft_window_position(m) -> str:
    """
    One of the named positions you can move the window to
    """
    return "".join(m)
def _get_change_list(state_name, property_name, new_value):
    """Generates a change list for a single state change."""
    return [{
        'cmd': 'edit_state_property',
        'state_name': state_name,
        'property_name': property_name,
        'new_value': new_value
    }]
def longest_initial_palindrome(s: str) -> str:
    """Returns the longest palindrome starting at the beginning of s"""
    # The first two while loops strip off leading and trailing characters equal to s[0].
    # Without this step, the method fails for, e.g.
    #     'abaa', 'aaaabaaaaaaaaaa', 'aaabbcbbaaaaa',
    # where the number of trailing a's is greater than the number of leading a's.
    # while start < end and s[start] == s[0] and s[end] == s[0]:
    #     start += 1
    #     end -= 1
    # while start < end and s[end] == s[0]:
    #     end -= 1

    # Walk backwards through string:
    # 1. If current last letter equals the beginning letter,
    #    we might have a palindrome, so advance the start
    #    pointer to keep checking.
    # 2. If start and end don't match, the current end letter
    #    can't be in the longest palindrome, so reset start to 0.
    # 3. Decrement the end pointer in either case to continue
    #    walking backwards through string.
    # 4. We've found a palindrome when the pointers meet
    #    in the middle.
    start = 0
    end = len(s) - 1
    end_of_initial_run = 0
    while start < end:
        # While the first and last letters match, move start and end
        # pointers simultaneously towards middle.
        if s[start] == s[end]:
            start += 1
            # If we haven't finished the initial run of the first letter,
            # advance the pointer in concert with start. Otherwise, leave
            # it pointing at the end of the initial run.
            if start - 1 == end_of_initial_run and s[start] == s[0]:
                end_of_initial_run = start
        # If the first and last letters don't match, reset start...
        # UNLESS we just finished the initial run of the first letter
        # and the end letter is equal to the first. In this case,
        # we want to keep the start pointer in place and continue
        # moving the end pointer towards the middle until it reaches
        # a letter that is not equal to s[0].
        elif not (start - 1 == end_of_initial_run and s[end] == s[0]):
            start = end_of_initial_run = 0
        # In all cases, we decrement the end pointer.
        end -= 1
    # Suppose the longest palindrome has length k.
    # When the loop exits, either
    #   (1) start == end at the middle index when k is odd, or
    #   (2) start == end + 1 when k is even, with end being
    #       the last index of the first half of the string, and
    #       start being the first index of the second half.
    return s[:start + end + 1]
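# A couple of illustrative calls for longest_initial_palindrome above; the
# expected outputs were traced by hand for these small inputs.
print(longest_initial_palindrome('abacdef'))  # 'aba'
print(longest_initial_palindrome('abaa'))     # 'aba'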
def dict_get(dictionary, key):
    """
    Return the value for the given key or '' if not found.
    """
    return dictionary.get(key, '')
def has_null_coordinates(coord):
    """
    Return True if any (x, y) pair in coord equals (0.0, 0.0), otherwise False.
    """
    for x, y in coord:
        if x == 0.0 and y == 0.0:
            return True
    return False
def part2(input_lines):
    """
    Now, the jumps are even stranger: after each jump, if the offset was three
    or more, instead decrease it by 1. Otherwise, increase it by 1 as before.

    Using this rule with the above example, the process now takes 10 steps,
    and the offset values after finding the exit are left as 2 3 2 3 -1.

    How many steps does it now take to reach the exit?
    """
    jumps = [int(line) for line in input_lines]

    def jump(at):
        to = (at + jumps[at])
        if jumps[at] > 2:
            jumps[at] -= 1
        else:
            jumps[at] += 1
        return to

    pos = 0
    count = 0
    while pos >= 0 and pos < len(jumps):
        count += 1
        pos = jump(pos)
    return count
def print_tuple(*_args, _separator=' '):
    """Prints all the elements in a tuple separated by a given separator, default ' '"""
    return _separator.join(map(lambda _x: str(_x), _args))
def fs_join(*args):
    """Like os.path.join, but never returns '\' chars"""
    from os.path import join
    return join(*args).replace('\\', '/')
def get_namespace_from_type(opengraph_type: str) -> str:
    """
    Returns namespace part from types like 'video.movie'
    :param opengraph_type: opengraph type
    :return: namespace
    """
    return opengraph_type.split('.')[0]
def Varsigma(D_ox, D_red):
    """
    See explanation in C_redox_Estep_semiinfinite()

    Kristian B. Knudsen ([email protected] || [email protected])
    """
    return (D_ox / D_red) ** (1 / 2)
def get_simple_pgs(pg):
    """
    Takes in 2 or 3 3x3 matrices.
    Returns 1 if all have 0's in positions x12 and x13, returns 0 otherwise.
    """
    size = len(pg)
    if size == 2:
        condition12 = (pg[0][0][1] == 0) and (pg[1][0][1] == 0)
        condition13 = (pg[0][0][2] == 0) and (pg[1][0][2] == 0)
        if condition12 and condition13:
            return 1
    if size == 3:
        condition12 = (pg[0][0][1] == 0) and (pg[1][0][1] == 0) and (pg[2][0][1] == 0)
        condition13 = (pg[0][0][2] == 0) and (pg[1][0][2] == 0) and (pg[2][0][2] == 0)
        if condition12 and condition13:
            return 1
    return 0
def is_between_inclusive(x, begin, end):
    """Returns whether x is (in turn order) between begin (inclusive) and end (inclusive).

    This function assumes that x, begin, end are smaller than the number of
    players (and at least 0). If begin == end, this is only true if
    x == begin == end."""
    return begin <= x <= end or end < begin <= x or x <= end < begin
def remove_top_item(values, count):
    """ Remove the top item from a heap with count items and restore its heap property. """
    # Save the top item to return later.
    result = values[0]

    # Move the last item to the root.
    values[0] = values[count - 1]

    # Restore the heap property.
    index = 0
    while True:
        # Find the child indices.
        child1 = 2 * index + 1
        child2 = 2 * index + 2

        # If a child index is off the end of the tree,
        # use the parent's index.
        if child1 >= count:
            child1 = index
        if child2 >= count:
            child2 = index

        # If the heap property is satisfied, we're done.
        if (values[index] >= values[child1]) and \
           (values[index] >= values[child2]):
            break

        # Get the index of the child with the larger value.
        if values[child1] > values[child2]:
            swap_child = child1
        else:
            swap_child = child2

        # Swap with the larger child.
        values[index], values[swap_child] = values[swap_child], values[index]

        # Move to the child node.
        index = swap_child

    # Return the value we removed from the root.
    return result
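# A minimal usage sketch for remove_top_item above. The list is assumed to
# already satisfy the max-heap property, and the caller is responsible for
# treating the heap as one item shorter after the call.
heap = [9, 5, 8, 1, 3, 2]
top = remove_top_item(heap, len(heap))
print(top)       # 9
print(heap[:5])  # [8, 5, 2, 1, 3] -- the remaining max-heap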
def check_bipartite(adj_list):
    """
    Determine if the given graph is bipartite.

    Time complexity is O(|E|)
    Space complexity is O(|V|)
    """
    vertices = len(adj_list)

    # Divide vertexes in the graph into set_type 0 and 1
    # Initialize all set_types as -1
    set_type = [-1 for v in range(vertices)]
    set_type[0] = 0

    queue = [0]
    while queue:
        current = queue.pop(0)

        # If there is a self-loop, it cannot be bipartite
        if adj_list[current][current]:
            return False

        for adjacent in range(vertices):
            if adj_list[current][adjacent]:
                if set_type[adjacent] == set_type[current]:
                    return False
                if set_type[adjacent] == -1:
                    # set type of u opposite of v
                    set_type[adjacent] = 1 - set_type[current]
                    queue.append(adjacent)
    return True
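# A small usage sketch for check_bipartite above. It passes adjacency matrices
# (despite the adj_list name) and assumes the graph is connected, since the
# BFS colouring starts only from vertex 0.
square = [[0, 1, 0, 1],
          [1, 0, 1, 0],
          [0, 1, 0, 1],
          [1, 0, 1, 0]]
triangle = [[0, 1, 1],
            [1, 0, 1],
            [1, 1, 0]]
print(check_bipartite(square))    # True  (a 4-cycle is 2-colourable)
print(check_bipartite(triangle))  # False (an odd cycle is not)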
def italicize(text):
    """Formats markup text into italics."""
    return "*" + str(text) + "*"
def homophones(a, b, phonetic):
    """Checks if two words can be pronounced the same way.

    If either word is not in the pronouncing dictionary, return False

    a, b: strings
    phonetic: map from words to pronunciation codes
    """
    if a not in phonetic or b not in phonetic:
        return False
    return phonetic[a] == phonetic[b]
def remove_parenthesis_break(words_list):
    """Doc."""
    new_words_list = []
    parenthesis_num = 0
    new_words = []
    is_line_end = True
    for words in words_list:
        if not words:
            continue
        new_words += words
        if not words[-1].startswith('//'):
            for word in words:
                if word == '(':
                    parenthesis_num += 1
                elif word == ')':
                    parenthesis_num -= 1
            if parenthesis_num <= 0:
                is_line_end = True
            else:
                is_line_end = False
        else:
            is_line_end = True
        if is_line_end:
            new_words_list.append(new_words)
            new_words = []
    return new_words_list
def hierarchy_reward(agent, world):
    """ reward to encourage emergence of agent hierarchy """
    rew = 0
    return rew
def intersect_all(set1: set, set2: set, set3: set) -> set:
    """Return the intersection of all three of the given sets: set1, set2, and set3.

    >>> intersect_all({1, 2, 3}, {1, 2, 3, "test"}, {1, 2, 6, 7, "test"}) == {1, 2}
    True
    >>> intersect_all(set(), {1, 2, 3}, {1, 2, 3}) == set()
    True
    >>> intersect_all({"a", ""}, {1, 0}, {True, False}) == set()
    True
    >>> intersect_all({'a'}, {"a"}, {chr(97)}) == {'a'}
    True
    """
    return set.intersection(set1, set2, set3)
def utf8_to_str(d):
    """
    Many of the rogue variables are returned as UTF8 formatted int arrays.
    This function changes them from UTF8 to a string

    Args:
    -----
    d (int array) : An integer array with each element equal to a character

    Ret:
    ----
    d_str (str) : The string associated with input d
    """
    return ''.join([str(s, encoding='UTF-8') for s in d])
def wrap(seq, bases=60):
    """
    Print wrapped sequence.

    Args:
        seq (str): Nucleotide sequence
        bases (int): Number of bases to include on each line.
    """
    count = 0
    ret = ''
    for i in seq:
        if count >= bases:
            ret = ret + '\n'
            count = 0
        ret = ret + i
        count += 1
    return ret
def xshift(x, num):
    """Shift x position slightly."""
    return x + num * (x * 0.1)
def _parse_none(arg, fn=None):
    """Parse arguments with support for conversion to None.

    Args:
        arg (str): Argument to potentially convert.
        fn (func): Function to apply to the argument if not converted to None.

    Returns:
        Any: Arguments that are "none" or "0" are converted to None;
            otherwise, returns the original value.
    """
    if arg.lower() in ("none", "0"):
        return None
    return arg if fn is None else fn(arg)
def unique_routes(routes):
    """
    Returns a list with unique routes for all nodes. This function achieves
    this by removing symmetrical routes (A, B, C) == (C, B, A).

    Args:
        routes - a list of routes, as sequences of nodes

    Returns:
        A list with unique routes.
    """
    # Iterate over all routes in reversed order
    for route in list(reversed(routes)):
        # If the reversed route (C, B, A) exists in the list
        if list(reversed(route)) in routes:
            # Remove it from the list
            routes.remove(route)
    return routes
def deferred_value(name, val):
    """
    Safely get a value that may not exist. Raise LookupError on the name if
    the value doesn't exist. This is intended as a helper for getters.
    """
    if val:
        return val
    raise LookupError(f'{name} not ready')
def asURN(epsg):
    """
    convert EPSG code to OGC URN CRS ``urn:ogc:def:crs:epsg::<code>`` notation
    """
    return "urn:ogc:def:crs:epsg::%d" % int(epsg)
def relative_abundance(coverage):
    """
    cov = number of bases / length of genome
    relative abundance = [(cov) / sum(cov for all genomes)] * 100
    """
    relative = {}
    sums = []
    for genome in coverage:
        for cov in coverage[genome]:
            sums.append(0)
        break
    for genome in coverage:
        index = 0
        for cov in coverage[genome]:
            sums[index] += cov
            index += 1
    for genome in coverage:
        index = 0
        relative[genome] = []
        for cov in coverage[genome]:
            if sums[index] == 0:
                relative[genome].append(0)
            else:
                relative[genome].append((cov / sums[index]) * float(100))
            index += 1
    return relative
def _find_best_source_match(
        source_intervals, target_intervals, i, j, intersection_threshold):
    """
    Given that source interval i is the first source interval that intersects
    target interval j, return the index of the first source interval that
    matches target interval j maximally, or `None` if there is no such source
    interval.

    A source interval is said to *match* a target interval iff the ratio of
    the duration of the intersection of the intervals to the minimum of their
    durations is at least `intersection_threshold`. A source interval matches
    a target interval *maximally* iff the ratio is the maximum of such ratios
    over all matching source intervals.
    """

    start_j, end_j = target_intervals[j]
    dur_j = end_j - start_j

    k = i
    best_k = None
    best_fraction = 0
    source_interval_count = len(source_intervals)

    while k != source_interval_count:

        start_k, end_k = source_intervals[k]

        if start_k >= end_j:
            # source interval k follows target interval j
            break

        else:
            # source interval k intersects target interval j
            dur_k = end_k - start_k

            intersection_start = max(start_k, start_j)
            intersection_end = min(end_k, end_j)
            intersection_dur = intersection_end - intersection_start

            min_dur = min(dur_k, dur_j)
            fraction = intersection_dur / min_dur

            if fraction >= intersection_threshold and fraction > best_fraction:
                # source interval k is better match for target interval j
                # than any preceding source interval
                best_k = k
                best_fraction = fraction

        k += 1

    return best_k
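# An illustrative call for _find_best_source_match above, with made-up interval
# data. Source interval 0 barely meets the 0.2 threshold, but interval 1
# overlaps the target more strongly, so index 1 is returned.
source_intervals = [(0, 5), (6, 10)]
target_intervals = [(4, 9)]
print(_find_best_source_match(source_intervals, target_intervals, 0, 0, 0.2))  # 1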
def matchSubdirLevel(absDir, correctRelDir):
    """
    The two input directories are supposed to be to the same level. Find how
    many directory levels are given in correctRelDir, and return the same
    number of levels from the end of absDir. Also return the topDir, which is
    the components of absDir above the level of correctRelativeDir
    """
    numLevels = len(correctRelDir.split('/'))
    absDirComponents = absDir.split('/')
    matchingSubdir = '/'.join(absDirComponents[1:][-numLevels:])
    topDir = '/'.join(absDirComponents[:-numLevels])
    return (matchingSubdir, topDir)
def filetoarray(filename):
    """ filetoarray """
    try:
        with open(filename, "r") as stream:
            return stream.readlines()
    except:
        return []
def conversion_helper(val, conversion):
    """Apply conversion to val. Recursively apply conversion if `val`
    is a nested tuple/list structure."""
    if not isinstance(val, (tuple, list)):
        return conversion(val)
    rtn = [conversion_helper(v, conversion) for v in val]
    if isinstance(val, tuple):
        rtn = tuple(rtn)
    return rtn
def any_dict_value(d):
    """ Returns a value from the specified dictionary.

    This is intended for use with dictionaries of one element, so we do not
    attempt to make this a random choice.
    """
    for v in d.values():
        return v
def str2bool(value):
    """
    Converts a string to a boolean value.
    """
    if type(value) == bool:
        return value
    return value and value.lower() in ('yes', 'true', 't', '1', 'y')
def hill_langmuir_equation(l, kd):
    """Hill-Langmuir receptor occupation equation.

    Args:
        l (float, numpy.array): The input concentration of a ligand in
            concentration units.
        kd (float): The ligand-receptor dissociation constant (or its
            effective value) in concentration units.
            Bounds for fitting: 0 <= kd <= inf

    Returns:
        float, numpy.array : The fractional receptor occupation for the given
            ligand concentration; unitless, range [0,1].
    """
    return l / (l + kd)
def is_number(s):
    """
    Helper method to determine if this string can be cast to a float
    :param s: String to attempt to turn into a float
    :return: True or False
    """
    try:
        float(s)
        return True
    except ValueError:
        return False
def e_d(val):
    """energy divisor"""
    if val == 0:
        return 10000
    elif val == 1:
        return 1000
    elif val == 2:
        return 100
    elif val == 3:
        return 10
    else:
        return None
def has_func(obj, fun):
    """check if a class has specified function: https://stackoverflow.com/a/5268474

    Args:
        obj: the class to check
        fun: specified function to check

    Returns:
        A bool to indicate if obj has function "fun"
    """
    check_fun = getattr(obj, fun, None)
    return callable(check_fun)
def create_board_comp(r, c):
    """very short now! - list comprehension"""
    return [[' ' for _ in range(c)] for _ in range(r)]
def titlecase(name):
    """Title-case body `name` if it looks safe to do so."""
    return name if name.startswith(('1', 'C/', 'DSS-')) else name.title()
def _save_dict(obj):
    """Recursively save the dict."""
    py_dict = type(obj)()
    for k, v in obj.items():
        if isinstance(v, dict):
            py_dict[k] = _save_dict(v)
        elif hasattr(v, 'numpy'):
            py_dict[k] = getattr(v, 'numpy')()
        else:
            py_dict[k] = v
    return py_dict
def new_compliance(pos_a, pos_b, char, password):
    """
    Count the characters of a password for compliance with the puzzle

    Args:
        pos_a: One of two positions the character must appear in
        pos_b: One of two positions the character must appear in
        char: The character
        password: The password

    Returns:
        test_result: Pass/fail bool of puzzle
    """
    test_result = 0
    if password[pos_a - 1] == char or password[pos_b - 1] == char:
        if password[pos_a - 1] != password[pos_b - 1]:
            test_result = 1
    return test_result
def my_acc(scoreList, rightOne, n=None):
    """Accuracy for Root Cause Analysis with multiple causes.

    Refined from the Acc metric in TBAC paper.
    """
    node_rank = [_[0] for _ in scoreList]
    if n is None:
        n = len(scoreList)
    s = 0.0
    for i in range(len(rightOne)):
        if rightOne[i] in node_rank:
            rank = node_rank.index(rightOne[i]) + 1
            s += (n - max(0, rank - len(rightOne))) / n
        else:
            s += 0
    s /= len(rightOne)
    return s
def map_rcs_to_JW(nh, nv, row, col, spin):
    """Mapping (row, column, spin-type) to Jordan-Wigner encoding.

    Args:
        nh -- number of horizontal sites
        nv -- number of vertical sites
        row -- row location of the qubit in the lattice
        col -- column location of the qubit in the lattice
        spin -- spin-type: up or down

    Returns:
        Number of Jordan-Wigner encoded qubit
    """
    col_adjust = col
    if (row % 2 == 1):
        col_adjust = nh - 1 - col
    return row * nh + col_adjust + nh * nv * spin
def is_numeric(obj) -> bool:
    """
    Test if obj is numeric
    :param obj: test object
    :return: is numeric
    """
    try:
        obj + 0
        return True
    except TypeError:
        return False
def newman_conway(num):
    """ Returns a list of the Newman Conway numbers for the given value.

        Time Complexity: O(n)
        Space Complexity: O(n)
    """
    if num <= 0:
        raise ValueError("num cannot be less than 1")
    memo = [0] * (num + 1)
    memo[0] = 0
    memo[1] = 1
    if num > 1:
        memo[2] = 1
    x = 3
    while x <= num:
        memo[x] = memo[memo[x - 1]] + memo[x - memo[x - 1]]
        x += 1
    memo.pop(0)
    memo = [str(n) for n in memo]
    return " ".join(memo)
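# A small check of newman_conway above against the known start of the
# sequence, P(1..6) = 1, 1, 2, 2, 3, 4.
print(newman_conway(6))  # "1 1 2 2 3 4"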
def copyto_emitter(target, source, env):
    """
    changes the path of the source to be under the target (which are assumed
    to be directories).
    """
    n_target = []
    for t in target:
        n_target = n_target + [t.File(str(s)) for s in source]
    return (n_target, source)
def norm_int_dict(int_dict):
    """Normalizes values in the given dict with int values.

    Parameters
    ----------
    int_dict : dict
        A dict object mapping each key to an int value.

    Returns
    -------
    dict
        A dict where each key is mapped to its relative part in the sum of
        all dict values.

    Example
    -------
    >>> dict_obj = {'a': 3, 'b': 5, 'c': 2}
    >>> result = norm_int_dict(dict_obj)
    >>> print(sorted(result.items()))
    [('a', 0.3), ('b', 0.5), ('c', 0.2)]
    """
    norm_dict = int_dict.copy()
    val_sum = sum(norm_dict.values())
    for key in norm_dict:
        norm_dict[key] = norm_dict[key] / val_sum
    return norm_dict
def kdiff(str1, str2, k=10):
    """
    Computes a kmer difference between two strings
    """
    cnt = 0.0
    for i in range(len(str1) - k):
        cnt += 1 if str2.find(str1[i:i + k]) < 0 else 0
    return (cnt / (len(str1) - k))
def set_field_value(value, default=None):
    """
    Parameters
    ----------
    value : str, int, float, object
    default : str, int, float, object, default None

    Returns
    -------
    value : str, int, float, object
    """
    try:
        return value
    except:
        return default
def footer(id1, id2=None):
    """
    Build SMT formula footer

    Args:
        id1 (str): ID of policy 1 in SMT formula
        id2 (str, optional): ID of policy 2 in SMT formula. Defaults to None.

    Returns:
        str: SMT footer
    """
    smt = '(assert {}.allows)\n'.format(id1)
    if id2:
        smt += '(assert (or {0}.denies {0}.neutral))\n'.format(id2)
    smt += '(check-sat)\n'
    smt += '(get-model)\n'
    return smt
def paddedsize(w, l):
    """
    PADDEDSIZE Computes padded sizes useful for FFT-based filtering.

    PQ = PADDEDSIZE(AB), where AB is a two-element size vector, computes the
    two-element size vector PQ = 2*AB.
    """
    return (2 * w, 2 * l)
def add_stop(utt):
    """
    Add full stops at the end of the utterances
    """
    if utt[-1] not in '.,!:;?':
        utt = utt + ' .'
    return utt
def invert(port_array, placeholder):
    """
    Transforms an array of ports into a dictionary, referenced by port number
    """
    if port_array == placeholder:
        return port_array
    prev = {}
    for p in port_array:
        prev[p['port_no']] = p
    return prev
def callable(obj):
    """Return whether the object is callable (i.e., some kind of function).

    Note that classes are callable, as are instances with a __call__() method.
    """
    return hasattr(obj, "__call__")
def lambda_delete_question(sentences):
    """
    lambda function in pandas to delete sentences in postings with question marks
    """
    new_sentences = []
    for sentence in sentences:
        if sentence.find(r'?') == -1:
            new_sentences.append(sentence)
    return new_sentences
def count(article):
    """Function to count words from title occurring inside the article body"""
    numberOfWords = 0
    heading = article[0].split()
    for word in heading:
        if word in article[1]:
            numberOfWords += 1
    return numberOfWords / len(heading)
def extract_ids(objects_or_ids):
    """Return a list of ids given either objects with ids or a list of ids."""
    try:
        ids = [obj.id for obj in objects_or_ids]
    except:
        ids = objects_or_ids
    return ids
def concatenate_dirs(dir1, dir2):
    """
    Appends dir2 to dir1. It is possible that either/both dir1 or/and dir2 is/are None
    """
    result_dir = dir1 if dir1 is not None and len(dir1) > 0 else ''
    if dir2 is not None and len(dir2) > 0:
        if len(result_dir) == 0:
            result_dir = dir2
        else:
            result_dir += '/' + dir2
    return result_dir
def create_gitbom_doc_text(infiles, db):
    """
    Create the gitBOM doc text contents
    :param infiles: the list of checksum for input files
    :param db: gitBOM DB with {file-hash => its gitBOM hash} mapping
    """
    if not infiles:
        return ''
    lines = []
    for ahash in infiles:
        line = "blob " + ahash
        if ahash in db:
            gitbom_hash = db[ahash]
            line += " bom " + gitbom_hash
        lines.append(line)
    lines.sort()
    return '\n'.join(lines) + '\n'
def remove_suffix(string: str, suffix: str) -> str:
    """
    Remove suffix from string.

    Parameters
    ----------
    string
        Given string to remove suffix from.
    suffix
        Suffix to remove.

    Raises
    ------
    AssertionError
        If string doesn't end with given suffix.
    """
    if string.endswith(suffix):
        string = string[: -len(suffix)]
    else:  # pragma: nocover
        raise AssertionError(f"{string} doesn't end with suffix {suffix}")
    return string
def fillIfNeeded(A, B):
    """Fill A or B with zeros if not the same size"""
    A = list(A)[:]
    B = list(B)[:]
    if len(A) < len(B):
        A += [0] * (len(B) - len(A))
    if len(B) < len(A):
        B += [0] * (len(A) - len(B))
    return A, B
def mocking_case(text):
    """Create mocking case text"""
    value = ""
    prev = ""
    for character in text:
        value += character.upper() if prev.islower() else character.lower()
        prev = value[-1:] if character.isalpha() else prev
    return value
def extractIdFromResponse(requestName, htmlPage):
    """extractIdFromResponse.

    Args:
        requestName:
        htmlPage:
    """
    psRequestIdPrefix = 'Request Name: &nbsp; '
    pssServerIdPrefix = 'Submitted Request ID:&nbsp; '
    errorText = 'Error:'
    pssServerId = -1
    if (htmlPage.find(errorText) > 0 or htmlPage.find(requestName) == -1 or
            htmlPage.find(psRequestIdPrefix) == -1 or
            htmlPage.find(pssServerIdPrefix) == -1):
        print("Cannot continue... Something went wrong.")
    else:
        # Extract a subset of the page that contains our request ID.
        # This is done first, to make doubly sure that the PSS server ID
        # we pick up is the one related to our request.
        truncatedPage = htmlPage[htmlPage.find(psRequestIdPrefix) - len(pssServerIdPrefix) - 10:
                                 htmlPage.find(psRequestIdPrefix) + len(psRequestIdPrefix) + len(requestName)]
        # Now extract the PSS server ID
        idString = truncatedPage[truncatedPage.find(pssServerIdPrefix) + len(pssServerIdPrefix):
                                 truncatedPage.find(psRequestIdPrefix)]
        try:
            pssServerId = int(idString)
        except ValueError:
            print("Error: Cannot convert string: '%s' to an integer." % idString)
    return pssServerId
def fitness(bit_string):
    """
    Toy fitness function, returns 2^x, where x is the number of 'one' bits in
    the bit string.
    """
    return 2 ** sum(bit_string)
def red(s):
    """
    Decorates the specified string with character codes for red text.

    :param s: Text to decorate with color codes.
    :return: String containing original text wrapped with color character codes.
    """
    return '\033[91m{}\033[0m'.format(s)
def dict_to_table(_dict):
    """
    Examples
    --------
    {
        "head": ["PC", "Mac", "Windows", "Ubuntu"],
        "body": [
            ["X1", "x", "o", "o"],
            ["Surface", "x", "o", "o"],
            ["MacBook Air", "o", "o", "o"]
        ]
    }
    """
    dict_table = ""
    head = _dict["head"]
    dict_table += "|" + "|".join([str(n) for n in head]) + "|\n"
    dict_table += "|" + "".join(["-|" for i in range(len(head))]) + "\n"
    body = _dict["body"]
    for row in body:
        dict_table += "|" + "|".join([str(n) for n in row]) + "|\n"
    return dict_table
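# A usage sketch for dict_to_table above, using a cut-down version of the dict
# from its docstring.
table = dict_to_table({"head": ["PC", "Mac", "Windows", "Ubuntu"],
                       "body": [["X1", "x", "o", "o"]]})
print(table)
# |PC|Mac|Windows|Ubuntu|
# |-|-|-|-|
# |X1|x|o|o|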
def create_blank_world(size_x=20, size_y=20, character=". "):
    """
    Creates a blank world out of size * size dots
    """
    world = []
    for line in range(size_y):
        world.append([])
        world[line] = size_x * [character]
    return world
def spatpix_frame_to_ref(spatpix, frame='dms', subarray='SUBSTRIP256', oversample=1):
    """Convert spatpix from an arbitrary frame to nat coordinates in SUBSTRIP256.

    :param spatpix: spatpix coordinates in an arbitrary coordinate frame.
    :param frame: the input coordinate frame.
    :param subarray: the input coordinate subarray.
    :param oversample: the oversampling factor of the input coordinates.

    :type spatpix: array[float]
    :type frame: str
    :type subarray: str
    :type oversample: int

    :returns: spatpix_ref - the input coordinates transformed to nat
        coordinate frame and SUBSTRIP256 subarray.
    :rtype: array[float]
    """
    if (frame == 'nat') & (subarray == 'SUBSTRIP256'):
        spatpix_ref = spatpix
    elif (frame == 'dms') & (subarray == 'SUBSTRIP256'):
        spatpix_ref = 255 * oversample - spatpix
    elif (frame == 'sim') & (subarray == 'SUBSTRIP256'):
        spatpix_ref = spatpix
    elif (frame == 'nat') & (subarray == 'SUBSTRIP96'):
        spatpix_ref = spatpix + 150 * oversample
    elif (frame == 'dms') & (subarray == 'SUBSTRIP96'):
        spatpix_ref = 245 * oversample - spatpix
    elif (frame == 'sim') & (subarray == 'SUBSTRIP96'):
        spatpix_ref = spatpix + 150 * oversample
    else:
        raise ValueError('Unknown coordinate frame or subarray: {} {}'.format(frame, subarray))
    return spatpix_ref
def set_rcParams(font_size=10, line_width=1):
    """Set rcParam of matplotlib."""
    parameters = {
        # Use LaTeX to write all text
        "text.usetex": True,
        "font.family": "serif",
        # Use 10pt font in plots, to match 10pt font in document
        "axes.labelsize": font_size,
        "font.size": font_size,
        # Make the legend/label fonts a little smaller
        "legend.fontsize": font_size,
        "xtick.major.width": 0.5,
        "xtick.labelsize": font_size - 2,
        "ytick.major.width": 0.5,
        "ytick.labelsize": font_size - 2,
        # Line properties
        "lines.linewidth": line_width,
        "axes.linewidth": 0.5,
        "grid.linewidth": 0.25,
    }
    return parameters
def clamp(low, high, val):
    """
    Clamps an integral type to some interval.

    Parameters
    ----------
    low: float
        lower bound
    high: float
        upper bound
    val: float
        value to clamp

    Returns
    -------
    float
        clamped value
    """
    return low if val < low else (high if val > high else val)
def _run_function(function, *args):
    """
    Since eval() function only evaluates functions written in the source code,
    _run_function is at eval() scope and makes it possible to run
    FunctionVariables functions.
    """
    return function(*args)
def map_range(x, in_min, in_max, out_min, out_max):
    """Map value from one range to another."""
    return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
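# A quick illustration of map_range above; the ADC scenario is only an assumed
# example of rescaling one numeric range onto another.
print(map_range(512, 0, 1023, 0.0, 3.3))  # ~1.65, a 10-bit ADC reading in volts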
def create_annotation_info(id, image_id, category_id, bbox):
    """
    Create an annotation to the image id
    """
    return {
        "id": id,
        "image_id": image_id,
        "category_id": category_id,
        "bbox": bbox,
        "iscrowd": 0
    }
def bak_base_name(bak_name):
    """
    Takes a backup name and returns the base name.

    Example:
        Backup name = "test_bak_to_git.py.20211201_134317.bak"
        Base name   = "test_bak_to_git.py"
    """
    return bak_name.rsplit(".", 2)[0]