content
stringlengths
42
6.51k
def toUnicode(input):
    """Convert a string to its UTF-16LE ("unicode") byte representation.

    Arguments:
        input -- the source string
    Return:
        The UTF-16LE encoding of *input* (bytes). If encoding is not
        possible, falls back to naively widening each character with a
        NUL byte (correct only for code points < 256), returning str as
        the original did.
    """
    # Narrowed from a bare `except`, which also swallowed
    # KeyboardInterrupt/SystemExit; only encoding-related failures
    # should trigger the fallback.
    try:
        return input.encode('UTF-16LE')
    except (AttributeError, UnicodeError):
        # str.join avoids the original quadratic `+=` concatenation.
        return ''.join(ch + '\x00' for ch in input)
def generate_html(fields, pidx, appdata):
    """Fun to be had here!

    Emits one "type: <t>" fragment per entry of appdata['arguments'];
    fields and pidx are currently unused.
    """
    fragments = ["type: %s" % (arg['type'],) for arg in appdata['arguments']]
    return "".join(fragments)
def check_blank_before_after_class(class_docstring, context, is_script):
    """Class docstrings should have one blank line around them.

    Splits *context* around the docstring text and inspects the blank
    lines immediately before and after it. Returns True when the
    convention is violated, None when it holds or when there is no
    docstring (same contract as before; is_script is unused).
    """
    if not class_docstring:
        return
    before, after = context.split(class_docstring)
    blanks_before = [line.strip() == '' for line in before.split('\n')]
    blanks_after = [line.strip() == '' for line in after.split('\n')]
    # Expect: class header line, one blank line, then the docstring indent.
    if blanks_before[-3:] != [False, True, True]:
        return True
    # Expect one blank line after, unless only blank lines follow.
    if not all(blanks_after) and blanks_after[:3] != [True, True, False]:
        return True
def get_odd_predecessor(odd_int, index, k=3):
    """
    Calculate the index-th odd predecessor of *odd_int* in a Collatz
    graph for the multiplication factor *k*.

    For every odd number there are n predecessors; *index* in [0..n]
    selects which one is returned. Works only for k in (1,3,5,7,9).

    :param odd_int: The node for which the predecessor is calculated.
    :param index: The index of the predecessor as int [0..n].
    :param k: The factor by which odd numbers are multiplied
        in the sequence (default is 3).
    :return: The predecessor or None if no predecessor exists.
    """
    assert odd_int > 0, "Value > 0 expected"
    mod_result = odd_int % 2
    assert mod_result == 1, "Not an odd number"
    # Multiples of k (k > 1) have no predecessor at all.
    if k > 1 and odd_int % k == 0:
        return None
    if k in (1, 3):
        step = 1 if k == 1 else 2
        exponent = (k - odd_int % k) + step * index
        return (odd_int * 2 ** exponent - 1) // k
    # For k in (5, 7, 9) the base power depends on odd_int mod k;
    # None marks residues with no predecessor.
    tables = {
        5: (4, {0: None, 3: 1, 4: 2, 2: 3, 1: 4}),
        7: (3, {0: None, 1: 3, 2: 2, 3: None, 4: 1, 5: None, 6: None}),
        9: (6, {0: None, 1: 6, 2: 5, 3: None, 4: 4,
                5: 1, 6: None, 7: 2, 8: 3}),
    }
    if k not in tables:
        raise TypeError("Parameter k not in (1,3,5,7,9)")
    step, power_dict = tables[k]
    power = power_dict[odd_int % k]
    if power:
        return (odd_int * 2 ** (power + step * index) - 1) // k
    return None
def get_hemisphere(lat):
    """Return 'S' for a negative latitude, otherwise 'N'."""
    return 'S' if lat < 0.0 else 'N'
def make_str(ppddl_tree, level=0):
    """Render a PPDDL tree as an indented s-expression string."""
    if not ppddl_tree:
        return ''
    # Two spaces of indent per nesting level.
    indent = '  ' * level
    result = indent + '('
    indent += ' '
    last = len(ppddl_tree) - 1
    for i, element in enumerate(ppddl_tree):
        if isinstance(element, list):
            # Nested sub-tree: recurse one level deeper on a new line.
            result += '\n' + make_str(element, level + 1)
        else:
            # Keywords (":something") after the head start a new line.
            if element.startswith(':') and i != 0:
                result += '\n' + indent
            result += element
            if i != last:
                result += ' '
            # An empty :parameters list is printed explicitly as '()'.
            if element == ':parameters' and ppddl_tree[i + 1] == []:
                result += '()'
    return result + ') '
def flip_ctrlpts2d(ctrlpts2d, size_u=0, size_v=0):
    """ Flips a list of surface 2-D control points from *[u][v]* order
    to *[v][u]* order.

    :param ctrlpts2d: 2-D control points
    :type ctrlpts2d: list, tuple
    :param size_u: size in U-direction (row length)
    :type size_u: int
    :param size_v: size in V-direction (column length)
    :type size_v: int
    :return: flipped 2-D control points
    :rtype: list
    """
    if size_u <= 0 or size_v <= 0:
        # Infer the grid shape from the nested list itself.
        size_u = len(ctrlpts2d)
        size_v = len(ctrlpts2d[0])
    return [
        [[float(coord) for coord in ctrlpts2d[u][v]] for u in range(size_u)]
        for v in range(size_v)
    ]
def get_hgvs(gene_obj):
    """Analyse a gene object and extract HGVS descriptions.

    Scans the gene's transcripts for the canonical one and takes its
    coding-sequence and protein-sequence names.

    :param gene_obj: dict with an optional "transcripts" list
    :return: (hgvs_nucleotide, hgvs_protein); defaults are "-" and ""
        when no canonical transcript is present
    """
    hgvs_nucleotide = "-"
    hgvs_protein = ""
    # Guard against a missing or None "transcripts" entry — the
    # original crashed with a TypeError iterating None.
    transcripts_list = gene_obj.get("transcripts") or []
    for transcript_obj in transcripts_list:
        if transcript_obj.get("is_canonical") is True:
            hgvs_nucleotide = str(transcript_obj.get("coding_sequence_name"))
            hgvs_protein = str(transcript_obj.get("protein_sequence_name"))
    return (hgvs_nucleotide, hgvs_protein)
def to_modify_quota(input_quota, array_quota, array_include_overhead):
    """
    :param input_quota: Threshold limits dictionary passed by the user.
    :param array_quota: Threshold limits dictionary got from the Isilon Array
    :param array_include_overhead: Whether Quota Include Overheads or not.
    :return: True if the quota is to be modified else returns False.
    """
    overheads = input_quota['include_overheads']
    if overheads is not None and overheads != array_include_overhead:
        return True
    # Any user-supplied limit that differs from the array value
    # requires a modification.
    return any(
        limit in array_quota
        and input_quota[limit] is not None
        and input_quota[limit] != array_quota[limit]
        for limit in input_quota
    )
def generate_clustered_data(stories, clusters):
    """ Build per-cluster story dictionaries.

    clusters is a dict mapping a cluster index to story IDs as stored
    in MongoDB. The result is a list of dicts, one per cluster, keyed
    by story id with the story content as value, plus a 'clust_id' key.
    """
    clustered = []
    for cluster_id, story_ids in clusters.items():
        entry = {'clust_id': cluster_id}
        for story_id in story_ids:
            entry[story_id] = stories[story_id]
        clustered.append(entry)
    return clustered
def get_transcripts_from_tree(chrom, start, stop, cds_tree):
    """Obtain transcript IDs from genomic coordinates via the CDS tree.

    chrom: (String) chrom/contig to search.
    start: (Int) start position (inclusive).
    stop: (Int) end position (exclusive).
    cds_tree: (Dict) maps chrom IDs to IntervalTree() objects whose
        intervals carry transcript IDs in their .data attribute.

    Return value: list of unique matching transcript IDs; [] when the
    chrom is not indexed.
    """
    if chrom not in cds_tree:
        return []
    # Interval coordinates are inclusive of start, exclusive of stop.
    matches = {interval.data
               for interval in cds_tree[chrom].overlap(start, stop)}
    return list(matches)
def average(numbers):
    """ Return the average (arithmetic mean) of a sequence of numbers. """
    total = sum(numbers)
    return total / float(len(numbers))
def remove_error_clusters(all_clusters, node_paired_unpaired, case):
    """Drop clusters with activation beyond 4950 ms.

    1. Arguments:
       "all_clusters" = list of all clusters for the case under study
       "node_paired_unpaired" = complete set of data on the paired,
       unpaired linear and unpaired nonlinear instants of all nodes
       "case" = the case no. under study
    2. Returns:
       "edit_clusters" = list of clusters obtained after deletion
    3. Use: clusters whose maximum activation time lies beyond 4950 ms
       (range of activation assumed 0-5000 ms) are discarded.
    """
    edit_clusters = []
    for cluster in all_clusters:
        keep = True
        for node in cluster:
            node_data = node_paired_unpaired[case][node]
            # Build a fresh list: the original used list.extend on the
            # stored 'unpaired linear' list, silently corrupting the
            # caller's data structure on every invocation.
            times = (node_data['unpaired linear']
                     + node_data['unpaired non-linear'])
            if times and max(times) > 4950:
                keep = False
                break
        if keep:
            edit_clusters.append(cluster)
    return edit_clusters
def got_all_step_funcs(event, debug=False):
    """ Check if all step function arns found.

    Args:
        event (dict): Step functions arns are set as they are found.
        debug (bool): Print debug messages.

    Returns:
        (bool): True if all step functions found.
    """
    # Each required ARN key follows the '<name>-arn' pattern.
    required = (
        'delete-sfn',
        'query-deletes-sfn',
        'delete-exp-sfn',
        'delete-coord-frame-sfn',
        'delete-coll-sfn',
    )
    for name in required:
        if event[name + '-arn'] is None:
            if debug:
                print('No ' + name)
            return False
    return True
def eval_metric(results, params):
    """Exact-match accuracy evaluation.

    NOTE: the original docstring said "BLEU", but this computes plain
    exact-match accuracy (percentage of predictions equal to gold).

    :param results: iterable of dicts with 'pred_answer' and
        'gold_answer' keys
    :param params: unused; kept for interface compatibility
    :return: accuracy as a percentage (float)
    """
    crr_cnt, total_cnt = 0, 0
    for result in results:
        total_cnt += 1
        if result['pred_answer'] == result['gold_answer']:
            crr_cnt += 1
    return crr_cnt * 100. / total_cnt
def get_sort_params(value, default_key=None):
    """Parse a string into a list of sort_keys and a list of sort_dirs.

    :param value: A string that contains the sorting parameters
        ("key:dir,key:dir,...").
    :param default_key: An optional key set as the default sorting key
        when no sorting option value is specified.
    :return: A list of sorting keys and a list of sorting dirs.
    """
    if not value:
        if default_key:
            # Default gets nulls-first plus a deterministic id tiebreak.
            return [default_key, 'id'], ['asc-nullsfirst', 'asc']
        return ['id'], ['asc']
    keys, dirs = [], []
    for spec in value.split(','):
        key, _, direction = spec.partition(':')
        direction = direction or 'asc'
        nulls = 'nullsfirst' if direction == 'asc' else 'nullslast'
        keys.append(key)
        dirs.append('{}-{}'.format(direction, nulls))
    # Always append 'id' as a stable tiebreaker.
    if 'id' not in keys:
        keys.append('id')
        dirs.append('asc')
    return keys, dirs
def _reverse_map(dictionary) -> dict:
    """ Inverts a map of single or iterable value types.

    {a: b, c: d} becomes {b: a, d: c}; iterable values fan out so
    {a: [b, c]} becomes {b: a, c: a}.
    """
    inverted = {}
    for key, value in dictionary.items():
        if isinstance(value, (list, set, frozenset, tuple)):
            for item in value:
                inverted[item] = key
        else:
            inverted[value] = key
    return inverted
def strip_library(name):
    """Drop everything up to and including the last '/'.

    >>> strip_library("fuchsia.device/MAX_DEVICE_NAME_LEN")
    'MAX_DEVICE_NAME_LEN'
    >>> strip_library("SomethingGreat")
    'SomethingGreat'
    """
    _, _, bare = name.rpartition('/')
    return bare
def compute_all_relationships(scene_struct, eps=0.2):
    """
    Computes relationships between all pairs of objects in the scene.

    Returns a dictionary mapping string relationship names to lists of
    lists of integers, where output[rel][i] gives the object indices
    that have relationship rel with object i. For example if j is in
    output['left'][i] then object j is left of object i.
    """
    relationships = {}
    objects = scene_struct['objects']
    for name, direction_vec in scene_struct['directions'].items():
        # Vertical relations are handled elsewhere.
        if name in ('above', 'below'):
            continue
        per_object = []
        for i, obj1 in enumerate(objects):
            base = obj1['3d_coords']
            related = []
            for j, obj2 in enumerate(objects):
                if obj1 == obj2:
                    continue
                other = obj2['3d_coords']
                # Project the displacement onto the direction vector.
                projection = sum(
                    direction_vec[axis] * (other[axis] - base[axis])
                    for axis in (0, 1, 2)
                )
                if projection > eps:
                    related.append(j)
            per_object.append(sorted(related))
        relationships[name] = per_object
    return relationships
def _normalize(string):
    """Returns the canonical form of a color name.

    Keeps only alphabetic characters, then lowercases the result.
    """
    letters = [ch for ch in string if ch.isalpha()]
    return ''.join(letters).lower()
def get_vlan_ranges(begin_range, end_range):
    """Expands a vlan range from vlan pools into a list of id strings
    (inclusive of both endpoints)."""
    return [str(vlan) for vlan in range(int(begin_range), int(end_range) + 1)]
def anySizeToBytes(size_string):
    """ Convert a string like '1 KB' to '1024' (bytes).

    Accepts both '1 KB' and '1KB'; returns -1 when the string cannot
    be split into a number and a unit.
    """
    # Preferred form: "<number> <unit>" separated by whitespace.
    try:
        number, unit = size_string.split()
    except Exception:
        # Fallback: peel trailing alphabetic unit off the raw string.
        try:
            number = size_string.strip()
            unit = ''.join(ch for ch in number if ch.isalpha())
            if unit:
                number = number[:-len(unit)]
        except Exception:
            return -1
    if len(number) == 0:
        return -1
    value = float(number)
    if len(unit) == 0:
        return int(value)
    # Binary prefixes keyed by the first letter of the unit.
    exponents = {'T': 40, 'G': 30, 'M': 20, 'K': 10}
    shift = exponents.get(unit.upper()[0])
    if shift is not None:
        value = value * 2 ** shift
    return int(value)
def remove_empty_columns(orig_cols):
    """Remove columns with <= 1 non-empty cells (a cell is non-empty
    when cell[1] is truthy)."""
    return [col for col in orig_cols
            if sum(1 for cell in col if cell[1]) >= 2]
def safe_int_cast(val, default=0):
    """Safely casts a value to an int, returning *default* when the
    value is not convertible (bad string, None, ...)."""
    try:
        result = int(val)
    except (ValueError, TypeError):
        result = default
    return result
def stats(index_and_true_and_retrieved):
    """ Returns accuracy stats for one sample.

    index_and_true_and_retrieved is (index, true, retrieved):
        index: tracks which sample the stats belong to
        true: set of true instances
        retrieved: set of retrieved instances

    Return value: (index, (relevant, retrieved, overlap, precision,
    recall, f-score)); degenerate ratios (division by zero) map to 1.
    """
    index, true_set, retrieved_set = index_and_true_and_retrieved
    relevant = len(true_set)
    retrieved = len(retrieved_set)
    overlap = len(true_set.intersection(retrieved_set))
    precision = float(overlap) / retrieved if retrieved else 1
    recall = float(overlap) / relevant if relevant else 1
    denominator = precision + recall
    fscore = 2 * precision * recall / denominator if denominator else 1
    return (index, (relevant, retrieved, overlap, precision, recall, fscore))
def word_score(word, opt=None):
    """ Count up the score of a word. a=1, b=2, c=3

    Args:
        word: the word to get the score of
        opt: if opt does not equal None the scale is reversed
            (z=1 and a=26)

    Returns:
        The score of the word

    Raises:
        KeyError: character is invalid (not a lowercase a-z letter)
    """
    # Build the letter-value table once instead of the original pair of
    # hand-written 26-entry dicts (duplicated, typo-prone).
    values = {c: i for i, c in
              enumerate('abcdefghijklmnopqrstuvwxyz', start=1)}
    if opt is None:
        return sum(values[c] for c in word)
    # Reversed scale: a=26 .. z=1, i.e. 27 - forward value.
    return sum(27 - values[c] for c in word)
def merge(dest, source):
    """In-place, recursive merge of two dictionaries.

    Nested dicts are merged recursively; any other value from source
    overwrites the one in dest. Returns dest.
    """
    for key, value in source.items():
        existing = dest.get(key)
        if isinstance(existing, dict) and isinstance(value, dict):
            merge(existing, value)
        else:
            dest[key] = value
    return dest
def to_list(obj):
    """Wrap obj in a list unless it is already a list or tuple
    (tuples are returned unchanged, matching the original)."""
    if isinstance(obj, (list, tuple)):
        return obj
    return [obj]
def euclideanDistance(a, b):
    """Euclidean distance between two 2-D points given as tuples."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return (dx ** 2 + dy ** 2) ** .5
def filter_batch(batch, i):
    """Check whether sample i should be included (its quality-filter
    flag must be exactly True, not merely truthy)."""
    flag = batch["passed_quality_filter"][i]
    return flag is True
def allow(perm):
    """ Predicate for allowed permutations.

    Cards are compared by their str() representation; nigiri must
    appear in value order egg < salmon < squid and maki in count order
    1 < 2 < 3.
    """
    # leaves us with 1108800/39916800 permutations
    order = {str(card): position for position, card in enumerate(perm)}
    ordered_pairs = (
        ('NigiriCard("egg")', 'NigiriCard("salmon")'),
        ('NigiriCard("salmon")', 'NigiriCard("squid")'),
        ('MakiCard("2")', 'MakiCard("3")'),
        ('MakiCard("1")', 'MakiCard("2")'),
    )
    return all(order[first] <= order[second]
               for first, second in ordered_pairs)
def moveZerosToEnd(arr):
    """Move all zeros to the end of arr in place; returns arr.

    Two-pointer sweep: 'write' rests on the leftmost zero seen so far,
    'read' scans ahead for non-zeros to swap back. Relative order of
    the non-zero elements is preserved. O(n) time, O(1) space.
    """
    n = len(arr)
    # Advance 'write' to the first zero (nothing to do before it).
    write = 0
    while write < n and arr[write] != 0:
        write += 1
    read = write + 1
    while write < n and read < n:
        if arr[read] == 0:
            # No point swapping zeros with each other; skip ahead.
            read += 1
        else:
            arr[write], arr[read] = arr[read], arr[write]
            write += 1
            read += 1
    return arr
def are_equal(j1, j2):
    """Return True if j1 and j2 are element-wise equal, False otherwise.

    Equality means the summed absolute difference is at most 1e-3.
    Returns False when either argument is None, or when the lengths
    differ — the original zipped the sequences, silently truncating,
    so a matching prefix of a longer vector compared as equal.
    """
    if j1 is None or j2 is None:
        return False
    if len(j1) != len(j2):
        return False
    sum_diffs_abs = sum(abs(a - b) for a, b in zip(j1, j2))
    return sum_diffs_abs <= 1e-3
def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c (encoding-name
    normalization for source coding declarations)."""
    # Only the first 12 characters matter for the comparison.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    latin_aliases = ("latin-1", "iso-8859-1", "iso-latin-1")
    latin_prefixes = ("latin-1-", "iso-8859-1-", "iso-latin-1-")
    if enc in latin_aliases or enc.startswith(latin_prefixes):
        return "iso-8859-1"
    return orig_enc
def ispart(ptha, pthb):
    """Return True when every element of path a occurs in path b at
    non-decreasing first-occurrence positions.

    NOTE(review): like the original, this uses the FIRST occurrence of
    each element (list.index) and accepts equal indices, so with
    duplicate elements the result may not match the docstring's
    strict "same order" intent — confirm against callers.
    """
    highest = 0
    for item in ptha:
        if item not in pthb:
            return False
        position = pthb.index(item)
        if position < highest:
            return False
        highest = position
    return True
def simplifyinfixops(tree, targetnodes):
    """Flatten chained infix operations to reduce usage of Python stack.

    tree is a nested tuple of (opname, operands...); any node whose op
    is in targetnodes is collapsed into a single n-ary node, e.g.
    (or (or a b) c) -> (or a b c). Non-tuple leaves pass through.
    """
    if not isinstance(tree, tuple):
        return tree
    op = tree[0]
    if op not in targetnodes:
        return (op,) + tuple(simplifyinfixops(child, targetnodes)
                             for child in tree[1:])
    # Infix operators are left-associative, so the chain extends down
    # the left spine; collect right operands while walking left, then
    # reverse. '1 + 2 + 3' -> (+ (+ 1 2) 3) -> (+ 1 2 3). No recursion
    # into left nodes because the left tree is the deep one.
    operands = []
    node = tree
    while node[0] == op:
        left, right = node[1:]
        operands.append(simplifyinfixops(right, targetnodes))
        node = left
    operands.append(simplifyinfixops(node, targetnodes))
    operands.append(op)
    return tuple(reversed(operands))
def _call_settimeout(a, t, e):
    """ Handler for setTimeout and setInterval.

    Flags any first argument that is not a FunctionExpression node
    (string arguments are banned; other variables cannot be reliably
    type-tested, so they are flagged too). Returns a warning tuple,
    or None when the call is acceptable. t and e are unused.
    """
    if not a or a[0]["type"] == "FunctionExpression":
        return None
    return ("In order to prevent vulnerabilities, the setTimeout "
            "and setInterval functions should be called only with "
            "function expressions as their first argument.",
            "Variables referencing function names are acceptable "
            "but deprecated as they are not amenable to static "
            "source validation.")
def _get_ipynb_code_cell_idx(ipynb_dict):
    """ Get the index of the first code cell.

    Parameters
    ----------
    ipynb_dict : dict
        Dictionary of notebook data.

    Returns
    -------
    idx : int
        The index of the first code cell; when the notebook contains
        no code cell, the total number of cells (matching the original
        counter-based loop).
    """
    cells_list = ipynb_dict['cells']
    for idx, cell_dict in enumerate(cells_list):
        if cell_dict['cell_type'] == 'code':
            return idx
    return len(cells_list)
def ensure_quotes(s: str) -> str:
    """Wrap a string in double quotes unless it is solely alphanumeric
    (the empty string is quoted, since ''.isalnum() is False)."""
    if s.isalnum():
        return s
    return '"{}"'.format(s)
def x_coord(col, dpi):
    """x-coordinate in mils for a column index at the given dpi
    (1000 mils per inch)."""
    mils_per_inch = 1000.
    return mils_per_inch * col / dpi
def process_docids(string):
    """ Parse a docid string into an integer; malformed input maps to 0.

    :param string: docid text
    :return: the docid as int
    """
    try:
        return int(string)
    except ValueError:
        return 0
def vis17(n):  # DONE
    """Draw n rows of (n dots + 'O') above a base row of 2n+1 'O's.

    .O    ..O    ...O
    OOO   ..O    ...O
          OOOOO  ...O
                 OOOOOOO
    Number of Os: 4 7 10
    """
    rows = ['.' * n + 'O' for _ in range(n)]
    rows.append('O' * (2 * n + 1))
    return '\n'.join(rows) + '\n'
def get_roi_params(separation: str = "uplc", instrument: str = "qtof"):
    """ Creates a dictionary with recommended parameters for the
    make_roi function in different use cases.

    Parameters
    ----------
    separation : {"uplc", "hplc"}
        Mode in which the data was acquired. Used to set minimum
        length of the roi and number of missing values.
    instrument : {"qtof", "orbitrap"}
        Type of MS instrument. Used to set the tolerance.

    Returns
    -------
    roi_parameters : dict
    """
    min_length_options = {"uplc": 10, "hplc": 20}
    tolerance_options = {"qtof": 0.01, "orbitrap": 0.005}
    if separation not in min_length_options:
        raise ValueError("valid `separation` are uplc and hplc")
    if instrument not in tolerance_options:
        raise ValueError("valid `instrument` are qtof and orbitrap")
    return {
        "min_intensity": 500,
        "multiple_match": "reduce",
        "max_missing": 1,
        "min_length": min_length_options[separation],
        "tolerance": tolerance_options[instrument],
        "mode": separation,
    }
def diversity(vec):
    """Calculate diversity.

    Diversity(X) = S*log2(S) - sum(x*log2(x)) over the non-zero
    entries, where S is the total count.

    :param vec: kmer vec
    :return: Diversity(X)
    """
    from math import log
    total = sum(vec)
    entropy_terms = sum(count * log(count, 2) for count in vec if count != 0)
    return total * log(total, 2) - entropy_terms
def sum_multiples_of_3_and_5(limit: int) -> int:
    """ Sums all multiples of 3 and 5 below the provided limit.

    :param limit: Limit for multiples to search, non-inclusive.
    :return: Sum of all multiples.
    """
    # Inclusion-exclusion: multiples of 15 appear in both the 3- and
    # 5-series, so subtract them once.
    threes = sum(range(0, limit, 3))
    fives = sum(range(0, limit, 5))
    fifteens = sum(range(0, limit, 15))
    return threes + fives - fifteens
def gcd(a, b):
    """The Euclidean Algorithm: greatest common divisor of a and b
    (always non-negative)."""
    a, b = abs(a), abs(b)
    while a != 0:
        b, a = a, b % a
    return b
def ljust(value, length):
    """Format general alphanumeric fields: stringify, left-justify with
    spaces and truncate to exactly *length*; None becomes empty."""
    text = '' if value is None else str(value)
    return text.ljust(length, ' ')[:length]
def get_rb_data_attribute(xmldict, attr):
    """Get Attribute `attr` from dict `xmldict`.

    Parameters
    ----------
    xmldict : dict
        Blob Description Dictionary
    attr : string
        Attribute key (looked up as '@' + attr)

    Returns
    -------
    sattr : int
        Attribute value; raises KeyError with a descriptive message
        when the attribute is absent.
    """
    try:
        return int(xmldict['@' + attr])
    except KeyError:
        raise KeyError('Attribute @{0} is missing from '
                       'Blob Description. There may be some '
                       'problems with your file'.format(attr))
def _cut_if_too_long(text: str, max_length: int) -> str:
    """Cut a string down to the maximum length.

    Args:
        text: Text to check.
        max_length: The maximum length of the resulting string.

    Returns:
        Cut string with ... added at the end if it was too long.
    """
    if len(text) > max_length:
        return text[: max_length - 3] + "..."
    return text
def _find_valid_path(options):
    """Find valid path from *options*, a list of 2-tuples (name, path).

    Returns the first pair whose *path* is not None; when none exists,
    returns ('<unknown>', None).
    """
    for name, path in options:
        if path is not None:
            return name, path
    return '<unknown>', None
def compile_toc(entries, section_marker='='):
    """Compiles a list of sections with objects into sphinx formatted
    autosummary directives."""
    pieces = []
    for section, objs in entries:
        # Section header underlined with the marker character.
        pieces.append('\n\n%s\n%s\n\n' % (section,
                                          section_marker * len(section)))
        pieces.append('.. autosummary::\n\n')
        for obj in objs:
            pieces.append(' ~%s.%s\n' % (obj.__module__, obj.__name__))
    return ''.join(pieces)
def deletionDistance(str1, str2):
    """ Minimum number of characters to delete from the two strings to
    make them equal.

    Params:
        str1 (String) - the first string THAT DOES NOT CONTAIN A
            REPEATING CHARACTER
        str2 (String) - the second string THAT DOES NOT CONTAIN A
            REPEATING CHARACTER

    Returns:
        distance (int): the minimum number of deletions.

    Examples:
        >>> deletionDistance("dog", "frog")
        3
        >>> deletionDistance("some", "some")
        0
        >>> deletionDistance("some", "thing")
        9
    """
    combined = str1 + str2
    total = len(combined)
    # With no repeats within each string, every duplicate in the
    # concatenation is a character shared by both strings.
    shared = total - len(set(combined))
    return total - 2 * shared
def inner_product(vector1, vector2):
    """ Returns the dot product vector1 . vector2 (indexed by
    vector1's length, as before)."""
    total = 0
    for idx, component in enumerate(vector1):
        total += component * vector2[idx]
    return total
def comb(n: int, k: int) -> int:
    """Number of combinations C(n, k) (binomial coefficient).

    Args:
        n (int): total number of items
        k (int): number chosen

    Returns:
        int: C(n, k); 0 when k is negative or greater than n (the
        original wrongly returned 1 for k > n, and used exponential
        double recursion — replaced by the multiplicative formula).
    """
    if k < 0 or k > n:
        return 0
    # Exploit symmetry to minimize the number of multiplications.
    k = min(k, n - k)
    result = 1
    for i in range(1, k + 1):
        # Integer division is exact at each step for binomials.
        result = result * (n - k + i) // i
    return result
def status_in_range(value: int, lower: int = 100, upper: int = 600) -> bool:
    """ Validates that the status code of a HTTP call is within the
    given boundary, inclusive of both ends."""
    valid_codes = range(lower, upper + 1)
    return value in valid_codes
def safe_sub(val1, val2):
    """ Safely subtracts two values, propagating the -1 "unknown"
    sentinel: if either operand is -1, the result is -1."""
    if val1 == -1 or val2 == -1:
        return -1
    return val1 - val2
def parse_session_cookie(cookie_to_cook):
    """Extract the integer value of the 'remi_session' cookie.

    cookie_to_cook = http_header['cookie'] (raw Cookie header value,
    ';'-separated tokens).

    :return: session id as int, or None when absent or non-numeric.
        As before, when several matching tokens exist the last valid
        one wins.
    """
    session_value = None
    for tok in cookie_to_cook.split(";"):
        if 'remi_session=' in tok:
            try:
                session_value = int(tok.replace('remi_session=', ''))
            except ValueError:
                # Narrowed from a bare `except`, which also swallowed
                # KeyboardInterrupt/SystemExit; a malformed value just
                # leaves session_value unset.
                pass
    return session_value
def expand_to_string(config):
    """Return a string expanding the configuration.

    Flattens a list whose items are strings, dicts ("key value"
    pairs) or lists of values into one space-separated string; any
    non-list input yields ''.
    """
    if not isinstance(config, list):
        return ''
    tokens = []
    for item in config:
        if isinstance(item, str):
            tokens.append(item)
        elif isinstance(item, dict):
            for key, value in item.items():
                tokens.append("{} {}".format(key, value))
        elif isinstance(item, list):
            for entry in item:
                tokens.append("{}".format(entry))
    return ' '.join(tokens).strip()
def xor(bytes1: bytes, bytes2: bytes) -> bytes:
    """Support function to perform Exclusive-OR operation on two bytes.

    :param bytes1: set of bytes 1
    :param bytes2: set of bytes 2
    :returns: XORed data
    :raises ValueError: when the inputs differ in length
    """
    if len(bytes1) != len(bytes2):
        raise ValueError("Input values must have same length")
    return bytes(x ^ y for x, y in zip(bytes1, bytes2))
def get_value(x, y, z, center_prob):
    """ Calculates the probability for the box at x, y, z (within a
    3x3x3 box)."""
    # Count how many coordinates are the center "1".
    centers = (x % 2) + (y % 2) + (z % 2)
    # The non-center probability mass is spread over 14 cells.
    per_voxel = (1 - center_prob) / 14
    # Probability by cell class, indexed by the center count:
    # corner, edge, major axis, center.
    by_center_count = [per_voxel / 4, per_voxel / 2, per_voxel, center_prob]
    return by_center_count[centers]
def humidity_formatter(value):
    """Return the state of the entity: the raw hundredths value scaled
    down and rounded to one decimal; None passes through."""
    if value is None:
        return None
    return round(float(value) / 100, 1)
def convert_to_d_h_m_s(days):
    """Return the tuple of days, hours, minutes and seconds from a
    fractional day count (float)."""
    whole_days, remainder = divmod(days, 1)
    whole_hours, remainder = divmod(remainder * 24, 1)
    whole_minutes, remainder = divmod(remainder * 60, 1)
    return (int(whole_days), int(whole_hours), int(whole_minutes),
            int(remainder * 60))
def file_version_summary(list_of_files):
    """ Given the result of list-file-versions, returns a list of all
    file versions, with "+ " for upload and "- " for hide:

        ['+ photos/a.jpg', '- photos/b.jpg', '+ photos/c.jpg']
    """
    summary = []
    for f in list_of_files:
        marker = '+ ' if f['action'] == 'upload' else '- '
        summary.append(marker + f['fileName'])
    return summary
def solution(arr, target):
    """Two-sum predicate using a hashset.

    Returns True when two elements of arr sum to target; otherwise
    False (the original fell off the end returning None — same
    truthiness, but an explicit bool is the documented contract).
    """
    lookup = set()
    for num in arr:
        complement = target - num
        if complement in lookup:
            return True
        lookup.add(num)
    return False
def ParentId(tpe, id):
    """ A criterion used to search for records by their parent's id,
    e.g. observables by case id, tasks by case id, logs by task id,
    jobs by observable id.

    Arguments:
        tpe (str): class name of the parent: `case`, `case_task`,
            `case_artifact`...
        id (str): the parent id's value

    Returns:
        dict: JSON representation of the criterion, e.g.
        ParentId('case', '1234545643') produces
        {"_parent": {"_type": "case", "_id": "1234545643"}}
    """
    criterion = {'_type': tpe, '_id': id}
    return {'_parent': criterion}
def compute_area(poly):
    """Compute the area of a polygon given as a flat coordinate list
    [x0, y0, x1, y1, ...] using the shoelace formula; fewer than three
    vertices yield 0.0."""
    vertices = [[poly[i], poly[i + 1]] for i in range(0, len(poly) - 1, 2)]
    count = len(vertices)
    if count < 3:
        return 0.0
    shoelace = 0.0
    for i in range(count):
        shoelace += vertices[i][1] * (vertices[i - 1][0]
                                      - vertices[(i + 1) % count][0])
    return abs(shoelace / 2.0)
def load_dotted_name(name):
    """Load an object, giving its full dotted name.

    Currently this isn't very smart, e.g. it can't load attributes off
    an object; a dotless name is imported as a bare module.
    """
    try:
        module_name, attr_name = name.rsplit(".", 1)
    except ValueError:
        # No dot at all: the whole name is a module.
        return __import__(name, fromlist=["*"])
    module = __import__(module_name, fromlist=["*"])
    return getattr(module, attr_name)
def to_unicode_repr(_letter):
    """ Render a string as a u'\\uXXXX...' escape literal — helpful in
    situations where a browser/app may recognize Unicode encoding in
    the \\u0b8e type syntax but not the actual glyph/code-point."""
    # Python 2-3 compatible
    escapes = u"".join(u"\\u%04x" % ord(ch) for ch in _letter)
    return u"u'" + escapes + u"'"
def replace_special_characters(string: str):
    """Replace special characters in a step id with '-': consecutive
    alphanumeric runs are joined by single dashes."""
    runs = []
    current = ''
    for ch in string:
        if ch.isalnum():
            current += ch
        elif current:
            # A non-alphanumeric character terminates the current run.
            runs.append(current)
            current = ''
    if current:
        runs.append(current)
    return '-'.join(runs)
def survey_T(phrase, langDict):
    """ Translate *phrase* using the dictionary passed in, falling back
    to the phrase itself when no non-empty translation exists."""
    translation = langDict.get(phrase, "")
    return translation if translation != "" else phrase
def get_name_from_link(link):
    """ Returns the final path segment of a link as its name. """
    return link.rsplit("/", 1)[-1]
def data_cut_at_index(data_cuts, index):
    """Given data_cuts (the return value of remove_saturation) and an
    index, return True if the index lies in any [start, stop) region
    cut due to saturation, False otherwise."""
    return any(start <= index < stop for start, stop in data_cuts)
def __get_service_names(scenario_config):
    """ Gets the list of services from the scenario config. If no
    services are given, an empty list is returned.

    :param scenario_config: The scenario config.
    :return: A list of services. [] if no service names are found.
    :raises TypeError: when "services" is present but not a list
        (narrowed from the original bare ``Exception``; TypeError is a
        subclass, so ``except Exception`` callers still work).
    """
    service_names = scenario_config.get('services', [])
    if not isinstance(service_names, list):
        raise TypeError('"services" is not a list. It must be a list of services')
    return service_names
def ros_advertise_service_cmd(_type, service):
    """ Create a rosbridge advertise_service command object.

    :param _type: type of the ROS service
    :param service: name of the service
    """
    return {
        "op": "advertise_service",
        "type": _type,
        "service": service,
    }
def make_single(vdict):
    """Unwrap 1-tuples in place; returns the same dict.

    >>> d = {"xx": (1,)}
    >>> make_single(d)
    {'xx': 1}
    """
    for key, value in list(vdict.items()):
        if isinstance(value, tuple) and len(value) == 1:
            vdict[key] = value[0]
    return vdict
def vol_prism(area_of_base: float, height: float) -> float:
    """ Calculate the Volume of a Prism, V = B*h.

    Wikipedia reference: https://en.wikipedia.org/wiki/Prism_(geometry)

    >>> vol_prism(10, 2)
    20.0
    >>> vol_prism(11, 1)
    11.0
    """
    volume = area_of_base * height
    return float(volume)
def trim_time_column(line):
    """ Trims the time column off the front of the given line from a
    backup plan file. Useful when searching for a particular backup
    plan entry without caring about the time. Remaining tab-separated
    columns are re-joined with single spaces."""
    columns = line.split("\t")
    return " ".join(columns[1:])
def pad_right(value, to_size, pad_with):
    """ Pad a bytes value on the right with *pad_with* up to
    *to_size*; values already long enough are returned unchanged."""
    shortfall = to_size - len(value)
    if shortfall <= 0:
        return value
    return b"".join((value, pad_with * shortfall))
def _sort_destinations(destinations):
    """
    Takes a list of destination tuples and returns the same list,
    sorted in order of the jumps.
    """
    # Insertion sort keyed on dest[1] (the jump count), ascending.
    # NOTE(review): ties are inserted *before* existing equal entries,
    # so equal-jump destinations end up in reversed input order — this
    # is NOT a stable sort; confirm callers don't rely on tie order.
    results = []
    on_val = 0  # scan cursor into results; reset to 0 after each placement
    for dest in destinations:
        if len(results) == 0:
            # First element seeds the sorted list.
            results.append(dest)
        else:
            while on_val <= len(results):
                if on_val == len(results):
                    # Ran off the end: dest has the largest jump so far.
                    results.append(dest)
                    on_val = 0
                    break
                else:
                    if dest[1] > results[on_val][1]:
                        # Current slot is smaller; keep walking right.
                        on_val += 1
                    else:
                        # First slot whose jump is >= dest's: insert here.
                        results.insert(on_val, dest)
                        on_val = 0
                        break
    return results
def sum_digits(n: int) -> int:
    """Returns the sum of the digits of non-negative integer n."""
    assert not n < 0, "Non-negative integer n only."
    total = 0
    while n >= 10:
        total += n % 10
        n //= 10
    # n is now the leading digit.
    return total + n
def handle011AT(tokens):
    """ Processes the 011@ (https://www.gbv.de/bibliotheken/verbundbibliotheken/02Verbund/01Erschliessung/02Richtlinien/01KatRicht/1100.pdf) field. Currently, only subfield a is supported. Only the first year of publication is extracted.
    For details (in German), see: https://www.gbv.de/bibliotheken/verbundbibliotheken/02Verbund/01Erschliessung/02Richtlinien/01KatRicht/1100.pdf)
    :param tokens: a list of tokens of the field 011@
    :return: the first found year of publication ("" if subfield a is absent)
    """
    dateOfPublication = ""
    for token in tokens:
        if token.startswith("a"):
            dateOfPublication = token[1:].replace("@", "").strip()
            # Stop at the FIRST subfield a, as documented; without this
            # break the loop kept overwriting with later occurrences.
            break
    return dateOfPublication
def __num_two_factors(x):
    """Return how many times integer x can be evenly divided by 2.

    Returns 0 for non-positive integers.
    """
    if x <= 0:
        return 0
    # x & -x isolates the lowest set bit; its bit position is the
    # number of trailing zero bits, i.e. the exponent of 2 in x.
    return (x & -x).bit_length() - 1
def get_scales(dataset):
    """Returns NMS IOU, IOU and Prob Score for a particular dataset"""
    scales = {
        'DeepFruits': (0.4, 1e-5, 0.2),
        'mangoYOLO': (0.5, 1e-5, 0.6),
        'MultiFruit': (0.3, 1e-5, 0.2),
        'MinneApple': (0.3, 1e-5, 0.2),
        'wgisd': (0.2, 1e-05, 0.2),
    }
    assert dataset in scales, "Invalid dataset name"
    return scales[dataset]
def GammaCorrection(color, gamma=2.8, max=255):
    """ Perfomes Gamma Correction on a color.

    :param color: The color to perform the gamma correction on.
    :param gamma: The gamma value.
    :param max: Specifies full scale output of the gamma correction.
    :return: Gamma corrected color.
    """
    corrected = []
    for channel in color:
        # Normalise to [0, 1], apply the gamma curve, rescale to `max`.
        corrected.append(round(max * ((channel / 255) ** gamma)))
    return corrected
def memory2int(memory):
    """Internal helper function to convert Kubernetes memory amount to integers.

    Supports the binary suffixes Ki/Mi/Gi/Ti; a bare number is treated as
    plain bytes. (The previous version unconditionally stripped the last
    two characters, mangling un-suffixed values like "1024".)
    """
    units = {"Ki": 1024, "Mi": 1024 ** 2, "Gi": 1024 ** 3, "Ti": 1024 ** 4}
    for suffix, multiplier in units.items():
        if memory.endswith(suffix):
            return int(memory[:-2]) * multiplier
    # No recognised suffix: the amount is already in bytes.
    return int(memory)
def check_for_contents(string, string_dict):
    """Iterate through string dict to check contents in other string"""
    return any(snippet in string for snippet in string_dict)
def is_overlapped(end1, start2):
    """Returns True if the two segments overlap

    Arguments
    ---------
    end1 : float
        End time of the first segment.
    start2 : float
        Start time of the second segment.
    """
    return start2 <= end1
def getcoroutinelocals(coroutine):
    """
    Get the mapping of coroutine local variables to their
    current values.

    A dict is returned, with the keys the local variable names and values the
    bound values. Objects without a ``cr_frame`` yield an empty dict.
    """
    frame = getattr(coroutine, "cr_frame", None)
    return {} if frame is None else frame.f_locals
def eigvector_uncoupled(par):
    """
    Returns the flag for the correction factor to the eigenvectors for the
    linear guess of the unstable periodic orbit.

    Parameters
    ----------
    parameters : float (list)
        model parameters

    Returns
    -------
    correcx : 1 or 0
        flag to set the x-component of the eigenvector
    correcy : 1 or 0
        flag to use the y-component of the eigenvector
    """
    # Both components are always enabled for the uncoupled system.
    return 1, 1
def get_did_by_foreign_key(did_foreign_key):
    """Return the DID referenced by a ForeignKey or OneToOneField to IdNamespace.

    Return None if ForeignKey or OneToOneField is NULL.

    This is used instead of "did_foreign_key.*.did" on ForeignKeys and
    OneToOneFields that allow NULL (null=True in the model).
    """
    try:
        return did_foreign_key.did
    except AttributeError:
        return None
def hours_to_minutes(hours: str) -> int:
    """Converts hours to minutes"""
    whole_hours = int(hours)
    return 60 * whole_hours
def subset(l, L):
    """
    Takes two lists and returns True if the first one is contained in the
    second one. If the lists could be sorted, it would be more efficient.

    :param l: `list` instance.
    :param L: `list` instance.
    :return: `bool` instance.
    """
    for item in l:
        if item not in L:
            return False
    return True
def trim_text(text):
    """Helper method to trim generated text.

    Cut off generated output at the last ./?/! if there is one, unless the
    text ends with hashtags and the last punc is before the hashtags.
    """
    last_punc = max(text.rfind(end) for end in '.!?')
    if last_punc == -1:
        # No sentence-ending punctuation at all: nothing to trim.
        return text
    tail = text[last_punc + 1:]
    if tail and '#' in tail:
        # Trailing hashtags after the punctuation: keep the text intact.
        return text
    if len(text) > last_punc + 2 and text[last_punc + 1] == '"':
        # Keep a closing quote that immediately follows the punctuation.
        return text[:last_punc + 2]
    return text[:last_punc + 1]
def min_(data):
    """Minimum of the values in the object"""
    # Seed with the first element (raises IndexError on empty input,
    # matching the original behaviour).
    smallest = data[0]
    for candidate in data[1:]:
        if candidate < smallest:
            smallest = candidate
    return smallest
def stream2dict(stream_list):
    """Convert stream list into stream dict

    Parameters
    ----------
    stream_list : list
        Stream in list form (list of dicts), as returned by Strava API v3

    Returns
    -------
    stream_dict : dict
        Stream in dict form, with key set to *stream name* and value set to
        the actual stream list. In this form, the stream is ready to be
        consumed by pandas
    """
    return {stream['type']: stream['data'] for stream in stream_list}
def get_support(cluster):
    """
    Returns support

    >>> get_support({5: {'11111': ['ab', 'ac', 'df', 'bd', 'bc']},
    ...              4: {'11101': ['ef', 'eg', 'de', 'fg'], '11011': ['cd']},
    ...              3: {'11001': ['ad'], '10101': ['dg']},
    ...              2: {'10010': ['dh', 'bh'], '11000': ['be'], '10100': ['gh'],
    ...                  '10001': ['ce']},
    ...              1: {'00100': ['fh', 'eh'], '10000': ['hi']}})
    [100.0, 80.0, 60.0, 40.0, 20.0]
    """
    total_levels = len(cluster)
    support = []
    for level in cluster:
        support.append(level * 100 / total_levels)
    return support
def wrap_node_data_lookup(uuid_to_devices):
    """given a uuid to devices map, returns dictionary like the following:
    {'parameter_defaults':
      {'NodeDataLookup':
        {'32e87b4c-c4a7-41be-865b-191684a6883b': {'devices': ['/dev/sdc']}},
        {'ea6a84d6-cf89-4fe2-b7bd-869b3fe4dd6b': {'devices': ['/dev/sdc']}}}}
    """
    return {'parameter_defaults': {'NodeDataLookup': uuid_to_devices}}
def Counter64(al, br, delta):
    """64bit counter aggregator with wrapping

    :param al: earlier counter sample
    :param br: later counter sample
    :param delta: time elapsed between the two samples
    :return: rate per unit of delta, accounting for one 64-bit wrap
    """
    if br < al:
        # Counter wrapped: it counted (2**64 - al) up to the wrap point,
        # then br more. The previous code used 2**64 - 1 here, which
        # under-counted every wrapped interval by exactly one.
        return ((2 ** 64 - al) + br) / float(delta)
    return (br - al) / float(delta)
def generate_output_field(input_field, field_type):
    """ Grab an output field for a censys input field

    Dots in the field path are rewritten as double underscores;
    ``field_type`` is currently unused but kept for interface stability.
    :param input_field: censys Field
    :return: field name with '.' replaced by '__'
    """
    return '__'.join(input_field.split('.'))
def create_data_structure(emojis):
    """Create data structure to store the emojis

    Builds a character trie; each complete word is terminated with the
    "__end__" sentinel key.

    :param emojis: Dictionary of emoji
    :type emojis: dict
    :rtype: dict
    """
    root = dict()
    for word in emojis:
        node = root
        for char in word:
            node = node.setdefault(char, dict())
        node["__end__"] = True
    return root