def mov_spaces(obj, center, orbit):
    """Returns the path from an object to the COM, via orbital transfer."""
    path = []
    while obj != "COM":
        ind = orbit.index(obj)
        obj = center[ind]
        path.append(obj)
    return path
def merge_resources(resource1, resource2):
    """Updates a copy of resource1 with resource2 values and returns the merged dictionary.

    Args:
        resource1: original resource
        resource2: resource to update resource1

    Returns:
        dict: merged resource
    """
    merged = resource1.copy()
    merged.update(resource2)
    return merged
def fts_pattern(pattern):
    """Convert a pattern to an fts representation."""
    fts = [f'{patt}*' for patt in pattern.split(' ') if patt]
    return ' '.join(fts)
def get_cu_mask(active_cus, total_cus, stripe_width):
    """Returns a CU mask (represented as a boolean array) with the active number
    of CUs specified by active_cus, using the given stripe_width."""
    to_return = [False] * total_cus
    i = 0
    for n in range(active_cus):
        if i >= total_cus:
            # Wrap around: start the next pass at the offset for this CU.
            # Integer division keeps the result usable as a list index.
            i = n // (total_cus // stripe_width)
        to_return[i] = True
        i += stripe_width
    return to_return
def no_highlight(nick: str) -> str:
    """Inserts a Unicode Zero Width Space into nick to prevent highlights."""
    return nick[0:1] + "\u200b" + nick[1:]
def does_strict_dominate(g1, g2, delta1, delta2):
    """Returns true if g1 strictly dominates g2 with the given relaxation.

    Parameters
    ----------
    g1 : tuple of float
        Objective values of a point
    g2 : tuple of float
        Objective values of a point
    delta1 : tuple of float
        Relaxation of 'g1'
    delta2 : tuple of float
        Relaxation of 'g2'

    Returns
    -------
    bool
    """
    dim = len(g1)
    is_sdom = True
    for i in range(dim):
        if g2[i] + delta2[i] <= g1[i] - delta1[i]:
            is_sdom = False
    return is_sdom
def load_cli_kwargs(kwargs_list, delimiter='='):
    """Parse a list of command line interface "kwargs".

    ["key1=val1", "key2=val2"] -> {"key1": "val1", "key2": "val2"}
    (Where "=" is the passed delimiter value.)

    Args:
        kwargs_list - list(str) - list of delimited key value pairs.
        delimiter - str - value on which to split kwargs_list items.

    Returns:
        A kwarg-populated dictionary.
    """
    kwargs = {}
    for kv in kwargs_list:
        k, v = kv.split(delimiter, 1)
        kwargs[k] = v
    return kwargs
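A brief usage sketch for load_cli_kwargs with made-up values: because split() is called with maxsplit=1, only the first delimiter is consumed, so a value may itself contain the delimiter.

    pairs = ["key1=val1", "path=a=b"]
    print(load_cli_kwargs(pairs))
    # {'key1': 'val1', 'path': 'a=b'}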
def _apply_func(data, func, num_rows, base_row_index=0, increment=False):
    """Apply the function to the base row which returns a new row. This is then
    added to the dataset n times.

    Parameters
    ----------
    data: [][]
        List to apply the function to.
    func: function
        The function to apply to the row. This won't alter the initial row it is
        applied to.
    base_row_index: int
        The index of the row to first apply the function to.
    num_rows: int
        The number of times this function should be applied. Will result in this
        many new rows added to the dataset.
    increment: boolean
        If true, the function will be applied to the newly created rows rather
        than the base row on further iterations.

    Returns
    -------
    [][]
        The mutated list with the new rows added.
    """
    row = list(data[base_row_index])
    curr_index = base_row_index
    for _ in range(num_rows):
        data.append(func(row))
        if increment:
            curr_index += 1
            row = list(data[curr_index])
    return data
def check_flag(flag, inbit):
    """Check whether the given bit is set in a flag."""
    if flag & inbit:
        return True
    return False
def mangle_name(name):
    """remove unsafe characters from name"""
    return name.replace(':', '_')
def cross_product(a, b):
    """Return the 2D cross product (z-component) of vectors a and b."""
    return a[0] * b[1] - a[1] * b[0]
def guess_extension_from_headers(h):
    """Given headers from an arXiv e-print response, try to guess what the file
    extension should be.

    Based on: https://arxiv.org/help/mimetypes
    """
    if h.get("content-type") == "application/pdf":
        return ".pdf"
    if (
        h.get("content-encoding") == "x-gzip"
        and h.get("content-type") == "application/postscript"
    ):
        return ".ps.gz"
    if (
        h.get("content-encoding") == "x-gzip"
        and h.get("content-type") == "application/x-eprint-tar"
    ):
        return ".tar.gz"
    if (
        h.get("content-encoding") == "x-gzip"
        and h.get("content-type") == "application/x-eprint"
    ):
        return ".tex.gz"
    if (
        h.get("content-encoding") == "x-gzip"
        and h.get("content-type") == "application/x-dvi"
    ):
        return ".dvi.gz"
    return None
def required_index(a):
    """Helper function to take a list of index lists and return whether it needs
    to be included as an index in demultiplexing."""
    return len(set(tuple(a_i) for a_i in a)) != 1
def find_largest_digit_helper(num, max_num):
    """
    :param num: the number that we should pick the largest digit from.
    :param max_num: current maximum digit
    :return: the largest digit
    """
    # there are no digits left
    if num == 0:
        return max_num
    else:
        # pick the last digit to compare
        if num % 10 > max_num:
            max_num = num % 10
            # delete the last digit and compare again
            return find_largest_digit_helper(num // 10, max_num)
        else:
            return find_largest_digit_helper(num // 10, max_num)
def Flatten(matrix):
    """Flattens a 2d array 'matrix' to an array."""
    array = []
    for a in matrix:
        array += a
    return array
def send_to_address(message):
    """Returns a string to be used as the address the email is being sent to.

    Default is '[email protected]'.
    """
    # If a send to address is included in html form, return its assoc. string
    if 'send_to' in message and message['send_to']:
        return message['send_to']
    # Otherwise, return default
    return 'default'
def TransformLen(r):
    """Returns the length of the resource if it is non-empty, 0 otherwise.

    Args:
        r: A JSON-serializable object.

    Returns:
        The length of r if r is non-empty, 0 otherwise.
    """
    try:
        return len(r)
    except TypeError:
        return 0
def add3(v1, v2):
    """Add two 3-vectors component-wise."""
    return (v1[0] + v2[0], v1[1] + v2[1], v1[2] + v2[2])
def filter_positive_even_numbers(numbers):
    """Receives a list of numbers, and returns a filtered list of only the
    numbers that are both positive and even (divisible by 2), try to use a
    list comprehension."""
    return [x for x in numbers if x % 2 == 0 and x > 0]
def calcStampAmount(aimPrice, listOfAvailableStamps):
    """Work out the stamp combination in multiple different ways and choose the
    one with the fewest stamps."""
    # possibleStampLists is a master list of lists
    possibleStampLists = []

    # See if any stamps fit exactly into aimPrice
    for stamp in listOfAvailableStamps:
        if aimPrice % stamp == 0:
            possibleStampLists.append([stamp for x in range(int(aimPrice / stamp))])

    # Decreasing first-fit algorithm
    largestStamp = max(listOfAvailableStamps)
    firstFitUsed = []
    while aimPrice > 0:
        if aimPrice - largestStamp < 0:
            if abs(aimPrice - largestStamp) < min(listOfAvailableStamps):
                firstFitUsed.append(largestStamp)
                break
            listOfAvailableStamps.remove(largestStamp)
            try:
                largestStamp = max(listOfAvailableStamps)
            except ValueError:
                firstFitUsed.append(largestStamp)
                break
            continue
        firstFitUsed.append(largestStamp)
        aimPrice -= largestStamp
    possibleStampLists.append(firstFitUsed)

    # find the list that contains the lowest amount of stamps
    shortest = possibleStampLists[0]
    for l in possibleStampLists:
        if len(shortest) > len(l):
            shortest = l
    return shortest
def getFrameIndex(t, fs):
    """Calculates and returns the frame index at a given time offset within a signal.

    @param t the time offset [s]
    @param fs sampling frequency [Hz]
    """
    return int(round(float(t) * float(fs)))
def replaceKeys(orig_dict, oldKeys2NewKeys, inplace=True):
    """Replace keys with new keys using the oldKeys2NewKeys mapping."""
    target_dict = orig_dict if inplace else {}
    for oldKey, newKey in oldKeys2NewKeys.items():
        if oldKey in orig_dict:
            target_dict[newKey] = orig_dict.get(oldKey)
            if inplace:
                orig_dict.pop(oldKey)
    return target_dict
def _slim_address(resource, key):
    """Only return the "home" address."""
    return [addr for addr in resource[key] if addr["use"] == "home"]
def partition(inp_list, n):
    """Partitions a given list into n chunks of roughly equal size.

    Parameters
    ----------
    inp_list: List to be split
    n: Number of equal partitions needed

    Returns
    -------
    inp_list split into n roughly equal chunks
    """
    division = len(inp_list) / float(n)
    return [
        inp_list[int(round(division * i)): int(round(division * (i + 1)))]
        for i in range(n)
    ]
def check_grid_val(grid, r, c, v):
    """Return whether value v may be placed in the r,c position."""
    n = max([max(l) for l in grid])
    h = len(grid)
    w = len(grid[0])
    # Avoid 4 adjacent cells of the same color
    nope = []
    # Left
    if r > 0 and r < h - 1 and c > 0:
        if grid[r+1][c-1] == v and v == grid[r][c-1] and v == grid[r-1][c-1]:
            return False
    # Top Left
    if r > 0 and c > 0:
        if grid[r-1][c] == v and v == grid[r-1][c-1] and v == grid[r][c-1]:
            return False
    # Top
    if r > 0 and c > 0 and c < w - 1:
        if grid[r-1][c-1] == v and v == grid[r-1][c] and v == grid[r-1][c+1]:
            return False
    # Top Right
    if r > 0 and c < w - 1:
        if grid[r][c+1] == v and v == grid[r-1][c+1] and v == grid[r-1][c]:
            return False
    # Right
    if r > 0 and r < h - 1 and c < w - 1:
        if grid[r-1][c+1] == v and v == grid[r][c+1] and v == grid[r+1][c+1]:
            return False
    # Bottom Right
    if r < h - 1 and c < w - 1:
        if grid[r][c+1] == v and v == grid[r+1][c+1] and v == grid[r+1][c]:
            return False
    # Bottom
    if r < h - 1 and c > 0 and c < w - 1:
        if grid[r+1][c+1] == v and v == grid[r+1][c] and v == grid[r+1][c-1]:
            return False
    # Bottom Left
    if r > 0 and r < h - 1 and c > 0 and c < w - 1:
        if grid[r+1][c] == v and v == grid[r+1][c-1] and v == grid[r-1][c-1]:
            return False
    # Tetris Left
    if r > 0 and r < h - 1 and c > 0:
        if grid[r+1][c] == v and v == grid[r][c-1] and v == grid[r-1][c]:
            return False
    # Tetris Top
    if r > 0 and c > 0 and c < w - 1:
        if grid[r][c-1] == v and v == grid[r-1][c] and v == grid[r][c+1]:
            return False
    # Tetris Right
    if r > 0 and r < h - 1 and c < w - 1:
        if grid[r-1][c] == v and v == grid[r][c+1] and v == grid[r+1][c]:
            return False
    # Tetris Bottom
    if r < h - 1 and c > 0 and c < w - 1:
        if grid[r][c+1] == v and v == grid[r+1][c] and v == grid[r][c-1]:
            return False
    return True
def dec_to_base(number, base):
    """
    Input: number is the number to be converted
           base is the new base (eg. 2, 6, or 8)
    Output: the converted number in the new base without the prefix (eg. '0b')
    """
    if number == 0:
        return 0
    else:
        # the last digit in the new base is the remainder modulo the base
        remainder = number % base
        # prepend the remaining digits by shifting them one decimal place left
        return remainder + 10 * dec_to_base(number // base, base)
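Since dec_to_base concatenates digits as a decimal integer, it only makes sense for bases up to 10. A quick check with made-up inputs:

    print(dec_to_base(13, 2))  # 1101, i.e. 13 in binary, returned as the int 1101
    print(dec_to_base(0, 8))   # 0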
def indexPosition1D(i, N):
    """This is a generic function which determines whether index i over a list of
    length N is an interior point, node 0, or node 1."""
    if i > 0 and i < N - 1:  # Interior
        return 0, None
    elif i == 0:  # Node 0
        return 1, 0
    elif i == N - 1:  # Node 1
        return 1, 1
def str_grep(S, strs):
    """Returns a list of strings wherein the substring S is found."""
    return [s for s in strs if s.find(S) >= 0]
def tsv_unescape(x):
    """Unescape strings in the TSV file.

    Escaped characters include:
    - newline (0x0A) -> backslash + n
    - vertical bar (0x7C) -> backslash + p
    - backslash (0x5C) -> backslash + backslash

    Parameters
    ----------
    x : ``str``

    Returns
    -------
    ``str``
    """
    return x.replace(r'\n', '\n').replace(r'\p', '|').replace('\\\\', '\\')
def range2d(n, m):
    """Returns a list of (row, column) index pairs covering a 2d range.

    Arguments:
        n: The number of rows in the 2d range
        m: The number of columns in the 2d range

    Returns:
        A list of (i, j) tuples covering the 2d range
    """
    return [(i, j) for i in range(n) for j in range(m)]
def _to_mumps_number(v):
    """Given a value, attempt to coerce it to either an integer or float."""
    sign = 1
    ndec = 0
    try:
        tmp = float(v)
        if tmp.is_integer():
            return int(tmp)
        else:
            return tmp
    except ValueError:
        v = str(v)
        n = []
        # Build a number based on the MUMPS numeric conversion rules
        for c in v:
            # Look for numeric characters (digits, decimal, or sign)
            if c.isnumeric() or c in ('.', '+', '-'):
                # Make sure we only add one decimal
                if c == '.':
                    if ndec >= 1:
                        break
                    else:
                        ndec += 1
                # Correctly swap the sign
                if c == '-':
                    sign *= -1
                    continue
                # Ignore the plus signs
                if c == '+':
                    continue
                # If we made it this far, this is a valid numeric character
                n.append(c)
            else:
                # If we don't find any, stop collecting characters
                break
        # Re-assemble the digits and attempt to convert it
        n = float("".join(n)) * sign
        return n if not n.is_integer() else int(n)
def shift_matrix(matrix, start_index):
    """Shifts a matrix so that a particular index is now 0."""
    new_matrix = [[False] * len(matrix[0]) for i in range(len(matrix))]
    for i, row in enumerate(matrix):
        for j, value in enumerate(row):
            new_matrix[i][j - start_index] = value
    return new_matrix
def split(s, sep=None, maxsplit=-1):
    """split(s [,sep [,maxsplit]]) -> list of strings

    Return a list of the words in the string s, using sep as the
    delimiter string.  If maxsplit is given, splits at no more than
    maxsplit places (resulting in at most maxsplit+1 words).  If sep
    is not specified or is None, any whitespace string is a separator.

    (split and splitfields are synonymous)
    """
    return s.split(sep, maxsplit)
def get_delim(line):
    """Given a string representing a line of data, check whether the
    delimiter is ',' or space.

    Parameters
    ----------
    line : str
        line of data

    Returns
    -------
    delim : {',', ' '}

    Examples
    --------
    >>> get_delim(',')
    ','
    >>> get_delim(' ')
    ' '
    >>> get_delim(', ')
    ','
    >>> get_delim('x')
    Traceback (most recent call last):
        ...
    ValueError: delimiter not understood: x
    """
    if ',' in line:
        return ','
    if ' ' in line:
        return ' '
    raise ValueError("delimiter not understood: " + line)
def flat_dict_list(dict_list):
    """Will flatten a list of dicts, or a list of lists of dicts, to a flat dict."""
    if type(dict_list) == dict:
        return dict_list
    res_list = []
    for temp_list in dict_list:
        if type(temp_list) == list:
            res_list.append(flat_dict_list(temp_list))
        else:
            res_list.append(temp_list)
    res = {}
    for d in res_list:
        res.update(d)
    return res
def split_data(raw_data: str):
    """Splits data into a list.

    :param raw_data: String
    :return: List
    """
    return raw_data.split()
def offsetInDOL(ramOffset, sectionInfo):  # todo: write into dolInitializer method
    """Converts the given integer RAM address (location in memory) to the
    equivalent DOL file integer offset. ramOffset should already be relative to
    the base address (-0x80000000)."""
    dolOffset = -1
    # Determine which section the address belongs in, and then get that section's starting offsets.
    for section in sectionInfo.values():
        if ramOffset >= section[1] and ramOffset < (section[1] + section[2]):
            sectionOffset = ramOffset - section[1]  # Get the offset from the start of the section.
            dolOffset = section[0] + sectionOffset  # Add the section offset to that section's start offset in the DOL.
            break
    return dolOffset
def get_modal_triggers(offend_atoms, implied_modalities):
    """
    :param offend_atoms: set of offending modal atoms at given w
    :param implied_modalities: set of tuples representing implied boxes and implied diamonds
    :return: set of antecedent atoms in modal implications
    """
    triggers = set()
    for atom in offend_atoms:
        for imp in implied_modalities:
            if atom == imp[0][1]:
                triggers.add(imp[1])
    return triggers
def ones(n):
    """Returns a sequence of ones with n elements.

    @type n: number
    @param n: length of sequence
    @rtype: list
    @return: sequence
    """
    return [1.0] * n
def none(iterable):
    """Returns True if no value in the iterable is truthy.

    >>> none([False, 0, [], None, ""])
    True
    """
    return not any(iterable)
def get_account_number(arn):
    """Extract the account number from an arn.

    :param arn: IAM SSL arn
    :return: account number associated with ARN
    """
    return arn.split(":")[4]
def VarKeys(constr):
    """Finds the keys in a constraint that represent parameters
    e.g. eliminates any that start with '_'

    :param dict constr: a single constraint entry of form::

        {'var1': mult1, 'var2': mult2,... '_notVar': val,...}

        (see :func:`GroupConstraints`)
    :returns: a list of keys where any keys beginning with '_' are removed.
    """
    return [i for i in constr.keys() if not i.startswith('_')]
def first(iterable, or_=None):
    """Get the first element of an iterable.

    Just semantic sugar for next(iterable, or_).
    """
    return next(iterable, or_)
def _get_any_description(
    partner_default_descriptions_dict: dict,
    partner_descriptions_dict: dict,
    partner_key: str,
):
    """Returns either the default partner description or the partner description
    in the user's language of choice.

    Parameters
    ----------
    partner_default_descriptions_dict : dict
        The default descriptions dictionary.
    partner_descriptions_dict : dict
        The descriptions dictionary with descriptions in the user's preferred language
    partner_key : str
        The description key we are looking for

    Returns
    -------
    str or None
    """
    if partner_key in partner_descriptions_dict.keys():
        return partner_descriptions_dict[partner_key]
    elif partner_key in partner_default_descriptions_dict.keys():
        return partner_default_descriptions_dict[partner_key]
    else:
        return None
def parse_version(*args, **kwargs):
    """pkg_resources is very slow to load, so defer the import until it is needed."""
    import pkg_resources
    return pkg_resources.parse_version(*args, **kwargs)
def factorial(n):
    """Returns the factorial of a number n >= 0.

    This is a recursive function.
    """
    if n == 0:
        return 1
    else:
        return n * factorial(n - 1)
def get_matrix_diff_coords(indices):
    """Returns coordinates for off-diagonal elements."""
    return [(i, j) for i in indices for j in indices if i != j]
def matrix_combine(matrix_1, matrix_2):
    """Return the combination of two confusion matrices.

    :param matrix_1: first matrix that is going to be combined.
    :type matrix_1: dict
    :param matrix_2: second matrix that is going to be combined.
    :type matrix_2: dict
    :return: the combination of two matrices as a dict of dicts
    """
    result_matrix = {}
    classes_1, classes_2 = matrix_1.keys(), matrix_2.keys()
    classes = set(classes_1).union(set(classes_2))
    for class_1 in classes:
        temp_dict = {}
        for class_2 in classes:
            tmp = 0
            if class_1 in classes_1 and class_2 in classes_1:
                tmp += matrix_1[class_1][class_2]
            if class_1 in classes_2 and class_2 in classes_2:
                tmp += matrix_2[class_1][class_2]
            temp_dict[class_2] = tmp
        result_matrix[class_1] = temp_dict
    return result_matrix
def inferNamespacePrefix(aUri):
    """From a URI returns the last bit and simulates a namespace prefix when
    rendering the ontology.

    E.g. from <'http://www.w3.org/2008/05/skos#'> it returns the 'skos' string.
    """
    stringa = str(aUri)
    try:
        prefix = stringa.replace("#", "").split("/")[-1]
    except Exception:
        prefix = ""
    return prefix
def diff_weather(new, stored):
    """Diff the newest API response with the stored one."""
    diff = {}
    changed = False
    for t in new:
        if stored and t in stored:
            if new[t]["max"] != stored[t]["max"] or new[t]["min"] != stored[t]["min"]:
                changed = True
                diff[t] = {}
                diff[t]["date_str"] = new[t]["date_str"]
                diff[t]["old"] = {}
                diff[t]["old"]["min"] = stored[t]["min"]
                diff[t]["old"]["max"] = stored[t]["max"]
                diff[t]["new"] = {}
                diff[t]["new"]["min"] = new[t]["min"]
                diff[t]["new"]["max"] = new[t]["max"]
            continue
        diff[t] = {}
        diff[t]["date_str"] = new[t]["date_str"]
        diff[t]["new"] = {}
        diff[t]["new"]["min"] = new[t]["min"]
        diff[t]["new"]["max"] = new[t]["max"]
    return diff if changed or not stored else {}
def rh_dwyer(raw_value):
    """Returns Dwyer sensor relative humidity (RH) from a raw register value.
    Range is 0-100%.
    """
    # Humidity linear calibration = 100 / (2^15 - 1)
    RH0 = 0.0
    RHs = 100.0 / (2 ** 15 - 1)
    return (RH0 + RHs * float(raw_value), "percent")
def get_central_frequency(w_type, f=None):
    """
    Parameters
    ----------
    w_type : int
        Wavelet type. 0 is Ricker, 1 is Ormsby
    f : list
        frequency parameters of wavelet

    Returns
    -------
    int
    """
    if f is None:
        # set default values
        if w_type:
            f = [5, 10, 50, 100]
        else:
            f = [25]
    if w_type:
        return int((f[0] + f[3]) / 2)
    else:
        return f[0]
def get_children(xml_obj):
    """Return the XML elements one level down."""
    if xml_obj is None:
        return None
    else:
        return [x for x in xml_obj]
def sentence_to_HMM_format(sentence):
    """Transform the sentence to HMM format.

    :param sentence: the sentence to transform
    :return: the HMM format
    """
    pairs = []
    for sign in sentence:
        if sign == " ":
            continue
        pairs.append((sign, ""))
    return pairs
def extract_doi_links(urls):
    """Try to find a DOI from a given list of URLs.

    :param urls: A list of URLs.
    :returns: First matching DOI URL, or ``None``.
    """
    doi_urls = [url for url in urls if "/doi/" in url]
    if len(doi_urls) > 0:
        return ("http://dx.doi.org" +
                doi_urls[0][doi_urls[0].find("/doi/") + 4:])
    else:
        return None
def _num_boxes_2_figsize(n):
    """Uses a linear regression model to infer an adequate figsize from the
    number of boxes in a boxplot.

    Data used for training:
    X = [ 7  9 11 22 23 26]
    y = [[8,4],[9,5],[10,6],[10,10],[10,10],[10,10],[10,11]]

    Returns
    -------
    (w,h) : tuple
        the width and the height of the figure
    """
    if n <= 7:
        return (8, 4)
    else:
        y1 = 0.07662 * n + 8.24853
        y2 = 0.36444 * n + 1.71415
        return int(y1), int(y2)
def represent_seconds_in_ms(seconds):
    """Converts seconds into human-readable milliseconds with 2 digits decimal precision.

    :param seconds: Seconds to convert
    :type seconds: Union[int, float]
    :return: The same time expressed in milliseconds, with 2 digits of decimal precision
    :rtype: float
    """
    return round(seconds * 1000, 2)
def categorize_values(categorize_value):
    """Classify into financial class."""
    compare_value = float(categorize_value[0])
    if compare_value < 2:
        categorize_value[0] = 'lower'
    if compare_value >= 2 and compare_value < 4:
        categorize_value[0] = 'middle'
    if compare_value >= 4:
        categorize_value[0] = 'upper'
    return categorize_value
def parse_hostname_from_filename(file):
    """Parses hostname from filename."""
    # strip path
    hostname = file.split("/")[-1].split("\\")[-1]
    # strip extensions
    hostname = hostname.split(".")[0]
    return hostname
def findFreeProjects(projAssignments, projCaps, lectAssignments, lectCaps, lectProjs):
    """
    :rtype: list
    """
    freeProjs = []
    for lecturer in lectCaps.keys():
        if lecturer not in lectAssignments:
            lectAssignments.update({lecturer: []})
        if len(lectAssignments[lecturer]) < int(lectCaps[lecturer]):
            for project in lectProjs[lecturer]:
                if project not in projAssignments:
                    projAssignments.update({project: []})
                if len(projAssignments[project]) < int(projCaps[project]):
                    freeProjs.append(project)
    return freeProjs
def f(i):
    """Term to sum."""
    return (i * 2**i) ** 0.5
def convert_bytes(bytes):
    """Convert bytes into human readable"""
    bytes = float(bytes)
    if bytes >= 1099511627776:
        terabytes = bytes / 1099511627776
        size = '%.2fT' % terabytes
    elif bytes >= 1073741824:
        gigabytes = bytes / 1073741824
        size = '%.2fG' % gigabytes
    elif bytes >= 1048576:
        megabytes = bytes / 1048576
        size = '%.2fM' % megabytes
    elif bytes >= 1024:
        kilobytes = bytes / 1024
        size = '%.2fK' % kilobytes
    else:
        size = '%.2fb' % bytes
    return size
def _replace_tabs(s, ts=8):
    """Replace the tabs in 's' and keep its original alignment with a tab stop of 'ts'."""
    result = ''
    for c in s:
        if c == '\t':
            while True:
                result += ' '
                if len(result) % ts == 0:
                    break
        else:
            result += c
    return result
import requests
from xmlrpc.client import ServerProxy
from redis import Redis


def endpoint_request(prtc, addr, rqm):
    """Send request to a communication endpoint.

    :param prtc: Communication protocol HTTP/RPC/MQTT
    :param addr: Endpoint address
    :param rqm: Request model dict (payload)
    :return: Request result
    """
    # return 4  # Dummy data (stub disabled so the protocol handling below runs)
    if prtc == 'HTTP':
        res = requests.get(f'http://{addr}', headers=rqm.get('headers'), data=rqm.get('payload'))
        return res.text
    elif prtc == 'RPC':
        rpc = ServerProxy(f'http://{addr}')
        res = eval(f"rpc.{rqm.get('function')}({rqm.get('parameter')})")
        return res
    elif prtc == 'MQTT':
        redis = Redis(host='redis', port=6379)
        return redis.get('addr')
def split_comments(comments):
    """Split COMMENTS into flag comments and other comments.

    Flag comments are those that begin with '#,', e.g. '#,fuzzy'."""
    flags = []
    other = []
    for c in comments:
        if len(c) > 1 and c[1] == ',':
            flags.append(c)
        else:
            other.append(c)
    return flags, other
def subset_dict(dictionary={}, subset_size=1):
    """Make a subset of a dictionary.

    Parameters
    ----------
    dictionary : A `dict` to filter
    subset_size : Size of new dictionary. Default is 1.

    Returns
    -------
    `dict` : New dictionary containing only the first `subset_size` items.
    """
    newDict = {k: v for k, v in list(dictionary.items())[:subset_size]}
    return newDict
def nextcard(a):
    """Returns the card that comes after a."""
    return 1 + a % 12
def type_to_string(type_obj):
    """Given a python type, return its JSON schema string counterpart."""
    type_str = type_obj.__name__
    if type_str == "str":
        return "string"
    if type_str == "float":
        return "number"
    if type_str == "int":
        return "integer"
    if type_str == "bool":
        return "boolean"
    if type_str == "NoneType":
        return "null"
    if type_str == "list":
        return "array"
    return "object"
def find_first(item, vec):
    """Return the index of the first occurrence of item in vec."""
    for i in range(len(vec)):
        if item == vec[i]:
            return i
    return -1
def get_query(queries):
    """Parse the query stored in JSON format in the case_detail of the test_case table.

    Args:
        queries: query from Excel as Text

    Returns:
        Parsed queries
    """
    query = queries["query"]
    return query
def extract_family_name(full_name):
    """Extract and return the family name from a string in this form
    "family_name; given_name".

    For example, if this function were called like this:
    extract_family_name("Brown; Sally"), it would return "Brown".
    """
    # Find the index where "; " appears within the full name string.
    semicolon_index = full_name.index("; ")
    # Extract a substring from the full name and return it.
    family_name = full_name[0:semicolon_index]
    return family_name
def FIT(individual):
    """Sphere test objective function.

    F(x) = sum_{i=1}^d xi^2
    d = 1, 2, 3, ...
    Range: [-100, 100]
    Minima: 0
    """
    return sum(x**2 for x in individual)
def generate_consortium_members(authors):
    """Generate the list of consortium members from the authors."""
    # Consortium members are all authors who are not consortia
    # Sort members by the last token of their name
    consortium_members = [author["name"] for author in authors
                          if "consortium" not in author or not author["consortium"]]
    return sorted(consortium_members, key=lambda name: name.split()[-1])
def data_ready(req, cache):
    """Checks that all required data are in the data_cache.

    :param req: string or list of strings containing the keys of required data in cache
    :param cache: dictionary with the computed data
    :return: Boolean indicating whether all required data are in cache
    """
    if not isinstance(req, list):
        req = [req]
    return all([r in cache for r in req])
def strip_hex_prefix(x):
    """Strips possible hex prefixes from the string.

    :param x: string that may start with '0x' or '\\x'
    :return: the string without the prefix
    """
    if x.startswith('0x'):
        return x[2:]
    if x.startswith('\\x'):
        return x[2:]
    return x
def f_to_k(tempe):
    """Receives a temperature in Fahrenheit and returns it in Kelvin."""
    return (tempe - 32) * 5 / 9 + 273.15
def sum_by_elem(p, q):
    """Reduce function: sums each coordinate of 2 items.

    p, q: tuples of (tuple of floats: coordinates, int)
    Returns a tuple of (tuple of summed floats, summed int)
    """
    p, num1 = p
    q, num2 = q
    tup = map(sum, zip(p, q))
    return (tuple(tup), num1 + num2)
def extract_cds_lines(all_bed_lines):
    """Extract bed lines with CDS mark."""
    selected = []
    for line in all_bed_lines.split("\n"):
        if line == "":
            continue
        if line.split("\t")[3].endswith("_CDS"):
            selected.append(line)
    return "\n".join(selected) + "\n"
def parse_input_to_id_dicts(in_data):
    """Takes a list of file lines and parses it into a list of dictionaries,
    one dict per id: field --> value."""
    id_dicts = []
    num_lines = len(in_data)
    idx = 0
    while idx < num_lines:
        id_dict = dict()
        line = in_data[idx]
        while line != '':
            fields = line.split(' ')
            for field in fields:
                key, value = field.split(":")
                id_dict[key] = value
            idx += 1
            if idx >= num_lines:
                break
            line = in_data[idx]
        id_dicts.append(id_dict)
        idx += 1
    return id_dicts
def select_corpus(number):
    """Define the different corpora that can be used in the analysis."""
    # default
    name_of_folder = ""
    filename = ""
    pathLoad = ""
    language = "en"
    delimiter = ","
    column_name = "text"
    document_level = True

    if number == 0:
        name_of_folder = "de_BGH"
        filename = "BGH_df_2019-12-13.csv"
        pathLoad = r"data\open_legal_data"
        language = "de"
        delimiter = ","
        column_name = "content"
    elif number == 1:
        name_of_folder = "en_supreme_court_r_v2"
        filename = "Test_Judge.csv"
        pathLoad = r"C:\Users\mauro\Desktop\LawProject"
        language = "en"
        delimiter = "\t"
        column_name = "text"
    elif number == 4:
        name_of_folder = "de_StR_r"
        filename = "BGH_df_2019-12-13_strafrecht.csv"
        pathLoad = (
            r"C:\Users\mauro\OneDrive\Dokumente\Python_Scripts\LawProject\openlegaldata"
        )
        language = "de"
        delimiter = ","
        column_name = "content"
    elif number == 5:
        name_of_folder = "de_Zivil_r"
        filename = "BGH_df_2019-12-13_zivilrecht.csv"
        pathLoad = (
            r"C:\Users\mauro\OneDrive\Dokumente\Python_Scripts\LawProject\openlegaldata"
        )
        language = "de"
        delimiter = ","
        column_name = "content"
    elif number == 6:
        name_of_folder = "de_en"
        filename = "europarl-v7.de-en.de"
        pathLoad = r"C:\Users\mauro\Desktop\LawProject"
        language = "de"
    elif number == 7:
        name_of_folder = "de_en"
        filename = "europarl-v7.de-en.en"
        pathLoad = r"C:\Users\mauro\Desktop\LawProject"
        language = "en"
    elif number == 2:
        name_of_folder = "de_RCv2_skip"
        filename = "german_RCv2.csv"
        pathLoad = (
            r"C:\Users\mauro\OneDrive\Dokumente\Python_Scripts\LawProject\reuters"
        )
        language = "de"
        delimiter = ";"
        column_name = "text"
    elif number == 3:
        name_of_folder = "en_RCv1_skip"
        filename = "reuters_RCv1.csv"
        pathLoad = (
            r"C:\Users\mauro\OneDrive\Dokumente\Python_Scripts\LawProject\reuters"
        )
        language = "en"
        delimiter = ";"
        column_name = "text"
    elif number == 10:
        name_of_folder = "de_BGH_r"
        filename = "BGH_df_2019-12-13.csv"
        pathLoad = r"C:\Users\maurol\OneDrive\Dokumente\Python_Scripts\LawProject\openlegaldata"
        language = "de"
        delimiter = ","
        column_name = "content"

    corpus_info = {}
    corpus_info["name_of_folder"] = name_of_folder
    corpus_info["filename"] = filename
    corpus_info["pathLoad"] = pathLoad
    corpus_info["language"] = language
    corpus_info["delimiter"] = delimiter
    corpus_info["column_name"] = column_name
    corpus_info["document_level"] = document_level
    return corpus_info
def get_fuel(mass):
    """Return complete fuel needed."""
    out = 0
    fuel = mass // 3 - 2
    while fuel > 0:
        out += fuel
        fuel = fuel // 3 - 2
    return out
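A quick sanity check of get_fuel, which keeps adding fuel for the fuel itself until the increment drops below one (values worked by hand):

    print(get_fuel(14))    # 14//3 - 2 = 2; 2//3 - 2 is negative, so the total is 2
    print(get_fuel(1969))  # 654 + 216 + 70 + 21 + 5 = 966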
def resolve(ctx, token):
    """Resolve token (var or plain object) in current context and return its value."""
    return token(ctx) if callable(token) else token
def deal_deck(Nplayers, deck):
    """Give each player a card."""
    # create a list of lists to represent players hands
    players = [[] for i in range(Nplayers)]
    # iterate through deck, giving a card to each player
    for i in range(len(deck)):
        players[i % Nplayers].append(deck[i])
    return players
def overlapper(sequence, kmersize):
    """Take a fasta sequence and kmersize, return the sequence that overlaps
    from the end to the beginning, required for complete k-mer counting."""
    end = sequence[-(kmersize - 1):]
    beginning = sequence[:kmersize - 1]
    return end + beginning
def is_lower_case_letter(string):
    """Function to test whether a string is a single lower-case letter."""
    if not string:
        return False
    if len(string) == 1 and string.islower():
        return True
    return False
def Kvec(k, l, m):
    """Returns the squared magnitude of the wavenumber vector."""
    return k**2 + l**2 + m**2
def check_add_driver_args(cmd_list, conf):
    """The DRC driver takes a lot of arguments, which are encoded via a
    driver_args sublist and passed as a comma-separated list of tuples <k>:<v>."""
    args = ""
    if "driver_args" in conf:
        for k, v in conf["driver_args"].items():
            args = "{args},{key}:{value}".format(args=args, key=k, value=v)
        args = args[1:]  # remove preceding comma
        cmd_list += ["-p", "%s=%s" % ("driver_args", args)]
    return cmd_list
def sortKSUID(ksuidList):
    """Sorts a list of ksuids by their timestamp, in ascending order (oldest first)."""
    return sorted(ksuidList, key=lambda x: x.getTimestamp(), reverse=False)
def convert_to_babylonian_time(seconds):
    """Convert a time value in seconds to HH:MM:SS.

    >>> convert_to_babylonian_time(3661)
    '01:01:01'
    """
    hours = int(seconds / 3600)
    seconds %= 3600
    minutes = int(seconds / 60)
    seconds %= 60
    return "{:02d}:{:02d}:{:02d}".format(hours, minutes, seconds)
def convert_bytes(num):
    """This function will convert bytes to MB.... GB... etc."""
    step_unit = 1000.0  # use 1000 as the step; 1024 would give binary (KiB/MiB) sizes
    for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if num < step_unit:
            return "%3.1f %s" % (num, x)
        num /= step_unit
def flatten_lists(dict_item):
    """Flatten lists in dictionary values for better formatting."""
    flat_dict = {}
    for k, v in dict_item.items():
        flat_dict[k] = ",".join(v) if type(v) is list else v
    return flat_dict
def round_to_full_hour(time_step: int, base=3600) -> int:
    """Returns the given `time_step` rounded to the nearest full hour."""
    return base * round(time_step / base)
def calculate_calibrated_value(image_mean, vector):
    """Solves the calibration equation that finds the optimal low bound value for
    the saturation and value.

    :param image_mean: the mean of the image being calibrated
    :param vector: the dictionary containing the coefficients and group mean.
        Calculated using Color HSVCalibration
    :return: the optimal low bound
    """
    data_mean = vector['mean'][0]
    z_mean = data_mean[0] * vector['coefficient1'] + data_mean[1] * vector['coefficient2']
    return (z_mean - (image_mean * vector['coefficient1'])) / vector['coefficient2']
def chan_expr(printer, ast):
    """Prints a channel expression."""
    name_str = ast["name"]
    exprs_str = ''.join(map(lambda index: f'[{printer.ast_to_string(index)}]', ast["indices"]))
    return f'{name_str}{exprs_str}'
def compute_tag_stats(all_tags, segdata):
    """Counts occurrence of all considered tags.

    Parameters
    ----------
    all_tags: Considered tags
    segdata: List of segdata used

    Returns
    -------
    A dict indexed by tag name with tuples containing 0 and 1 occurrence count.
    """
    stats = {}
    for tag in all_tags:
        count0 = 0
        count1 = 0
        for data in segdata:
            for t, v in data["tag"]:
                if t == tag:
                    if v > 0:
                        count1 += 1
                    else:
                        count0 += 1
        stats[tag] = (count0, count1)
    return stats
def bisect_right(a, x, lo=0, hi=None, key=None):
    """Return the index where to insert item x in list a, assuming a is sorted.

    The return value i is such that all e in a[:i] have e <= x, and all e in
    a[i:] have e > x.  So if x already appears in the list, a.insert(x) will
    insert just after the rightmost x already there.

    Optional args lo (default 0) and hi (default len(a)) bound the
    slice of a to be searched.
    """
    if key is None:
        key = lambda x: x
    if lo < 0:
        raise ValueError('lo must be non-negative')
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (lo + hi) // 2
        if key(x) < key(a[mid]):
            hi = mid
        else:
            lo = mid + 1
    return lo
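A short usage sketch of this bisect_right variant with hypothetical example data; the key callable lets the search compare on a derived value rather than the items themselves:

    scores = [("ann", 1), ("bob", 2), ("cid", 2), ("dee", 3)]  # sorted by the second field
    idx = bisect_right(scores, ("new", 2), key=lambda t: t[1])
    print(idx)  # 3 -- just after the rightmost entry whose score is 2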
def remove_by_index(config_pool, index):
    """Remove the selected configuration."""
    for config in config_pool:
        if config.index == index:
            config_pool.remove(config)
            break
    return config_pool
def SPOT_time(tp, interval):
    """Determines if the time period is valid for generating an ITS Spot message.

    :param tp: float of current time period
    :param interval: float of the frequency for checking ITS Spot behavior record triggers
    :return: True if the tp is valid, otherwise False
    """
    l = [str(x) for x in range(0, 10, int(str(interval)[-1]))]
    if str(tp)[-1] in l:
        return True
    return False
def check_compatibility(seq1, seq2):
    """Function that takes as input two DNA sequences and checks whether their
    alphabets have at least one element in common. This is due to an old bug in edlib."""
    for base in seq1:
        for base2 in seq2:
            if base == base2:
                return True
    return False
def question2_1(input_list):
    """Remove duplicates from an unsorted list."""
    # easy method - call pythons set() function
    # Attach each item as input into a list. Check list for each new item
    cleaned_list = list()
    for letter in input_list:
        if letter in cleaned_list:
            pass
        else:
            cleaned_list.append(letter)
    return cleaned_list