def _board_to_host_link(daq_config: dict, board: int, add_crate=True) -> str:
    """Parse the daq-config to get the host, link and crate"""
    for bdoc in daq_config['boards']:
        try:
            if int(bdoc['board']) == board:
                res = f"{bdoc['host']}_link{bdoc['link']}"
                if add_crate:
                    res += f"_crate{bdoc['crate']}"
                return res
        except KeyError:
            raise ValueError(f'Invalid DAQ config {daq_config} or board {board}')
    # This happens if the board is not in the channel map which might
    # happen for very old runs.
    return 'unknown'
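A quick usage sketch for _board_to_host_link. The config layout below is a hypothetical example inferred only from the keys the function reads ('boards', 'board', 'host', 'link', 'crate'), not a real DAQ configuration.

daq_config = {
    'boards': [
        {'board': 0, 'host': 'reader0', 'link': 1, 'crate': 0},  # hypothetical entry
        {'board': 1, 'host': 'reader1', 'link': 0, 'crate': 0},
    ],
}
assert _board_to_host_link(daq_config, 0) == 'reader0_link1_crate0'
assert _board_to_host_link(daq_config, 1, add_crate=False) == 'reader1_link0'
assert _board_to_host_link(daq_config, 7) == 'unknown'  # board not in the channel map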
def prod(*args: int) -> int:
    """Return the product of `args`"""
    result = 1
    for e in args:
        result *= e
    return result
def pad_to_max_seq_length(ls, max_seq_length, pad_idx=0, pad_right=True, check=True): """Apply padding to an input sequence. Args: ls: sequence to pad. max_seq_length: max length up to which to apply padding. pad_idx: element to use for padding. pad_right: True if padding is applied to right side of sequence, False to pad on left side. check: True if result length should be checked as under the max sequence length. Returns: Sequence with specified padding applied. """ padding = [pad_idx] * (max_seq_length - len(ls)) if pad_right: result = ls + padding else: result = padding + ls if check: assert len(result) == max_seq_length return result
def _extract_prop_option(line): """ Extract the (key,value)-tuple from a string like: >>> "option foobar 123" :param line: :return: tuple (key, value) """ line = line[7:] pos = line.find(' ') return line[:pos], line[pos + 1:]
def string_ijk_for_extent_kji(extent_kji): """Returns a string showing grid extent in simulator protocol, from data in python protocol.""" return '[{:}, {:}, {:}]'.format(extent_kji[2], extent_kji[1], extent_kji[0])
def get_ctypes(ColumnsNamesType): """ in: ["Col1:Text", "Col2:INT"] out: { Col1: TEXT, Col2: INT } """ ctypes = {} for col in ColumnsNamesType: splits = col.split(":") cname = splits[0] ctype = splits[1] ctype = ctype.upper() ctypes[cname] = ctype return ctypes
def asDict(value, isRoot=True): """Answers the value as dict as root. If the value itself is not a dict, answer it as dict(value=value). For lower levels than root, answer the plain value if is it a string or a number. Basic classed don't get translated when not called as root. All other objects are called by value.asDict() If the object cannot handle that method, then convert it to string.""" d = {} if isinstance(value, dict): for key, v in value.items(): d[key] = asDict(v, False) elif isinstance(value, (int, float, str)): if isRoot: d = dict(value=value) else: d = value # On lower levels than root, just copy the value, instead of making dict. elif isinstance(value, (list, tuple)): l = [] if isRoot: d = dict(value=l) # Always answer a dict as root else: d = l # Otherwise answer the plain value. for v in value: l.append(asDict(v, False)) elif hasattr(value, 'asDict'): d = value.asDict() else: d = dict(value=str(value)) return d
def get_epsiode_ids(episode_metas):
    """
    Creates a list of the IMDb IDs of all episodes in a list of episode
    metadata dicts.
    """
    ids = []
    for ep in episode_metas:
        ids.append(ep['imdb_id'])
    return ids
def decode_word(byte1, byte2): """Bit-rotate 0xAB 0xCD into 0xDA 0xBC and bit-invert result""" word = (byte1 * 0x100 + byte2) ^ 0xffff rotated = word // 0x10 + (word % 0x10) * 0x1000 return rotated // 0x100, rotated % 0x100
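A small sanity check for decode_word, just restating the docstring's example: 0xAB 0xCD is nibble-rotated to 0xDABC and then bit-inverted, which the implementation achieves by inverting first and rotating second.

hi, lo = decode_word(0xAB, 0xCD)
# 0xABCD rotated -> 0xDABC, inverted -> 0x2543
assert (hi, lo) == (0x25, 0x43)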
def compute_new_pos(m, n, row, col, R, edge_row, edge_col, location):
    """Computes the new position of the element at (row, col) after shifting it
    by R within the m x n box in focus.
    location can be either "top", "bottom", "left" or "right"."""
    new_col = 0
    new_row = 0
    while R > 0:
        if location == "top":
            if R > col - edge_col:
                R = R - (col - edge_col)
                col = edge_col
                location = "left"    # move left <=
            else:
                new_col = col - R
                new_row = row
                R = 0
        elif location == "left":
            if R > (edge_row + m) - row:
                R = R - ((edge_row + m) - row)
                row = edge_row + m
                location = "bottom"  # move down
            else:
                new_row = R + row
                new_col = col
                R = 0
        elif location == "bottom":
            if R > (edge_col + n) - col:
                R = R - ((edge_col + n) - col)
                col = edge_col + n
                location = "right"   # move right =>
            else:
                new_col = R + col
                new_row = row
                R = 0
        elif location == "right":
            if R > row - edge_row:
                R = R - (row - edge_row)
                row = edge_row
                location = "top"     # move up
            else:
                new_row = row - R
                new_col = col
                R = 0
    return [new_row, new_col]
def all_are_none(*args) -> bool: """ Return True if all args are None. """ return all([arg is None for arg in args])
def get_resource_string(arn): """ Given an ARN, return the string after the account ID, no matter the ARN format. Arguments: arn: An ARN, like `arn:partition:service:region:account-id:resourcetype/resource` Return: String: The resource string, like `resourcetype/resource` """ split_arn = arn.split(":") resource_string = ":".join(split_arn[5:]) return resource_string
def parse_nuclide_str(nuclide: str) -> str: """ Parses a nuclide string from e.g. '241Pu' or 'Pu241' format to 'Pu-241' format. Note this function works for both radioactive and stable nuclides. Parameters ---------- nuclide : str Nuclide string. Returns ------- str Nuclide string parsed in symbol - mass number format. Examples -------- >>> rd.utils.parse_nuclide_str('222Rn') 'Rn-222' >>> rd.utils.parse_nuclide_str('Ca40') 'Ca-40' """ letter_flag, number_flag = False, False for char in nuclide: if char.isalpha(): letter_flag = True if char.isdigit(): number_flag = True if letter_flag and number_flag: break if not (letter_flag and number_flag) or len(nuclide) < 2 or len(nuclide) > 7: raise ValueError(str(nuclide) + " is not a valid nuclide string.") while nuclide[0].isdigit(): # Re-order inputs e.g. 99mTc to Tc99m. nuclide = nuclide[1:] + nuclide[0] if nuclide[0] in ["m", "n"]: nuclide = nuclide[1:] + nuclide[0] for idx in range(1, len(nuclide)): # Add hyphen e.g. Tc99m to Tc-99m. if nuclide[idx].isdigit(): if nuclide[idx - 1] != "-": nuclide = f"{nuclide[:idx]}-{nuclide[idx:]}" break return nuclide
def rgb2hex(r: int, g: int, b: int) -> str: """Convert RGB to HEX""" return "#{:02x}{:02x}{:02x}".format(r, g, b)
def format_logging(message): """Format log messages.""" if "n_documents" in message: n = message.split("n_documents: ")[1] return "Number of documents: {}".format(n) elif "vocab_size" in message: n = message.split("vocab_size: ")[1] return "Number of types: {}".format(n) elif "n_words" in message: n = message.split("n_words: ")[1] return "Number of tokens: {}".format(n) elif "n_topics" in message: n = message.split("n_topics: ")[1] return "Number of topics: {}".format(n) elif "n_iter" in message: return "Initializing topic model..." elif "log likelihood" in message: iteration, _ = message.split("> log likelihood: ") return "Iteration {}".format(iteration[1:]) else: return message
def get_census_var_string(census_vars):
    """
    Generates a formatted string of the variables to query for
    :param census_vars: a list of variables to join
    :return: a string containing the variables delimited by commas
    """
    if len(census_vars) == 1:
        return census_vars[0]
    delimiter = ","
    return delimiter.join(census_vars)
def compare_versions(v1: str, v2: str) -> int: """Compare two semver-style version strings, returning -1 if v1 < v2; +1 if v1 > v2; or 0 if the two version strings are equal.""" parts1 = v1.split(".") parts2 = v2.split(".") # Zero-fill the parts to the same length length_difference = abs(len(parts1) - len(parts2)) if length_difference > 0: if len(parts1) < len(parts2): parts1.extend(["0"] * length_difference) elif len(parts2) < len(parts1): parts2.extend(["0"] * length_difference) # Compare each part for part1, part2 in zip((int(x) for x in parts1), (int(x) for x in parts2)): if part1 < part2: return -1 elif part1 > part2: return 1 return 0
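A few illustrative calls for compare_versions, with made-up version strings, showing the zero-filling of the shorter string and the numeric (not lexicographic) comparison of parts.

assert compare_versions("1.2", "1.2.0") == 0   # "1.2" is padded to "1.2.0"
assert compare_versions("1.10", "1.9") == 1    # 10 > 9 numerically
assert compare_versions("2.0.0", "10.0") == -1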
def typeName(ty): """ Return the name of a type, e.g.: typeName(int) => 'int' typeName(Foo) => 'Foo' typeName((int,str)) => 'int or str' @param ty [type|tuple of type] @return [str] """ if isinstance(ty, tuple): return " or ".join(t.__name__ for t in ty) else: return ty.__name__
def backtrack(node): """Iterate back through each path node to return a list of move positions""" steps = [] while node is not None: steps.append(node.position) node = node.parent return steps[::-1]
def needs_quote(arg): """Return True if the given string needs to be shell-quoted. Quoting is need if any of the following are found: * a double quotation mark (") * a single quotation mark (') * whitespace """ for c in arg: if c in ('"', "'"): return True if c.isspace(): return True else: return False
def get_search_threshs(word_counts, upper_thresh, lower_thresh): """Clips the thresholds for binary search based on current word counts. The upper threshold parameter typically has a large default value that can result in many iterations of unnecessary search. Thus we clip the upper and lower bounds of search to the maximum and the minimum wordcount values. Args: word_counts: list of (string, int) tuples upper_thresh: int, upper threshold for binary search lower_thresh: int, lower threshold for binary search Returns: upper_search: int, clipped upper threshold for binary search lower_search: int, clipped lower threshold for binary search """ counts = [count for _, count in word_counts] max_count = max(counts) min_count = min(counts) if upper_thresh is None: upper_search = max_count else: upper_search = max_count if max_count < upper_thresh else upper_thresh if lower_thresh is None: lower_search = min_count else: lower_search = min_count if min_count > lower_thresh else lower_thresh return upper_search, lower_search
def getAsciiFileExtension(proxyType): """ The file extension used for ASCII (non-compiled) proxy source files for the proxies of specified type. """ return '.proxy' if proxyType == 'Proxymeshes' else '.mhclo'
def strip_uri_host_and_path(concept_uri): """remotes the host and path from a URI, returning only the final resource name """ if concept_uri is None: return None rightmost_slash_position = concept_uri.rfind('/') rightmost_colon_position = concept_uri.rfind(':') simplified_start = max(0, rightmost_slash_position + 1, rightmost_colon_position + 1) return concept_uri[simplified_start:]
def PNT2TidalOcto_Tv14(XA,beta0PNT=0): """ TaylorT2 0PN Octopolar Tidal Coefficient, v^14 Timing Term. XA = mass fraction of object beta0PNT = 0PN Octopole Tidal Flux coefficient """ return (4)/(3)*(520+beta0PNT)-(2080*XA)/(3)
def get_data_type(data): """Returns a string representation of input argument type""" return str(type(data))
def to_correct_color(value):
    """Maps a boolean flag (True/1) to 'success' and anything else to 'danger'."""
    return 'success' if value == True else 'danger'
def overlap(tag, srl_args): """Checks if a tag is in the set of SRL tags to include in the textpiece. :param tag (str): a pos tag from SRL output, e.g. 'B-V'. :param srl_args (list): a list of SRL tags to include in the textpiece set in the config, e.g. ['V', 'A1']. :return (bool): a boolean indicating if tag is in srl_args. """ flag = False if srl_args == 'all': if tag != 'O': flag = True else: tag = tag.split('-') for srl_arg in srl_args: if srl_arg in tag: flag = True break return flag
def createRegionStr(chr, start, end=None): """ Creating "samtools"-style region string such as "chrN:zzz,zzz,zzz-yyy,yyy,yyy". If end is not specified, it will create "chrN:xxx,xxx,xxx-xxx,xxx,xxx". :param chr: :param start: :param end: :return: A string of samtool-style region """ if end == None: return str(chr) + ":" + str(int(start)) + "-" + str(int(start)) elif end is not None: return str(chr) + ":" + str(int(start)) + "-" + str(int(end))
def _get_marker_param(params): """Extract marker id from request's dictionary (defaults to None).""" return params.pop('marker', None)
def increment(string): """Add 1 to the int in that string >>> increment('1') == '2' True """ return str(int(string) + 1)
def mergeSPDX(spdxA: str, spdxB: str) -> str: """Combine the spdx ids. Args: spdxA (str): spdx of the first license spdxB (str): spdx of the second license Returns: str: combined spdx """ if len(spdxA) == 0: return spdxB if len(spdxB) == 0: return spdxA return spdxA + "+" + spdxB
def find_old_backup(bak_dir_time_objs,recurse_val = 0): """ Find oldest time object in "bak_dir_time_objs" structure. recurse_val = 0 -> start with top entry "year", default """ tmp = [] for timeobj in bak_dir_time_objs: tmp.append(timeobj[recurse_val]) min_val = min(tmp) # find minimum time value new_timeobj = [] for timeobj in bak_dir_time_objs: if(timeobj[recurse_val] == min_val): new_timeobj.append(timeobj) if (len(new_timeobj) > 1): return find_old_backup(new_timeobj,recurse_val+1) # recursive call from year to minute else: return new_timeobj[0]
def prepare_malware_keywords(malware_keywords):
    """
    Lower-cases the keys of malware_keywords and returns the resulting
    dictionary, so that lookups are case insensitive.

    args
        malware_keywords -- the keywords of malwares to the number of times they appear
    """
    _lower_mk = {}
    for k, v in malware_keywords.items():
        _lower_mk[k.lower()] = v
    return _lower_mk
def validate_positional_constraint(positional_constraint): """ Validate positional constraint for ByteMatchStatement Property: ByteMatchStatement.PositionalConstraint """ VALID_POSITIONAL_CONSTRAINTS = ( "CONTAINS", "CONTAINS_WORD", "ENDS_WITH", "EXACTLY", "STARTS_WITH", ) if positional_constraint not in VALID_POSITIONAL_CONSTRAINTS: raise ValueError( "ByteMatchStatement PositionalConstraint must be one of: %s" % ", ".join(VALID_POSITIONAL_CONSTRAINTS) # NOQA ) return positional_constraint
def __format_chrom(chrom): """ @abstract Format chrom name to keep the chroms used in this script in the same style @param chrom Chrom name [str] @return Formatted chrom name [str] """ return chrom[3:] if chrom.startswith("chr") else chrom
def get_attribute(obj, attr): """ Return object attribute value, if it exists. Arguments: obj (object): The object. attr (str): The name of the object attribute. """ at = getattr(obj, attr, None) if at: return at else: return None
def _countResidueAtoms(elements): """Count the number of atoms of each element in a residue.""" counts = {} for element in elements: if element in counts: counts[element] += 1 else: counts[element] = 1 return counts
def _hex_to_bgr(color): """Convert hex color to BGR""" # Check for the right format of hex value if len(color) != 6: raise ValueError("Hex color value {} invalid".format(color)) # Convert to BGR try: return tuple(int(color[i : i + 2], 16) for i in (4, 2, 0)) except ValueError: raise ValueError("Hex color value {} invalid".format(color))
def validate(attrs): """ No-op function which will validate the client-side data. Werkzeug will throw an exception if you try to access an attribute that does not have a key for a MultiDict. """ try: #required_attributes = ('qquuid', 'qqfilename') #[attrs.get(k) for k,v in attrs.items()] return True except Exception as e: return False
def minbias_jet_weight_true(x): """ reweights corrected jet pt from minbias to mock up triggered spectrum """ return 1.149 - 0.2655*x + 0.01857*(x**2) - 0.0003445*(x**3)
def bmi(height, weight): """Returns the BMI of a client with the given values. Formula used is: BMI = (weight * 703) / height ^2 """ return round(((weight * 703) / height**2), 1)
def _is_dim(obj): """Return True if the object is a shape dimension.""" return (isinstance(obj, tuple) and len(obj) == 2 and isinstance(obj[0], int) and isinstance(obj[1], int))
def dms(d,m,s): """dms - degrees from degrees, minutes, seconds""" return d + m/60.0 + s/3600.0
def get_track_words(words_per_track,hour_count,lst): """ read a list with words in gn """ i = hour_count * words_per_track j = i + words_per_track - 1 return lst[i:j]
def merge(dst, src, path=None, abort_conflict=False): """ Deep merges dictionary object b into a. :param dst: :param src: :return: """ if dst is None: return None if src is None: return dst if path is None: path = [] for key in src: if key in dst: if isinstance(dst[key], dict) and isinstance(src[key], dict): merge(dst[key], src[key], path + [str(key)], abort_conflict) elif dst[key] == src[key]: pass # same leaf value elif abort_conflict: raise ValueError('Conflict at %s' % '.'.join(path + [str(key)])) else: dst[key] = src[key] else: dst[key] = src[key] return dst
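A brief usage sketch for merge, with made-up dictionaries. Nested dicts are merged recursively into dst in place; on conflicting leaves the value from src wins unless abort_conflict=True, in which case a ValueError is raised.

dst = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
src = {'db': {'port': 5433, 'user': 'admin'}, 'log_level': 'INFO'}
merged = merge(dst, src)
assert merged is dst  # merged in place
assert dst == {
    'db': {'host': 'localhost', 'port': 5433, 'user': 'admin'},
    'debug': False,
    'log_level': 'INFO',
}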
def unique(seq): """Return True if there are no duplicate elements in sequence seq.""" for i in range(len(seq)): for j in range(i+1, len(seq)): if seq[i] == seq[j]: return False return True
def writeTerm(index): """ Writes the basis function term with the given index """ global coeffs coeffs = [] eqtn = "" term_index = 0 for i in range(1, 9): for j in range(1, 13): # 12 term_index += 1 coeffs.append([i, j / 8.0, 0]) if index == term_index: eqtn = eqtn + " %s^%d * %s^%.3f" % ("D", i, "T", j / 8.0) for i in range(1, 6): for j in range(1, 24): # 24 term_index += 1 coeffs.append([i, j / 8.0, 1]) if index == term_index: eqtn = eqtn + " %s^%d * %s^%.3f * exp(-D^1)" % ("D", i, "T", j / 8.0) for i in range(1, 6): for j in range(1, 30): # 24 term_index += 1 coeffs.append([i, j / 8.0, 2]) if index == term_index: eqtn = eqtn + " %s^%d * %s^%.3f * exp(-D^2)" % ("D", i, "T", j / 8.0) for i in range(2, 5): for j in range(24, 38): # 38 term_index += 1 coeffs.append([i, j / 2.0, 3]) if index == term_index: eqtn = eqtn + " %s^%d * %s^%.3f * exp(-D^3)" % ("D", i, "T", j / 8.0) return eqtn
def is_title(line):
    """
    Checks if a line contains a Wiki title.

    Returns
    --------
    str or None
        The line with all '=' characters removed and its final character
        dropped if the line is a title, otherwise None.
    """
    if len(line) > 3 and line[-2:] == "==":
        return line.replace("=", "")[:-1]
    return None
def overlap(a1, a2, b1, b2): """Test whether two value ranges overlap. This function is typically used with date values, but it also works with integers or other comparable values. The following examples use integer values to be more readable. Unlike the test presented at <http://bytes.com/topic/python/answers/457949-determing-whether-two-ranges-overlap>, this works also with "open" ranges (the open end being indicated by a `None` value). Types of constellations:: - o---o o---o - o---o o---> - <---o o---o - <---o o---> - o-------------> o---o - o---o o---o - o---o o---> - <---------o o---o >>> overlap(1,2,3,4) False >>> overlap(3,4,1,2) False >>> overlap(1,3,2,4) True >>> overlap(2,4,1,3) True >>> overlap(1,4,2,3) True >>> overlap(2,3,1,4) True >>> overlap(1,None,3,4) True >>> overlap(3,4,1,None) True >>> overlap(1,2,3,None) False >>> overlap(3,None,1,2) False >>> overlap(None,2,3,None) False >>> overlap(3,None,None,2) False >>> overlap(1,3,2,None) True Ranges that "only touch" each other are not considered overlapping: >>> overlap(1,2,2,3) False >>> overlap(2,3,1,2) False """ #~ return a2 > b1 and a1 < b2 if a2: if b1: if b1 >= a2: return False else: if b2 and a1: if a1 > a2: raise ValueError("Range 1 ends before it started.") return b2 > a1 else: return True else: if b2 and a1: return b2 >= a1 else: return True elif b2: if a1: return b2 > a1 else: return True return True
def _elem2idx(list_of_elems, map_func): """ :param list_of_elems: list of lists :param map_func: mapping dictionary :returns list with indexed elements """ return [[map_func[x] for x in list_of] for list_of in list_of_elems]
def subnet_id_lookup(session, subnet_domain): """Lookup the Id for the Subnet with the given domain name. Args: session (Session|None) : Boto3 session used to lookup information in AWS If session is None no lookup is performed subnet_domain (string) : Name of Subnet to lookup Returns: (string|None) : Subnet ID or None if the Subnet could not be located """ if session is None: return None client = session.client('ec2') response = client.describe_subnets(Filters=[{"Name": "tag:Name", "Values": [subnet_domain]}]) if len(response['Subnets']) == 0: return None else: return response['Subnets'][0]['SubnetId']
def make_rpc_name(name: str) -> str: """ Convert python compatible name to Transmission RPC name. """ return name.replace("_", "-")
def count_string(s_string): """ >>> s = '""' >>> count_string(s) 2 >>> s = '"abc"' >>> count_string(s) 2 """ s_value = eval(s_string) return len(s_string) - len(s_value)
def _strip_after_new_lines(s): """Removes leading and trailing whitespaces in all but first line.""" lines = s.splitlines() if len(lines) > 1: lines = [lines[0]] + [l.lstrip() for l in lines[1:]] return '\n'.join(lines)
def find_primes_sieve(upper_bound):
    """
    Returns all primes up to upper_bound (exclusive) using the sieve of
    Eratosthenes. The numbers are returned as a list.

    Example: find_primes_sieve(7) -> [2, 3, 5]
    """
    # 1 marks a potential prime number, 0 marks a non-prime number
    # indexes 0 and 1 are not touched
    is_prime = [1] * upper_bound
    # list of found primes
    primes = []
    for n in range(2, upper_bound):
        if not is_prime[n]:
            # n was marked as non-prime
            continue
        # n is a prime number
        primes.append(n)
        # mark all multiples of n as non-prime; this is only necessary while
        # n * n <= upper_bound, because every composite below n * n has a
        # smaller prime factor and has therefore already been marked
        if n * n <= upper_bound:
            m = 2 * n
            while m < upper_bound:
                is_prime[m] = 0
                m += n
    return primes
def swap(arr, axis): """ swap arr[axis] and arr[-1] """ return arr[:axis] + [arr[-1]] + arr[axis+1:-1] + [arr[axis]]
def simplify_mod_product(base: int, factor: int, value: int) -> int: """ Assuming (factor * k) % base == value, return v such that k % base == v Also assumes that base is prime. There seem to be efficient ways to compute this result (https://en.wikipedia.org/wiki/Modular_arithmetic#Properties) but the numbers in this problem are relatively small so brute forcing it is fast enough. This function's big O runtime is linear to the magnitude of each base, which is several hundred. """ return next(i for i in range(base) if factor * i % base == value)
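A tiny worked example for simplify_mod_product with arbitrary small numbers: if 3 * k % 7 == 2, then k % 7 == 3, since 3 * 3 = 9 and 9 % 7 == 2.

assert simplify_mod_product(base=7, factor=3, value=2) == 3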
def pad(value, length=2):
    """
    Adds leading zeros to value ("pads" the value).

    >>> pad(5)
    '05'
    >>> pad(9, 3)
    '009'

    :param int value: integer value to pad
    :param int length: Length to pad to
    :return: string of padded value
    :rtype: str
    """
    return "{0:0>{width}}".format(value, width=length)
def kgraph_is_weighted(message): """Check whether knowledge graph edges have weights. Only valid if message has local knowledge graph. """ return all('weight' in edge for edge in message['knowledge_graph']['edges'])
def isNan(value):
    """
    Because np.NaN != np.NaN, comparing a value with itself always returns
    True, except when the value is NaN.

    :param value: value to check
    :return: True if the given value is null or np.NaN.
    """
    if value == value:
        return False
    else:
        return True
def arrayizeDict(g):
    """Transforms a dict whose keys are unique, sequential 1-based integers
    into a list (key k of the dict becomes index k - 1)."""
    mk = max(g.keys())
    ga = [None] * mk
    for k, v in g.items():
        ga[k - 1] = v
    return ga
def rounder(delta): """ Method to obtain number of decimal places to report on plots Args: delta: (float), a float representing the change in two y values on a plot, used to obtain the plot axis spacing size Return: (int), an integer denoting the number of decimal places to use """ if 0.001 <= delta < 0.01: return 3 elif 0.01 <= delta < 0.1: return 2 elif 0.1 <= delta < 1: return 1 elif 1 <= delta < 100000: return 0 else: return 0
def sort_wc(w_c, sort_key):
    """Sorts the dictionary and returns a sorted dictionary.

    Args:
        w_c: the word-count dictionary
        sort_key: 0 to sort by key, 1 to sort by value

    Returns:
        sorted dictionary
    """
    sorted_w_c = {}
    # sorted is a built-in function and returns a sorted list
    # if sort_key is 1 - sort on value in the dictionary
    # if sort_key is 0 - sort on key in the dictionary
    if sort_key == 1:
        sorted_list = sorted(w_c, key=w_c.get, reverse=True)
    else:
        sorted_list = sorted(w_c, reverse=True)
    # build the sorted dictionary
    for word in sorted_list:
        sorted_w_c[word] = w_c[word]
    return sorted_w_c
def default_grouping(datasets, date_field=None): """ Heuristic for default boxplot grouping """ if datasets > 20 and date_field: # Group all data by year return "year" elif datasets > 10 or not date_field: # Compare series but don't break down by year return "series" else: # 10 or fewer datasets, break down by both series and year return "series-year"
def integer_to_letter(i): """ Return the capital letter associated with the input integer (1=A, 2=B, etc.) :param: i The integer to change to letter :return: str The capital letter as a string """ return chr(i + 64)
def to_number(mapping, bind, value): """ Transforms value to number """ if value: return float(value) return None
def preprocess_filters(documents: list) -> list: """ Preprocesses a specific format of filters Each clause (split by AND) of a filter becomes a separate filter Also include the combined set of filters (combined with AND) :param documents: The list of documents with filters :return: The updated list of documents """ for document in documents: if 'filters' not in document: continue filters = [] for value in document['filters']: clauses = value.split(' AND ') filters += clauses if len(clauses) > 1: filters.append(value) if len(document['filters']) > 1: filters.append(' AND '.join(document['filters'])) document['filters'] = filters return documents
def most_affected_area(affected_areas_count): """Find most affected area and the number of hurricanes it was involved in.""" max_area = 'Central America' max_area_count = 0 for area in affected_areas_count: if affected_areas_count[area] > max_area_count: max_area = area max_area_count = affected_areas_count[area] return max_area, max_area_count
def sort_results(p_tuples): """Sort the results according to stats.""" # Scaling the number of backlinks p_by_backlinks = sorted(p_tuples, key=lambda popularity: popularity.backlinks, reverse=True) for index, p_info in enumerate(p_by_backlinks): p_info.backlinks = 1 / (float(index) + 1) # Scaling the number of clicks p_by_clicks = sorted(p_by_backlinks, key=lambda popularity: popularity.clicks, reverse=True) for index, p_info in enumerate(p_by_clicks): p_info.clicks = 1 / (float(index) + 1) # Scaling the number of Tor2web p_by_tor2web = sorted(p_by_clicks, key=lambda popularity: popularity.tor2web, reverse=True) for index, p_info in enumerate(p_by_tor2web): p_info.tor2web = 1 / (float(index) + 1) p_by_sum = sorted(p_by_tor2web, key=lambda popularity: popularity.sum(), reverse=True) answer = [] for p_info in p_by_sum: answer.append(p_info.content) return answer
def boardToArray(board): """ Function that converts a board (in FEN notation) in a list of lists of lines of the chess board Parameters : -board : the current board Returns : -arrayBoard : a list composed of lists(=lines) of the strings(=pieces) of the boards """ #replacement of the number in the string by the equivalent number of dashes board = board.replace("1","-").replace("2","--").replace("3","---").replace("4","----")\ .replace("5","-----").replace("6","------").replace("7","-------").replace("8","--------") currentLine=[] listBoard=[] #reads the board string char by char in a 'for' loop for i,char in enumerate(board): #When a space char ' ' is read, the loop is stopped and the last read line is #added to the list of lines if(char==' '): listBoard.append(currentLine) break #When a slash '/' is read (= every multiple of 9 char), a new list is created, which will contain the #next line to add and the last read line is added to the list of lines if((i+1)%9==0): listBoard.append(currentLine) currentLine=[] #Adds the current char in the current list of char (= the current line) else: currentLine.append(char) return(listBoard)
def fit(text, length, orientation="l", fillCharakter=" ", endCharakter=" ..."):
    """Return the input text padded (left-, right- or center-aligned) or
    truncated to at most `length` characters."""
    text, length, fillCharakter = str(text), int(length), str(fillCharakter)
    if len(text) <= length:
        orientation = orientation.lower()[0]
        if orientation == "c":
            leftLen = (length - len(text)) // 2
            rightLen = length - len(text) - leftLen
            return "{}{}{}".format(leftLen * fillCharakter, text, rightLen * fillCharakter)
        elif orientation == "r":
            return "{}{}".format((length - len(text)) * fillCharakter, text)
        else:
            return "{}{}".format(text, (length - len(text)) * fillCharakter)
    else:
        return "{}{}".format(text[0:length - len(endCharakter)], endCharakter)
def _simplifiedSV(x, f, k): """ fitting function according to the common two site model. In general, x represents the pO2 or pCO2 content, whereas m, k and f are the common fitting parameters :param x: list :param k: np.float :param f: np.float :return: iratio: normalized signal i0/i """ return 1 / (f / (1. + k*x) + (1.-f))
def get_reciprocal(n: int) -> list: """ Returns the reciprocal of a number. :param n: The number to find the reciprocal of. :return: First n-non-repeating digits reciprocal. """ # quotient = [0, 0, 1, 2, 3, 4] = 0.001234 quotient: list = list() remainders: list = list() dividend = 1 repeats: bool = False while not repeats: # Carry over if n > dividend: quotient.append(0) dividend = dividend * 10 remainders.append(dividend) # Divide if dividend % n == 0: quotient.append(dividend // n) repeats = True # Divide and calculate remainder elif n < dividend: quotient.append(dividend // n) dividend = dividend % n * 10 if dividend in remainders: repeats = True remainders.append(dividend) # Remove first 0 quotient.pop(0) return quotient
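Two quick checks of get_reciprocal on hand-picked inputs: 1/8 = 0.125 terminates, while 1/7 = 0.142857... stops once a remainder repeats.

assert get_reciprocal(8) == [1, 2, 5]           # 1/8 = 0.125
assert get_reciprocal(7) == [1, 4, 2, 8, 5, 7]  # 1/7 = 0.142857...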
def ni_to_hr(ni, f): """Calculate heart rate in beat/min from estimated interval length Args: ni (int): estimated inter-beat interval length f (float): in Hz; sampling rate of input signal Returns: float: heart rate in beat/min """ if ni == -1: return -1 return 60. * f / ni
def check_config(cred): """ Check whether the credentials are valid or not. Args: ----- :cred : dict Credentials dictionary Raises: ------- None Returns: -------- TYPE: Bool True if valid else False """ for key in cred: if cred[key] == "XXXX": return False return True
def get_labels(label_intervals): """ Calculates each interval performance. :param label_intervals: List of time series. :return: - labels: The performance related to each element in the input, calculated as the subtraction between its last and first element. """ labels = [] for data_ in label_intervals: labels.append(data_[-1] - data_[0]) return labels
def calculateChecksum(message): """ Calculates the checksum of a raw message. :param message: str :return: int """ checksum = sum(ord(ch) for ch in message) return checksum % 256
def camel_case_to_capitalized_spaced(string):
    """
    Split a string at its upper case letters and return the words joined by
    spaces, each word capitalized, e.g. 'camelCaseString' -> 'Camel Case String'.

    @return capitalized, space-separated string (str)
    """
    words = []
    from_char_position = 0
    for current_char_position, char in enumerate(string):
        if char.isupper() and from_char_position < current_char_position:
            words.append(string[from_char_position:current_char_position].lower())
            from_char_position = current_char_position
    words.append(string[from_char_position:].lower())
    capitalized_words = []
    for word in words:
        capitalized_word = word.upper()[0] + word.lower()[1:]
        capitalized_words.append(capitalized_word)
    return ' '.join(capitalized_words)
def sizeof_fmt(num: float, suffix: str = 'B'): """ Given `num` bytes, return human readable size. Taken from https://stackoverflow.com/a/1094933 """ for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: if abs(num) < 1024.0: return "%3.1f%s%s" % (num, unit, suffix) num /= 1024.0 return "%.1f%s%s" % (num, 'Yi', suffix)
def diff(a, b): """ diff(a:iterable, b: iterable) a - b; elements in a but not in b args: a: iterable, b: iterable => eg: [1,2,3], [1] return: a list of (a - b) => eg: [2,3] """ ans = [] sb = set(b) for e in a: if e not in sb: ans.append(e) return ans
def match_RNA_to_DNA(rna_dic, region_dic, max_size_th=100000): """Function to match RNA to DNA region and append new information to RNA_dic""" # initialize a dict _updated_dic = {_k:_v for _k,_v in rna_dic.items()} for _k, _rdic in _updated_dic.items(): for _rid, _region in region_dic.items(): if abs(_rdic['end'] - _rdic['start']) and \ _rdic['start'] >= _region['start'] and _rdic['start'] <= _region['end'] \ and _rdic['chr'] == _region['chr']: _updated_dic[_k]['DNA_id'] = _rid return _updated_dic
def _find_permutation_of_group(g,gp): """ :args g: Unpermuted groups :args gp: Permuted groups. :args perm: Permuted of gp. """ n = len(g) perm = [] skip = [] for i in range(n): skip.append(False) # This is just for efficiency for im in range(n): for jm in range(n): if skip[jm]: continue # This is just for efficiency if gp[jm]==g[im]: perm.append(jm) skip[jm] = True break # don't keep looking if you already found the match return perm
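A short example for _find_permutation_of_group with toy groups: the returned perm maps positions in g to the matching positions in gp, i.e. gp[perm[i]] == g[i].

g = ['A1', 'B2', 'E']
gp = ['B2', 'E', 'A1']
assert _find_permutation_of_group(g, gp) == [2, 0, 1]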
def wavelength_to_standard(unit, values): """converts wavelength values to standardised representation""" if unit == 'nm': converted_values = values*1e-9 elif unit == 'um': converted_values = values*1e-6 elif unit =='a': converted_values = values*1e-10 else: converted_values = values return converted_values
def bos_markup(grp=True): """Return a regular expression to match a <bos case="..."/> element. grp: boolean indicating whether to retain case value subgroup. returns: regular expression text """ return '<bos +case="({0}.+?)"/>'.format("" if grp else "?:")
def sum_array(arr):
    """Return the sum of a list of numbers, minus the min and max values"""
    if arr and len(arr) > 1:
        return sum(sorted(arr)[1:-1])
    return 0
def is_set_nickname(string, nickname): """ Test if this is a nickname setting message """ if string.startswith(f"{nickname} set the nickname for "): return True if string.startswith(f"{nickname} set his own nickname to"): return True if string.startswith(f"{nickname} set her own nickname to"): return True if string.startswith(f"{nickname} set your nickname to"): return True return False
def get_rsi_mom(oversold, overbought, weak, strong, lt_rsi_mom):
    """
    Get relative strength index momentum

    Args:
        oversold (str): Oversold text
        overbought (str): Overbought text
        weak (str): Weak text
        strong (str): Strong text
        lt_rsi_mom (str): Long term rsi text signal

    Returns:
        str: rsi momentum text
    """
    ret = ''
    if lt_rsi_mom.lower() == 'overbought':
        ret = overbought
    if lt_rsi_mom.lower() == 'oversold':
        ret = oversold
    if lt_rsi_mom.lower() == 'weak':
        ret = weak
    if lt_rsi_mom.lower() == 'strong':
        ret = strong
    return ret
def isfloat(x, num_only=False): """Returns true if the input is a float, false otherwise.""" return type(x) == float
def been_there(state, check_dict, check): """ Returns True, if the state is already visited state: a list representing the state to be checked check_dict: a dict storing the visited states check: a boolean value, if True, marks the given state as visited if it was not so """ key = str(state) if key in check_dict: return True else: if check: check_dict[key] = True return False
def to_flamegraph(stacks, counts): """ Convert the stack dumps and sample counts into the flamegraph format. Return a list of lines. """ lines = [] for id in counts: stack = ";".join(reversed(stacks[id])) count = counts[id] lines.append('{0} {1}'.format(stack, count)) return lines
def int_parameter(level, maxval): """Helper function to scale a value between 0 and maxval and return as an int. Args: level: Level of the operation that will be between [0, `PARAMETER_MAX`]. maxval: Maximum value that the operation can have. This will be scaled to level/PARAMETER_MAX. Returns: An int that results from scaling `maxval` according to `level`. """ return int(level * maxval / 10)
def join_formatted_lines(lines): """Return the finished output""" return "\n".join(lines)
def translate_move_to_coord(move): """ Translates a move on a bord into a pair of indices. For example: {'Row':A,'Column':1} ==> (0,0) :param move: a dict of the move. :return: the pair of indices. """ return ord(move['Row']) - 65, move['Column'] - 1
def get_sql_insert_value(val) -> str: """ Returns the value that would appear in a sql insert statement (i.e. string becomes 'string', None becomes NULL) Args: val: The original value. Returns: The sql insert equivalent value. """ if val is None: return "NULL" if isinstance(val, str): escaped_val = val.replace('\n', '\\n').replace("'", "''") return f"'{escaped_val}'" return str(val)
def kurtosis(N,N2,N3,N4,**kwargs): """Calculate kurtosis in data N from averages <N^4>, <N^3>, <N^2>, and <N>.""" return (-3*(N**4) + 6*(N**2)*N2 - 4*N*N3 + N4)/((N2 - (N**2))**2)
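A small, hand-checkable consistency test for kurtosis, using a fair coin (values 0 and 1 with equal probability, a made-up example): all of its raw moments equal 0.5 and its kurtosis is exactly 1.

# For a Bernoulli(1/2) variable, <N> = <N^2> = <N^3> = <N^4> = 0.5,
# the variance is 0.25 and the fourth central moment is 0.0625, so
# kurtosis = 0.0625 / 0.25**2 = 1.
assert kurtosis(N=0.5, N2=0.5, N3=0.5, N4=0.5) == 1.0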
def _offer_label_composition(sku, offer_multiple): """Helper function to create offer name strings.""" sku_offer_label = "".join([sku, str(offer_multiple), "_offer"]) return sku_offer_label
def file_type(s):
    """
    Take in a filename and determine if the extension indicates a fasta or
    fastq file.

    Arguments
    ---------
    s : str, a filename string

    Returns
    ---------
    out : string, either 'fasta' or 'fastq' if the file has an accepted
        extension; otherwise a ValueError is raised.

    Examples
    ---------
    >>> file_type("example_file.fasta")
    'fasta'
    >>> file_type("example_file.fa")
    'fasta'
    >>> file_type("example_file.fastq")
    'fastq'
    >>> file_type("example_file.fq")
    'fastq'
    >>> file_type("example_file.txt")
    ValueError: File must be in fasta or fastq format. Accepted file extensions: fa, fq, fasta, or fastq.
    """
    suffix = s.split(".")[-1]
    if suffix == "fa" or suffix == "fasta":
        return "fasta"
    elif suffix == "fq" or suffix == "fastq":
        return "fastq"
    else:
        raise ValueError("File must be in fasta or fastq format. " +
                         "Accepted file extensions: fa, fq, fasta, or fastq.")
def is_quoted(str): """ whether or not str is quoted """ return ((len(str) > 2) and ((str[0] == "'" and str[-1] == "'") or (str[0] == '"' and str[-1] == '"')))
def build_cmd(*args, **kwargs): """ >>> build_cmd('script.py', 'train', model_pickle='tmp.pkl', shuffle=True) 'script.py train --model_pickle "tmp.pkl" --shuffle' """ options = [] for key, value in kwargs.items(): if isinstance(value, bool): if value: options.append("--%s" % key) else: options.append("--no_%s" % key) elif isinstance(value, int) or isinstance(value, float): options.append("--%s %s" % (key, value)) else: options.append('--%s "%s"' % (key, value)) return " ".join(list(args) + options)
def _get_neighbors(size, point): """Get indices of point's neighbors in square matrix of size `size`. Unless point (i, j) is on the boundary of the size x size square, this will be a list of 4 elements. Args: size: Int. point: Tuple of ints (i, j). Must satisfy 0 <= i, j < size. Returns: neighbors: List of tuples. Length 2 (if point is a corner), 3 (if point is on an edge), or 4 (if point is in the interior). """ i, j = point neighbors = [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)] _valid_neighbor = lambda neighbor: all(0 <= x < size for x in neighbor) neighbors = list(filter(_valid_neighbor, neighbors)) return neighbors
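An illustrative call to _get_neighbors on a 3x3 grid: the corner (0, 0) keeps only its two in-bounds neighbours, while an interior point keeps all four.

assert _get_neighbors(3, (0, 0)) == [(1, 0), (0, 1)]
assert _get_neighbors(3, (1, 1)) == [(0, 1), (2, 1), (1, 0), (1, 2)]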