content
stringlengths
42
6.51k
def ooi_instrument_reference_designator(reference_designator):
    """Parse an OOI Instrument Reference Designator.

    Args:
        reference_designator (str): OOI Instrument Reference Designator

    Returns:
        dict with keys "subsite", "node" and "sensor"; the sensor part keeps
        its internal hyphen (the last two '-'-separated fields are rejoined).
    """
    fields = reference_designator.split("-")
    parsed = fields[:-2] + ["-".join(fields[-2:])]
    return dict(zip(["subsite", "node", "sensor"], parsed))
def last(iterable, *default):
    """Return the last item of an iterable, or ``default`` when it is empty."""
    # Fast path: sequences support reversed(), so the last item is one step away.
    try:
        rev = reversed(iterable)
    except TypeError:
        pass
    else:
        return next(rev, *default)
    # Slow path: consume the whole iterator, remembering the latest item.
    it = iter(iterable)
    item = next(it, *default)
    for item in it:
        pass
    return item
def precision(TP, FP):
    """Precision (positive predictive value): TP / (TP + FP)."""
    predicted_positive = TP + FP
    return TP / predicted_positive
def _is_global_dataset(dataset: dict) -> bool: """Returns wether the given dataset is spotlight specific (FALSE) or non-spotlight specific (TRUE)""" return not any( [ i in dataset["source"]["tiles"][0] for i in ["{spotlightId}", "greatlakes", "togo"] ] )
def manhattan_distance(x, y):
    """Manhattan (L1) distance of the point (x, y) from the origin.

    Simplified: ``abs(0 - x)`` is just ``abs(x)``.
    """
    return abs(x) + abs(y)
def inner2D(v1, v2):
    """Calculate the inner (dot) product of two 2D vectors v1 and v2."""
    x1, y1 = v1[0], v1[1]
    x2, y2 = v2[0], v2[1]
    return x1 * x2 + y1 * y2
def slice_double_accept(xp,x,u,step,interval,lnpdf,pdf_params):
    """
    NAME:
       slice_double_accept
    PURPOSE:
       accept a step when using the doubling procedure
    INPUT:
       xp - proposed point
       x - current point
       u - log of the height of the slice
       step - step parameter w
       interval - (xl,xr)
       lnpdf - function that evaluates the log of the pdf
       pdf_params - parameters of the pdf
    OUTPUT:
       Whether to accept or not (Neal 2003)
    BUGS:
       Not as efficient as possible with lnpdf evaluations
    HISTORY:
       2009-10-30 - Written - Bovy (NYU)
    """
    (xl,xr) = interval
    # d flags whether x and xp ever ended up in different halves while
    # retracing the doubling subdivisions (Neal 2003, "Accept" procedure).
    d= False
    acceptable= True
    # Undo the doublings: halve the interval until it shrinks back to ~step
    # (the 1.1 factor guards against floating-point round-off).
    while xr-xl > 1.1*step:
        m= (xl+xr)*.5
        if (x < m and xp >= m) or (x >= m and xp < m):
            d= True
        # Keep the half that contains the proposal xp.
        if xp < m:
            xr= m
        else:
            xl= m
        # If x and xp were separated at some doubling and both edges of the
        # current interval lie below the slice, the proposal must be rejected.
        if d and lnpdf(xl,*pdf_params) <= u and lnpdf(xr,*pdf_params) <= u:
            acceptable= False
            break
    return acceptable
def contains_missing(row):
    """Return True when any entry of *row* is the missing-value marker '?'."""
    return any(e == '?' for e in row)
def osm_zoom_level_to_pixels_per_meter(
    zoom_level: float, equator_length: float
) -> float:
    """Convert an OSM zoom level to pixels per meter on the Equator.

    See https://wiki.openstreetmap.org/wiki/Zoom_levels

    :param zoom_level: integer number usually not bigger than 20, but this
        function allows any non-negative float value
    :param equator_length: celestial body equator length in meters
    """
    # One tile is 256 px wide; zoom level n splits the equator into 2**n tiles.
    tile_size = 256.0
    return 2.0 ** zoom_level / equator_length * tile_size
def _split_section_and_key(key): """Return a tuple with config section and key.""" parts = key.split(".") if len(parts) > 1: return "{0}".format(parts[0]), ".".join(parts[1:]) return "renku", key
def get_new_in_current(l_new, l_old, pattern_dict):
    """Return the known labels that were requested but already present.

    :param l_new: labels to be added
    :param l_old: labels already on the pull request
    :param pattern_dict: dictionary of patterns used to match the filenames
    :returns: labels from *l_new* that are both already in *l_old* and known
        (i.e. appear as keys in *pattern_dict*)
    :rtype: list
    """
    already_present = [label for label in l_new if label in l_old]
    return [label for label in already_present if label in pattern_dict]
def base64_add_padding(data):
    """Pad *data* with '=' so its length is a multiple of 4 (base64 decoding).

    Args:
        data: unpadded string or bytes

    Return:
        bytes: The padded bytes

    Fix: the original computed ``4 - len(data) % 4`` which is 4 (not 0) when
    the length is already a multiple of 4, appending four spurious '='
    characters that make ``base64.b64decode`` raise on otherwise-valid input.
    """
    if isinstance(data, str):
        data = data.encode('utf-8')
    missing_padding = -len(data) % 4  # 0 when already aligned
    if missing_padding:
        data += b'=' * missing_padding
    return data
def strip_prefix(string, strip):
    """Strip a prefix from a string, if the string starts with the prefix.

    :param string: String that should have its prefix removed
    :param strip: Prefix to be removed
    :return: string with the prefix removed if present, otherwise the
        original string

    Fix: the original escaped the prefix and ran ``re.match`` (with a
    per-call ``import re``) to test a literal anchored prefix —
    ``str.startswith`` does exactly that without the regex machinery.
    """
    if string.startswith(strip):
        return string[len(strip):]
    return string
def element_counts(formula_list):
    """
    Docstring for function pyKrev.element_counts
    ====================
    This function takes a list of formula strings and gives atomic counts
    for C,H,N,O,P & S.

    Use
    ----
    element_counts(Y)

    Returns a list of len(Y) in which each element is a dictionary containing
    the atomic counts.

    Parameters
    ----------
    Y: A list of elemental formula strings. All integers must be standard
    script (e.g. C6H8O7). The list should not contain isotopologues
    (e.g. C9H12O6 13C1) and should only contain C,H,N,O,P and S atoms.
    """
    elements=['C','H','N','O','P','S']
    count_list = []
    for formula in formula_list:
        element_numbers = dict()
        for element in elements:
            element_numbers[element] = 0
            # Find the first instance of the element name. If the formula
            # contains alphabetically similar two-digit element names
            # (e.g. Co) before single-digit names (e.g. C) this won't work.
            # In theory that shouldn't happen because formulas are listed
            # alphabetically, so single-character names come before
            # two-character names.
            alpha_idx = formula.find(element)
            if alpha_idx > -1:
                # Element present: it must have at least 1 atom.
                element_numbers[element] = 1
                # Grow the digit window one character at a time; int() fails
                # (ValueError) as soon as the window hits the next element
                # name, at which point the last successful parse stands.
                for i in range(alpha_idx+2,(len(formula)+1)):
                    try:
                        element_numbers[element] = int(formula[alpha_idx+1:i])
                    except ValueError:
                        break
        count_list.append(element_numbers)
    return count_list
def restore_args_with_whitespace(x):
    """Rejoin argument tokens whose trailing backslash escaped a space.

    Supports file paths containing whitespace: a token ending in ``\\`` is
    merged with the following token, the backslash replaced by a space.
    """
    merged = []
    for token in x:
        if merged and merged[-1].endswith('\\'):
            merged[-1] = merged[-1][:-1] + ' ' + token
        else:
            merged.append(token)
    return merged
def convert_hex_into_decimal(hex_number: str) -> int:
    """Convert a hexadecimal string into its decimal integer value.

    :param hex_number: hexadecimal number as a string
    :return: decimal value
    """
    base = 16
    return int(hex_number, base)
def setDefault(infile, outfile, keyfile, mode):
    """Set default params for easier testing/input in the command line.

    :param infile: file to read (defaults depend on mode)
    :param outfile: file to output (defaults depend on mode)
    :param keyfile: file to read the key from (default key used when falsy)
    :param mode: 'e' (encrypt) or 'd' (decrypt); anything else exits
    :return: (infile, outfile, key, mode) with defaults filled in

    Fix: the key file handle was opened without ever being closed; it is now
    read inside a ``with`` block.
    """
    plainfile = "Tux.ppm"
    encrypted_file = "Tux_encrypted.ppm"
    decrypted_file = "Tux_decrypted.ppm"
    if keyfile:
        with open(keyfile, 'r') as fh:  # close deterministically
            key = fh.read()
    else:
        key = 0xFFFFFFFFFFFFFFFFFFFF
    # A None mode and an unknown mode are both rejected the same way.
    if mode is None or (mode != 'e' and mode != 'd'):
        print("mode must be 'e' or 'd'")
        exit(1)
    if mode == 'e':
        if infile is None:
            infile = plainfile
        if outfile is None:
            outfile = encrypted_file
    if mode == 'd':
        if infile is None:
            infile = encrypted_file
        if outfile is None:
            outfile = decrypted_file
    return infile, outfile, key, mode
def checkFloat(value, default, allowNone=False):
    """Coerce *value* to float.

    Returns None when *allowNone* is set and the value is None (or the
    string "None"); returns *default* when conversion fails.
    """
    if allowNone and (value is None or value == "None"):
        return None
    try:
        result = float(value)
    except Exception:
        # Deliberately broad: any unconvertible value falls back to default.
        return default
    return result
def collect_alignment(adict, off):
    """Record the power-of-two alignment of *off* in histogram *adict*.

    Checks the five lowest bits of *off*; an offset with none of them set
    (including 0) is bucketed as alignment 32.  Returns the alignment.
    """
    algn = 1
    for bit in range(5):
        if off & (1 << bit):
            break
        algn *= 2
    adict[algn] = adict.get(algn, 0) + 1
    return algn
def update_label(old_label, exponent_text):
    """Fold an axis exponent into the unit bracket of an axis label.

    Copied from:
    https://ga7g08.github.io/2015/07/22/Setting-nice-axes-labels-in-matplotlib/
    """
    if exponent_text == "":
        return old_label
    # Extract the units between the outermost [ ... ]; absent brackets mean
    # no units.
    try:
        units = old_label[old_label.index("[") + 1:old_label.rindex("]")]
    except ValueError:
        units = ""
    base_label = old_label.replace("[{}]".format(units), "")
    exponent = exponent_text.replace("\\times", "")
    return "{} [{} {}]".format(base_label, exponent, units)
def median(data):
    """Return the median of a list.

    For an even number of values, the mean of the two middle values.

    Fix: the original called ``data.sort()``, mutating the caller's list as a
    side effect; ``sorted()`` leaves the input untouched.
    """
    values = sorted(data)
    num_values = len(values)
    half = num_values // 2
    if num_values % 2:
        return values[half]
    return 0.5 * (values[half - 1] + values[half])
def itraceToStr( trace ):
    """Convert an instruction trace into a comma-separated string.

    Each trace entry contributes its first element (``entry[0]``).

    Fixes: docstring typo ("Concerts"); quadratic ``+=`` string building
    replaced by a single ``str.join``.
    """
    return ",".join("%s" % entry[0] for entry in trace)
def encode_varint_1(num): """ Encode an integer to a varint presentation. See https://developers.google.com/protocol-buffers/docs/encoding?csw=1#varints on how those can be produced. Arguments: num (int): Value to encode Returns: bytearray: Encoded presentation of integer with length from 1 to 10 bytes """ # Shift sign to the end of number num = (num << 1) ^ (num >> 63) # Max 10 bytes. We assert those are allocated buf = bytearray(10) for i in range(10): # 7 lowest bits from the number and set 8th if we still have pending # bits left to encode buf[i] = num & 0x7f | (0x80 if num > 0x7f else 0) num = num >> 7 if num == 0: break else: # Max size of endcoded double is 10 bytes for unsigned values raise ValueError("Out of double range") return buf[:i + 1]
def achat_submit_row(context):
    """Build the template context for the row of delete/save buttons.

    :param context: incoming template context; 'opts' is required,
        'preserved_filters' and 'original' are optional.
    :return: dict of flags consumed by the submit-row template.

    Fix: removed the unused local variables ``change``, ``is_popup`` and
    ``save_as`` that were read from the context but never used.
    """
    ctx = {
        'opts': context['opts'],
        'show_delete_link': False,
        'show_save_as_new': False,
        'show_save_and_add_another': False,
        'show_save_and_continue': False,
        'is_popup': False,
        'show_save': True,
        'preserved_filters': context.get('preserved_filters'),
        'cancel_operation': True,
        'search_database': True,  # Searchable database
    }
    if context.get('original') is not None:
        ctx['original'] = context['original']
    return ctx
def bibser2bibstr(bib=u'', ser=u''):
    """Return a valid bib.series string."""
    if ser != u'':
        return bib + u'.' + ser
    return bib
def solution(number):  # O(N)
    """Compute the Fibonacci sequence value at the requested iteration.

    >>> solution(3)
    2
    >>> solution(10)
    55
    >>> solution(20)
    6765
    """
    # Bottom-up table of all Fibonacci values up to `number`.
    fib = {0: 0, 1: 1}
    for i in range(2, number + 1):
        fib[i] = fib[i - 1] + fib[i - 2]
    return fib[number]
def brightness_from_percentage(percent):
    """Convert a percentage to an absolute brightness value in 0..255."""
    scaled = percent * 255.0
    return scaled / 100.0
def getAllRootPaths(goId, goData):
    """Find all paths from *goId* to the ontology root.

    A term absent from *goData* is a root; a falsy id yields no paths.
    """
    if not goId:
        return list()
    if goId not in goData:
        return [[goId]]
    # Prepend this term to every root path of each of its parents.
    return [
        [goId] + tail
        for parent in goData[goId]
        for tail in getAllRootPaths(parent, goData)
    ]
def sh_parse_tile(s):
    """Parse a space-separated tile string, clamping each value to [0, 63]."""
    return [max(min(int(token), 63), 0) for token in s.split(" ")]
def translate(text, conversion_dict, before=None): """ Translate words from a text using a conversion dictionary Arguments: text: the text to be translated conversion_dict: the conversion dictionary before: a function to transform the input (by default it will fall back to the identity function) """ # if empty: if not text: return text # preliminary transformation: before = before or (lambda x: x) t = before(text) # print(sorted(conversion_dict.items(),key=lambda x: x[1],reverse=True)) # print(conversion_dict.items()) i = 0 out = "" while i < len(text): if text[i].isnumeric(): if (text[i:i + 4] == "2 1 ") or (text[i:i + 4] == "1 1 "): out = out + text[i:i + 3] i = i + 3 elif text[i].isnumeric(): j = 0 while text[i + j].isnumeric(): j = j + 1 out = out + str(conversion_dict.get(int(text[i:i + j]))) i = i + j else: out = out + text[i] i = i + 1 return out.split(",")
def unique_list(inp_list):
    """Return a list with the unique values of *inp_list*.

    :usage:
        >>> inp_list = ['a', 'b', 'c']
        >>> unique_inp_list = unique_list(inp_list*2)

    Improvement: uses ``dict.fromkeys`` so the result keeps deterministic
    first-occurrence order (the original ``set()`` version returned the
    uniques in arbitrary order).
    """
    return list(dict.fromkeys(inp_list))
def requires_reload(action, plugins):
    """
    Returns True if ANY of the plugins require a page reload when action is
    taking place.
    """
    for plugin in plugins:
        if plugin.get_plugin_class_instance().requires_reload(action):
            return True
    return False
def strip_suffix(target, suffix):
    """Remove the given suffix from the target if it is present there.

    Args:
        target: A string to be formatted
        suffix: A string to be removed from 'target'

    Returns:
        The formatted version of 'target' (unchanged when either argument is
        None or the suffix is absent/empty)

    Idiom fix: uses ``str.endswith`` instead of comparing a negative slice,
    which also avoids the subtle ``target[-0:]`` behaviour for an empty
    suffix.
    """
    if suffix is None or target is None:
        return target
    if suffix and target.endswith(suffix):
        return target[:-len(suffix)]
    return target
def is_user_context(context):
    """Indicates if the request context is a normal user."""
    # Not a user context when: no context at all, an admin context, or a
    # context missing either a user id or a project id.
    return bool(
        context
        and not context.is_admin
        and context.user_id
        and context.project_id
    )
def fileExistenceCheck(filepath, filename):
    """Check whether the file exists at the given location.

    :param filepath: location of the file (must include a trailing separator —
        the two arguments are concatenated verbatim)
    :type filepath: string
    :param filename: name of the file
    :type filename: string
    :return: True when the path refers to an existing regular file

    Fixes: the local variable shadowed the stdlib ``tempfile`` module, and
    the redundant ``if/else`` around a boolean is collapsed to a direct
    return.
    """
    from pathlib import Path
    return Path(filepath + filename).is_file()
def _parse_cgroup_ids(cgroup_info): """Returns a dictionary of subsystems to their cgroup. Arguments: cgroup_info: An iterable where each item is a line of a cgroup file. """ cgroup_ids = {} for line in cgroup_info: parts = line.split(':') if len(parts) != 3: continue _, subsystems, cgroup_id = parts subsystems = subsystems.split(',') for subsystem in subsystems: cgroup_ids[subsystem] = cgroup_id return cgroup_ids
def get_log(name):
    """Return a console logger.

    Output may be sent to the logger using the `debug`, `info`, `warning`,
    `error` and `critical` methods.

    Parameters
    ----------
    name : str
        Name of the log.

    References
    ----------
    .. [1] Logging facility for Python,
           http://docs.python.org/library/logging.html
    """
    import logging
    import sys
    # Route log records to stdout; WARNING and above are shown by default.
    logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
    return logging.getLogger(name)
def removesuffix(string_, suffix):
    """Backport of ``str.removesuffix``; see
    https://www.python.org/dev/peps/pep-0616/ ."""
    # An empty suffix must be a no-op (slicing by -0 would empty the string).
    return string_[:-len(suffix)] if suffix and string_.endswith(suffix) else string_
def normalize_case(text):
    """Normalize the case of some (all-{upper|lower}case) text.

    Uses a wordlist to determine which words need capitalization.

    Fixes two bugs in the original:
    * ``text.replace(...)`` returned a new string that was discarded, so the
      special-case words were never actually restored;
    * ``str.capitalize()`` lowercases everything after the first character,
      which would have undone the restoration anyway — only the first
      character is now uppercased.
    """
    # todo: figure out a smarter way :)
    SPECIAL_CASE_WORDS = [
        'Trento',
        'Provincia',
    ]
    text = text.lower()
    for word in SPECIAL_CASE_WORDS:
        text = text.replace(word.lower(), word)
    # Uppercase only the first character, leaving the rest untouched.
    return text[:1].upper() + text[1:]
def beta_from_normalized_beta(beta_normalized, N, M):
    """Recover beta from its normalized value.

    input:
        beta_normalized
        N: total number of values in each input image (pixels times channels)
        M: number of latent dimensions

    Computes beta = beta_normalized * N / M, inverting the relationship
    \\beta_\\text{norm} = \\frac{\\beta M}{N}
    from the Higgins, 2017, bVAE paper (p. 15).
    """
    return beta_normalized * N / M
def add_linebreaks(text, max_len=80):
    """
    Add linebreaks on whitespace such that no line is longer than `max_len`,
    unless it contains a single word that's longer.

    There are probably way faster methods, but this is simple and works.
    """
    pieces = []
    running = 0
    for word in text.split(' '):
        running += len(word) + 1
        if running > max_len:
            # Start a new line; the running count restarts at this word.
            running = len(word)
            pieces.append('\n' + word)
        else:
            pieces.append(' ' + word)
    # Drop the separator prepended before the very first word.
    return ''.join(pieces)[1:]
def binary_search(items, item, comparator=(lambda x, y: 0 if x == y else 1 if x > y else -1), lo=0, hi=None, insertion_index=False):
    """Binary search over a sorted list with a custom comparator.

    :param items: sorted list of items to search
    :param item: item to find
    :param comparator: fn(x, y) => 0 if x == y, 1 if x > y, -1 if x < y;
        defaults to the built-in ==, >, < ordering
    :param lo: lower boundary of the search, start of list if not provided
    :param hi: upper boundary of the search, end of list if not provided
    :param insertion_index: if set, also return the insertion index for item
    :return: index of item if found, -1 otherwise; when insertion_index is
        set, a tuple whose second element is the index of the first element
        larger than item
    """
    if hi is None:
        hi = len(items)
    while lo < hi:
        mid = (lo + hi) // 2
        order = comparator(item, items[mid])
        if order == 0:
            return (mid, mid) if insertion_index else mid
        if order == 1:
            lo = mid + 1
        else:
            hi = mid
    # Not found: lo is where the item would be inserted.
    return (-1, lo) if insertion_index else -1
def get_state(edge, distances):
    """Create one row for the log: ['(v1-v2)', dist, dist, ...].

    Distances are emitted in sorted key order so rows are comparable.
    """
    label = '(' + edge[0] + '-' + edge[1] + ')'
    ordered_distances = [distances[key] for key in sorted(distances.keys())]
    return [label] + ordered_distances
def test_fake_group(group_no, groups):
    """
    Does tag span many groups? If so we'll create pseudo group of groups.
    Return the number of groups and total number of posts if true.
    Otherwise, return 0, 0
    :param: group_no: Group number within groups list
    :param: groups: List of 6-tuples
        (group_no, start, start_ndx, end, end_ndx, count)
    :return: (group_count, post_count) — note: despite the wording above this
        returns two ints, not a bool.
    """
    group_count = post_count = 0
    # groups is 1-indexed by group_no.
    group_no, start, start_ndx, end, end_ndx, count = groups[group_no - 1]
    # start/end without a space have no group prefix — nothing to span.
    if " " not in start or " " not in end:
        return group_count, post_count
    # The group name is the first whitespace-separated token of `start`.
    our_group = start.split()[0]
    while True:
        if " " not in start or " " not in end:
            return group_count, post_count
        # Stop when this entry's end no longer belongs to our group.
        if our_group != end.split()[0]:
            return group_count, post_count
        if group_no >= len(groups):
            return group_count, post_count
        # Advance to the next group, accumulating its post count.
        group_no += 1
        group_count += 1
        post_count += count
        group_no, start, start_ndx, end, end_ndx, count = groups[group_no - 1]
        if " " not in start or " " not in end:
            return group_count, post_count
        if start.split()[0] != our_group:
            return group_count, post_count
def _flash_encryption_tweak_range_bits(flash_crypt_config=0xF):
    """ Return bits (in reverse order) that the "key tweak" applies to,
    as determined by the FLASH_CRYPT_CONFIG 4 bit efuse value.
    """
    tweak_range = 0
    # Each FLASH_CRYPT_CONFIG bit enables the tweak over one contiguous span
    # of the 256-bit key; the four masks below together cover all 256 bits
    # (default 0xF enables every span).
    if (flash_crypt_config & 1) != 0:
        tweak_range |= 0xFFFFFFFFFFFFFFFFE00000000000000000000000000000000000000000000000
    if (flash_crypt_config & 2) != 0:
        tweak_range |= 0x00000000000000001FFFFFFFFFFFFFFFF0000000000000000000000000000000
    if (flash_crypt_config & 4) != 0:
        tweak_range |= 0x000000000000000000000000000000000FFFFFFFFFFFFFFFE000000000000000
    if (flash_crypt_config & 8) != 0:
        tweak_range |= 0x0000000000000000000000000000000000000000000000001FFFFFFFFFFFFFFF
    return tweak_range
def transaction_location_validator(transaction_location_pid):
    """Validate that the given transaction location PID is valid."""
    valid_pid = "loc_pid"
    return transaction_location_pid == valid_pid
def bold_viewed(val, viewed_pages):
    """
    Return a CSS font-weight style string: 'font-weight: bold' when *val* is
    in *viewed_pages*, 'font-weight: normal' otherwise.

    (The previous docstring was copied from an unrelated red/negative
    styling example and did not describe this function.)
    """
    weight = 'bold' if val in viewed_pages else 'normal'
    return 'font-weight: %s' % weight
def word_preprocessing(word, ignore_non_alnumspc=True, ignore_space=True, ignore_numeric=True, ignore_case=True):
    """Preprocess a word string.

    :param word: a string to be processed
    :param ignore_non_alnumspc: remove all non alpha/numeric/space characters
    :param ignore_space: remove all spaces
    :param ignore_numeric: remove all numeric characters
    :param ignore_case: convert all alpha characters to lower case
    :return: processed string (type: str)
    """
    if ignore_non_alnumspc:
        word = "".join(ch for ch in word if ch.isalnum() or ch.isspace())
    if ignore_space:
        word = "".join(ch for ch in word if not ch.isspace())
    if ignore_numeric:
        word = "".join(ch for ch in word if not ch.isnumeric())
    if ignore_case:
        word = word.lower()
    return word
def pad_with_object(sequence, new_length, obj=None):
    """
    Returns :samp:`sequence` :obj:`list` end-padded with :samp:`{obj}`
    elements so that the length of the returned list equals
    :samp:`{new_length}`.

    :type sequence: iterable
    :param sequence: Return *listified* sequence which has been end-padded.
    :type new_length: :obj:`int`
    :param new_length: The length of the returned list.
    :type obj: :obj:`object`
    :param obj: Object used as padding elements.
    :rtype: :obj:`list`
    :return: A :obj:`list` of length :samp:`{new_length}`.
    :raises ValueError: if :samp:`len({sequence}) > {new_length})`.

    Example::

       >>> pad_with_object([1, 2, 3], 5, obj=0)
       [1, 2, 3, 0, 0]
       >>> pad_with_object([1, 2, 3], 5, obj=None)
       [1, 2, 3, None, None]
    """
    shortfall = new_length - len(sequence)
    if shortfall > 0:
        # Build a fresh padded list; the exact-length case returns the
        # original object unchanged.
        sequence = list(sequence) + [obj, ] * shortfall
    elif shortfall < 0:
        raise ValueError(
            "Got len(sequence)=%s which exceeds new_length=%s"
            % (len(sequence), new_length)
        )
    return sequence
def is_required_version(version, specified_version):
    """Check whether the Pipfile pins a hard version requirement.

    Only '==' specifiers are enforced; any other specifier passes.
    """
    # Certain packages may be defined with multiple values (dict form).
    if isinstance(specified_version, dict):
        specified_version = specified_version.get("version", "")
    if not specified_version.startswith("=="):
        return True
    pinned = specified_version.split("==")[1].strip()
    return version.strip() == pinned
def convert_vars_to_readable(variables_list):
    """Substitute human-readable names for variable names.

    :param variables_list: a list of variable names
    :returns: a copy of the list with human-readable names

    Fix: removed the dead ``if False:`` branch; the lookup table
    (VARIABLE_NAMES_DICTONARY) was never wired up, so the function currently
    returns a plain copy of its input.
    """
    # TODO: map entries through VARIABLE_NAMES_DICTONARY once it exists.
    return list(variables_list)
def isstrictlyascending(lam):
    """Determine whether the index sequence is strictly ascending.

    Parameters
    ----------
    lam : an indexable object of comparable things, usually an array of
        integers

    Returns
    -------
    bool : True if the sequence is strictly ascending (including the empty
        and single-element cases), else False.

    Fixes: the original AND-accumulated over the whole sequence without
    short-circuiting and carried a large block of commented-out code; this
    version uses ``all()`` (which stops at the first violation).
    """
    return all(lam[i] < lam[i + 1] for i in range(len(lam) - 1))
def is_magic_packet(data):
    """
    Checks if a packet is a Wake-on-LAN magic packet, returns True or False.

    Args:
        data (bytes): the payload from a packet
    """
    # Work on the lowercase hex representation of the payload.
    payload = data.hex().lower()
    # Magic packets begin with 12 'f's (the synchronization stream).
    sync = payload[:12]
    if sync != 'f' * 12:
        return False
    # The MAC address follows (next 12 hex chars) and is repeated 16 times.
    mac = payload[12:24]
    magic = sync + mac * 16
    if len(payload) == len(magic):
        return magic == payload
    # Allow for a SecureON password, which appends another 12-character hex
    # string to the end of the packet.
    return magic == payload[:-12]
def getMissingIndex(x):
    """Replace blank entries (" " or "") of *x* with ".." in place.

    :param x: list of strings (mutated)
    :returns empty_index: indices of the blank elements
    :returns x: the same list with blanks replaced by ".."
    """
    empty_index = []
    for i, elem in enumerate(x):
        if elem in (" ", ""):
            empty_index.append(i)
            x[i] = ".."
    return empty_index, x
def get_dynamic_hasher_names(HMAC_KEYS):
    """
    Return base dynamic hasher names for each entry in HMAC_KEYS (we need to
    create one hasher class for each key). Names are sorted to make sure the
    HMAC_KEYS are tested in the correct order and the first one is always the
    first hasher name returned.

    Fix: replaced the lambda assignment (PEP 8 E731) with a named inner
    function.
    """
    def algo_name(hmac_id):
        # '-' is not valid in a Python identifier, so substitute '_'.
        return 'bcrypt{0}'.format(hmac_id.replace('-', '_'))
    return [algo_name(key) for key in sorted(HMAC_KEYS.keys(), reverse=True)]
def is_iterable(obj) -> bool:
    """Whether the given object is iterable or not.

    This is tested simply by invoking ``iter(obj)`` and returning ``False``
    if this operation raises a TypeError.

    Args:
        obj: The object to test

    Returns:
        bool: True if iterable, False else
    """
    try:
        iter(obj)
    except TypeError:
        return False
    else:
        return True
def pattern_to_similarity(pattern: str) -> int:
    """
    Convert a pattern string of '0', '1' and '2' to a similarity rating.
    '2': right letter in right place
    '1': right letter in wrong place
    '0': wrong letter
    :param pattern: string of pattern
    :return: similarity (the reversed pattern read as a base-3 number)
    """
    reversed_pattern = pattern[::-1]
    return int(reversed_pattern, 3)
def is_valid_file(ext, argument):
    """Check whether *ext* is a compatible file format for *argument*."""
    accepted = {
        'input_pdb_path': ['pdb', 'pqr'],
        'input_clusters_zip': ['zip'],
        'resid_pdb_path': ['pdb'],
        'input_pdbqt_path': ['pdbqt'],
        'output_pdb_path': ['pdb'],
        'output_pdbqt_path': ['pdbqt'],
    }
    return ext in accepted[argument]
def value_shape(shape):
    """Return [shape[0], shape[1], 1]: the first two dimensions of *shape*
    extended with a trailing singleton (channel) dimension.

    # assumes shape has at least two entries — TODO confirm with callers
    """
    return [shape[0], shape[1], 1]
def remove_html_spans(batch):
    """Removes lines containing a '<' or '>' from the texts."""
    bad_strings = ["<", ">"]

    def keep(line):
        # A line survives only when it contains none of the markers.
        return not any(bs in line for bs in bad_strings)

    cleaned = [
        "\n".join(line for line in text.split("\n") if keep(line))
        for text in batch["text"]
    ]
    return {**batch, "text": cleaned}
def isTriangle(input):
    """Check if a list of three sides can form a (non-degenerate) triangle.

    Equivalent to the triangle inequality: the largest side must be strictly
    smaller than the sum of the other two.
    """
    return 2 * max(input) < sum(input)
def parse_commit_info(git_log, git_commit_fields=('id', 'body', 'author', 'timestamp', 'subject')):
    """Separates the various parts of git commit messages.

    Args:
        git_log(str): git commits as produced with
            --format='%H%x1f%b%x1f%ae%x1f%ci%x1f%s%x1e'
            (0x1f separates fields, 0x1e separates commits)
        git_commit_fields(tuple): labels for the different components of the
            commit messages corresponding to the --format

    Return:
        git_log_dict(list): list of dictionaries each corresponding to the
            parsed components of a single commit message
    """
    # Split records on the 0x1e commit separator, then fields on 0x1f.
    git_log_cmds = git_log.strip('\n\x1e').split("\x1e")
    git_log_rows = [row.strip().split("\x1f") for row in git_log_cmds]
    git_log_dict = [dict(zip(git_commit_fields, row)) for row in git_log_rows]
    return git_log_dict
def bounding_box_text(svg, node, font_size):
    """Bounding box for a text node.

    Simply reads the precomputed 'text_bounding_box' attribute off the node
    (returns None when absent).  The *svg* and *font_size* parameters are
    unused here — presumably kept for signature parity with sibling
    bounding-box handlers; verify against the dispatch site.
    """
    return node.get('text_bounding_box')
def nf_input_to_cl(inp):
    """Convert an input description into a command line argument.

    The value is the quoted default when one is set, otherwise a reference
    to the variable named after the input; 'separate' controls whether a
    space follows the prefix.
    """
    sep = " " if inp.get("separate") else ""
    default = inp.get("default")
    val = "'%s'" % default if default else "$%s" % inp["name"]
    return "%s%s%s" % (inp["prefix"], sep, val)
def differentiateTwice(f, x0, h):
    """
    Central-difference approximation of the SECOND derivative.

    @param f: function to differentiate
    @param x0: point to differentiate at
    @param h: step size
    @return: approximation of f''(x0)

    (The previous docstring claimed f'(x0) and mentioned a non-existent
    `method` parameter; the formula below is the standard second-order
    central difference for the second derivative.)
    """
    df = (f(x0+h)-2*f(x0)+f(x0-h))/(h*h)
    return df
def build_years_array(db_years, first_year=None, last_year=None):
    """Return the list of years covered by the database, optionally subset.

    Arguments:
    - `db_years`: [min([year]), max([year])] — first and last years in the
      target database
    - `first_year`: optional earliest year to include
    - `last_year`: optional latest year to include
    """
    lower, upper = db_years
    if first_year:
        lower = max(first_year, lower)
    if last_year:
        upper = min(last_year, upper)
    return list(range(lower, upper + 1))
def convert_points_to_letter(point):
    """Convert a 0-100 score to a letter grade.

    Parameters: a number in the range 0 to 100.
    Returns: letter grade.

    Fixes two bugs in the original chain:
    * scores in [86, 87) fell through every branch (B+ stopped at <86 while
      A- started at >=87) and were graded 'F';
    * ``point in range(60, 67)`` is False for any non-integer score, so e.g.
      62.5 was graded 'F' instead of 'D'.
    A descending >= ladder needs no upper bounds and has no gaps.
    """
    if point >= 94:
        return 'A'
    if point >= 87:
        return 'A-'
    if point >= 83:
        return 'B+'
    if point >= 80:
        return 'B'
    if point >= 77:
        return 'B-'
    if point >= 73:
        return 'C'
    if point >= 67:
        return 'C-'
    if point >= 60:
        return 'D'
    return 'F'
def log_evidence_from_file_summary(file_summary, prior_count):
    """Extract the MultiNest log evidence from "multinestsummary.txt".

    Early in the analysis the file may not yet exist, in which case the log
    evidence estimate is unavailable (and would be unreliable anyway); a
    large negative value is returned instead.
    """
    try:
        with open(file_summary) as summary:
            # Skip the 2-char header plus one 112-char record per parameter;
            # the evidence is the next 28-character field.
            summary.read(2 + 112 * prior_count)
            return float(summary.read(28))
    except FileNotFoundError:
        return -1.0e99
def top_similars_reduce(data):
    """Reduce step for top similars: pass the (sim, item) pair through."""
    sim, item = data
    return sim, item
def union_overlapping(intervals):
    """Union any overlapping intervals in the given set.

    Assumes consecutive overlapping intervals are adjacent in the input
    (only the most recent disjoint interval is checked for overlap).
    """
    merged = []
    for current in intervals:
        if merged and merged[-1].overlaps(current):
            merged[-1] = merged[-1].union(current)
        else:
            merged.append(current)
    return merged
def distribute_n(n, n_proc, pid):
    """Partition *n* items among *n_proc* processors.

    :param n: total number of elements to distribute
    :param n_proc: total number of processors
    :param pid: process id of the process calling this function
    :return: (n_min, n_max) index range assigned to processor *pid*;
        the first ``n % n_proc`` processors get one extra element.
    """
    per_proc = int(n / n_proc)
    remainder = n % n_proc
    # Processors below the remainder each absorbed one extra element before us.
    shift = min(pid, remainder)
    n_min = int(pid * per_proc + shift)
    if (pid < remainder):
        n_max = int(n_min + per_proc + 1)
    else:
        n_max = int(n_min + per_proc)
    return (n_min, n_max)
def get_layerwise_manipulation_strength(num_layers, truncation_psi, truncation_layers):
    """Gets layer-wise strength for manipulation.

    Recall the truncation trick played on layer [0, truncation_layers):

        w = truncation_psi * w + (1 - truncation_psi) * w_avg

    So, when using the same boundary to manipulate different layers, layer
    [0, truncation_layers) and layer [truncation_layers, num_layers) should
    use different strength to eliminate the effect from the truncation trick.
    Concretely, layers [0, truncation_layers) get `truncation_psi` while all
    other layers get 1.
    """
    strength = [1.0] * num_layers
    if truncation_layers > 0:
        for layer_idx in range(0, truncation_layers):
            strength[layer_idx] = truncation_psi
    return strength
def is_valid_role(role):
    """Check if a string is a valid role for use with gspread."""
    valid_roles = ("writer", "reader")
    return role in valid_roles
def pop_recursive(d, key, default=None):
    """dict.pop(key) where `key` is a `.`-delimited list of nested keys.

    A literal key match takes precedence over nested traversal.

    >>> d = {'a': {'b': 1, 'c': 2}}
    >>> pop_recursive(d, 'a.c')
    2
    >>> d
    {'a': {'b': 1}}
    """
    if not isinstance(d, dict):
        return default
    if key in d:
        return d.pop(key, default)
    if '.' not in key:
        return default
    head, tail = key.split('.', maxsplit=1)
    if head not in d:
        return default
    return pop_recursive(d[head], tail, default)
def datetime_as_iso(value):
    """Format a datetime object as an ISO-8601 string.

    Naive and UTC datetimes get a trailing 'Z'; other offsets keep their
    numeric %z suffix.  Non-datetime input yields "".

    Fix: a UTC-aware datetime previously rendered as '...+0000Z' — the
    numeric offset from %z AND the appended 'Z' — which is not valid
    ISO-8601.  The 'Z' form now omits the numeric offset entirely.
    """
    import datetime
    if not isinstance(value, datetime.datetime):
        return ""
    # utcoffset() is None for naive values and timedelta(0) for UTC —
    # both falsy, both rendered with the 'Z' designator.
    if not value.utcoffset():
        return value.strftime("%Y-%m-%dT%H:%M:%SZ")
    return value.strftime("%Y-%m-%dT%H:%M:%S%z")
def intersect(lst1, lst2):
    """Intersection of two lists.

    Order and duplicates follow lst1; elements need not be hashable.
    """
    common = []
    for value in lst1:
        if value in lst2:
            common.append(value)
    return common
def dp_make_weight(egg_weights, target_weight, memo=None):
    """Find the smallest number of eggs needed to make target weight.

    Assumes there is an infinite supply of eggs of each weight, and there is
    always an egg of value 1.

    Parameters:
        egg_weights - tuple of integers, available egg weights sorted from
            smallest to largest value (1 = d1 < d2 < ... < dk)
        target_weight - int, amount of weight we want to find eggs to fit
        memo - dictionary, OPTIONAL parameter for memoization

    Returns: int, smallest number of eggs needed to make target weight

    Fix: the mutable default ``memo={}`` was shared across all top-level
    calls (classic mutable-default pitfall); a fresh dict is now created per
    call, with explicit memo passing preserved for the recursion.
    """
    if memo is None:
        memo = {}
    # Key identifying this subproblem in the memo.
    subproblem = (egg_weights, target_weight)
    if subproblem in memo:
        return memo[subproblem]
    # No eggs left or no capacity left: nothing more to add.
    if egg_weights == () or target_weight == 0:
        return 0
    if egg_weights[-1] > target_weight:
        # Heaviest egg doesn't fit; consider only the lighter eggs.
        result = dp_make_weight(egg_weights[:-1], target_weight, memo)
    else:
        # Branch on taking vs. skipping the heaviest egg.
        this_egg = egg_weights[-1]
        num_eggs_with_this_egg = 1 + dp_make_weight(
            egg_weights, target_weight - this_egg, memo)
        num_eggs_without_this_egg = dp_make_weight(
            egg_weights[:-1], target_weight, memo)
        # 0 from the "without" branch means that branch found no solution.
        if num_eggs_without_this_egg != 0:
            result = min(num_eggs_with_this_egg, num_eggs_without_this_egg)
        else:
            result = num_eggs_with_this_egg
    memo[subproblem] = result
    return result
def fifo_pre_sequencing(dataset, *args, **kwargs):
    """
    Generates an initial job sequence based on the first-in-first-out
    dispatching strategy (dict insertion order). The job sequence will be
    fed to the model.
    """
    return list(dataset.values())
def tolist(a):
    """Given a list or a scalar, return a list.

    Anything exposing ``__iter__`` is passed through unchanged — note that
    strings are iterable and are therefore NOT wrapped.
    """
    return a if getattr(a, '__iter__', False) else [a]
def _linearly_scale(inputmatrix, inputmin, inputmax, outputmin, outputmax): """ used by linke turbidity lookup function """ inputrange = inputmax - inputmin outputrange = outputmax - outputmin OutputMatrix = (inputmatrix-inputmin) * outputrange/inputrange + outputmin return OutputMatrix
def extend(target, element):
    """
    Extend `target` with `element`: lists and tuples are spliced in,
    anything else (including dicts) is appended as a single item.

    >>> extend([1, 2], 4)
    [1, 2, 4]
    >>> extend([1, 2], (1, 2,))
    [1, 2, 1, 2]
    >>> extend([1, 2], {1: 2, 3: 4})
    [1, 2, {1: 2, 3: 4}]
    >>> extend([1, 2], [0, 1])
    [1, 2, 0, 1]

    :param target:
    :type target: list
    :param element:
    :rtype: list
    """
    assert isinstance(target, list)
    if not isinstance(element, (list, tuple)):
        target.append(element)
    else:
        target.extend(element)
    return target
def bisection1D(f, a, b, delta=0.00001):
    """
    Locate a root of f(x) = 0 on [a, b] with the bisection method.

    :param f: function f
    :param a: left starting point for x
    :param b: right starting point for x
    :param delta: half-interval threshold at which the search stops
    :return: approximate root x, or None when f(a) and f(b) share a sign
    """
    if f(a) == 0:
        return a
    if f(b) == 0:
        return b
    if f(a) * f(b) > 0:
        # No sign change on [a, b]: bisection cannot bracket a root.
        print("couldn't find root in [{}, {}], return {}".format(a, b, None))
        return None
    lo, hi = a, b
    mid = (lo + hi) / 2
    # Halve the bracketing interval until it is narrower than delta.
    while abs(lo - mid) > delta:
        f_mid = f(mid)
        if f_mid == 0:
            return mid
        if f_mid * f(lo) < 0:
            hi = mid
        else:
            lo = mid
        mid = (lo + hi) / 2
    return mid
def full_name(user):
    """Return the user's full name from their profile.

    Falls back to the username when the profile has no first name. When the
    last name is missing, only the first name is returned — the previous
    implementation appended ``" {}".format('')`` and so left a stray
    trailing space.

    Args:
        user (User): user object.

    Returns:
        str: full name from profile, or None when user or profile is missing.
    """
    if not user or not user.profile:
        return None
    profile = user.profile
    first = profile.first_name or profile.user.username
    last = profile.last_name or ''
    # Join with a single space only when a last name actually exists.
    return "{} {}".format(first, last) if last else first
def count_elements(head):
    """Return the number of nodes in a circular linked list.

    Returns -1 when `head` is None (empty list sentinel kept for
    compatibility with existing callers).
    """
    if head is None:
        return -1
    total = 1
    node = head.next
    # Walk the ring until we come back around to the head.
    while node != head:
        total += 1
        node = node.next
    return total
def _mk_range_bucket(name, n1, n2, r1, r2): """ Create a named range specification for encoding. :param name: The name of the range as it should appear in the result :param n1: The name of the lower bound of the range specifier :param n2: The name of the upper bound of the range specified :param r1: The value of the lower bound (user value) :param r2: The value of the upper bound (user value) :return: A dictionary containing the range bounds. The upper and lower bounds are keyed under ``n1`` and ``n2``. More than just a simple wrapper, this will not include any range bound which has a user value of `None`. Likewise it will raise an exception if both range values are ``None``. """ d = {} if r1 is not None: d[n1] = r1 if r2 is not None: d[n2] = r2 if not d: raise TypeError('Must specify at least one range boundary!') d['name'] = name return d
def o_summ(listy):
    """
    Input: A list of numbers.
    Output: The sum of all the numbers in the list, via the built-in sum.
    """
    return sum(listy)
def int2bin(n):
    """Convert a non-negative integer to its big-endian raw binary bytes.

    Returns b'' when n == 0 (and for negative inputs, which the loop
    never enters).
    """
    chunks = []
    # Peel off the least-significant byte each pass, then reverse so the
    # most-significant byte ends up first.
    while n > 0:
        chunks.append(n & 0xFF)
        n >>= 8
    return bytes(reversed(chunks))
def get_fusion_options(defaults=None):
    """Build the argument definitions for the fusion subcommand.

    Every fusion option shares the same shape — a plain ``store`` action
    whose default is pulled from ``defaults`` by dest name — so a local
    helper captures that pattern once.
    """
    if defaults is None:
        defaults = {}

    def store_option(dest, help_text):
        # Common action/dest/default shape shared by all fusion options.
        return {"action": 'store',
                "dest": dest,
                "default": defaults.get(dest, None),
                "help": help_text}

    return {
        # Comma-separated list of BigML models to build a Fusion from.
        '--fusion-models': store_option(
            'fusion_models',
            "Comma-separated list of models to be included "
            "in the Fusion resource."),
        # Path to a JSON file of model IDs or maps.
        '--fusion-models-file': store_option(
            'fusion_models_file',
            "Path to a JSON file that contains a list "
            "of model IDs or maps to include in the "
            "Fusion resource."),
        # An existing BigML Fusion used to generate predictions.
        '--fusion': store_option('fusion', "BigML Fusion Id."),
        # Path to a file containing fusion ids, one per line.
        '--fusions': store_option(
            'fusions',
            "Path to a file containing fusion/ids."
            " One Fusion"
            " per line (e.g., "
            "fusion/50a206a8035d0706dc000376"
            ")."),
        # Path to a BigML JSON file with a Fusion structure.
        '--fusion-file': store_option(
            'fusion_file', "BigML Fusion JSON structure file."),
        # Path to a JSON file of Fusion attributes.
        '--fusion-attributes': store_option(
            'fusion_attributes',
            "Path to a json file describing Fusion"
            " attributes.")}
def increment(valence, increment):
    """Move `valence` away from zero by `increment`.

    Positive valences grow, negative valences shrink; a zero valence is
    returned unchanged.
    """
    if valence > 0:
        return valence + increment
    if valence < 0:
        return valence - increment
    return valence
def getobjectindex(blocklst, object):
    """Return the index of the block whose first element matches `object`.

    Matching is case-insensitive, e.g.
    getobjectindex(blocklst, 'SOLUTION ALGORITHM') returns the index of the
    corresponding object; slash comments can be retrieved using this index.
    Raises ValueError when no block matches (list.index semantics).
    """
    names = [block[0].upper() for block in blocklst]
    return names.index(object.upper())
def fix_3(lst):
    """
    For each element in `lst`, add it with its following element in the list.
    Returns all of the summed values in a list; failures (unsummable pairs,
    running off the end of the list) print the exception type and are
    skipped.

    >>> fix_3([1, '1', 2, None])
    <class 'TypeError'>
    <class 'TypeError'>
    <class 'TypeError'>
    <class 'IndexError'>
    []
    >>> fix_3([1, 2, 3, 4])
    <class 'IndexError'>
    [3, 5, 7]
    >>> fix_3([])
    []
    """
    sums = []
    for idx in range(len(lst)):
        try:
            sums.append(lst[idx] + lst[idx + 1])
        except (TypeError, IndexError) as error:
            # Same diagnostic output for either failure mode.
            print(type(error))
    return sums
def get_puzzle(complex: bool = False) -> str:
    """Returns puzzle with high or medium complexity.

    Args:
        complex (bool, optional): An option if harder puzzle is required.
            Defaults to False.

    Returns:
        str: Returns puzzle string.
    """
    hard = '4.....8.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......'
    medium = '..3.2.6..9..3.5..1..18.64....81.29..7.......8..67.82....26.95..8..2.3..9..5.1.3..'
    return hard if complex else medium
def get_z_2p(x_1, x_2, s1, s2, n1, n2):
    """Two-sample z statistic for the difference of two means.

    Uses the large-sample standard error sqrt(s1^2/n1 + s2^2/n2).
    Prints a warning and returns None when either sample size is not
    greater than 30 (the large-sample assumption would not hold).
    """
    if n1 <= 30 or n2 <= 30:
        print("The sample sizes must be greater than 30.")
        return None
    std_error = ((s1**2 / n1) + (s2**2 / n2)) ** 0.5
    return (x_1 - x_2) / std_error
def getAllIndices(element, alist):
    """
    Find every index of an element in a list (the element can appear
    multiple times).

    input: alist - a list
           element - objective element
    output: list of all indices of the element in the list (possibly empty)
    """
    indices = []
    pos = -1
    while True:
        # list.index raises ValueError once no further occurrence exists.
        try:
            pos = alist.index(element, pos + 1)
        except ValueError:
            break
        indices.append(pos)
    return indices
def module_name(prog):
    """Return the module name for a program: its name prefixed with '_'."""
    # f-string applies the same str() conversion as '{}'.format(prog).
    return f'_{prog}'
def is_final_option(string):
    """Whether that string means there will be no further options

    >>> is_final_option('--')
    True
    """
    end_of_options_marker = '--'
    return string == end_of_options_marker
def timezone_choice(tzname, country=None):
    """
    Given a timezone string, convert it to a two-tuple suitable for a
    choice in a FormField.

    >>> timezone_choice('UTC')
    ('UTC', 'UTC')
    >>> timezone_choice('Australia/Sydney')
    ('Australia/Sydney', 'Sydney')
    >>> timezone_choice('America/Indiana/Indianapolis')
    ('America/Indiana/Indianapolis', 'Indianapolis (Indiana)')
    """
    if '/' in tzname:
        base, label = tzname.split('/', 1)
    else:
        base, label = None, tzname
    label = label.replace('_', ' ')
    # A two-part remainder like "Indiana/Indianapolis" gets the region
    # appended in parentheses, unless it repeats the base or the country.
    parts = label.split('/')
    if len(parts) == 2:
        region, city = parts
        if region != base and region != country:
            city = '{0} ({1})'.format(city, region)
        label = city
    return (tzname, label)
def format_dictionary(adict, sep=', ', sort_first=True):
    """Format a dictionary into a string of key=value pairs.

    :param adict: dictionary to format
    :param sep: separator. Default ', '
    :param sort_first: whether to sort the key-value pairs first before
        formatting (insertion order is kept otherwise)
    :return: a string with the formatted dictionary
    """
    pairs = adict.items()
    if sort_first:
        pairs = sorted(pairs)
    return sep.join('{}={}'.format(key, value) for key, value in pairs)
def html_format(obj, indent=0, notify_complete=False):
    """Recursively render one or more patient match objects as HTML.

    Args:
        obj: a patient object (dict), a list of match results, or a scalar
        indent(int): nesting depth, used for the left margin of each div
        notify_complete(bool): set to False to NOT notify variants and
            phenotype terms by email (only whitelisted keys are rendered)
    """
    if isinstance(obj, list):
        # A list of match results, one nested div per entry.
        rendered = [
            html_format(obj=entry, indent=indent + 1, notify_complete=notify_complete)
            for entry in obj
        ]
        return '[<div style="margin-left: %dem">%s</div>]' % (indent, ",<br>".join(rendered))
    if isinstance(obj, dict):
        # A patient object: render only whitelisted keys unless the full
        # notification was requested.
        whitelist = [
            "node",
            "patients",
            "patient",
            "contact",
            "id",
            "name",
            "href",
            "email",
            "institution",
        ]
        rendered = []
        for key, value in obj.items():
            if not (notify_complete or key in whitelist):
                continue
            rendered.append(
                "<span style='font-style: italic; color: #888'>%s</span>: %s"
                % (key, html_format(obj=value, indent=indent + 1, notify_complete=notify_complete))
            )
        return '{<div style="margin-left: %dem">%s</div>}' % (indent, ",<br>".join(rendered))
    # Scalars render as their string form.
    return str(obj)
def induce_soft_constraints(data, result):
    """
    Induce soft constraints by adding a large negative cost to
    high-confidence edges, strongly encouraging them to be kept.
    """
    high_confidence_cost = -1000
    if "high_confident" in data["edges"]:
        # Element-wise when the entries are arrays, plain arithmetic for
        # scalars — either way the high-confidence mask scales the penalty.
        result['edge_costs'] = (
            result['edge_costs']
            + data["edges"]["high_confident"] * high_confidence_cost
        )
    return result