def unique_elements(array):
    """Return unique elements of an array."""
    unique = []
    for x in array:
        if x not in unique:
            unique.append(x)
    return unique
def _get_ace_translation(value, *args):
    """
    Take the ace name(s) and return the total number associated with all the
    ace access masks and flags. The names associated with each number are
    listed below.
    """
    ret = 0
    ace_dict = {'ReadData': 1, 'CreateFiles': 2, 'AppendData': 4,
                'ReadExtendedAttributes': 8, 'WriteExtendedAttributes': 16,
                'ExecuteFile': 32, 'DeleteSubdirectoriesAndFiles': 64,
                'ReadAttributes': 128, 'WriteAttributes': 256, 'Write': 278,
                'Delete': 65536, 'ReadPermissions': 131072,
                'ChangePermissions': 262144, 'TakeOwnership': 524288,
                'Read': 131209, 'ReadAndExecute': 131241, 'Modify': 197055,
                'ObjectInherit': 1, 'ContainerInherit': 2,
                'NoPropagateInherit': 4, 'Success': 64, 'Failure': 128}
    aces = value.split(',')
    for arg in args:
        aces.extend(arg.split(','))
    for ace in aces:
        if ace in ace_dict:
            ret += ace_dict[ace]
    return ret
def is_valid_error_cls(cls):
    """
    Check whether the supplied object is a valid VK API error exception class.

    :param cls: Class object to be checked
    :rtype: bool
    """
    valid_name = cls.__name__.endswith('Error')
    valid_attrs = hasattr(cls, 'error_code')
    return valid_name and valid_attrs
def lr_poly(base_lr, iter, max_iter, power):
    """
    Args:
        base_lr: initial learning rate
        iter: current iteration
        max_iter: maximum number of iterations
        power: power value for polynomial decay
    Returns:
        the updated learning rate with polynomial decay
    """
    return base_lr * ((1 - float(iter) / float(max_iter)) ** power)
def total_t(arr):
    """Returns the total of nodes with value "TRUE" in a given array."""
    count = 0
    for x in arr:
        if x == True:
            count += 1
    return count
def s_one_one(topics):
    """
    This function performs s_one_one segmentation on a list of topics.
    s_one_one segmentation is defined as:
        s_one_one = {(W', W*) | W' = {w_i}; W* = {w_j}; w_i, w_j belongs to W; i != j}

    Example:
        >>> topics = [np.array([1, 2, 3]), np.array([4, 5, 6])]
        >>> s_one_one(topics)
        [[(1, 2), (1, 3), (2, 1), (2, 3), (3, 1), (3, 2)], [(4, 5), (4, 6), (5, 4), (5, 6), (6, 4), (6, 5)]]

    Args:
        topics : list of topics obtained from an algorithm such as LDA.
            Is a list such as [array([ 9, 10, 11]), array([ 9, 10, 7]), ...]

    Returns:
        s_one_one : list of list of (W', W*) tuples for all unique topic ids
    """
    s_one_one = []
    for top_words in topics:
        s_one_one_t = []
        for w_prime_index, w_prime in enumerate(top_words):
            for w_star_index, w_star in enumerate(top_words):
                if w_prime_index == w_star_index:
                    continue
                s_one_one_t.append((w_prime, w_star))
        s_one_one.append(s_one_one_t)
    return s_one_one
def should_pursue_references(path):
    """
    Given a file path, returns whether or not we are interested in seeing
    whether or not the file contains additional references to other files.
    """
    return not (
        path.endswith('.wav')
        or path.endswith('.bsp')
        or path.endswith('.vtf')
        or path.endswith('.cache')
        or 'hc_changelog' in path
    )
def none_conv(result):
    """Just in case of a None result, to prevent script crashing."""
    if result is not None:
        return result
    return 0.0
def get_used_in(id_set, script_id):
    """
    Gets the integrations, scripts and playbooks that used the input script,
    without test playbooks.

    :param id_set: updated id_set object.
    :param script_id: the script id.
    :return: list of integrations, scripts and playbooks that used the input script
    """
    used_in_list = set()
    id_set_sections = list(id_set.keys())
    id_set_sections.remove('TestPlaybooks')
    for key in id_set_sections:
        items = id_set[key]
        for item in items:
            item_key = list(item.keys())[0]
            scripts = item[item_key].get('implementing_scripts', [])
            if scripts and script_id in scripts:
                used_in_list.add(item[item_key].get('name', []))
    used_in_list = list(used_in_list)
    used_in_list.sort()
    return used_in_list
def make_list_dict(item):
    """
    Initialize a dictionary of keys with empty lists as values.

    Parameters
    ----------
    item: list
        List of titles used to initialize a dictionary

    Returns
    -------
    list_dict: dict
        Dictionary initialized with objects in `item` list as keys and
        empty lists as its values
    """
    return dict((obj, []) for obj in item)
def send_to_right_side(targets, values):
    """Send the given target values to the right of all other values.

    Example:
        targets = ["b", "x", "c"]
        values = ["a", "b", "c", "x", "y", "z"]
        send_to_right_side(targets, values)  # -> ("a", "y", "z", "b", "x", "c")

    Args:
        targets: Values to send to right side.
        values: The values of all elements.

    Returns:
        A tuple of elements of values in the desired permutation.
    """
    target_set = set(targets)
    return tuple([x for x in values if x not in target_set] + list(targets))
def safe_add(direction, direction_delta, check_constraints=False):
    """Check resulting vector values and add if not violating constraints.

    Constraints are [-1; 1].
    """
    x, y, z = direction
    new_x = x + direction_delta['x']
    new_y = y + direction_delta['y']
    new_z = z + direction_delta['z']
    # Note: only the x and y components are checked against the constraints.
    if not check_constraints or (-1 <= new_x <= 1 and -1 <= new_y <= 1):
        return new_x, new_y, new_z
    return direction
def timeStamp(list_time):
    """
    Format time stamps as `00h00m00s` into a dictionary.

    :param list_time: float list of time stamps in seconds
    :return format_time: dictionary of formatted times
    """
    format_time = dict()
    for i, time in enumerate(list_time):
        m, s = divmod(time, 60)
        h, m = divmod(m, 60)
        format_time[str(i)] = {"%dh%02dm%02ds" % (h, m, s): time}
    return format_time
def _hgvs_to_zbc(i):
    """Convert an HGVS coordinate (1-based, no zero) to a zero-based coordinate."""
    if i >= 1:
        i -= 1
    return i
def one(nodes, or_none=False):
    """
    Assert that there is exactly one node in the given list, and return it.
    """
    if not nodes and or_none:
        return None
    assert len(nodes) == 1, 'Expected 1 result. Received %d results.' % (len(nodes))
    return nodes[0]
def get_sum(a, b):
    """
    Given two integers a and b, which can be positive or negative, find the
    sum of all the numbers between them (inclusive) and return it. If the two
    numbers are equal, return a or b.

    :param a:
    :param b:
    :return:
    """
    if a > b:
        a, b = b, a
    return sum(range(a, b + 1))
def get_fraction_unique(molecular_graphs):
    """
    Returns the fraction (`float`) of unique graphs in `molecular_graphs`
    (`list` of `MolecularGraph`s) by comparing their canonical SMILES strings.
    """
    smiles_list = []
    for molecular_graph in molecular_graphs:
        smiles = molecular_graph.get_smiles()
        smiles_list.append(smiles)

    smiles_set = set(smiles_list)
    try:
        smiles_set.remove(None)  # remove placeholder for invalid SMILES
    except KeyError:  # no invalid SMILES in set!
        pass

    n_unique = len(smiles_set)
    try:
        fraction_unique = n_unique / len(smiles_list)
    except (ValueError, ZeroDivisionError):
        fraction_unique = 0
    return fraction_unique
def copy_files(in_files, out_files):
    """
    Create a function to copy a file that can be modified by a following node
    without changing the original file.
    """
    import shutil
    import sys

    if len(in_files) != len(out_files):
        print(
            "ERROR: Length of input files must be identical to the length of "
            + "output files to be copied"
        )
        sys.exit(-1)

    for i, in_file in enumerate(in_files):
        out_file = out_files[i]
        print("copying {0} to {1}".format(in_file, out_file))
        shutil.copy(in_file, out_file)
    return out_files
def is_rotated_list(list_a, list_b):
    """
    Check if `list_b` is a rotated version of `list_a`.

    To do this, concat `list_a` with `list_a`, then check if `list_b` is a
    sublist of it.

    Args:
        list_a (list): The "primary" list.
        list_b (list): The list to check if is rotated.

    Returns:
        bool
    """
    if len(list_a) != len(list_b):
        return False
    double_list = list_a + list_a
    # Compare every window of len(list_b) in the doubled list; a running
    # match counter would miss rotations that require backtracking.
    for i in range(len(list_a)):
        if double_list[i:i + len(list_b)] == list_b:
            return True
    return False
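# Illustrative usage sketch for is_rotated_list above (added here, not part of
# the original source); the example lists are assumptions.
assert is_rotated_list([1, 1, 2], [1, 2, 1]) is True   # rotation by one position
assert is_rotated_list([1, 2, 3], [1, 3, 2]) is False  # same elements, not a rotation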
def combine_data(data: dict) -> dict:
    """Clean all data."""
    keys = data.keys()
    for number in keys:
        tempt = {}
        loudness, tempo, mode, time_signature, key = [], [], [], [], []
        for i in data[number]["sections"]:
            loudness.append(i["loudness"])
            tempo.append(i["tempo"])
            mode.append(i["mode"])
            time_signature.append(i["time_signature"])
            key.append(i["key"])
        tempt["sections"] = {
            "loudness": loudness,
            "tempo": tempo,
            "mode": mode,
            "time_signature": time_signature,
            "key": key,
        }
        loudness_max_time, timbre = [], []
        for i in data[number]["segments"]:
            loudness_max_time.append(i["loudness_max_time"])
            timbre.append(i["timbre"])
        tempt["segments"] = {"loudness_max_time": loudness_max_time, "timbre": timbre}
        data[number] = tempt
    return data
def array_to_string(array, delimiter=" ", format="{}", precision=None):
    """
    Converts a numeric array into the string format in mujoco.

    Examples:
        [0, 1, 2] => "0 1 2"
    """
    if precision is not None and format == "{}":
        return delimiter.join([format.format(round(x, precision)) for x in array])
    else:
        return delimiter.join([format.format(x, precision) for x in array])
def mk_int(s):
    """
    Function to change a string to int or 0 if None.

    :param s: String to change to int.
    :return: The int of the string, 0 for an empty/None value, or the
        original value if it cannot be converted.
    """
    try:
        s = s.strip()
        return int(s) if s else 0
    except (AttributeError, TypeError, ValueError):
        return s
def clean_for_geocode(orig_str):
    """
    Remove punctuation and stopwords at the end of the string only, repeatedly.

    :param orig_str: original string
    :return: clean string, ready to be geocoded
    """
    stopwords = ['in', 'the', 'upon', 'of', 'at', 'within', 'to', 'along', 'near']
    prev_len = len(orig_str)
    clean_str = orig_str
    while True:
        clean_str = clean_str.strip(" (,).")
        # remove stopwords at the end of the word
        if clean_str.split(" ")[-1] in stopwords:
            clean_str = " ".join(clean_str.split(" ")[0:-1])
        if len(clean_str) == prev_len:
            break
        prev_len = len(clean_str)
    return clean_str
def complement_base(base, material='DNA'):
    """Returns the Watson-Crick complement of a base."""
    if base == 'A' or base == 'a':
        if material == 'DNA':
            return 'T'
        elif material == 'RNA':
            return 'U'
    elif base == 'T' or base == 't' or base == 'U' or base == 'u':
        return 'A'
    elif base == 'G' or base == 'g':
        return 'C'
    else:
        # 'C'/'c' (and any other input) complements to 'G'
        return 'G'
def _split_varpath(cont, path):
    """Return a tuple of compname,component,varname given a path
    name of the form 'compname.varname'. If the name is of the form
    'varname', then compname will be None and comp is cont.
    """
    try:
        compname, varname = path.split('.', 1)
    except ValueError:
        return (None, cont, path)

    t = cont.get_trait(compname)
    if t and t.iotype:
        return (None, cont, path)
    return (compname, getattr(cont, compname), varname)
def effir(interest_rate, compounds):
    """Return the effective annual interest rate for a nominal rate
    compounded `compounds` times per year.
    """
    return (1 + interest_rate / compounds) ** compounds - 1
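# Illustrative usage sketch for effir above (added, not part of the original
# source); the 6% nominal rate and monthly compounding are assumptions.
# A 6% nominal rate compounded monthly gives roughly a 6.17% effective rate:
# (1 + 0.06 / 12) ** 12 - 1 ~= 0.061678
example_effective_rate = effir(0.06, 12)
assert abs(example_effective_rate - 0.061678) < 1e-4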
def convertRowIntoIndexValuePairs(row):
    """
    Converts [x, y, z, ...] into [(0, x), (1, y), (2, z)] for use in the
    classifiers in their "where" statements.
    """
    return [(index, value) for index, value in enumerate(row)]
def make_url(type: str) -> str:
    """Will compose a url based on the given type."""
    return f"/{type}/add"
def odeFun(t, y, **kwargs):
    """
    Contains system of differential equations.

    Arguments:
        t        : current time variable value
        y        : current state variable values (order matters)
        **kwargs : constant parameter values, interpolating functions, etc.

    Returns:
        List containing dY/dt for the given state and parameter values
    """
    IW, IR = y  # unpack state vars

    # Unpack constant parameters passed through kwargs
    alp, bet, det, eps, N = kwargs['alp'], kwargs['bet'], kwargs['det'], \
        kwargs['eps'], kwargs['N']

    # ODEs
    dIW = (bet * (N + (eps - 1) * IR - IW) - det - alp) * IW
    dIR = (bet * (N - (eps + 1) * IW - IR) - det) * IR

    # Gather differential values in list (state variable order matters for
    # numerical solver)
    dy = [dIW, dIR]

    return dy
def isNearBrightStar(nearStarPeak, mainPeak):
    """Is the main star near another bright star?"""
    return (0.2 * mainPeak < nearStarPeak)
def mapping(row):
    """
    @param row: The row generated as a result of the select query.
    Returns the new value generated for each row.
    """
    return row["id"]
def call_until(fun, expr):
    """Call `fun` and assert that the eval() expression `expr` is True for
    its return value `ret`.
    """
    ret = fun()
    assert eval(expr)
    return ret
def humanize_time(seconds):
    """Convert time in seconds to (better) human readable format.

    :param seconds: seconds
    :type seconds: float
    :return: Human readable time in h:m:s:ms
    :rtype: str
    """
    ms = (seconds - int(seconds)) * 1000
    mins, seconds = divmod(int(seconds), 60)
    hours, mins = divmod(mins, 60)
    return "{:02d} hours {:02d} minutes {:02d} seconds ~{:d} milliseconds".format(
        hours, mins, seconds, int(ms))
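# Illustrative usage sketch for humanize_time above (added, not part of the
# original source); the 3661.25-second value is an assumption.
# 3661.25 s = 1 h, 1 min, 1 s and 250 ms.
assert humanize_time(3661.25) == "01 hours 01 minutes 01 seconds ~250 milliseconds"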
def is_valid_gatech_username(username: str) -> bool:
    """
    Rough validator for GT usernames

    :param username: the username to check
    :return: whether this is a valid username
    """
    if not username.isalnum():
        return False
    if not username[0].isalpha():
        return False
    if not username[-1].isnumeric():
        return False
    return True
def check_shell(cmd, shell=None):
    """
    Determine whether a command appears to involve shell process(es).

    The shell argument can be used to override the result of the check.

    :param str cmd: Command to investigate.
    :param bool shell: override the result of the check with this value.
    :return bool: Whether the command appears to involve shell process(es).
    """
    if isinstance(shell, bool):
        return shell
    return "|" in cmd or ">" in cmd or r"*" in cmd
def get_overlap(
    tabix,
    chrom,
    start,
    end,
    priority=["exon", "gene", "transcript", "cds"],
    no_hit="intergenic",
    fix_chr=True,
):
    """
    args:
        tabix (pysam.libctabix.TabixFile) - open TabixFile
        chrom (str)
        start (int)
        end (int)
        priority (Optional[list]) - order of preferred region annotation
        no_hit (Optional[str]) - use this annotation if no matches among priority
        fix_chr (Optional[bool]) - try to fetch a region using both non-'chr'
            and 'chr' prefix on failures

    returns:
        str
    """
    overlaps = None
    try:
        overlaps = set(
            [i.split("\t")[2].lower() for i in tabix.fetch(chrom, start, end)]
        )
    except IndexError:
        # probably not a gff or gtf
        print("Invalid annotation file specified for --gff")
        overlaps = None
    except ValueError:
        if fix_chr:
            # try removing chr
            if chrom.startswith("chr"):
                overlaps = get_overlap(
                    tabix, chrom[3:], start, end, priority, no_hit, False
                )
            # or adding chr
            else:
                overlaps = get_overlap(
                    tabix,
                    "chr{chrom}".format(chrom=chrom),
                    start,
                    end,
                    priority,
                    no_hit,
                    False,
                )
    except:
        # bad regions
        print(
            "Error fetching {chrom}:{start}-{end}".format(
                chrom=chrom, start=start, end=end
            )
        )
        overlaps = None
    overlap = ""
    if overlaps:
        for feature in priority:
            if feature in overlaps:
                overlap = feature
                break
    else:
        # fetching overlaps failed
        overlap = "unknown"
    if not overlap and no_hit:
        overlap = no_hit
    return overlap
def is_constructed(val):
    """Check if a tag represents a "constructed" value, i.e. another TLV"""
    return val & 0b00100000 == 0b00100000
def number_of_words(string):
    """Return the number of words in a string.

    :param string: The string to check
    """
    return len(string.split())
def validate(config):
    """
    Validate the beacon configuration.
    """
    # Configuration for service beacon should be a list of dicts
    if not isinstance(config, list):
        return False, "Configuration for service beacon must be a list."
    else:
        _config = {}
        list(map(_config.update, config))

        if "services" not in _config:
            return False, "Configuration for service beacon requires services."
        else:
            if not isinstance(_config["services"], dict):
                return (
                    False,
                    "Services configuration item for service beacon must "
                    "be a dictionary.",
                )
            for config_item in _config["services"]:
                if not isinstance(_config["services"][config_item], dict):
                    return (
                        False,
                        "Configuration for service beacon must "
                        "be a list of dictionaries.",
                    )

    return True, "Valid beacon configuration"
def patch_transform(diff_opcodes, a_transformed, b_raw, transform, verify=False):
    """
    Minimally patch the list a_transformed == [transform(ai) for ai in a_raw]
    into the list b_transformed == [transform(bi) for bi in b_raw]
    by using the given opcodes returned by the get_opcodes() method
    of a difflib.SequenceMatcher between a_raw and b_raw. a_raw is not needed.

    The assumption is that the transform function is expensive and/or the diff
    is small relative to the size of b_raw. The goal is to save time versus
    applying transform() to every item in b_raw.
    """
    c_transformed = a_transformed.copy()
    offset = 0
    for (opcode, i1, i2, j1, j2) in diff_opcodes:
        # Equal spans are already transformed in a_transformed; skipping them
        # is what saves the transform() calls.
        if opcode == 'equal':
            continue
        new = [transform(cell) for cell in b_raw[j1:j2]]
        c_transformed[i1 + offset:i2 + offset] = new
        offset += (j2 - j1) - (i2 - i1)
    if verify:
        assert [transform(bi) for bi in b_raw] == c_transformed
    return c_transformed
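# Illustrative usage sketch for patch_transform above (added, not part of the
# original source); the toy lists and the str.upper transform are assumptions.
import difflib

_a_raw = ["a", "b", "c"]
_b_raw = ["a", "x", "c", "d"]
_a_transformed = [s.upper() for s in _a_raw]
_opcodes = difflib.SequenceMatcher(a=_a_raw, b=_b_raw).get_opcodes()
assert patch_transform(_opcodes, _a_transformed, _b_raw, str.upper,
                       verify=True) == ["A", "X", "C", "D"]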
def _is_singleton(input_value, param_info):
    """
    Returns True if the given input value is a singleton of that parameter.
    E.g., if it's a single string or number for a text parameter, or a list of
    strings/numbers if the parameter allows multiples, or it's a dict if the
    parameter is a group param.

    That is, if the input parameter is treated as a list by the app, and a list
    of strings is given as the value to iterate over, that's still a
    "singleton". For example, if the input_type is a list of reads to assemble
    together, and the goal is to build a batch run with several lists, that
    should be a list of lists.

    Doesn't do any validation or anything. Shouldn't raise any errors. It just
    checks whether we have a list / dict / (int or str) where allow_multiple=True.
    """
    if input_value and isinstance(input_value, list):
        if not param_info.get("allow_multiple", False):
            return False
        elif isinstance(input_value[0], list):
            return False
    return True
def sum_that_match_next(sequence):
    """Return the sum of numbers that match the next digit."""
    total = 0
    for idx, val in enumerate(sequence):
        if int(val) == int(sequence[(idx + 1) % len(sequence)]):
            total += int(val)
    return total
def find_diff_of_lists_and_sets(stat1, stat2):
    """
    Finds the difference between two stats. If there is no difference, returns
    "unchanged". Removes duplicates and returns [unique values of stat1,
    shared values, unique values of stat2].

    :param stat1: the first statistical input
    :type stat1: Union[list, set]
    :param stat2: the second statistical input
    :type stat2: Union[list, set]
    :return: the difference of the stats
    """
    diff = "unchanged"
    if stat1 is None and stat2 is None:
        pass
    elif stat1 is None or stat2 is None:
        diff = [stat1, stat2]
    elif set(stat1) != set(stat2):
        unique1 = [element for element in stat1 if element not in stat2]
        shared = [element for element in stat1 if element in stat2]
        unique2 = [element for element in stat2 if element not in stat1]
        diff = [unique1, shared, unique2]
    return diff
def do_something(x):
    """
    Do something so we have something to test.

    >>> do_something(3)
    16
    >>> do_something(7)
    24
    """
    return (x + 5) * 2
def _merge_by_type(usage_types, missing_values, unusual_changes_by_type):
    """Merge the contents of dicts `missing_values` and `unusual_changes_by_type`.

    The result will have the following form:
    {
        <UsageType>: {
            'unusual_changes': [
                (datetime.date, datetime.date, float, float, float),
                ...
            ],
            'missing_values': [
                datetime.date,
                ...
            ],
        },
    }
    """
    merged_anomalies = {}
    for usage_type in usage_types:
        unusual_changes = unusual_changes_by_type.get(usage_type)
        missing_values_ = missing_values.get(usage_type)
        # Skip usage types that have neither kind of anomaly.
        if unusual_changes is None and missing_values_ is None:
            continue
        merged_anomalies[usage_type] = {
            'unusual_changes': unusual_changes,
            'missing_values': missing_values_,
        }
    return merged_anomalies
def parse_lats_lons(val):
    """Takes a 'lats' or 'lons' value and returns two floats representing the
    southern/western coordinate and the northern/eastern coordinate.

    Example input: '60 S 80 N'
    Example output: (-60.0, 80.0)

    Parameters
    ----------
    val (str): String representing 'lats' or 'lons' value.

    Returns
    ----------
    (tuple): First coord (float), second coord (float).
    """
    val = val.replace(',', '')
    substrings = val.split(' ')
    first_coord = float(substrings[0])
    second_coord = float(substrings[2])
    if substrings[1] == 'W' or substrings[1] == 'S':
        first_coord = -1 * first_coord
    if substrings[3] == 'W' or substrings[3] == 'S':
        second_coord = -1 * second_coord
    return (first_coord, second_coord)
def get_board_copy(board):
    """Duplicates the board list & returns the duplicate."""
    board_copy = []
    board_length = len(board)
    for i in range(board_length):
        board_copy.append(board[i])
    return board_copy
def _has_file(mod):
    """
    :param mod: Module object. Can be any of the multiple types used to
        represent a module, we just check for a __file__ attribute.
    :type mod: ``Any``

    :return: If given module has a not None __file__ attribute.
    :rtype: ``bool``
    """
    return hasattr(mod, "__file__") and mod.__file__ is not None
def exc_repr(exc):
    """Construct representation of given exception appropriate for printed
    output.
    """
    exc_repr = ''
    if exc.__class__.__module__ != 'builtins':
        exc_repr += f'{exc.__class__.__module__}.'
    exc_repr += exc.__class__.__name__
    if exc.args:
        exc_repr += ': ' + ', '.join(map(str, exc.args))
    return exc_repr
def validate_password(password: str):
    """
    Validate the user password

    >>> validate_password("short")
    Traceback (most recent call last):
        ...
    ValueError: password must have at least 8 characters
    >>> validate_password("This is a good password!")
    'This is a good password!'
    """
    minlen = 8
    if not isinstance(password, str):
        raise TypeError("password must be a string")
    if len(password) < minlen:
        raise ValueError("password must have at least 8 characters")
    return password
def _get_column_lables(line):
    """
    Extract the following column labels from the pilercr "SUMMARY BY POSITION"
    table: "Position", "Length", "Copies", "Repeat", "Spacer", "Strand",
    "Consensus".
    """
    labels = line.split()
    labels_to_remove = ["Array", "Sequence", "#", "+"]
    labels = [label for label in labels if label not in labels_to_remove]
    labels.insert(5, "Strand")
    return labels
def challenge_request(name):
    """Create ACME "challengeRequest" message.

    :param str name: Domain name

    :returns: ACME "challengeRequest" message.
    :rtype: dict
    """
    return {
        "type": "challengeRequest",
        "identifier": name,
    }
def commands(user_input):
    """Function used to evaluate user's commands."""
    if user_input == "/help":
        print("The program performs simple mathematic operations based on user input")
        return False
    elif user_input == "/exit":
        print("Bye!")
        return True
    else:
        print("Unknown command")
        return False
def replace_printable(s, printables, wild=" "):
    """
    If the character in s is not in printables, replaces by wild.
    """
    new_s = ""
    for i in s:
        new_s += i if i in printables else wild
    return new_s
def Get_state_as_str(n_qubits, qubit_state_int):
    """
    Converts qubit state int into binary form.

    Args:
        n_qubits (int): Number of qubits
        qubit_state_int (int): qubit state as int (NOT BINARY!)
    Returns:
        string of qubit state in binary!

    state = |000> + |001> + |010> + |011> + |100> + |101> + |110> + |111>
    state = |0> + |1> + |2> + |3> + |4> + |5> + |6> + |7>

    n_qubits = 3
    state = 5
    Get_state_as_str(n_qubits, state)
    >> '101'
    """
    bin_str_len = '{' + "0:0{}b".format(n_qubits) + '}'
    return bin_str_len.format(qubit_state_int)
def encode(plaintext, key):
    """Encodes plaintext

    Encode the message by shifting each character by the offset of a
    character in the key.
    """
    ciphertext = ""
    i = 0  # key index

    # strip all non-alpha characters from key
    key2 = ""
    for x in key:
        key2 += x if x.isalpha() else ""

    # shift each character
    for x in plaintext:
        if 97 <= ord(x) <= 122:  # if character is alphabetic lowercase
            ciphertext += chr(((ord(x) - 97) + (ord(key2[i].lower()) - 97)) % 26 + 97)
            i += 1
        elif 65 <= ord(x) <= 90:  # if character is alphabetic uppercase
            ciphertext += chr(((ord(x) - 65) + (ord(key2[i].upper()) - 65)) % 26 + 65)
            i += 1
        else:  # non-alphabetic characters do not change
            ciphertext += x
        if i == len(key2):
            i = 0
    return ciphertext
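# Illustrative usage sketch for encode above (added, not part of the original
# source); the plaintext and key values are assumptions. Each letter is
# shifted by the corresponding key letter, Vigenere-style, while spaces pass
# through unchanged and do not advance the key.
assert encode("attack at dawn", "lemon") == "lxfopv ef rnhr"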
def fix_interactive(code: str) -> str:
    """Strip out >>> and output (lines without >>>)"""
    lines = code.split("\n")
    good_lines = []
    interactive = sum(1 for line in lines if ">>>" in line)
    for line in lines:
        if line.startswith("... "):
            good_lines.append(line.strip()[4:])
            continue
        if line.strip() == "...":
            good_lines.append("")
            continue
        if line.strip().startswith(">>> "):
            good_lines.append(line.strip()[4:])
            continue
        if line.strip() == ">>>":
            good_lines.append("")
            continue
        # e.g. output <class xyz>
        if line.startswith("<"):
            good_lines.append("# " + str(line))
            continue
        # e.g. shell output
        if line.startswith(">"):
            good_lines.append("# " + str(line))
            continue
        # e.g. shell output
        if line.startswith(">> "):
            good_lines.append("# " + str(line))
            continue
        new_line = "# " + line if interactive else line
        good_lines.append(new_line)
    return "\n".join(good_lines)
def reference_values():
    """Reference values for fit statistics test.

    Produced using sherpa stats module in dev/sherpa/stats/compare_wstat.py
    """
    return dict(
        wstat=[
            1.19504844,
            0.625311794002,
            4.25810886127,
            0.0603765381044,
            11.7285002468,
            0.206014834301,
            1.084611,
            2.72972381792,
            4.60602990838,
            7.51658734973,
        ],
        cash=[
            1.19504844,
            -39.24635098872072,
            -9.925081055136996,
            -6.034002586236575,
            -30.249839537105466,
            -55.39143500383233,
            0.9592753,
            -21.095413867175516,
            0.49542219758430406,
            -34.19193611846045,
        ],
        cstat=[
            1.19504844,
            1.4423323052792387,
            3.3176610316373925,
            0.06037653810442922,
            0.5038564644586838,
            1.3314041078406706,
            0.9592753,
            0.4546285248764317,
            1.0870959295929628,
            1.4458234764515652,
        ],
    )
def second_to_day(seconds):
    """
    :param seconds: (int) Time in seconds starting at 0 as start of data collection.
    :return: (float) Time in days starting at 0 as start of data collection
    """
    return int(seconds) / 86400
def magND(v):
    """Returns magnitude of an nD vector"""
    return sum(vv ** 2 for vv in v) ** 0.5
def _get_partition_from_id(partitions, user_partition_id):
    """
    Look for a user partition with a matching id in the provided list of
    partitions.

    Returns:
        A UserPartition, or None if not found.
    """
    for partition in partitions:
        if partition.id == user_partition_id:
            return partition
    return None
def calc_diff(time1, time2):
    """Returns the difference between two time objects or returns None."""
    try:
        return time1 - time2
    except TypeError:
        return None
def bruteforce_range(sequence, target):
    """
    Again not my proudest achievement, it's obviously not optimized enough...
    But at least I have my second gold star and can go to sleep :)
    """
    n = len(sequence)
    for start in range(n):
        for length in range(1, n - start):
            if target == sum(sequence[start:start + length]):
                mini = min(sequence[start:start + length])
                maxi = max(sequence[start:start + length])
                return mini + maxi
def u4(vector):
    """
    A utility function that is a constant.

    :param vector: The input payoff vector.
    :return: A constant utility k.
    """
    k = 2
    return k
def query_result_to_cluster_token_pairs(query_result):
    """Given a query result structure, returns a sequence of (cluster, token)
    pairs from the result"""
    cluster_token_pairs = ((c, t) for c, e in query_result['clusters'].items()
                           for t in e['tokens'])
    cluster_token_pairs_sorted = sorted(cluster_token_pairs,
                                        key=lambda p: (p[1]['token'], p[0]))
    return cluster_token_pairs_sorted
def is_route_dfs(graph, node1, node2):
    """Determine if there is a route between two nodes."""
    def dfs(current_node, visited_nodes):
        for child_node in graph[current_node]:
            if child_node not in visited_nodes:
                if child_node == node2:
                    return True
                # Keep exploring remaining children if this branch fails.
                if dfs(child_node, visited_nodes + [current_node]):
                    return True
        return False

    if dfs(node1, []):
        return True
    return False
def factorial(num):
    """Another recursive function"""
    if num == 0 or num == 1:
        return 1
    return num * factorial(num - 1)
def detectTextTwgoCancelled(frame):
    """Return an empty string if there is no cancelled message in this text
    TWGO frame. Otherwise return string with cancellation details.

    Args:
        frame (dict): Contains a text TWGO frame.

    Returns:
        (str): '' if there is no cancellation associated with this frame,
            otherwise will be a string with information about the cancellation.
    """
    location = ''
    if 'location' in frame['contents']:
        location = (frame['contents']['location']).strip()
        if len(location) > 0:
            location = '-' + location

    if 'records' in frame['contents']:
        records = frame['contents']['records']
        for x in records:
            if 'report_status' in x:
                if x['report_status'] == 0:
                    return ' [CANCELLED {}-{}{}]'.format(x['report_year'],
                                                         x['report_number'],
                                                         location)
    return ''
def getPitch(cmdf, tau_min, tau_max, harmo_th=0.1):
    """
    Return fundamental period of a frame based on CMND function.

    :param cmdf: Cumulative Mean Normalized Difference function
    :param tau_min: minimum period for speech
    :param tau_max: maximum period for speech
    :param harmo_th: harmonicity threshold to determine if it is necessary to
        compute pitch frequency
    :return: fundamental period if there are values under the threshold, 0 otherwise
    :rtype: float
    """
    tau = tau_min
    while tau < tau_max:
        if cmdf[tau] < harmo_th:
            while tau + 1 < tau_max and cmdf[tau + 1] < cmdf[tau]:
                tau += 1
            return tau
        tau += 1
    return 0
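# Illustrative usage sketch for getPitch above (added, not part of the
# original source); the toy CMND values and search bounds are assumptions.
# The first dip below the 0.1 threshold starts at tau=2 and its local minimum
# is at tau=3, which is returned as the fundamental period.
_cmdf = [1.0, 0.8, 0.05, 0.03, 0.2]
assert getPitch(_cmdf, tau_min=1, tau_max=5, harmo_th=0.1) == 3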
def has_rna_tracks(case_obj):
    """Returns True if one or more individuals of the case contain RNA-seq data

    Args:
        case_obj(dict)

    Returns:
        True or False (bool)
    """
    # Display junctions track if available for any of the individuals
    for ind in case_obj.get("individuals", []):
        # Track contains 2 files and they should both be present
        if all([ind.get("splice_junctions_bed"), ind.get("rna_coverage_bigwig")]):
            return True
    return False
def get_id_str(objects, delimiter='_'):
    """
    Get a string of sorted ids, separated by a delimiter.

    Args:
        objects (Model): An iterable of model instances.

    Keyword Args:
        delimiter (str): The string the ids will be joined on.

    Returns:
        str: The joined and sorted id string.
    """
    ids = [obj.id for obj in objects]
    ids.sort()
    ids = [str(identifier) for identifier in ids]
    id_str = delimiter.join(ids)
    return id_str
def splitall(string, splitcharlist):
    """Splits the supplied string at all of the characters given in the second
    argument list.

    :param string: the string to break up
    :type string: string
    :param splitcharlist: a list of characters to break on
    :type splitcharlist: a list of characters (string)

    :example:
        >>> splitall("fred,daphne.velma", ",.")
        ['fred', 'daphne', 'velma']
    """
    strlist = [string]
    for i in splitcharlist:
        newlist = []
        for j in strlist:
            tmplist = j.split(i)
            for k in tmplist:
                newlist.append(k)
        strlist = []
        for j in newlist:
            strlist.append(j)
    newlist = []
    for i in strlist:
        if i != '':
            newlist.append(i)
    return newlist
def default_popup_element(title=None, operation=None, format=None):
    """Helper function for quickly adding a default popup element based on the style.
    A style helper is required.

    Args:
        title (str, optional): Title for the given value. By default, it's the name of the value.
        operation (str, optional): Cluster operation, defaults to 'count'. Other options
            available are 'avg', 'min', 'max', and 'sum'.
        format (str, optional): Format to apply to number values in the widget, based on
            d3-format specifier (https://github.com/d3/d3-format#locale_format).

    Example:
        >>> default_popup_element(title='Popup title', format='.2~s')
    """
    return {
        'value': None,
        'title': title,
        'operation': operation,
        'format': format
    }
def dict_pop_nested(d, key):
    """Pop (and return) a (potentially nested) key from a dict-like."""
    if '.' in key:
        key, rest = key.split('.', 1)
        if key not in d:
            raise KeyError(key)
        return dict_pop_nested(d[key], rest)
    else:
        if key not in d:
            raise KeyError(key)
        return d.pop(key)
def bin_search(find, lst):
    """
    >>> bin_search(0, [0, 1])
    0
    >>> bin_search(1, [0, 1])
    1
    >>> bin_search(2, [0, 1])
    -1
    >>> bin_search(0, [0, 1, 2])
    0
    >>> bin_search(1, [0, 1, 2])
    1
    >>> bin_search(2, [0, 1, 2])
    2
    >>> bin_search(3, [0, 1, 3, 5])
    2
    >>> bin_search(4, [0, 1, 3, 5])
    -1
    """
    left, right = 0, len(lst) - 1
    while right >= left:
        mid = (left + right) // 2
        if lst[mid] == find:
            return mid
        if lst[mid] > find:
            right = mid - 1
        else:
            left = mid + 1
    return -1
def needleman_wunsch(a, b, p=0.97):
    """Needleman-Wunsch and Smith-Waterman"""
    z = []
    for i, r in enumerate(a):
        z.append([])
        for j, c in enumerate(b):
            if r == c:
                # match: extend the diagonal score (or start a new match)
                z[-1].append(z[i - 1][j - 1] + 1 if i * j > 0 else 1)
            else:
                # mismatch: take the best neighbouring score, decayed by p
                z[-1].append(p * max(z[i - 1][j] if i > 0 else 0,
                                     z[i][j - 1] if j > 0 else 0))
    return z
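# Illustrative usage sketch for needleman_wunsch above (added, not part of the
# original source); the example strings are assumptions. For identical
# sequences the score accumulates along the diagonal, so the bottom-right
# cell equals the sequence length.
_scores = needleman_wunsch("abc", "abc")
assert _scores[-1][-1] == 3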
def try_add_value_case_insensitive(d, key_name, new_value):
    """
    Look in a dictionary for a key with the given key_name, without concern
    for case. If the key is found, return the associated value. Otherwise,
    set the value to that provided.
    """
    for name, value in d.items():
        if name.lower() == key_name.lower():
            return value
    d[key_name] = new_value
    return new_value
def formatter(format_string, kwargs):
    """
    Default formatter used to format strings. Instead of
    `"{key}".format(**kwargs)` use `formatter("{key}", kwargs)`, which ensures
    that no errors are generated when a user uses braces, e.g. {}. Bear in
    mind that formatter consumes kwargs, which in turn replaces a used key
    with the empty string "". This can generate unusual behaviour if not well
    used.
    """
    for key, val in kwargs.items():
        key2 = "{%s}" % (key)
        if key2 in format_string:
            # explicitly convert val to str
            format_string = format_string.replace(key2, str(val))
            kwargs[key] = ""
    return format_string
def strip_prefices(columns, prefices):
    """Filters leaderboard columns to get the system column names.

    Args:
        columns(iterable): Iterable of leaderboard column names.
        prefices(list): List of prefices to strip. You can choose one of
            ['channel_', 'parameter_', 'property_'].

    Returns:
        list: A list of clean column names.
    """
    new_columns = []
    for col in columns:
        for prefix in prefices:
            if col.startswith(prefix):
                col = col.replace(prefix, '')
        new_columns.append(col)
    return new_columns
def parse_response(resp):
    """Parse requests response.

    `resp` will have the following format:
    {'base': 'stations',
     'clouds': {'all': 20},
     'cod': 200,
     'coord': {'lat': 46.05, 'lon': 14.51},
     'dt': 1495803600,
     'id': 3196359,
     'main': {'humidity': 37,
              'pressure': 1018,
              'temp': 295.7,
              'temp_max': 296.15,
              'temp_min': 295.15},
     'name': 'Ljubljana',
     'sys': {'country': 'SI',
             'id': 5882,
             'message': 0.0026,
             'sunrise': 1495768697,
             'sunset': 1495824027,
             'type': 1},
     'visibility': 10000,
     'weather': [{'description': 'few clouds',
                  'icon': '02d',
                  'id': 801,
                  'main': 'Clouds'}],
     'wind': {'deg': 160, 'speed': 2.1}}
    """
    flattened = {
        'Location': resp['name'],
        'Weather': resp['weather'][-1]['description'],
        'Temperature': resp['main']['temp'],
    }
    return flattened
def to_byte_string(value, count=2, signed=False, byteorder='little'):
    """Take an integer and return a string of its bytes as integers.

    Example: to_byte_string(123456, count=4) = '64 226 1 0'
    """
    byte_value = value.to_bytes(count, byteorder=byteorder, signed=signed)
    return ' '.join([str(x) for x in byte_value])
def lol_tuples(head, ind, values, dummies):
    """List of list of tuple keys

    Parameters
    ----------
    head : tuple
        The known tuple so far
    ind : Iterable
        An iterable of indices not yet covered
    values : dict
        Known values for non-dummy indices
    dummies : dict
        Ranges of values for dummy indices

    Examples
    --------
    >>> lol_tuples(('x',), 'ij', {'i': 1, 'j': 0}, {})
    ('x', 1, 0)

    >>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})
    [('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]

    >>> lol_tuples(('x',), 'ijk', {'i': 1}, {'j': [0, 1, 2], 'k': [0, 1]})  # doctest: +NORMALIZE_WHITESPACE
    [[('x', 1, 0, 0), ('x', 1, 0, 1)],
     [('x', 1, 1, 0), ('x', 1, 1, 1)],
     [('x', 1, 2, 0), ('x', 1, 2, 1)]]
    """
    if not ind:
        return head
    if ind[0] not in dummies:
        return lol_tuples(head + (values[ind[0]],), ind[1:], values, dummies)
    else:
        return [lol_tuples(head + (v,), ind[1:], values, dummies)
                for v in dummies[ind[0]]]
def sigmoid_derivative(x):
    """Derivative of the sigmoid, expressed in terms of the sigmoid output x:
    d sigma / dx = x * (1 - x).
    """
    return x * (1.0 - x)
def is_palindrome_iterative(text):
    """is_palindrome_iterative returns True if the input text is a palindrome,
    and False if not."""
    # Start at either end of the word, work towards middle
    left_index = 0
    right_index = len(text) - 1
    # Ensure middle hasn't been surpassed
    while left_index <= right_index:
        if text[left_index] != text[right_index]:
            return False
        else:
            left_index += 1
            right_index -= 1
    return True
def append_write(filename="", text=""):
    """
    Write a new file or append info if it exists.

    Args:
        filename: string containing the name or "" if not given.
        text: content of the file
    Return:
        number of chars written
    """
    with open(filename, 'a', encoding="utf-8") as fl_opened:
        return fl_opened.write(text)
def excape_u(byte_str):
    """
    Replace '\\u' with '$' followed by capital letters.
    """
    if b'\\u' in byte_str:
        index = byte_str.find(b'\\u')
        left = byte_str[:index]
        right = byte_str[index + 6:]
        digit = byte_str[index + 2: index + 6]
        return left + b'$' + digit.upper() + right
    return byte_str
def in_clause_subs(number):
    """
    Returns a string with n question marks to be used as substitution
    placeholders in sql queries.
    """
    return ','.join(['?'] * number)
def electron_binding_energy(charge_number):
    """Return the electron binding energy for a given number of protons (unit
    is MeV). Expression is taken from [Lunney D., Pearson J. M., Thibault C.,
    2003, Rev. Mod. Phys., 75, 1021]."""
    return 1.44381e-5 * charge_number ** 2.39 \
        + 1.55468e-12 * charge_number ** 5.35
def sourcemorerecent(sourcefile, targetfile):
    """
    Returns True if sourcefile is more recent than targetfile.
    """
    from os.path import isfile, getmtime
    if isfile(targetfile):
        time_source = getmtime(sourcefile)
        time_target = getmtime(targetfile)
        return time_source > time_target
    else:
        return True
def search_all_places_for_place_name(place_array, place_name, state):
    """Searches a place with a specified name and state in a given array."""
    all_places = []
    for place in place_array:
        if place.place_name == place_name and place.state == state:
            all_places.append(place)
    return all_places
def check_uniqueness_in_rows(board: list) -> bool:
    """
    Check buildings of unique height in each row.

    Return True if buildings in a row have unique heights, False otherwise.

    >>> check_uniqueness_in_rows(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
    True
    >>> check_uniqueness_in_rows(['***21**', '452453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
    False
    >>> check_uniqueness_in_rows(['***21**', '412453*', '423145*', '*553215', '*35214*', '*41532*', '*2*1***'])
    False
    """
    for i in range(1, len(board) - 1):
        line_temp = []
        for j in range(1, len(board[i]) - 1):
            if board[i][j] in line_temp:
                return False
            line_temp.append(board[i][j])
    return True
def check_weights(nodes_with_a_weight):
    """
    Ensure that the sum of the weight values is 1.

    :param nodes_with_a_weight: a list of node objects with a 'weight' item
    """
    weights = [n['weight'] for n in nodes_with_a_weight]
    if abs(sum(weights) - 1.) > 1E-12:
        raise ValueError('The weights do not sum up to 1: %s' % weights)
    return nodes_with_a_weight
def _alien_domain(environ, space_name):
    """
    Detect if a space_name is in an alien domain. Alien means not in the
    server_host subdomain or _the_ main domain.
    """
    # Some other things will eventually happen here, which
    # is why environ is being passed in.
    if space_name == 'frontpage':
        return True
    return False
def _get_result(args):
    """Calculates a full result code for use in generating predictions"""
    stage, app_type, result_code, attending, waitlisted, deferred = args
    if result_code == "denied":
        return "Denied"
    elif result_code in ["accepted", "cond. accept", "summer admit"]:
        if attending == "yes":
            return "CHOICE!"
        else:
            return "Accepted!"
    elif result_code == "guar. transfer":
        return "Guar. Xfer"
    elif (waitlisted == 1) | (waitlisted == "1"):
        return "Waitlist"
    elif (deferred == 1) | (deferred == "1"):
        return "Deferred"
    elif stage == "pending":
        return "Pending"
    elif stage in [
        "initial materials submitted",
        "mid-year submitted",
        "final submitted",
    ]:
        return "Submitted"
    elif app_type == "interest":
        return "Interest"
    else:
        return "?"
def parse_file(filename):
    """Read a recipe file and return the data as a dict{name, body}.

    Return None on failure.
    """
    try:
        recipe_dict = {}
        with open(filename, "r") as file:
            contents = file.read()
            recipes = contents.split("\n\n\n")
            for recipe in recipes:
                rows = recipe.split("\n")
                name = rows[0]
                recipe_body = "\n".join(rows[1:])
                if name not in recipe_dict:
                    recipe_dict[name] = recipe_body
                else:
                    raise Exception("Recipe with given name already exists.")
        return recipe_dict
    except Exception as e:
        print(e)
        return None
def class_in_path(class_name, path_str):
    """
    Checks if the class of interest is in the tree path.

    Note that the path is supposed to have the nodes separated by the "." symbol.
    The main issue this function addresses is ambiguous cases,
    e.g. SL:PATH vs SL:PATH_AVOID

    Args:
        class_name: str, e.g. SL:PATH
        path_str: str, e.g. IN:GET_ESTIMATED_DEPARTURE.SL:SOURCE

    Returns:
        True if the class is in the tree path, False otherwise
    """
    # "DEPARTURE." in "IN:DEPARTURE.SL:SOURCE" case
    class_in_the_middle = (class_name + ".") in path_str
    # "IN:DEPARTURE.SL:SOURCE".endswith("SOURCE") case
    class_in_the_end = path_str.endswith(class_name)
    return class_in_the_middle or class_in_the_end
def multi_lane_to_dict(lane):
    """Convert a list of lane entries into a dictionary indexed by library ID"""
    return dict(((x['library_id'], x) for x in lane))
def local_repo_name(group, repo_name, pull_id):
    """Combine names to avoid name conflicts."""
    return "{}_{}_{}".format(group, repo_name, pull_id)
def get_whois_key(text):
    """Method that parses a line of text, extracting a key value that appears
    just before a ':'."""
    is_key = ""
    if text[-1] == ":":
        # The last character in text is a ':',
        # so this matches the format of a key value
        is_key = text[:-1]
    else:
        is_key = None
    return is_key
def normalize_bbox(bbox):
    """Normalizes bounding box: sets minimal coords for first point and
    maximal for second point. Returns new bounding box.

    :type bbox: list
    :param bbox: bounding box
    :rtype: list
    :return: new bounding box
    """
    x0, y0, x1, y1 = bbox
    return [min(x0, x1), min(y0, y1), max(x0, x1), max(y0, y1)]