def reparsing_namespaces(namespace_map):
    """compile a string representation of the namespaces"""
    namespace_string = ""
    for prefix, uri in namespace_map.items():
        namespace_string += 'xmlns:%s="%s" ' % (prefix, uri)
    return namespace_string.rstrip()
def list_to_number(column):
    """
    Turns a column of 0s and 1s into an integer

    Args:
        column: List of 0s and 1s to turn into a number
    """
    # Cast column integers to strings
    column = [str(cell) for cell in column]
    # Turn to an integer with base 2
    return int(''.join(column), 2)
def reverse_config_section(section):
    """
    Creates an inverted lookup from a config section.
    Useful to find LEDs and PWM.
    """
    return {v: k for k, v in section.items()}
def define_browsers(browsers, remote_browsers, default_browsers, custom_browsers):
    """Generate the definitions for the browsers.

    A defined browser contains the following attributes:
        'name': real name
        'capabilities': capabilities defined by remote_browsers setting
    """
    browsers_definition = []
    for browser in browsers:
        if browser in remote_browsers:
            browsers_definition.append({
                'name': browser,
                'capabilities': remote_browsers[browser]
            })
        elif browser in default_browsers:
            browsers_definition.append({
                'name': browser,
                'capabilities': {}
            })
        elif browser in custom_browsers:
            browsers_definition.append({
                'name': browser,
                'capabilities': {}
            })
        else:
            msg = [f'Error: the browser {browser} is not defined\n',
                   'available options are:\n',
                   '\n'.join(default_browsers),
                   '\n'.join(remote_browsers)]
            raise Exception(''.join(msg))
    return browsers_definition
def _get_resource_tree(req_path):
    """
    Given a :requested_path, split this path into an array that represents the
    tree to that resource in the server's uploaded-resources folder (uploads).
    e.g.: venv_a/etl_archive_a/subdir_1/etl_script.py
          => [venv_a, etl_archive_a, subdir_1, etl_script.py]
    """
    return req_path.split('/')
def calculate_cell_power_level(x, y, serial):
    """
    Calculate the convoluted power level of a cell based on the formula provided.

    :param x: the x coordinate
    :param y: the y coordinate
    :param serial: the serial number of the device
    :return: the power level

    >>> calculate_cell_power_level(3, 5, 8)
    4
    >>> calculate_cell_power_level(122, 79, 57)
    -5
    >>> calculate_cell_power_level(217, 196, 39)
    0
    >>> calculate_cell_power_level(101, 153, 71)
    4
    """
    rack_id = x + 10
    num = (rack_id * y + serial) * rack_id
    if abs(num) < 100:
        # There is no digit in the 100s position, so take it to be zero, and subtract five from it.
        return -5
    else:
        # Fetch the digit in the 100s position and subtract five from it.
        return int(str(num)[-3]) - 5
def gf_quote(str):
    """
    Changes the underscore for a space and quotes the string.
    """
    return '"' + str.replace("_", " ").replace('"', '\\"') + '"'
def replace(_, result, original, substitute):
    """
    Replace substrings within content.
    """
    return result.replace(original, substitute)
def transpose(matrix):
    """
    Transpose e.g. [[1,2,3], [4,5,6]] to [[1,4], [2,5], [3,6]]
    """
    return list(map(list, zip(*matrix)))
def tf_format(string: str, clip_start: int, clip_end: int) -> str:
    """
    formats a string with clip information, returns result

    clip_start: int
        clip start in seconds
    clip_end: int
        clip end in seconds
    """
    def ts_format(ts: int) -> str:
        """nested function: represent `ts: int` as [(h*)mm]ss, returns result"""
        _mm = ts // 60
        hh = _mm // 60
        mm = _mm - hh * 60
        ss = ts % 60
        result = ""
        for index, unit in enumerate([ss] + [u for u in (mm, hh) if u != 0]):
            if index < 2:  # ss or mm
                result = str(unit).rjust(2, "0") + result
            else:
                result = str(unit) + result
        return result.lstrip("0")

    replaceables = (
        ("{cs}", ts_format(clip_start)),
        ("{css}", clip_start),
        ("{ce}", ts_format(clip_end)),
        ("{ces}", clip_end),
        ("{cer}", f"+{clip_end - clip_start}"),
    )
    for placeholder, value in replaceables:
        if placeholder in string:
            string = string.replace(placeholder, str(value))
    return string
def get_lambda_zip_name(domain):
    """Get name of zip file containing lambda.

    This must match the name created in the makedomainenv script that runs on
    the lambda build server.

    Args:
        domain (string): The VPC's domain name such as integration.boss.

    Returns:
        (string)
    """
    return 'multilambda.{}.zip'.format(domain)
def extract_years_filter(config):
    """
    Extract min and max years to filter data from the "years_filter" value of
    the query configuration. The years are split on the "-" character.

        years_filter: 1780-1918

    :param config: config
    :type config: dict
    :return: min_year, max_year
    :rtype: int, int
    """
    if "years_filter" not in config:
        raise ValueError('years_filter value not found in the config file')
    years = config["years_filter"]
    year_min = years.split("-")[0]
    year_max = years.split("-")[1]
    return year_min, year_max
def parse_coordinates(result):
    """
    Function purpose: parse coordinates from console result into a list
    Input: string
    Output: list
    """
    bboxStr = result[result.find("[") + 1:result.find("]")]
    bboxList = [float(i) for i in bboxStr.split(',')]
    return bboxList
def least_missing(colors):
    """The smallest integer not in 'colors'."""
    colors.sort()
    for color in colors:
        if color + 1 not in colors:
            return color + 1
def upper_bound(arr, value, first, last):
    """Find the upper bound of the value in the array

    upper bound: the first element in arr that is larger than value

    Args:
        arr   : input array
        value : target value
        first : starting point of the search, inclusive
        last  : ending point of the search, exclusive

    Return:
        index : integer
            if index == last => upper bound does not exist
            else             => arr[index] > value
    """
    while first < last:
        mid = first + (last - first) // 2
        if arr[mid] <= value:
            first = mid + 1
        else:
            last = mid
    return first
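# Illustrative usage sketch (values invented here, not from the source): in a
# sorted list, upper_bound returns the index of the first element strictly
# greater than the target, or `last` when no such element exists.
data = [1, 3, 3, 5, 8]
print(upper_bound(data, 3, 0, len(data)))  # 3 -> data[3] == 5
print(upper_bound(data, 8, 0, len(data)))  # 5 == len(data): no upper bound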
def with_end_char(text, char):
    """
    Description: Append a trailing character to a text
    :param text: raw text
    :param char: character to put
    :return: Appended Text (e.g. with_end_char("Accounts", ":") -> "Accounts:")
    """
    return str("").join([text, char])
def generate_ucc_amplitudes(n_electrons, n_spin_orbitals):
    """
    Create lists of amplitudes to generate UCC operators.

    This function does not enforce spin-conservation in the excitation
    operators.

    Args:
        n_electrons (int): Number of electrons.
        n_spin_orbitals (int): Number of spin-orbitals,
            equivalent to number of qubits.

    Returns:
        single_amplitudes, double_amplitudes (list): List of single
            and double amplitudes as [[i, j], t_ij, [k, l], t_kl]

    Raises:
        TypeError: if n_electrons or n_spin_orbitals is not integer or float.
        ValueError: if n_electrons is greater than n_spin_orbitals.

    Notes:
        Assigns value 1 to each amplitude.
    """
    if isinstance(n_electrons, (int, float)):
        n_electrons = int(n_electrons)
    else:
        raise TypeError('Electrons must be a number.')
    if isinstance(n_spin_orbitals, (int, float)):
        n_spin_orbitals = int(n_spin_orbitals)
    else:
        raise TypeError('Orbitals must be a number.')
    if n_electrons > n_spin_orbitals:
        raise ValueError(
            'Number of electrons can not be greater than orbitals.')

    single_amplitudes = []
    double_amplitudes = []
    for one_el in range(0, n_electrons):
        for unocc_orb in range(n_electrons, n_spin_orbitals):
            single_amplitudes.append([[unocc_orb, one_el], 1.0])
            for two_el in range(one_el, n_electrons):
                for two_unocc_orb in range(unocc_orb, n_spin_orbitals):
                    if (two_unocc_orb != unocc_orb) and (two_el != one_el):
                        double_amplitudes.append(
                            [[two_unocc_orb, two_el, unocc_orb, one_el], 1.0])
    return single_amplitudes, double_amplitudes
def clean_file_extensions(filename):
    """
    For solving the error:
    Unknown image file format. One of JPEG, PNG, GIF, BMP required.
    """
    return filename
    # return filename.replace("jpg", "jpeg")
    # if '.webp' in filename:
    #     image = Image.open('new-format-image-from-png.webp')
    #     image = image.convert('RGB')
    #     image.save('converting-from-webp-to-png-format.png', 'png')
def _is_equal(x, y):
    """special comparison used in get_all_doc_starts"""
    return x[0] == y
def get_parent_hierarchy_object_id_str(elt):
    """Get the elt path from the 1st parent with an objectId / ElectionReport."""
    elt_hierarchy = []
    current_elt = elt
    while current_elt is not None:
        if current_elt.get("objectId"):
            elt_hierarchy.append(current_elt.tag + ":" + current_elt.get("objectId"))
            break
        else:
            elt_hierarchy.append(current_elt.tag)
            current_elt = current_elt.getparent()
    return " > ".join(elt_hierarchy[::-1])
def _pointer_str(obj):
    """
    Get the memory address of *obj* as used in :meth:`object.__repr__`.

    This is equivalent to ``sprintf("%p", id(obj))``, but python does not
    support ``%p``.
    """
    full_repr = object.__repr__(obj)  # gives "<{type} object at {address}>"
    return full_repr.rsplit(" ", 1)[1][:-1]
def TSA_rn_v(rnet, vegetation_fraction):
    """
    Vegetation net radiation.

    Chen et al., 2005. IJRS 26(8):1755-1762.
    Estimation of daily evapotranspiration using a two-layer remote sensing model.

    TSA_rn_v(rnet, vegetation_fraction)
    """
    result = vegetation_fraction * rnet
    return result
def convert_to_unsigned_integer(value, size):
    """
    :param int size: number of bits containing this integer
    """
    upper_bound = 2 ** size
    if not (-upper_bound // 2 <= value < upper_bound):
        msg = '{} is out of range of {} bits'.format(value, size)
        raise ValueError(msg)
    all_f_mask = upper_bound - 1
    return value & all_f_mask
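# Illustrative usage with made-up values: negative inputs wrap to their
# two's-complement unsigned form, while in-range positives pass through.
print(convert_to_unsigned_integer(-1, 8))   # 255
print(convert_to_unsigned_integer(200, 8))  # 200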
def IsSorted(arr):
    """
    Returns True if the array is sorted in either ascending or descending
    order; False if not
    """
    ascend, descend = False, False
    for i in range(len(arr) - 1):
        if arr[i] < arr[i + 1]:
            ascend = True
        elif arr[i] > arr[i + 1]:
            descend = True
        # Both directions seen -> the array is not sorted either way.
        if ascend and descend:
            return False
    return True
def _plan(D, W):
    """The naming scheme for a ResNet is 'cifar_resnet_N[_W]'.

    The ResNet is structured as an initial convolutional layer followed by
    three "segments" and a linear output layer. Each segment consists of D
    blocks. Each block is two convolutional layers surrounded by a residual
    connection. Each layer in the first segment has W filters, each layer in
    the second segment has 32W filters, and each layer in the third segment
    has 64W filters.

    The name of a ResNet is 'cifar_resnet_N[_W]', where W is as described
    above. N is the total number of layers in the network: 2 + 6D. The default
    value of W is 16 if it isn't provided. For example, ResNet-20 has 20
    layers. Excluding the first convolutional layer and the final linear
    layer, there are 18 convolutional layers in the blocks. That means there
    are nine blocks, meaning there are three blocks per segment. Hence, D = 3.
    The name of the network would be 'cifar_resnet_20' or 'cifar_resnet_20_16'.
    """
    if (D - 2) % 3 != 0:
        raise ValueError('Invalid ResNet depth: {}'.format(D))
    D = (D - 2) // 6
    plan = [(W, D), (2 * W, D), (4 * W, D)]
    return plan
def bit_reversed(x, n):
    """
    Bit-reversal operation.

    Parameters
    ----------
    x: ndarray<int>, int
        a vector of indices
    n: int
        number of bits per index in ``x``

    Returns
    ----------
    ndarray<int>, int
        bit-reversed version of x
    """
    result = 0
    for i in range(n):                      # for each bit number
        if (x & (1 << i)):                  # if it matches that bit
            result |= (1 << (n - 1 - i))    # set the "opposite" bit in result
    return result
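# Quick illustrative check (values invented here): reversing the 3-bit
# pattern of 6 (0b110) yields 3 (0b011).
print(bit_reversed(6, 3))  # 3
print(bit_reversed(1, 4))  # 8 -- 0b0001 reversed over 4 bits is 0b1000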
def depth(expr):
    """
    depth(expr) finds the depth of the mathematical expr formatted as a list.
    depth is defined as the deepest level of nested operations within the
    expression.
    """
    if not isinstance(expr, list):
        raise TypeError("depth() : expr must be of type list")

    exprlength = len(expr)       # number of nodes at current recursion level
    depths = [0] * exprlength    # depth beneath each node at the current level
    # for every node at the current level
    for ii in range(exprlength):
        # if the node has branches
        if isinstance(expr[ii], list):
            # increment depth and recurse down the branch
            depths[ii] = 1 + depth(expr[ii])
    # pass up the deepest depth beneath the current level
    return max(depths)
def pyramid_sum(lower, upper, margin=0):
    """Returns the sum of the numbers from lower to upper, and outputs a trace
    of the arguments and return values on each call."""
    blanks = " " * margin
    print(blanks, lower, upper)  # Print the arguments
    if lower > upper:
        print(blanks, 0)  # Print the returned value
        return 0
    else:
        result = lower + pyramid_sum(lower + 1, upper, margin + 4)
        print(blanks, result)  # Print the returned value
        return result
def countSlopeChange(slopesBetweenJumps):
    """
    Counts the number of slope changes and corresponds that to sides

    slopesBetweenJumps: list of slopes

    Returns: side count
    """
    currSlope = 10000
    sides = 0
    for x in slopesBetweenJumps:
        if abs(x - currSlope) > 10:
            sides += 1
            currSlope = x
    return sides
def find_largest_digit(n):
    """
    :param n: (int) An integer.
    :return: (int) The single digit that is larger than any other digit in n.
    """
    if n < 0:
        n = -n  # Make the integer positive
    # Base case
    if n < 10:
        return n
    # Recursive case
    else:
        remainder_one = n % 10  # Store the last digit.
        n = n // 10
        remainder_two = n % 10  # Store the second-to-last digit.
        if remainder_one > remainder_two:
            # Compare the last two digits to find the bigger one.
            n = n // 10 * 10 + remainder_one  # Keep the bigger digit in the units position.
        return find_largest_digit(n)
def _splitTag(tag):
    """Split the namespace and tag name"""
    return [part.strip('{') for part in tag.split('}')]
def parse_slice_inv(text):
    """Parse a string into a slice notation.

    This function inverts the result from 'parse_slice'.

    :param str text: the input string.
    :return str: the slice notation.
    :raise ValueError

    Examples:
        parse_slice_inv('[None, None]') == ":"
        parse_slice_inv('[1, 2]') == "1:2"
        parse_slice_inv('[0, 10, 2]') == "0:10:2"
    """
    err_msg = f"Failed to convert '{text}' to a slice notation."

    if len(text) > 1:
        try:
            parts = [None if v.strip() == 'None' else int(v)
                     for v in text[1:-1].split(',')]
        except ValueError:
            raise ValueError(err_msg)

        if len(parts) == 2:
            s0 = '' if parts[0] is None else str(parts[0])
            s1 = '' if parts[1] is None else str(parts[1])
            return f"{s0}:{s1}"

        if len(parts) == 3:
            s0 = '' if parts[0] is None else str(parts[0])
            s1 = '' if parts[1] is None else str(parts[1])
            s2 = '' if parts[2] is None else str(parts[2])
            return f"{s0}:{s1}:{s2}"

    raise ValueError(err_msg)
def cumdiff(yin):
    """
    compute the cumulative mean normalized difference
    """
    W = len(yin)
    yin[0] = 1.
    cumsum = 0.
    for tau in range(1, W):
        cumsum += yin[tau]
        if cumsum != 0:
            yin[tau] *= tau / cumsum
        else:
            yin[tau] = 1
    return yin
def get_median_two_sorted_arrays_merge_sort(arr1, arr2):
    """
    Time complexity: O(m+n)
    Space complexity: O(n)

    Args:
        arr1:
        arr2:

    Returns:

    """
    new_arr = []
    i = 0
    j = 0
    while i < len(arr1) and j < len(arr2):
        if arr1[i] < arr2[j]:
            new_arr.append(arr1[i])
            i += 1
        else:
            new_arr.append(arr2[j])
            j += 1
    while i < len(arr1):
        new_arr.append(arr1[i])
        i += 1
    while j < len(arr2):
        new_arr.append(arr2[j])
        j += 1

    N = len(new_arr)
    mid = len(new_arr) // 2
    if N % 2 == 0:
        return (new_arr[mid] + new_arr[mid - 1]) / 2
    else:
        return new_arr[mid]
def alpha_blend(img1, img2, alpha):
    """
    Blend two images with weights as in alpha.
    """
    return (1 - alpha) * img1 + alpha * img2
def get_var_val(key, ii, varDict):
    """Gets an input in the likes of ${var} and returns the corresponding var
    value from the dict

    Parameters
    ----------
    key: string
        unparsed key of var
    ii: int
        current iteration idx
    varDict: dict
        variable dictionary

    Returns
    -------
    string
        variable value as string
    """
    res = varDict.get(key[2:-1], '0')[ii]
    return str(res)
def compare(c1, c2):
    """Compares two configuration dictionaries

    Returns:
        < 0 if c1 is bigger than c2
          0 if they're equivalent sizes
        > 0 if c2 is bigger than c1
    """
    result = len(c2.keys()) - len(c1.keys())
    while result > -1:
        for k, v in c1.items():
            delta = c2[k] - v
            if delta < 0:
                result = -1
            else:
                result += delta
        break
    return result
def get_next_available_key(iterable, key, midfix="", suffix="",
                           is_underscore=True, start_from_null=False):
    """Get the next available key that does not collide with the keys in the
    dictionary."""
    if start_from_null and key + suffix not in iterable:
        return key + suffix
    else:
        i = 0
        underscore = "_" if is_underscore else ""
        while "{}{}{}{}{}".format(key, underscore, midfix, i, suffix) in iterable:
            i += 1
        new_key = "{}{}{}{}{}".format(key, underscore, midfix, i, suffix)
        return new_key
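# Illustrative usage sketch with an invented key set:
existing = {"layer_0", "layer_1"}
print(get_next_available_key(existing, "layer"))                       # 'layer_2'
print(get_next_available_key(existing, "conv", start_from_null=True))  # 'conv'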
def tail(iterable):
    """Returns all elements excluding the first out of an iterable.

    :param iterable: Iterable sequence.
    :returns: All elements of the iterable sequence excluding the first.
    """
    return iterable[1:]
def extract_method_header(headers):
    """
    Extracts the request method from the headers list.
    """
    for k, v in headers:
        if k in (b':method', u':method'):
            if not isinstance(v, bytes):
                return v.encode('utf-8')
            else:
                return v
def index_to_bytes(i):
    """
    Map the WHATWG index back to the original ShiftJIS bytes.
    """
    lead = i // 188
    lead_offset = 0x81 if lead < 0x1F else 0xC1
    trail = i % 188
    trail_offset = 0x40 if trail < 0x3F else 0x41
    return (lead + lead_offset, trail + trail_offset)
def convert_units(val, fromUnit, toUnit):
    """
    Convert flowrate units. Possible volume values: ml, ul, pl; possible time
    values: hor, min, sec

    :param fromUnit: unit to convert from
    :param toUnit: unit to convert to
    :type fromUnit: str
    :type toUnit: str
    :return: float
    """
    time_factor_from = 1
    time_factor_to = 1
    vol_factor_to = 1
    vol_factor_from = 1

    if fromUnit[-3:] == "sec":
        time_factor_from = 60
    elif fromUnit == "hor":  # does it really return hor?
        time_factor_from = 1 / 60

    if toUnit[-3:] == "sec":
        time_factor_to = 1 / 60
    elif toUnit[-3:] == "hor":
        time_factor_to = 60

    if fromUnit[:2] == "ml":
        vol_factor_from = 1000
    elif fromUnit[:2] == "nl":
        vol_factor_from = 1 / 1000
    elif fromUnit[:2] == "pl":
        vol_factor_from = 1 / 1e6

    if toUnit[:2] == "ml":
        vol_factor_to = 1 / 1000
    elif toUnit[:2] == "nl":
        vol_factor_to = 1000
    elif toUnit[:2] == "pl":
        vol_factor_to = 1e6

    return val * time_factor_from * time_factor_to * vol_factor_from * vol_factor_to
def f(LL):
    """list[list[int]] -> int"""
    (LL + [])[0].append(1)
    return 0
def _get_object_id_from_url(url):
    """
    Extract object ID from Ralph API url.
    """
    return url.rstrip('/').rpartition('/')[2]
def clean_tag(tag):
    """clean up tag."""
    if tag is None:
        return None
    t = tag
    if isinstance(t, list):
        t = t[0]
    if isinstance(t, tuple):
        t = t[0]
    if t.startswith('#'):
        t = t[1:]
    t = t.strip()
    t = t.upper()
    t = t.replace('O', '0')
    t = t.replace('B', '8')
    return t
def has_even_parity(message: int) -> bool:
    """Return true if message has even parity."""
    parity_is_even: bool = True
    while message:
        parity_is_even = not parity_is_even
        message = message & (message - 1)
    return parity_is_even
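# Quick illustrative check (values invented here): 0b1011 has three 1-bits,
# 0b1001 has two.
print(has_even_parity(0b1011))  # False -- odd number of set bits
print(has_even_parity(0b1001))  # True  -- even number of set bits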
def pattern_string_generator(patterns):
    """
    Creates a list of viable pattern strings that are easier to read

    Input:
    patterns --- a list of lists of individual characters
                 e.g. [["A","B","B","A"],["B","A","B","A"]]

    Output:
    pattern_strings --- a list of lists of strings
                        e.g. [["ABBA"],["BABA"]]
    """
    # Convert the site pattern lists to strings
    pattern_strings = []
    while patterns:
        a_count = 0
        b_count = 0
        pattern_str = ""
        pattern = patterns.pop()
        for site in pattern:
            if site == "A":
                b_count += 1
            elif site == "B":
                a_count += 1
            pattern_str += site
        if a_count > 0 and b_count > 0:
            pattern_strings.append(pattern_str)
    return pattern_strings
def build_error_report(results):
    """Build a user-friendly error report for a failed job.

    Args:
        results (dict): result section of the job response.

    Returns:
        str: the error report.
    """
    error_list = []
    for index, result in enumerate(results):
        if not result['success']:
            error_list.append('Experiment {}: {}'.format(index, result['status']))

    error_report = 'The following experiments failed:\n{}'.format('\n'.join(error_list))
    return error_report
def label_connected_components(num_nodes, edges):
    """Given a graph described by a list of undirected edges, find all
    connected components and return labels for each node indicating which
    component they belong to."""
    leader = list(range(num_nodes))

    def head(k):
        if leader[k] == k:
            return k
        else:
            leader[k] = head(leader[k])
            return leader[k]

    for i, j in edges:
        hi, hj = head(i), head(j)
        if hi != hj:
            leader[hi] = hj

    leaders = [head(i) for i in range(num_nodes)]
    reduction = {leader: index for index, leader in enumerate(set(leaders))}
    return [reduction[leader] for leader in leaders]
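# Illustrative usage on a small invented graph: nodes 0-1-2 form one component,
# 3-4 another, and 5 is isolated. Nodes sharing a label are connected; the
# particular label values may vary.
print(label_connected_components(6, [(0, 1), (1, 2), (3, 4)]))  # e.g. [0, 0, 0, 1, 1, 2]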
def dict_key_apply(iterable, str_fkt):
    """
    Applies a string-modifying function to all keys of a nested dict.
    """
    if type(iterable) is dict:
        for key in list(iterable.keys()):
            new_key = str_fkt(key)
            iterable[new_key] = iterable.pop(key)
            if type(iterable[new_key]) is dict or type(iterable[new_key]) is list:
                iterable[new_key] = dict_key_apply(iterable[new_key], str_fkt)
    elif type(iterable) is list:
        for item in iterable:
            item = dict_key_apply(item, str_fkt)
    return iterable
def addNewCards(old_dict, comparison_dict, new_dict):
    """Compare old_dict and comparison_dict, add only those keys not in
    old_dict to the new_dict."""
    if old_dict:
        for k, v in comparison_dict.items():
            if k not in old_dict:
                new_dict[k] = comparison_dict[k]
        return new_dict
    else:
        return comparison_dict
def diff(list_1, list_2):
    """
    get difference of two lists

    :param list_1: list
    :param list_2: list
    :return: list
    """
    return list(set(list_1) - set(list_2))
def sec_from_hms(start, *times):
    """
    Returns a list of times based on adding each offset tuple in times to the
    start time (which should be in seconds). Offset tuples can be in any of
    the forms: (hours), (hours, minutes), or (hours, minutes, seconds).
    """
    ret = []
    for t in times:
        cur = 0
        if len(t) > 0:
            cur += t[0] * 3600
        if len(t) > 1:
            cur += t[1] * 60
        if len(t) > 2:
            cur += t[2]
        ret.append(start + cur)
    return ret
def check_7(oe_repos, srcoe_repos):
    """
    All repositories' name must follow the gitee requirements
    """
    print("All repositories' name must follow the gitee requirements")
    errors_found = 0
    error_msg = "Repo name allows only letters, numbers, or an underscore (_), dash (-),"\
                " and period (.). It must start with a letter, and its length is 2 to 200"\
                " characters."
    for repos in oe_repos, srcoe_repos:
        for repo in repos:
            repo_name = repo["name"].lower()
            if len(repo_name) < 2 or len(repo_name) > 200:
                print("ERROR! {name} too long or too short".format(name=repo_name))
                errors_found += 1
            else:
                new_repo_name = repo_name.replace("_", "").replace("-", "").replace(".", "")
                if not new_repo_name.isalnum():
                    print("ERROR! {name} contains invalid character".format(name=repo_name))
                    errors_found += 1
                elif not repo_name[0].isalpha():
                    print("ERROR! {name} must start with a letter".format(name=repo_name))
                    errors_found += 1
    if errors_found != 0:
        print(error_msg)
    else:
        print("PASS WITHOUT ISSUES FOUND.")
    return errors_found
def sum_of_n_natual_numbers(n):
    """
    Returns sum of first n natural numbers
    """
    try:
        n + 1
    except TypeError:
        # invalid input hence return early
        return
    if n < 1:
        # invalid input hence return early
        return
    return n * (n + 1) // 2
def is_interesting_transaction(txdata, all_addresses):
    """Check if the transaction contains any deposits to our addresses."""
    return any(
        detail["category"] == "receive" and detail["address"] in all_addresses
        for detail in txdata["details"]
    )
def remove_trailing_s(token):
    """Remove trailing s from a string."""
    if token.endswith('s'):
        return token[:-1]
    else:
        return token
def one(iterable, too_short=None, too_long=None):
    """Return the first item from *iterable*, which is expected to contain only
    that item. Raise an exception if *iterable* is empty or has more than one
    item.

    :func:`one` is useful for ensuring that an iterable contains only one item.
    For example, it can be used to retrieve the result of a database query
    that is expected to return a single row.

    If *iterable* is empty, ``ValueError`` will be raised. You may specify a
    different exception with the *too_short* keyword:

        >>> it = []
        >>> one(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        ValueError: too few items in iterable (expected 1)
        >>> too_short = IndexError('too few items')
        >>> one(it, too_short=too_short)  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        IndexError: too few items

    Similarly, if *iterable* contains more than one item, ``ValueError`` will
    be raised. You may specify a different exception with the *too_long*
    keyword:

        >>> it = ['too', 'many']
        >>> one(it)  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        ValueError: Expected exactly one item in iterable, but got 'too', 'many', and perhaps more.
        >>> too_long = RuntimeError
        >>> one(it, too_long=too_long)  # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        RuntimeError

    Note that :func:`one` attempts to advance *iterable* twice to ensure there
    is only one item. See :func:`spy` or :func:`peekable` to check iterable
    contents less destructively.
    """
    it = iter(iterable)

    try:
        first_value = next(it)
    except StopIteration:
        raise too_short or ValueError('too few items in iterable (expected 1)')

    try:
        second_value = next(it)
    except StopIteration:
        pass
    else:
        msg = (
            'Expected exactly one item in iterable, but got {!r}, {!r}, '
            'and perhaps more.'.format(first_value, second_value)
        )
        raise too_long or ValueError(msg)

    return first_value
def dice_coefficient(a, b, case_insens=True):
    """
    :type a: str
    :type b: str
    :type case_insens: bool

    Dice coefficient: 2*nt / (na + nb).
    https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Dice%27s_coefficient#Python
    """
    if case_insens:
        a = a.lower()
        b = b.lower()

    if not len(a) or not len(b):
        return 0.0
    if len(a) == 1:
        a = a + u'.'
    if len(b) == 1:
        b = b + u'.'

    a_bigram_list = []
    for i in range(len(a) - 1):
        a_bigram_list.append(a[i:i + 2])
    b_bigram_list = []
    for i in range(len(b) - 1):
        b_bigram_list.append(b[i:i + 2])

    a_bigrams = set(a_bigram_list)
    b_bigrams = set(b_bigram_list)
    overlap = len(a_bigrams & b_bigrams)
    dice_coeff = overlap * 2.0 / (len(a_bigrams) + len(b_bigrams))
    return round(dice_coeff, 6)
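# Illustrative check with made-up strings: identical words score 1.0, while
# "night" and "nacht" share only the bigram "ht" (1 of 4+4 bigrams).
print(dice_coefficient("night", "night"))  # 1.0
print(dice_coefficient("night", "nacht"))  # 0.25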
def reducefn(x, y):
    """Combine counts arrays.

    e.g. [12, 7, ...], [3, 1, ...] -> [15, 8, ...]
    """
    for i, v in enumerate(y):
        x[i] += v
    return x
def half_adder(a, b):
    """Single bit addition"""
    return (a + b) % 2
def convert_array(arr):
    """
    In-place algorithm without using extra space
    """
    size = len(arr) // 3
    for idx in range(len(arr)):
        swap_index = (idx % 3) * size + (idx // 3)
        while swap_index < idx:
            swap_index = (swap_index % 3) * size + (swap_index // 3)
        arr[idx], arr[swap_index] = arr[swap_index], arr[idx]
    return arr
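# The docstring does not say what the conversion is; tracing the index
# arithmetic, the function interleaves the three equal thirds of the array
# in place. Illustrative run with invented data:
print(convert_array([1, 2, 3, 4, 5, 6]))
# [1, 3, 5, 2, 4, 6] -- thirds [1, 2], [3, 4], [5, 6] are interleaved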
def int_dpid(dpid):
    """Convert a str dpid to an int."""
    dpid = int(dpid.replace(":", ""), 16)
    return dpid
def clean_bibtex_authors(author_str):
    """Convert author names to `firstname(s) lastname` format."""
    authors = []
    for s in author_str:
        s = s.strip()
        if len(s) < 1:
            continue
        if ',' in s:
            split_names = s.split(',', 1)
            last_name = split_names[0].strip()
            first_names = [i.strip() for i in split_names[1].split()]
        else:
            split_names = s.split()
            last_name = split_names.pop()
            first_names = [i.replace('.', '. ').strip() for i in split_names]
        if last_name in ['jnr', 'jr', 'junior']:
            last_name = first_names.pop()
        for item in first_names:
            if item in ['ben', 'van', 'der', 'de', 'la', 'le']:
                last_name = first_names.pop() + ' ' + last_name
        authors.append('"{} {}"'.format(' '.join(first_names), last_name))
    return authors
def get_simbench_code_from_parameters(sb_code_parameters):
    """Converts flag parameters, describing a SimBench grid selection, into the
    unique corresponding SimBench code."""
    switch_param = "no_sw" if not sb_code_parameters[6] else "sw"
    sb_code = str(sb_code_parameters[0]) + "-" + sb_code_parameters[1] + \
        sb_code_parameters[2] + "-" + sb_code_parameters[3] + "-" + \
        str(sb_code_parameters[4]) + "-" + str(sb_code_parameters[5]) + "-" + \
        switch_param
    return sb_code
def lr_scheduler(optimizer, epoch, lr_decay=0.3, lr_decay_epoch=2, number_of_decay=5):
    """
    lr_scheduler decays the learning rate by a factor of lr_decay every
    lr_decay_epoch epochs

    :param optimizer: input optimizer
    :param epoch: epoch number
    :param lr_decay: the rate of reduction, multiplied to learning_rate
    :param lr_decay_epoch: epoch number for decay
    :param number_of_decay: total number of learning_rate reductions
    :return: optimizer
    """
    if lr_decay_epoch * number_of_decay < epoch:
        return optimizer
    if (epoch + 1) % lr_decay_epoch:
        return optimizer
    for param_group in optimizer.param_groups:
        param_group["lr"] *= lr_decay
    return optimizer
def create_tron_config(*args) -> str:
    """
    Convert a list of parameters into a serialized tron grid string.

    Parameters
    ----------
    args
        All of the arguments into tron grid as star args.

    Returns
    -------
    str
        Serialized string
    """
    raw_string = "{};" * len(args)
    return raw_string[:-1].format(*args)
def mod(a: int, b: int) -> int:
    """
    Return a mod b, accounting for positive/negative numbers
    """
    return (a % b + b) % b
def is_channel(code, length):
    """Method to check if a given axis code belongs to the channel dimension.

    Parameters
    ----------
    code : str
        the axis code
    length : int
        the axis length

    Returns
    -------
    bool
    """
    return code == "C" and not length > 8
def get_version(version: tuple) -> str:
    """
    Gets the version of the package based on a :class:`tuple` and returns a
    :class:`str`.

    This method is based on ``django-extensions`` get_version method.
    """
    str_version = ''
    for idx, n in enumerate(version):
        try:
            str_version += '%d' % int(n)
        except ValueError:
            str_version = str_version[:-1]
            str_version += '_%s' % str(n)
        finally:
            if idx < len(version) - 1:
                str_version += '.'
    return str_version
def build_dict(*param_dicts, **param_dict):
    """
    Create a merged dictionary from the supplied dictionaries and keyword
    parameters.
    """
    merged_param_dict = param_dict.copy()
    for d in param_dicts:
        if d is not None:
            # log.info("param_dicts %r" % (d,))
            merged_param_dict.update(d)
    return merged_param_dict
def is_leap_year(year):
    """
    Return boolean flag indicating whether the given year is a leap year or not.
    """
    if year > 1582:
        return year % 4 == 0 and year % 100 != 0 or year % 400 == 0
    else:
        return year % 4 == 0
def find_top(char_locs, pt):
    """Finds the 'top' coord of a word that a character belongs to.

    :param char_locs: All character locations on the grid.
    :param pt: The coord of the required character.
    :return: The 'top' coord.
    """
    if pt not in char_locs:
        return []
    l = list(pt)
    while (l[0], l[1] - 1) in char_locs:
        l = [l[0], l[1] - 1]
    return l
def get_pixel_brightness(pixel):
    """rgb pixel to brightness value"""
    return max((pixel[0], pixel[1], pixel[2])) / 255 * 100
def count_change(amount):
    """Return the number of ways to make change for amount.

    >>> count_change(7)
    6
    >>> count_change(10)
    14
    >>> count_change(20)
    60
    >>> count_change(100)
    9828
    """
    def max_m(num):
        i = 0
        while pow(2, i) < num:
            i += 1
        return i - 1

    def count_change_max(amount, max):
        if amount == 0 or max == 0:
            return 1
        elif amount < 0 or max < 0:
            return 0
        else:
            return (count_change_max(amount - pow(2, max), max)
                    + count_change_max(amount, max - 1))

    return count_change_max(amount, max_m(amount))
def scale(input_value, input_min, input_max, out_min, out_max):
    """scale a value from one range to another"""
    # Figure out how 'wide' each range is
    input_span = input_max - input_min
    output_span = out_max - out_min
    # Convert the left range into a 0-1 range (float)
    valuescaled = float(input_value - input_min) / float(input_span)
    # Convert the 0-1 range into a value in the right range.
    return out_min + (valuescaled * output_span)
def lerp(x, x0, x1, y0, y1):
    """
    Interpolates to get the value of y for x by linearly interpolating between
    the points (x0, y0) and (x1, y1).
    :return:
    """
    t = (x - x0) / (x1 - x0)
    return (1 - t) * y0 + t * y1
def second_to_day(seconds):
    """
    Convert seconds to (fractional) days.

    :param seconds:
    :return:
    """
    day = int(seconds) / 86400
    assert day < 58, 'Too many seconds, reached day %s' % day
    return day
def get_violations_per_category(guideline_violations, guidelines):
    """Get the number of violations per category."""
    violations_per_category = {}
    if any(guidelines):
        # initialize the violations to 0 for all categories
        violations_per_category = {"Required": 0, "Advisory": 0, "Mandatory": 0}
        # count all violations per category
        for guideline_violation in guideline_violations:
            guideline = guideline_violation[0]
            category = guideline.get_category()
            if category in violations_per_category.keys():
                violations_per_category[category] += 1
            else:
                violations_per_category[category] = 1
    return violations_per_category
def func_linear(data, a_factor, y_int):
    """A linear function with y-intercept"""
    return a_factor * data + y_int
def recompute_fuel(positions):
    """
    Move from 16 to 5: 66 fuel : 11 = 1+2+3+4+5+6+7+8+9+10+11 = 66
    Move from 1 to 5: 10 fuel : 4 = 1+2+3+4 = 10
    """
    fuel_per_target = dict()
    for target in range(max(positions) + 1):
        fuel_per_submarine = [sum(range(1, abs(target - position) + 1))
                              for position in positions]
        fuel_per_target[target] = sum(fuel_per_submarine)
    return min(fuel_per_target.values())
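# Illustrative check with invented positions: the cheapest target is 5, where
# the triangular-number move costs sum to 168.
positions = [16, 1, 2, 0, 4, 2, 7, 1, 2, 14]
print(recompute_fuel(positions))  # 168 -- minimum total fuel over all candidate targets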
def default_pop(dictobj, key, default={}):
    """Pop the key from the dict-like object.

    If the key doesn't exist, return a default.

    Args:
        dictobj (dict-like): A dict-like object to modify.
        key (hashable): A key to pop from the dict-like object.
        default: Any value to be returned as default, should the key not exist.

    Returns:
        value: Either the value stored at the key, or the default value.
    """
    try:
        default = dictobj.pop(key)
    except KeyError:
        pass
    return default
def n_xx_n_mod_k(n, k):
    """
    Compute n ** n mod k.
    """
    return pow(n, n, k)
def _year_matches_code(year, code):
    """
    Returns whether `year` falls in the range specified by `code`
    """
    if isinstance(code, int):
        # explicitly specified year as an int
        return year == code

    if not code or code.lower() == "default":
        # empty indicates default case
        return True

    code = code.replace(" ", "")  # strip it of spaces for processing
    if code.startswith("<="):
        return year <= int(code[2:])
    elif code.startswith(">="):
        return year >= int(code[2:])
    elif code.startswith("<"):
        return year < int(code[1:])
    elif code.startswith(">"):
        return year > int(code[1:])
    elif code.startswith("=="):
        return year == int(code[2:])
    elif code.startswith("!="):
        return year != int(code[2:])
    elif "-" in code:
        # range of years (inclusive), such as "2018-2020"
        fr, to, *_ = code.split("-")
        return year >= int(fr) and year <= int(to)
    else:
        # just the year is the same as ==
        return year == int(code)
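# Illustrative checks with made-up year codes:
print(_year_matches_code(2019, "2018-2020"))  # True  -- inclusive range
print(_year_matches_code(2021, ">=2022"))     # False
print(_year_matches_code(1999, ""))           # True  -- empty code means default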
def paths_prob_to_edges_flux(paths_prob):
    """Chops a list of paths into its edges, and calculates the probability
    of each edge across all paths.

    Parameters
    ----------
    paths_prob: dict
        Path tuples as keys, and probabilities as values.

    Returns
    -------
    edge_flux: dictionary
        Edge tuples as keys, and probabilities as values.
    """
    edge_flux = {}
    for path, prob in paths_prob.items():
        edges = []
        for i in range(len(path) - 1):
            # Get edge
            edges.append((path[i], path[i + 1]))

        # If paths loop over the same pair of states multiple times per path,
        # the probability shouldn't be summed more than once.
        uniq_edges = set(edges)
        for edge in uniq_edges:
            # Add the path probability to the edge.
            if edge in edge_flux:
                edge_flux[edge] += prob
            # Else start at zero
            else:
                edge_flux[edge] = prob
    return edge_flux
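# Illustrative usage sketch (hypothetical paths over states A, B, C):
paths = {("A", "B", "C"): 0.6, ("A", "C"): 0.4}
print(paths_prob_to_edges_flux(paths))
# {('A', 'B'): 0.6, ('B', 'C'): 0.6, ('A', 'C'): 0.4}  (key order may vary)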
def convert_to_kw(value: float, precision: int = 1) -> float:
    """Converts watt to kilowatt and rounds to precision"""
    # Don't round if precision is -1
    if precision == -1:
        return value / 1000
    else:
        return round(value / 1000, precision)
def _ensure_mfdataset_filenames(fname):
    """Checks if grib or nemsio data

    Parameters
    ----------
    fname : string or list of strings
        Description of parameter `fname`.

    Returns
    -------
    type
        Description of returned object.
    """
    from glob import glob

    from numpy import sort
    import six

    if isinstance(fname, six.string_types):
        names = sort(glob(fname))
    else:
        names = sort(fname)
    gribs = [True for i in names if 'grb2' in i or 'grib2' in i]
    grib = False
    if len(gribs) >= 1:
        grib = True
    return names, grib
def _rreplace(s, old, new, occurrence=1):
    """Simple function to replace the last 'occurrence' values of 'old' with
    'new' in the string 's'.

    Thanks to https://stackoverflow.com/questions/2556108/
    rreplace-how-to-replace-the-last-occurrence-of-an-expression-in-a-string
    """
    s = s.rsplit(old, occurrence)
    return new.join(s)
def keywords_encode(d):
    """
    Takes a dictionary of keywords and encodes them into a somewhat readable
    url query format. For example:

        {
            'color': ['blue', 'red'],
            'weight': ['normal']
        }

    Results in '+color:blue+color:red+weight:normal'

    Instead of a dictionary we can also use any kind of object which has a
    'keywords' property returning the expected dictionary. Note that that
    object won't be recreated during decode however.
    """
    if not d:
        return ''

    if hasattr(d, 'keywords'):
        d = d.keywords

    return '+'.join('{}:{}'.format(k, v) for k in d for v in d[k])
def check_args(allowed, arg):
    """Raise NameError if argument doesn't match 'allowed' list/set of values"""
    assert type(allowed) in {list, set}, \
        "First argument must be 'allowed' list/set of args values"
    if arg not in allowed:
        raise NameError("Unexpected arg {0}: allowed args are {1}".format(arg, allowed.__str__()))
    return True
def get_correct_url(html, vid_id):
    """get the url that's the exact video we want from a link with multiple results"""
    try:
        url_portion = html.split('" title="' + vid_id + ' ')[0].split('><a href=".')[1]
        return "http://www.javlibrary.com/en" + url_portion
    except:
        return None
def make_wheel_filename_generic(wheel):
    """
    Wheel filenames contain the python version and the python ABI version for
    the wheel. https://www.python.org/dev/peps/pep-0427/#file-name-convention

    Since we're distributing a rust binary this doesn't matter for us ...
    """
    name, version, python, abi, platform = wheel.split("-")

    # our binary handles multiple abi/versions of python
    python, abi = "py2.py3", "none"

    # hack, lets pretend to be manylinux1 so we can do a binary distribution
    if platform == "linux_x86_64.whl":
        platform = "manylinux1_x86_64.whl"
    elif platform == "linux_i686.whl":
        platform = "manylinux1_i686.whl"

    return "-".join((name, version, python, abi, platform))
def concatenate_codelines(codelines):
    """
    Compresses a list of strings into one string.
    """
    codeline = ""
    for l in codelines:
        codeline = codeline + l
    return codeline
def is_letter(char_code):
    """Return True if char_code is a letter character code from the ASCII
    table. Otherwise return False.
    """
    if isinstance(char_code, str) or isinstance(char_code, bytes):
        char_code = ord(char_code)

    if char_code >= 65 and char_code <= 90:  # uppercase letters
        return True
    if char_code >= 97 and char_code <= 122:  # lowercase letters
        return True
    return False
def reduce_names(l):
    """Reduce the names in the list to acronyms, if possible.

    Args:
        l (list(str)): list of names to convert.

    Returns:
        (list(str)): list of converted names.
    """
    for i, item in enumerate(l):
        if item == 'QuadraticDiscriminantAnalysis':
            l[i] = 'QDA'
        elif item == 'KNeighborsClassifier':
            l[i] = 'KNN'
        elif item == 'RandomForestClassifier':
            l[i] = 'RandomForest'
        elif item == 'LogisticRegression':
            l[i] = 'LogReg'
    return l
def cropRegion(coord, topCrop=0, bottomCrop=0, leftCrop=0, rightCrop=0):
    """crops a region defined by two coordinates"""
    w = coord[1][0] - coord[0][0]       # x2 - x1
    h = coord[1][1] - coord[0][1]       # y2 - y1
    y1 = coord[0][1] + topCrop * h      # y1 = y1 + topCrop * h
    y2 = coord[1][1] - bottomCrop * h   # y2 = y2 - bottomCrop * h
    x1 = coord[0][0] + leftCrop * w     # x1 = x1 + leftCrop * w
    x2 = coord[1][0] - rightCrop * w    # x2 = x2 - rightCrop * w
    return [(int(x1), int(y1)), (int(x2), int(y2))]
def intensity_perpendicular_from_monomer(M, A_1, A_2, b, c_b_1):
    """
    Calculate perpendicular intensity from monomer fraction, monomer and dimer
    anisotropy, brightness relation and monomer brightness.
    """
    return (1 / 3) * (((1 - A_1) - (1 - A_2) * b) * M + (1 - A_2) * b) * c_b_1
def slash_esc(string):
    """
    :type string: str
    :rtype: str
    """
    return string.replace("/", r"\/")
def format_list(list1, fmt='%16s', delimiter=","):
    """format list of numbers to string. delimiter defaults to ','"""
    string1 = delimiter.join(fmt % h for h in list1) + '\n'
    return string1
def restricang(thtg):
    """
    0 to 2pi range restriction of angle
    """
    # 6.28 is used as an approximation of 2*pi throughout.
    while thtg < 0:
        thtg = 6.28 + thtg
    while thtg > 6.28:
        thtg = thtg - 6.28
    return thtg