def allowed_file(filename, extensions={'csv'}):
    """
    Checks if a filename contains an allowable extension.

    Parameters
    ----------
    filename : str
        The filename to check.
    extensions : set
        The set of allowable file extensions.

    Returns
    -------
    allowed : bool
        True if allowable extension, False otherwise.
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in extensions
def add_zeros(x, n):
    """Append n zeros to the integer x (helper for multiplication)."""
    x_digits = str(x)
    for degree in range(n):
        x_digits += '0'
    return int(x_digits)
def sum_min_edge_weight(acc, w1, w2):
    """Sums up edge weights by adding the minimum of w1 and w2.

    This results in lower weighted edges for margin figures.
    """
    return acc + min(w1, w2)
def format_string(input_):
    """Formats the input depending on whether it is a string or a list of strings.

    Determines the type of the input. If it is a string, strips leading and
    trailing spaces. If it is a list, the same stripping is applied to each of
    the list elements that are strings.

    Arg:
        input_: string or list of strings that needs to be formatted

    Return:
        a formatted string is returned if a string is provided
        a formatted list is returned if a list is provided
    """
    if isinstance(input_, str):
        return input_.strip()
    if isinstance(input_, list):
        res = []
        for element in input_:
            if isinstance(element, str):
                res.append(element.strip())
            else:
                # NOTE when the element is not of string type (we can handle this case differently if necessary)
                res.append(element)
        return res
    # NOTE Simply returns the input if the input is not a string or a list
    return input_

# TestCases:
# print(format_string(" JaS on "))
# print(format_string([" JaS on C ", "CHE"]))
# print(format_string([" JaS on C ", "CHE", 6]))
# print(format_string(6))
# print(format_string({"TeamRU": 2020}))
def get_coach_data(filename):
    """
    :param filename: e.g. 'james.txt'
    :return: list of comma-separated values from the first line, stripped of
        surrounding whitespace, or None on a file error
    """
    try:
        with open(filename) as f:
            data = f.readline()
        return data.strip().split(',')
    except IOError as ioerr:
        print('File error: ' + str(ioerr))
        return None
def filter_messages_keep(frame_ids_to_keep, array_of_msgs):
    """Given an array of can msgs, remove the messages that have ids NOT in
    frame_ids_to_keep and return.

    If the data field is in bytearray format it will be changed to str.
    """
    filtered_arr = []
    for msg in array_of_msgs:
        frame_id, _, data, bus = msg
        if type(data) != str:
            msg = (frame_id, _, str(data), bus)
        for id_to_keep in frame_ids_to_keep:
            if frame_id == id_to_keep:
                filtered_arr.append(msg)
    return filtered_arr
def mul(a, b):
    """Component-wise 3-vector multiplication; returns a homogeneous
    4-component result with w = 1.0."""
    return [a[0]*b[0], a[1]*b[1], a[2]*b[2], 1.0]
def merge_dicts(*dict_args):
    """
    Given any number of dicts, shallow copy and merge into a new dict,
    precedence goes to key value pairs in latter dicts. (Pre-Python 3.5)
    (http://stackoverflow.com/questions/38987/how-to-merge-two-python-dictionaries-in-a-single-expression)
    """
    result = {}
    for dictionary in dict_args:
        result.update(dictionary)
    return result

def clip(val, lower=0, upper=100):
    """Limit value to be between lower and upper limits"""
    return max(lower, min(val, upper))
def parse_host(hostname, default_port):
    """Translate something like 'foobar:123' -> ('foobar', 123)."""
    port = default_port
    num_cols = hostname.count(":")
    if num_cols:
        index = hostname.rindex(":")
        if num_cols > 1:
            for i in range(len(hostname) - 1, index, -1):
                char = hostname[i]
                if char == "]":
                    # our nearest end colon is inside brackets. no port here.
                    index = None
                    break
            if index:
                port = hostname[index + 1:]
                if port:
                    port = int(port, 10)
                ipv6_hostname = hostname[:index]
                if (ipv6_hostname[0], ipv6_hostname[-1]) != ("[", "]"):
                    raise ValueError(
                        f"An IPv6 address ({hostname!r}) must be enclosed in square brackets"
                    )
                hostname = ipv6_hostname[1:-1]
        else:
            hostname, port = hostname[:index], hostname[index + 1:]
            if port:
                port = int(port, 10)
    return hostname, port
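A quick illustration of the parsing behaviour above (a minimal usage sketch, not part of the original source):

print(parse_host("foobar:123", 80))     # ('foobar', 123)
print(parse_host("example.com", 8080))  # ('example.com', 8080)  -- falls back to default_port
print(parse_host("[::1]:9000", 80))     # ('::1', 9000)  -- brackets stripped, port parsed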
def exp_out(t):
    """Exponential out. :math:`f(t) = 1 - 2^{-10t}`"""
    return 1 - pow(2, -10 * t)
def build_logfile_path(logname, logType, threadId, log_num=1):
    """ Helper to create a logfile path incorporating the thread ID in the
    filename before the file type suffix.

    @param logname: The valid base path without the thread Id.
    @type logname: Str
    @param logType: The type of log (e.g. fuzzing)
    @type logType: Str
    @param threadId: The thread id to be inserted into the path.
    @type threadId: Int
    @param log_num: The current log number of this type. Used when chunking logs
    @type log_num: Int

    @return: Formatted logfile path
    @rtype : Str
    """
    typeStart = logname.rfind(".txt")
    return f"{logname[:typeStart]}.{logType}.{threadId!s}.{log_num!s}{logname[typeStart:]}"
def convert_to_object(obj_dict):
    """
    Function that takes in a dict and returns a custom object associated with the dict.
    This function makes use of the "__module__" and "__class__" metadata in the dictionary
    to know which object type to create.
    """
    if "__class__" in obj_dict:
        # Pop ensures we remove metadata from the dict to leave only the instance arguments
        class_name = obj_dict.pop("__class__")
        # Get the module name from the dict and import it
        module_name = obj_dict.pop("__module__")
        # We use the built-in __import__ function since the module name is not known until runtime
        module = __import__(module_name)
        # Get the class from the module
        class_ = getattr(module, class_name)
        # Use dictionary unpacking to initialize the object
        obj = class_(**obj_dict)
    else:
        obj = obj_dict
    return obj
def isYes(string):
    """Returns True if the string represents a yes, False if it represents a no,
    the string 'change' if it represents an on-change setting, and None otherwise."""
    value = string.strip().lower()
    if value in ['yes', 'always', 'on', 'true']:
        return True
    if value in ['no', 'never', 'off', 'false', 'null']:
        return False
    if value in ['changed', 'change', 'onchange', 'on_change', 'diff']:
        return 'change'
def _threshold_calc(random_edge, max_edge, vertex_degree):
    """
    Calculate threshold for branch_gen function.

    :param random_edge: number of vertex edges
    :type random_edge: int
    :param max_edge: maximum edge number
    :type max_edge: int
    :param vertex_degree: vertex degree
    :type vertex_degree: int
    :return: threshold as int
    """
    threshold = min(random_edge, abs(max_edge - vertex_degree))
    return threshold
def find_cavities(grid):
    """
    :type grid: list[str]
    :rtype: list[str]
    """
    for i in range(1, len(grid) - 1):
        for j in range(1, len(grid[i]) - 1):
            if (
                (grid[i - 1][j] != 'X' and int(grid[i][j]) > int(grid[i - 1][j]))
                and (grid[i][j + 1] != 'X' and int(grid[i][j]) > int(grid[i][j + 1]))
                and (grid[i + 1][j] != 'X' and int(grid[i][j]) > int(grid[i + 1][j]))
                and (grid[i][j - 1] != 'X' and int(grid[i][j]) > int(grid[i][j - 1]))
            ):
                grid[i] = grid[i][:j] + "X" + grid[i][j + 1:]
    return grid
def shell_quote(var):
    """
    Escape single quotes and add double quotes around a given variable.

    Args:
        var (str): string to add quotes to

    Returns:
        str: string wrapped in quotes

    .. warning::
        This is not safe for untrusted input and only valid in this context
        (``os.environ``).
    """
    _repr = repr(var)
    if _repr.startswith('\''):
        return "\"%s\"" % _repr[1:-1]
    # repr() already uses double quotes when the value contains a single quote
    return _repr
def get_color(color_dict, colors):
    """Retrieve the index of the color set in colors that matches color_dict."""
    for i, color in enumerate(colors):
        for data in color:
            equal = True
            for k, v in data.items():
                if k not in color_dict or v != color_dict[k]:
                    equal = False
                    break
            if equal:
                return i
    return -1
def rigidity_bending_plate(height, e_modulus, poisson):
    """ Calculates the bending rigidity of a plate. """
    return e_modulus * (height ** 3) / (12 * (1 - poisson ** 2))

def implies(x, y):
    """ Returns "x implies y" / "x => y" """
    return not x or y
def merge_schemas(schema, old_schema):
    """ Merges two JSON schemas on a column. """
    if old_schema is None:
        return schema
    elif schema['type'] != old_schema['type']:
        return old_schema
    elif 'enum' in schema and 'enum' in old_schema:
        merged_enum = list(old_schema['enum'])
        for enum in schema['enum']:
            if enum not in merged_enum:
                merged_enum.append(enum)
        return dict(old_schema, enum=merged_enum)
    elif 'inclusiveMinimum' in schema and 'inclusiveMinimum' in old_schema:
        merged_min = min(schema['inclusiveMinimum'], old_schema['inclusiveMinimum'])
        # take the larger of the two maxima so the merged range covers both schemas
        merged_max = max(schema['inclusiveMaximum'], old_schema['inclusiveMaximum'])
        return dict(old_schema, inclusiveMinimum=merged_min, inclusiveMaximum=merged_max)
def filter_dict_keys(orig_dict, keys_to_keep, *, optional=False):
    """
    Returns a copy of a dictionary filtered by a collection of keys to keep

    Args:
        orig_dict (dict): A dictionary
        keys_to_keep (iterable): Keys to filter on
        optional (bool): If True, ignore keys that don't exist in the dict. If False, raise a KeyError.
    """
    return {
        key: orig_dict[key]
        for key in keys_to_keep
        if not optional or key in orig_dict
    }
def find_range(iterable, predicate):
    """Find the indices of the first range of consecutive items which satisfy
    the given predicate. Returns (-1, -1) if there is no such range.

    find_range([0, 0, 1, 1, 0], lambda e: e > 0) => (2, 4)
    """
    iterator = enumerate(iterable)
    start_index = next((i for i, value in iterator if predicate(value)), -1)
    if start_index == -1:
        return -1, -1
    j = start_index
    for j, value in iterator:
        if not predicate(value):
            end_index = j
            break
    else:
        end_index = j + 1
    return start_index, end_index
def euclidean_distance(point_a, point_b):
    """ Returns the euclidean distance between two points with (i, j) coordinates. """
    # take the square root so this is a true distance, not the squared distance
    return ((point_a[0] - point_b[0]) ** 2 + (point_a[1] - point_b[1]) ** 2) ** 0.5
def max_lt(seq, val):
    """
    Return greatest item in seq for which item < val applies.
    None is returned if seq was empty or all items in seq were >= val.
    """
    result = None
    for item in seq:
        if item < val and (result is None or item > result):
            result = item
    return result
def validate_rng_seed(seed, min_length):
    """
    Validate random hexadecimal seed.
    returns => <boolean>

    seed: <string> hex string to be validated
    min_length: <int> number of characters required. > 0
    """
    if len(seed) < min_length:
        print("Error: Computer entropy must be at least {0} characters long".format(min_length))
        return False
    if len(seed) % 2 != 0:
        print("Error: Computer entropy must contain an even number of characters.")
        return False
    try:
        int(seed, 16)
    except ValueError:
        print("Error: Illegal character. Computer entropy must be composed of hexadecimal characters only (0-9, a-f).")
        return False
    return True

def convert_to_bq_string(mapping_list):
    """
    Converts list of lists to bq INSERT friendly string

    :param mapping_list: list of lists where the inner lists have two items
    :return: bq INSERT formatted string
    """
    bq_insert_list = []
    for hpo_rdr_item in mapping_list:
        bq_insert_list.append("(\"{hpo_rdr_id}\", \"{src_id}\")".format(
            hpo_rdr_id=hpo_rdr_item[0], src_id=hpo_rdr_item[1]))
    bq_insert_string = ', '.join(bq_insert_list)
    return bq_insert_string
def find(word, letter):
    """ Find letter in word and return the index of the first occurrence, or -1. """
    index = 0
    while index < len(word):
        if word[index] == letter:
            return index
        index = index + 1
    return -1
def _simplify_device_name(device):
    """/job:localhost/replica:0/task:0/device:CPU:0 -> /cpu:0"""
    prefix = '/job:localhost/replica:0/task:0/device:'
    if device.startswith(prefix):
        device = '/' + device[len(prefix):]
    return device.lower()

def transpose_2d(a):
    """
    Transpose a given matrix using zip. A 1x4 matrix becomes a 4x1 matrix.

    :param a: (list) 2D Matrix to transpose
    :return: (list) Transposed 2d matrix of a
    """
    # check if given matrix is list
    if type(a) != list:
        raise TypeError('Error xm10: Incorrect type, matrices must be of type "list"')
    # check that rows are of matching length
    l = len(a[0])
    for row in a:
        if len(row) != l:
            raise Exception("Error xm11: Row lengths do not match")
    # return transposed matrix
    return list(map(list, zip(*a)))

def get_book_tag(tool_name, category):
    """ Get User Manual HTML tag """
    prefix = "https://www.whiteboxgeo.com/manual/wbt_book/available_tools"
    url = "{}/{}.html#{}".format(prefix, category, tool_name)
    html_tag = "<a href='{}' target='_blank'>WhiteboxTools User Manual</a>".format(url)
    return html_tag
def compile_output_errors(filepath, is_filename_error, filename_error_output,
                          is_error, forecast_error_output, is_date_error,
                          forecast_date_output):
    """
    purpose: update locally_validated_files.csv and remove deleted files
    params:
    * filepath: Full filepath of the forecast
    * is_filename_error: Filename != file path (True/False)
    * filename_error_output: Text output error filename != file path
    * is_error: Forecast file has error (True/False)
    * forecast_error_output: Text output forecast file error
    * is_date_error: forecast_date error (True/False)
    * forecast_date_output: Text output forecast_date error
    """
    # Initialize output errors as list
    output_error_text = []

    # Iterate through params
    error_bool = [is_filename_error, is_error, is_date_error]
    error_text = [filename_error_output, forecast_error_output, forecast_date_output]

    # Loop through all possible errors and add to final output
    for i in range(len(error_bool)):
        if error_bool[i]:  # Error == True
            output_error_text += error_text[i]

    # Output errors if present as dict
    # Output_error_text = list(chain.from_iterable(output_error_text))
    return output_error_text
def parse_feats(feats):
    """
    Helper function for dealing with the feature values that Stanza returns.
    They look like "Case=Nom|Gender=Fem|Number=Sing" and we convert it to a
    dictionary with keys "Case" (e.g. "NOM"), "Gender" (e.g. "FEM"),
    "Number" (e.g. "SIN").

    We capitalize the values and make them 3 characters long for compatibility
    with the notation that is used in Morphy.
    """
    pairs = []
    for pair in feats.split("|"):
        key, val = pair.split("=")
        val = val[:3].upper()
        pairs.append((key, val))
    return dict(pairs)
def quote(lst: list) -> list:
    """Put quotation marks around every list element, which is assumed to be a str."""
    return [f"\"{element}\"" for element in lst]

def getManifestName(manifest):
    """ Returns name of manifest. """
    return manifest["manifest"]["name"]

def compute_occurrence_indices(lst):
    """
    Returns a 0-based list of integers specifying which occurrence,
    i.e. enumerated duplicate, each list item is.

    For example, if `lst` = [ 'A','B','C','C','A'] then the
    returned list will be  [  0 , 0 , 0 , 1 , 1 ].  This is useful
    when working with `DataSet` objects that have `collisionAction`
    set to "keepseparate".

    Parameters
    ----------
    lst : list
        The list to process.

    Returns
    -------
    list
    """
    lookup = {}
    ret = []
    for x in lst:
        if x not in lookup:
            lookup[x] = 0
        else:
            lookup[x] += 1
        ret.append(lookup[x])
    return ret
def strip_prefixes(s, prefixes=()):
    """
    Return the `s` string with any of the strings in the `prefixes` set stripped.
    Normalize and strip spacing.
    """
    s = s.split()
    # strip prefixes.
    # NOTE: prefixes are hard to catch otherwise, unless we split the
    # author vs copyright grammar in two
    while s and s[0].lower() in prefixes:
        s = s[1:]
    s = u' '.join(s)
    return s
def to_be_archived(row):
    """Condition function to designate if issue should be archived.

    Args:
        row (dict): Row to be checked.

    Returns:
        bool: True if issue should be archived, False otherwise.
    """
    return row["Priority"] == "Done"
def _make_pr(source_repo, source_branch, base_ref, base_url=''):
    """Create a PR JSON object."""
    return {
        'head': {
            'repo': {
                'full_name': source_repo,
            },
            'ref': source_branch,
        },
        'base': {
            'ref': base_ref,
            'repo': {
                'clone_url': base_url,
            },
        },
    }

def make_id(letter_code, id_number):
    """
    Make the standard-format id (3- or 5-letter alpha code, followed by 7-digit number).

    Parameters
    ----------
    letter_code : str
        3-character code (e.g. USA, BRA, CHN) or 5-character source code.
    id_number : int
        Number less than 10-million.

    Returns
    -------
    idnr : unicode
        Alpha code followed by zero-leading 7-character numeral.
    """
    return u"{alpha}{num:07d}".format(alpha=letter_code, num=id_number)
def get_minimum_set_cover(nodelist, listofsubsets):
    """
    Implements the minimum set cover algorithm to find non-overlapping sets
    out of the 80 ribosomal sampled regions

    Parameters
    ----------
    nodelist: list
    listofsubsets: list

    Returns
    -------
    cover: list
        list of sets of sampled regions
    """
    indices = set(range(len(nodelist)))
    elems = set(e for s in listofsubsets for e in s)
    if elems != indices:
        return None
    covered = set()
    cover = []
    while covered != elems:
        subset = max(listofsubsets, key=lambda s: len(s - covered))
        cover.append(subset)
        covered |= subset
    return cover
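A small sanity check of the greedy selection above (illustrative only; the node names are made up):

nodes = ['r1', 'r2', 'r3', 'r4']              # indices 0..3
subsets = [{0, 1}, {1, 2}, {2, 3}]
print(get_minimum_set_cover(nodes, subsets))  # [{0, 1}, {2, 3}]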
def munsell_value_moon1943(Y):
    """
    Returns the *Munsell* value :math:`V` of given *luminance* :math:`Y` using
    *Moon and Spencer (1943)* method.

    Parameters
    ----------
    Y : numeric
        *luminance* :math:`Y`.

    Returns
    -------
    numeric
        *Munsell* value :math:`V`.

    Notes
    -----
    -   Input *Y* is in domain [0, 100].
    -   Output *V* is in domain [0, 10].

    References
    ----------
    .. [5]  http://en.wikipedia.org/wiki/Lightness (Last accessed 13 April 2014)

    Examples
    --------
    >>> munsell_value_moon1943(10.08)  # doctest: +ELLIPSIS
    3.7462971...
    """
    V = 1.4 * Y ** 0.426
    return V
def get_category_id(k):
    """
    :param k: class id corresponding to coco.names
    :return: category id as used in instances_val2014.json
    """
    kk = k
    if 12 <= k <= 24:
        kk = k + 1
    elif 25 <= k <= 26:
        kk = k + 2
    elif 27 <= k <= 40:
        kk = k + 4
    elif 41 <= k <= 60:
        kk = k + 5
    elif k == 61:
        kk = k + 6
    elif k == 62:
        kk = k + 8
    elif 63 <= k <= 73:
        kk = k + 9
    elif 74 <= k <= 80:
        kk = k + 10
    return kk
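A few illustrative mappings produced by the branches above (assuming 1-based ids from coco.names):

print(get_category_id(1))   # 1  -- unchanged below the first gap
print(get_category_id(12))  # 13
print(get_category_id(80))  # 90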
def pad(data, pad_id):
    """ Pad all lists in data to the same length. """
    width = max(len(d) for d in data)
    return [d + [pad_id] * (width - len(d)) for d in data]
def camel_case(name):
    """Convert words into CamelCase."""
    # capitalize each word, not just the first one
    return ''.join(word.capitalize() for word in name.split())
def find_first_list_element_above(list, value):
    """
    Simple method to return the index of the first element of a list that is
    greater than a specified value.

    Args:
        list: List of floats
        value: The value that the element must be greater than
    """
    return next(x[0] for x in enumerate(list) if x[1] > value)

def subtract_dict(bigger, smaller, prefix=''):
    """Subtract a dict from another"""
    ret = bigger.copy()
    for key, val in smaller.items():
        if key not in ret:
            continue
        if isinstance(ret[key], dict) and isinstance(val, dict) and ret[key] != val:
            ret[key] = subtract_dict(ret[key], val, prefix + '  ')
        elif ret[key] == val:
            del ret[key]
    return ret

def get(id):
    """
    Returns a user by ID.

    :param id: Id of the user.
    :return: Dictionary containing the user. 200 if user found. 404 if the user does not exist.
    """
    result = {
        "id": id,
        "first_name": "John",
        "last_name": "Smith",
        "profile_image_url": "IMAGE_URL"
    }
    return result
def tags_from_context(context):
    """Helper to extract meta values from a Celery Context"""
    tag_keys = (
        'compression', 'correlation_id', 'countdown', 'delivery_info', 'eta',
        'exchange', 'expires', 'hostname', 'id', 'priority', 'queue',
        'reply_to', 'retries', 'routing_key', 'serializer', 'timelimit',
        'origin', 'state',
    )

    tags = {}
    for key in tag_keys:
        value = context.get(key)

        # Skip this key if it is not set
        if value is None or value == '':
            continue

        # Skip `timelimit` if it is not set (its default/unset value is a
        # tuple or a list of `None` values)
        if key == 'timelimit' and value in [(None, None), [None, None]]:
            continue

        # Skip `retries` if its value is `0`
        if key == 'retries' and value == 0:
            continue

        # Celery 4.0 uses `origin` instead of `hostname`; this change preserves
        # the same name for the tag despite Celery version
        if key == 'origin':
            key = 'hostname'

        # prefix the tag as 'celery'
        tag_name = 'celery.{}'.format(key)
        tags[tag_name] = value
    return tags
def word_counter(words, text):
    """Count how many of the given words appear in each text entry."""
    total = [0] * len(text)  # one counter per text entry
    for i, txt in enumerate(text):
        for word in words:
            if word in txt:
                total[i] = total[i] + 1
    return total
def is_turkish_id(x):
    """ checks if given id is valid TC ID """
    if len(str(x)) != 11:
        return False
    str_id = str(x)
    # first 10 digit sum mod 10 equals 11th digit control
    lst_id = [int(n) for n in str_id if n.isdigit()]
    if lst_id[0] == 0:
        return False
    # first 10 digit sum
    first_10_sum = sum(lst_id[:10])
    if first_10_sum % 10 != lst_id[-1]:
        return False
    total_odds = sum(lst_id[0::2][:-1])
    total_evens = sum(lst_id[1::2][:-1])
    is_10 = (total_odds * 7 + total_evens * 9) % 10 == lst_id[9]
    is_11 = (total_odds * 8) % 10 == lst_id[-1]
    if not is_10 or not is_11:
        return False
    else:
        return True
def render_app_label(context, app, fallback=""):
    """ Render the application label. """
    try:
        text = app['app_label']
    except KeyError:
        text = fallback
    except TypeError:
        text = app
    return text

def _get_name(f):
    """Gets the name of underlying objects."""
    if hasattr(f, '__name__'):
        return f.__name__
    # Next clause handles functools.partial objects.
    if hasattr(f, 'func') and hasattr(f.func, '__name__'):
        return f.func.__name__
    return repr(f)

def sorter(a, b):
    """Option sorter"""
    if 'priority' not in a[1] and 'priority' not in b[1]:
        return 0
    elif 'priority' in a[1] and 'priority' not in b[1]:
        return -1
    elif 'priority' in b[1] and 'priority' not in a[1]:
        return +1
    elif a[1]['priority'] > b[1]['priority']:
        return +1
    elif a[1]['priority'] == b[1]['priority']:
        return 0
    else:
        return -1

def tkcolour_from_rgb(rgb):
    """ Translates an rgb tuple of ints to a tkinter friendly color code """
    return "#%02x%02x%02x" % rgb
def snake_case_split(identifier):
    """Split snake_case function names into tokens.

    Args:
        identifier (str): Identifier to split

    Returns:
        (list): lower case split tokens. ex: ['snake', 'case']
    """
    return [token.lower() for token in identifier.split('_')]
def bubbleSort2(nums):
    """
    Improved version, stop when no swap occurs.

    :type nums: List[int]
    :rtype: List[int]
    """
    res = list(nums)  # I don't want to change the input list
    flag = True
    while flag:
        flag = False
        for j in range(1, len(res)):
            if res[j - 1] > res[j]:
                res[j - 1], res[j] = res[j], res[j - 1]
                flag = True
    return res
def partition(arr, low, high):
    """Take the last element as the pivot, and place that element in the
    correct sorted position by moving all smaller items to the left of the
    pivot and all larger items to the right of the pivot."""
    # Set i to be index of smaller element
    i = low - 1
    # Assume last element as pivot
    pivot = arr[high]
    # Iterate over indices from low to high
    for j in range(low, high):
        # If you encounter an element smaller than or equal to the pivot
        if arr[j] <= pivot:
            # increment index of smaller element
            i += 1
            # swap the smaller and larger elements
            arr[i], arr[j] = arr[j], arr[i]
    # once you reach the end, swap the pivot with the first element larger than it
    arr[i + 1], arr[high] = arr[high], arr[i + 1]
    # return index of pivot
    return i + 1
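For context, a minimal quicksort driver built on the partition step above (a sketch, not part of the original source):

def quicksort(arr, low=0, high=None):
    # sort arr in place using the partition function defined above
    if high is None:
        high = len(arr) - 1
    if low < high:
        p = partition(arr, low, high)
        quicksort(arr, low, p - 1)
        quicksort(arr, p + 1, high)

data = [9, 3, 7, 1, 5]
quicksort(data)
print(data)  # [1, 3, 5, 7, 9]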
def find_message(text):
    """Find a secret message (the uppercase letters in the text)."""
    import re
    pattern = re.compile(r'[A-Z]')
    res = pattern.findall(text)
    resi = ''
    for each in res:
        resi = resi + each
    return resi
def get_B0(dataset):
    """Get the trait value at the minimum temperature."""
    # Initialize the temperature variable at a very high number.
    min_temp = 9999
    # Get the minimum temperature value.
    for row in dataset:
        min_temp = min(min_temp, float(row[4]))
    # Initialize the trait variable at a very high number.
    min_trait = 999999
    # Get the value at the minimum temperature.
    for row in dataset:
        if float(row[4]) == min_temp:
            min_trait = min(min_trait, float(row[5]))
    return min_trait

def get_sex(sex):
    """Return a consistent sex notation (male, female)."""
    if sex.lower() == 'm':
        return 'male'
    if sex.lower() == 'f':
        return 'female'
    return sex.lower()
def para_filtering(turn, sentences, K):
    """filter paraphrase"""
    missing_values = []
    for domain_slot, value in turn["turn_label"]:
        if (value in turn["system_transcript"]) and (value not in turn["transcript"]):
            missing_values.append(value)

    value_list = []
    best_sent = ""
    for domain_slot, value in turn["turn_label"]:
        domain, slot = domain_slot.split("-")
        if slot == "parking":
            value = slot
        elif slot == "internet":
            value = "wifi"
        if value not in missing_values:
            value_list.append(value)

    count = 0
    for sent in sentences:
        sent = sent.lower()
        flag = True
        for value in value_list:
            if value not in sent:
                flag = False
                break
        if flag and (K == count):
            best_sent = sent
            break
        elif flag and (count < K):
            count += 1
    return best_sent
def resolve_data_sink_option(args, pipeline):
    """
    Decide whether data sinks should be rewritten.

    :param args: command line arguments dict (uses "--rewrite-datasinks")
    :param pipeline: pipeline configuration dict (uses "ds_overwrite")
    :return: True if data sinks should be rewritten, False otherwise
    """
    if (
        args["--rewrite-datasinks"] or pipeline["ds_overwrite"]
    ):  # GLOBAL_DATA_SINK_REWRITE
        return True
    return False
def calc_ar_neff(phi, n=1):
    """Calculate number of effective, i.e. independent samples for a given
    lag-one autocorrelation.

    Parameters
    ----------
    phi : float
        Lag-one autocorrelation parameter
    n : int
        Number of datapoints in the time series

    Returns
    -------
    neff : float
        Number of independent observations

    Reference
    ---------
    .. Wilks, D.S., 2006, Statistical methods in the atmospheric sciences,
       Elsevier, 2nd Edition, Chapter 5, p. 144
    """
    neff = n * (1 - phi) / (1 + phi)
    return neff
def _maybe_parenthesise(x, convert=str):
    """Convert an object to a string and surround it in parentheses if it
    contains an operator."""
    warrenters = '+*/\\-^'
    ret = convert(x)
    if any((w in ret) for w in warrenters):
        return "(" + ret + ")"
    return ret
def ltrimboth(l, proportiontocut):
    """
    Slices off the passed proportion of items from BOTH ends of the passed
    list (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND 'rightmost'
    10% of scores.  Assumes list is sorted by magnitude.  Slices off LESS if
    proportion results in a non-integer slice index (i.e., conservatively
    slices off proportiontocut).

    Usage:   ltrimboth (l,proportiontocut)
    Returns: trimmed version of list l
    """
    lowercut = int(proportiontocut * len(l))
    uppercut = len(l) - lowercut
    return l[lowercut:uppercut]
def update_lambda_value(config, n_iter):
    """
    Update a lambda value according to its schedule configuration.
    """
    ranges = [i for i in range(len(config) - 1) if config[i][0] <= n_iter < config[i + 1][0]]
    if len(ranges) == 0:
        assert n_iter >= config[-1][0]
        return config[-1][1]
    assert len(ranges) == 1
    i = ranges[0]
    x_a, y_a = config[i]
    x_b, y_b = config[i + 1]
    return y_a + (n_iter - x_a) * float(y_b - y_a) / float(x_b - x_a)
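The schedule is a list of (iteration, value) breakpoints with linear interpolation between them; a small illustration with an assumed config (not from the original source):

schedule = [(0, 0.0), (100, 1.0), (200, 1.0)]
print(update_lambda_value(schedule, 50))   # 0.5 -- halfway between the first two breakpoints
print(update_lambda_value(schedule, 250))  # 1.0 -- past the last breakpoint, the value is held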
def join_path_segments(*args):
    """
    Join multiple lists of path segments together, intelligently handling path
    segments borders to preserve intended slashes of the final constructed path.

    This function is not encoding aware. It doesn't test for, or change, the
    encoding of path segments it is passed.

    Examples:
        join_path_segments(['a'], ['b']) == ['a','b']
        join_path_segments(['a',''], ['b']) == ['a','b']
        join_path_segments(['a'], ['','b']) == ['a','b']
        join_path_segments(['a',''], ['','b']) == ['a','','b']
        join_path_segments(['a','b'], ['c','d']) == ['a','b','c','d']

    Returns: A list containing the joined path segments.
    """
    finals = []
    for segments in args:
        if not segments or segments == ['']:
            continue
        elif not finals:
            finals.extend(segments)
        else:
            # Example #1: ['a',''] + ['b'] == ['a','b']
            # Example #2: ['a',''] + ['','b'] == ['a','','b']
            if finals[-1] == '' and (segments[0] != '' or len(segments) > 1):
                finals.pop(-1)
            # Example: ['a'] + ['','b'] == ['a','b']
            elif finals[-1] != '' and segments[0] == '' and len(segments) > 1:
                segments = segments[1:]
            finals.extend(segments)
    return finals

def remove_suffix(x, suffix=" "):
    """ Remove a specific suffix from the end of a string. """
    if x.endswith(suffix):
        x = x[: -len(suffix)]
    return x
def five_uneven_peak_trap(x=None):
    """
    F1: Five-Uneven-Peak Trap.

    Variable ranges: x in [0, 30].
    No. of global peaks: 2
    No. of local peaks: 3.
    """
    if x is None:
        return None
    result = None
    if 0 <= x < 2.50:
        result = 80 * (2.5 - x)
    elif 2.50 <= x < 5:
        result = 64 * (x - 2.5)
    elif 5.0 <= x < 7.5:
        result = 64 * (7.5 - x)
    elif 7.50 <= x < 12.5:
        result = 28 * (x - 7.5)
    elif 12.50 <= x < 17.5:
        result = 28 * (17.5 - x)
    elif 17.5 <= x < 22.5:
        result = 32 * (x - 17.5)
    elif 22.5 <= x < 27.5:
        result = 32 * (27.5 - x)
    elif 27.5 <= x <= 30:
        result = 80 * (x - 27.5)
    return result
def where_math(condition, x, y):
    """ Scalar version of numpy.where. """
    if condition:
        return x
    else:
        return y
def is_duckument_type(obj):
    """Internal mapping type checker

    Instead of using `isinstance(obj, MutableMapping)`, duck type checking is
    much cheaper and works in most common use cases.

    If an object has these attributes, it is a document: `__len__`, `keys`, `values`
    """
    doc_attrs = ("__len__", "keys", "values")
    return all(hasattr(obj, attr) for attr in doc_attrs)
def is_contained(a, b):
    """ Check if segment b is fully contained within segment a. """
    return b[0] >= a[0] and b[1] <= a[1]

def absolute_value(x):
    """Compute the absolute value.

    Parameters
    ----------
    x : float

    Returns
    -------
    float
        The absolute value.
    """
    if x > 0:
        return x
    else:
        return -x
def aoec_colon_concepts2labels(report_concepts):
    """
    Convert the concepts extracted from colon reports to the set of pre-defined
    labels used for classification.

    Params:
        report_concepts (dict(list)): the dict containing for each colon report the extracted concepts

    Returns: a dict containing for each colon report the set of pre-defined labels
    where 0 = absence and 1 = presence
    """
    report_labels = dict()
    # loop over reports
    for rid, rconcepts in report_concepts.items():
        # assign pre-defined set of labels to current report
        report_labels[rid] = {'cancer': 0, 'hgd': 0, 'lgd': 0, 'hyperplastic': 0, 'ni': 0}
        # textify diagnosis section
        diagnosis = ' '.join([concept[1].lower() for concept in rconcepts['Diagnosis']])
        # update pre-defined labels w/ 1 in case of label presence
        if 'colon adenocarcinoma' in diagnosis:  # update cancer
            report_labels[rid]['cancer'] = 1
        if 'dysplasia' in diagnosis:  # diagnosis contains dysplasia
            if 'mild' in diagnosis:  # update lgd
                report_labels[rid]['lgd'] = 1
            if 'moderate' in diagnosis:  # update lgd
                report_labels[rid]['lgd'] = 1
            if 'severe' in diagnosis:  # update hgd
                report_labels[rid]['hgd'] = 1
        if 'hyperplastic polyp' in diagnosis:  # update hyperplastic
            report_labels[rid]['hyperplastic'] = 1
        if sum(report_labels[rid].values()) == 0:  # update ni
            report_labels[rid]['ni'] = 1
    return report_labels

def rgb_to_int(r, g, b):
    """ Convert color from RGB to 24-bit integer. """
    return b * 65536 + g * 256 + r
def _validate_tag_sets(tag_sets):
    """Validate tag sets for a MongoReplicaSetClient."""
    if tag_sets is None:
        return tag_sets

    if not isinstance(tag_sets, list):
        raise TypeError((
            "Tag sets %r invalid, must be a list") % (tag_sets,))
    if len(tag_sets) == 0:
        raise ValueError((
            "Tag sets %r invalid, must be None or contain at least one set of"
            " tags") % (tag_sets,))

    for tags in tag_sets:
        if not isinstance(tags, dict):
            raise TypeError(
                "Tag set %r invalid, must be an instance of dict, or "
                "bson.son.SON" % (tags,))
    return tag_sets
def string_to_number(s):
    """
    :param s: word the user input
    :return: string of position indices ('0123...'), one index per character of s
    """
    number = ''
    n = 0
    number += str(n)
    for i in range(len(s) - 1):
        n += 1
        number += str(n)
    return number
def find_intterupts(envelope, high_theshold_ratio=.5, low_threshold_ratio=.35):
    """
    Returns a list of times when the signal goes high using a software schmitt trigger.

    Input:
        envelope: the envelope of the signal to process
        high_theshold_ratio: ratio of the max of the signal to trigger a high threshold
        low_threshold_ratio: ratio of the max of the signal to trigger a low threshold

    Output:
        interrupt_t: list of times when the signal goes high
        thresholds: tuple of the high and low thresholds
    """
    # Set thresholds based on max of the signal
    high_theshold = max(envelope) * high_theshold_ratio
    low_threshold = max(envelope) * low_threshold_ratio

    flag = False
    interrupt_t = []
    # Loop through the signal and detect rising and falling edges.
    # Records the times of rising edges. Similar to a schmitt trigger
    for x in range(len(envelope)):
        if envelope[x] < low_threshold and flag:
            flag = False
        elif envelope[x] > high_theshold and not flag:
            interrupt_t.append(x)
            flag = True
    return interrupt_t, (high_theshold, low_threshold)
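A short illustration of the hysteresis behaviour on a synthetic envelope (made-up data, for illustration only):

env = [0.0, 0.1, 1.0, 0.9, 0.2, 0.1, 1.0, 0.8, 0.0]
times, (hi, lo) = find_intterupts(env)
print(times)   # [2, 6]  -- indices where the signal first crosses the high threshold
print(hi, lo)  # 0.5 0.35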
def get_resource_id(resource):
    """ Get a resource's ID.

    Args:
        resource (obj): a Python object.

    Returns:
        str: the resource's ID or None.
    """
    if hasattr(resource, 'resource_id'):
        attr = resource.resource_id
        if callable(attr):
            return attr()
        return attr
    elif hasattr(resource, 'id'):
        return resource.id
    return None
def efficency_vol_poly(cappa):
    """Calculate volumetric efficiency for a given pump's typical number.

    The polynomial has been calculated by applying curve fitting at the nodes:

        cappa     .2    .3    .4    .5    .6    .7    .8    .9   1.0   1.1   1.2
        eta_hyd  .940  .948  .953  .956  .957  .958  .959  .960  .961  .962  .963
        weights  ones(cappa)
        n        5

    :param cappa (float): typical number
    :return eta_vol (float): volumetric efficiency
    """
    coef = [0.907, 0.236, -0.433, 0.378, -0.144, 0.016]
    eta_vol = sum([val * cappa**idx for idx, val in enumerate(coef)])
    return eta_vol
def do_overrides(data, overrides):
    """
    Form is:
        {
            "foo.bar.0.star": "value",
            "top_level_item": "value2",
            "foo.bar.append": "value3",
        }

    >>> do_overrides({}, {"foo": "bar"})
    {"foo": "bar"}
    >>> do_overrides({"foo": {"bar": []}}, {"foo.bar.append": 5})
    {"foo": {"bar": [5]}}
    >>> do_overrides({"foo": {"bar": [1, 3]}}, {"foo.bar.1": 4})
    {"foo": {"bar": [1, 4]}}

    Super naive path selector rules:
        separate path keys with periods
        if we are setting/selecting into a list attempt to coerce key to an integer
        if we are on the last path key and the key is "append" and the location is a list then append

    Mutates passed in dictionary
    """
    for path, value in overrides.items():
        parts = path.split(".")
        last_part = parts[-1]
        first_parts = parts[:-1]
        item = data
        for part in first_parts:
            if isinstance(item, list):
                part = int(part)
            try:
                item = item[part]
            except (KeyError, IndexError):  # lists raise IndexError rather than KeyError
                raise ValueError("Invalid key: {0} in {1}".format(part, path))
        # If the resulting "thing" is a list and the last part is the keyword "append"
        # then we append to the list and continue, otherwise try to set it
        if isinstance(item, list):
            if last_part == "append":
                item.append(value)
                continue
            else:
                last_part = int(last_part)
        item[last_part] = value
    return data
def blend_average(*args):
    """ Blends images by averaging pixels. """
    s = 0
    for i in args:
        s += i
    return s / len(args)

def _get_index_list_of_values(d, k, def_value=None):
    """Like _index_list_of_values, but uses get to access and returns an empty
    list if the key is absent. Returns d[k] or [d[k]] if the value is not a list"""
    v = d.get(k, def_value)
    if v is None:
        return []
    if isinstance(v, list):
        return v
    return [v]

def arrays_to_strings(measure_json):
    """To facilitate readability via newlines, we express some JSON strings as
    arrays, but store them as strings.

    Returns the json with such fields converted to strings.
    """
    fields_to_convert = [
        'title', 'description', 'why_it_matters', 'numerator_columns',
        'numerator_where', 'denominator_columns', 'denominator_where']
    for field in fields_to_convert:
        if isinstance(measure_json[field], list):
            measure_json[field] = ' '.join(measure_json[field])
    return measure_json
def format_dollars(amount):
    """Input float, returns formatted dollars ($x,xxx.xx)."""
    balance = '{:.2f}'.format(amount)
    balance = balance.split('.')
    bit = balance[0][::-1]
    new_thing = ''
    for i in range(1, len(bit) + 1):
        if (i - 1) % 3 == 0 and i != 1:
            new_thing += ','
        new_thing += bit[i - 1]
    balance = '$' + new_thing[::-1] + '.' + balance[1]
    return balance
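A quick check of the thousands-separator logic above (illustrative usage):

print(format_dollars(1234567.891))  # $1,234,567.89
print(format_dollars(42))           # $42.00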
def normalize_attributes(d):
    """ Sort returned attribute values. """
    if d is None:
        return None
    r = {}
    for key, value in d.items():
        if isinstance(value, list):
            r[key] = sorted(value)
        else:
            r[key] = value
    return r

def _version(name):
    """Return the version component of a package name."""
    return name.rpartition("-")[2]
def _singularity_image_name_on_disk(name: str) -> str:
    """Convert a singularity URI to an on disk sif name

    :param str name: Singularity image name
    :rtype: str
    :return: singularity image name on disk
    """
    docker = False
    if name.startswith('shub://'):
        name = name[7:]
    elif name.startswith('library://'):
        name = name[10:]
    elif name.startswith('oras://'):
        name = name[7:]
    elif name.startswith('docker://'):
        docker = True
        name = name[9:]
    # singularity only uses the final portion
    name = name.split('/')[-1]
    name = name.replace('/', '-')
    if docker:
        name = name.replace(':', '-')
        name = '{}.sif'.format(name)
    else:
        tmp = name.split(':')
        if len(tmp) > 1:
            name = '{}_{}.sif'.format(tmp[0], tmp[1])
        else:
            name = '{}_latest.sif'.format(name)
    return name
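Examples of the on-disk names the branches above produce (illustrative inputs):

print(_singularity_image_name_on_disk('docker://python:3.9'))    # python-3.9.sif
print(_singularity_image_name_on_disk('library://user/img:v1'))  # img_v1.sif
print(_singularity_image_name_on_disk('myimage'))                # myimage_latest.sif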
def policy_v2_1(probability=0.7, magnitude=5):
    """Randomly select one transformation from {color} transformations, and
    then randomly select two transformations from {shape} transformations."""
    policy = {
        # color augment
        0: [[('Mixup', probability, magnitude)],
            [('Gaussian_noise', probability, magnitude)],
            [('Saturation', probability, magnitude)],
            [('Contrast', probability, magnitude)],
            [('Brightness', probability, magnitude)],
            [('Sharpness', probability, magnitude)],
            [('Color_casting', probability, magnitude)],
            [('Equalize_YUV', probability, magnitude)],
            [('Posterize', probability, magnitude)],
            [('AutoContrast', probability, magnitude)],
            # [('SolarizeAdd', probability, magnitude)],
            [('Solarize', probability, magnitude)],
            [('Equalize', probability, magnitude)],
            [('Vignetting', probability, magnitude)]],
        # shape augment
        1: [[('Rotate', probability, magnitude)],
            [('Flip', probability, magnitude)],
            [('Cutout', probability, magnitude)],
            [('Shear_x', probability, magnitude)],
            [('Shear_y', probability, magnitude)],
            [('Scale', probability, magnitude)],
            [('Scale_xy_diff', probability, magnitude)],
            [('Lens_distortion', probability, magnitude)]],
        2: [[('Rotate', probability, magnitude)],
            [('Flip', probability, magnitude)],
            [('Cutout', probability, magnitude)],
            [('Shear_x', probability, magnitude)],
            [('Shear_y', probability, magnitude)],
            [('Scale', probability, magnitude)],
            [('Scale_xy_diff', probability, magnitude)],
            [('Lens_distortion', probability, magnitude)]]
    }
    return policy
def pad_number(num):
    """
    If the given number has only one digit, a new string padded with spaces on
    the left is returned. Otherwise, a string without spaces is returned.

    :param num: integer
    :return: padded string
    """
    pad = ' ' if num < 10 else ''
    return '%s%s' % (pad, num)
def unindent(text, skip1=False):
    """Remove leading spaces that are present in all lines of ``text``.

    Parameters
    ----------
    text : str
        The text from which leading spaces should be removed.
    skip1 : bool
        Ignore the first line when determining number of spaces to unindent,
        and remove all leading whitespaces from it.
    """
    # count leading whitespaces
    lines = text.splitlines()
    ws_lead = []
    for line in lines[skip1:]:
        len_stripped = len(line.lstrip(' '))
        if len_stripped:
            ws_lead.append(len(line) - len_stripped)

    if len(ws_lead) > skip1:
        rm = min(ws_lead)
        if rm:
            if skip1:
                lines[0] = ' ' * rm + lines[0].lstrip()
            text = '\n'.join(line[rm:] for line in lines)
    return text
def complement(l, universe=None):
    """
    Return the complement of a list of integers, as compared to a given
    "universe" set. If no universe is specified, consider the universe to be
    all integers between the minimum and maximum values of the given list.
    """
    if universe is not None:
        universe = set(universe)
    else:
        universe = set(range(min(l), max(l) + 1))
    return sorted(universe - set(l))
def decrypt(lst: list):
    """
    Decodes html encoded emails.
    Pass a list of emails.
    Decodes email if starting characters are '&#'.
    Returns list of unencoded emails.
    """
    unencoded_emails = []
    for string in lst:
        if string[0:2] == '&#':
            slices = int(len(string) / 6)
            count = 0
            starting_pos = 0
            decoded_email = ''
            while count < slices:
                decoded_email = decoded_email + chr(int(string[starting_pos + 2: starting_pos + 5]))
                count += 1
                starting_pos += 6
            unencoded_emails.append(decoded_email)
        else:
            unencoded_emails.append(string)
    return unencoded_emails
def cauchy_cooling_sequence(initial_t, it):
    """
    Calculates the new temperature per iteration using a cauchy progression.

    Parameters
    ----------
    initial_t : float
        initial temperature
    it : int
        actual iteration

    Returns
    -------
    tt : float
        new temperature
    """
    tt = initial_t / (1 + it)
    return tt
def n_max_iter_heuristics(n_data, n_query, low_bound=5, up_bound=20):
    """
    Helper method to define maximum number of iterations for a given campaign.

    This is based on the empirical evidence that in various systems >90% of
    stable materials are identified when 25% of candidates are tested. We also
    enforce upper and lower bounds of 20 and 5 to avoid edge cases with too
    many or too few calculations to run.

    Args:
        n_data (int): number of data points in candidate space
        n_query (int): number of queries allowed in each iteration
        low_bound (int): lower bound allowed for n_max_iter
        up_bound (int): upper bound allowed for n_max_iter

    Returns:
        maximum number of iterations as integer
    """
    _target = round(n_data * 0.25 / n_query)
    if _target < low_bound:
        return low_bound
    else:
        return min(_target, up_bound)
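Illustrative values for the heuristic above (assumed inputs):

print(n_max_iter_heuristics(960, 20))    # 12 -- round(960 * 0.25 / 20), within [5, 20]
print(n_max_iter_heuristics(100, 10))    # 5  -- a target of about 2 is clipped to the lower bound
print(n_max_iter_heuristics(10000, 10))  # 20 -- a target of 250 is clipped to the upper bound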
def formatAbilities(abilities):
    """ Same as formatTypes but for abilities. """
    res = ""
    for i in range(len(abilities) - 1, -1, -1):
        res += abilities[i]["ability"]["name"] + ";"
    return res
def save_txt(str_file_path, str_txt):
    """
    .. _save_txt :

    This function saves the given string into the given file.

    Parameters
    ----------
    str_file_path : str
        The text file full path.
    str_txt : str
        The string to be written into the text file.

    Returns
    -------
    bool
        Returns True if write successful (no exception).
        Returns False on exception.

    Examples
    --------
    .. code:: python

        bool_success = save_txt(str_file_path, str_txt)
    """
    try:
        file = open(str_file_path, 'w')
        file.write(str_txt)
        file.close()
        return True
    except:
        return False
def human_time(time_s):
    """
    Converts a time in seconds to a string using days, hours, minutes and seconds.
    """
    time_s = int(time_s)  # Ensure int
    out = []

    days = time_s // 86400
    if days == 1:
        out.append("%i day" % days)
        time_s -= days * 86400
    elif days >= 1:
        out.append("%i days" % days)
        time_s -= days * 86400

    hours = time_s // 3600
    if hours >= 1:
        out.append("%i hr" % hours)
        time_s -= hours * 3600

    minutes = time_s // 60
    if minutes >= 1:
        out.append("%i min" % minutes)
        time_s -= minutes * 60

    if time_s >= 1:
        out.append("%i sec" % time_s)

    return " ".join(out)
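Sample output for the formatter above (illustrative):

print(human_time(90061))  # 1 day 1 hr 1 min 1 sec
print(human_time(3725))   # 1 hr 2 min 5 sec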
def save_a_vocab_file(vocab_file, vocab_list):
    """Save a Vocab file for test."""
    with open(vocab_file, "w", encoding='utf-8') as out_f:
        for vocab in vocab_list:
            out_f.write(vocab)
            out_f.write('\n')
    return vocab_file