content
stringlengths
42
6.51k
def _get_bot_coverage_list(sorted_infos, thresholds): """ generate the list of bot coverage ratio and bot coverage counts based on the list of thresholds :param sorted_infos: :param thresholds: :return: """ tol = len(sorted_infos) cur_bot_coverage = tol bot_coverage_count_list = list() bot_coverage_list = list() current_step = 0 for t in thresholds: while sorted_infos[current_step][2] < t: cur_bot_coverage -= 1 current_step += 1 bot_coverage_count_list.append(cur_bot_coverage) bot_coverage_list.append(cur_bot_coverage / tol) return bot_coverage_list, bot_coverage_count_list
def readbytes(path):
    """Return the raw bytes stored in the file at ``path``."""
    with open(path, "rb") as handle:
        return handle.read()
def strip_epoch(nvr: str):
    """Return only the NVR portion of an "N-V-R:E" string.

    Strings without an epoch suffix are returned exactly as-is.
    """
    head, _sep, _epoch = nvr.partition(':')
    return head
def galileo_nav_decode(dwrds: list) -> dict:
    """Helper function to decode RXM-SFRBX dwrds for GALILEO navigation data.

    :param list dwrds: array of navigation data dwrds
    :return: dict of navdata attributes
    :rtype: dict
    """
    return dict(dwrds=dwrds)
def rivers_with_stations(stations):
    """Return the rivers that have at least one monitoring station.

    Order of first appearance is preserved.

    :param stations: iterable of objects with a ``river`` attribute
    :return: list of river names, each appearing once
    """
    rivers = []
    seen = set()  # O(1) membership test instead of O(n) list scans
    for station in stations:
        if station.river not in seen:
            seen.add(station.river)
            rivers.append(station.river)
    return rivers
def any_scores_greater_than_one(scores):
    """Return True when at least one user has more than one score.

    Used to decide whether a KDE (density estimate) of the scores can be
    plotted.
    """
    return any(len(user_scores) > 1 for user_scores in scores)
def validate_update_parcel_admin_status(parcel, data):
    """Validate an admin update to a parcel's status.

    Mutates ``data`` in place: an empty status is replaced with the
    parcel's current status. Returns a ``(payload, 400)`` pair when the
    status is invalid, otherwise None.
    """
    # Fall back to the stored status when the incoming one is empty.
    if data['status'] == '':
        data['status'] = parcel['status']
    status = data['status']
    # Reject purely numeric statuses.
    if status.strip(' ').isdigit():
        return {'warning': 'Enter non digit status'}, 400
    # Reject whitespace-only statuses.
    if not status.strip():
        return {"warning": "Enter valid status"}, 400
    # Reject overly long statuses.
    if len(status) > 40:
        return {'warning': 'status is too long'}, 400
def hex_16bit(value):
    """Convert a 16-bit value into 2 little-endian bytes.

    args: 16bit value
    returns: bytes of size 2

    :raises ValueError: if ``value`` does not fit in 16 bits
    """
    if not 0 <= value <= 0xffff:
        # ValueError is more precise than the original bare Exception and
        # is still caught by any caller catching Exception.
        raise ValueError('Sar file 16bit value %s out of range' % value)
    return value.to_bytes(2, 'little')
def vnesi_none(tab):
    """Return a new list with every empty string in ``tab`` replaced by None."""
    return [None if element == '' else element for element in tab]
def commify(n):
    """Add thousands-separator commas to a number or numeric string.

    >>> commify(1234567890)
    '1,234,567,890'
    >>> commify(1234.5)
    '1,234.5'
    >>> commify(None)
    """
    if n is None:
        return None
    text = str(n)
    if '.' in text:
        whole, _dot, frac = text.partition('.')
    else:
        whole, frac = text, None
    # Split the integer part into 3-character groups, right to left.
    # (A leading '-' counts as a character, matching historical behavior.)
    groups = []
    while len(whole) > 3:
        groups.insert(0, whole[-3:])
        whole = whole[:-3]
    groups.insert(0, whole)
    out = ','.join(groups)
    if frac:
        out += '.' + frac
    return out
def value2bool(d):
    """Convert 'true'/'false' string values in ``d`` to booleans, in place.

    Comparison is case-insensitive. Non-string values are left untouched
    (the original crashed on them with AttributeError).

    :param d: dict
    :return: the same dict, mutated
    """
    for key, value in d.items():
        if isinstance(value, str):
            lowered = value.lower()
            if lowered == 'true':
                d[key] = True
            elif lowered == 'false':
                d[key] = False
    return d
def _green_ampt_infiltration_rate(F, psi, eff_theta, eff_sat, K): """Compute the Green-Ampt infiltration rate Compute the infiltration rate using the Green-Ampt cumulative infiltration. Parameters ---------- F : scalar The cumulative infiltration for the time-period. psi : scalar Soil suction head at wetting front. eff_theta : scalar Effective porosity. eff_sat : scalar Effective saturation. K : scalar Saturated hydraulic conductivity. Returns ------- ft : scalar Infiltration rate for the given `F`. Raises ------ ValueError - If `F` is zero or negative. """ if F <= 0: raise ValueError('F must be greater than zero.') dtheta = (1 - eff_sat)*eff_theta return K*((psi*dtheta)/F + 1)
def warn_config_absent(sections, argument, log_printer):
    """Warn when ``argument`` appears in none of the given sections.

    :param sections:    A dictionary of sections (each supporting ``in``).
    :param argument:    The argument to check for, e.g. "files".
    :param log_printer: A log printer to emit the warning to.
    :return:            True when the warning was emitted, else False.
    """
    present = any(argument in section for section in sections.values())
    if present:
        return False
    log_printer.warn('coala will not run any analysis. Did you forget '
                     'to give the `--{}` argument?'.format(argument))
    return True
def __isFloatType__(obj): """ Returns true if the obj is a float """ return isinstance(obj, float)
def get_attribute_from_dict(dictionary, keyword, default_value=''):
    """Look up ``keyword`` in ``dictionary``, tolerating a missing dict.

    :param dictionary: 'dict' with the elements; may be None or empty
    :param keyword: key to look up in the dict
    :param default_value: returned when the dict is falsy or the key absent
    :returns: the stored value or the default value
    """
    if not dictionary:
        return default_value
    return dictionary.get(keyword, default_value)
def get_factorial(x):
    """Compute the factorial of ``x``.

    :param x: non-negative int
    :return: int, x!
    :raises ValueError: if ``x`` is negative
    """
    if x < 0:
        # The original message had an unfilled "{}" placeholder.
        raise ValueError("Input must be positive, but {} was given".format(x))
    factorial = 1
    for value in range(1, x + 1):
        factorial *= value
    return factorial
def to_seconds(t):
    """Convert a length of time in "m:s.d" format into seconds.

    e.g. to_seconds('1:15.2') -> 75.2
    """
    minutes, _sep, secs = t.partition(':')
    return 60 * int(minutes) + float(secs)
def format_time(time):
    """Format a duration given in milliseconds, e.g. ``01h 02m 03s``.

    :param time: duration in milliseconds
    :return: formatted string; sub-second durations yield "00s"
             (the original implicitly returned None for them)
    """
    hours, remainder = divmod(time / 1000, 3600)
    minutes, seconds = divmod(remainder, 60)
    if hours:
        return '%02dh %02dm %02ds' % (hours, minutes, seconds)
    if minutes:
        return '%02dm %02ds' % (minutes, seconds)
    return '%02ds' % (seconds,)
def get_optional_height_names(num=4):
    """Get list of possible column names for optional extrapolation.

    (of form Ane_WS_Ht1, for example)

    Parameters
    ----------
    num : int
        number of possible Additional Comparison Heights

    Returns
    -------
    list
        one sub-list of allowed names per (sensor type, height) pair
    """
    return [
        ["%s_%s_Ht%d" % (sensor, variable, height)
         for variable in ("WS", "SD", "TI")]
        for sensor in ("Ane", "RSD")
        for height in range(1, num + 1)
    ]
def check_if_bst(root, min, max):
    """Check whether a binary tree satisfies the BST property.

    Keys in the left subtree must be < the node key; keys in the right
    subtree must be >= it. To start off, run
    ``check_if_bst(BT.root, -math.inf, math.inf)``.
    """
    if root is None:
        return True
    if not (min <= root.key < max):
        return False
    left_ok = check_if_bst(root.left, min, root.key)
    return left_ok and check_if_bst(root.right, root.key, max)
def _insert_automodapi_configs(c): """Add configurations related to automodapi, autodoc, and numpydoc to the state. """ # Don't show summaries of the members in each class along with the # class' docstring c["numpydoc_show_class_members"] = False c["autosummary_generate"] = True c["automodapi_toctreedirnm"] = "py-api" c["automodsumm_inherited_members"] = True # Docstrings for classes and methods are inherited from parents. c["autodoc_inherit_docstrings"] = True # Class documentation should only contain the class docstring and # ignore the __init__ docstring, account to LSST coding standards. # c['autoclass_content'] = "both" c["autoclass_content"] = "class" # Default flags for automodapi directives. Special members are dunder # methods. # NOTE: We want to used `inherited-members`, but it seems to be causing # documentation duplication in the automodapi listings. We're leaving # this out for now. See https://jira.lsstcorp.org/browse/DM-14782 for # additional notes. # NOTE: Without inherited members set, special-members doesn't need seem # to have an effect (even for special members where the docstrings are # directly written in the class, not inherited. # c['autodoc_default_flags'] = ['inherited-members'] c["autodoc_default_flags"] = ["show-inheritance", "special-members"] return c
def findin(item, list):
    """Find C{item} in C{list}; return its index, or -1 when absent."""
    if item in list:
        return list.index(item)
    return -1
def table_name(board):
    """Return the index-table name for board 'board' (``idx_`` prefix)."""
    return "idx_{}".format(board)
def fibonacci(i):
    """Get the i-th number from the Fibonacci series 1, 2, 3, 5, 8, ...

           | 1                 if i = 0
    F(i) = { 2                 if i = 1
           | F(i-1) + F(i-2)   if i >= 2
    """
    previous, current = 1, 2
    if i == 0:
        return previous
    for _ in range(i - 1):
        previous, current = current, previous + current
    return current
def concat_string(target, msg=None, delimiter="", last=""):
    """Concatenate a series of strings onto the end of ``target``.

    ``delimiter`` is optional filler between items; ``last`` is appended
    after the final item.

    :param target: base string
    :param msg: sequence of strings to append (the original used a shared
        mutable default ``[]`` and raised IndexError when it was empty)
    :param delimiter: filler between items
    :param last: suffix after the last item
    :return: the combined string (``target`` unchanged when msg is empty)
    """
    if not msg:
        return target
    # joining with the delimiter reproduces item1+d+item2+...+d+itemN.
    return target + delimiter.join(msg) + last
def is_inside_tag(tag):
    """Simple helper: True for inside ("I-"-prefixed) tags."""
    return tag[:2] == 'I-'
def split_into_integers(coordinate):
    """Get individual parts of a float and transform into integers.

    :coordinate: float value
    :returns: list of integers, e.g. 1.25 -> [1, 25]
    """
    parts = str(coordinate).split('.')
    return [int(part) for part in parts]
def _filterData(data): """See method filterData(). Args: data (dict); return value of the method fetchAppDetails() Returns: filtered data """ filtered = {} appid = '' for key in data: appid = key break shorcut = data[appid]['data'] filtered['appid'] = appid filtered['name'] = shorcut['name'] filtered['is_free'] = shorcut['is_free'] filtered['detailed_description'] = shorcut['detailed_description'] filtered['publishers'] = shorcut['publishers'] if 'categories' in shorcut: filtered['categories'] = shorcut['categories'] else: unknown = {} unknown['description'] = 'Unknown' unknown['id'] = -1; l = []; l.append(unknown) filtered['categories'] = l; # filtered['about_the_game'] = shorcut['about_the_game'] # filtered['short_description'] = shorcut['short_description'] return filtered
def combine_adjacent(arr):
    """Sum like-signed adjacent elements.

    Note: ``arr`` is consumed (left empty) by this call, matching the
    historical behavior; elements are assumed non-zero.

    arr : starting array (list of non-zero numbers)

    Returns
    -------
    output : list
        new summed array, one entry per run of like-signed values
    indexes : list
        index of the first element summed for each group in arr
    """
    output, indexes = [], []
    consumed = 0
    while arr:
        sign = arr[0] / abs(arr[0])
        run = 1
        while run < len(arr) and arr[run] / abs(arr[run]) == sign:
            run += 1
        output.append(sum(arr[:run]))
        indexes.append(consumed)
        consumed += run
        # Drop the whole run at once; the original popped element 0 in a
        # loop, which is O(n) per element.
        del arr[:run]
    return output, indexes
def filter_debt_to_income(monthly_debt_ratio, bank_list):
    """Filters the bank list by the maximum debt-to-income ratio allowed.

    Args:
        monthly_debt_ratio (float): The applicant's monthly debt ratio.
        bank_list (list of lists): The available bank loans; index 3 holds
            each bank's maximum allowed debt-to-income ratio.

    Returns:
        A list of qualifying bank loans.
    """
    return [bank for bank in bank_list
            if monthly_debt_ratio <= float(bank[3])]
def is_list_or_tuple(obj) -> bool:
    """Checks whether an object is a list or a tuple."""
    return isinstance(obj, (list, tuple))
def _hyperparameters_to_cmd_args(hyperparameters): """ Converts our hyperparameters, in json format, into key-value pair suitable for passing to our training algorithm. """ cmd_args_list = [] for key, value in hyperparameters.items(): cmd_args_list.append("--{}".format(key)) cmd_args_list.append(value) return cmd_args_list
def pipeline(data, funcs):
    """Pipe *functions* onto the given *data*.

    The result of each function is fed to the next one.
    """
    result = data
    for step in funcs:
        result = step(result)
    return result
def get_ovs_dpdk_cfg(k8s_conf):
    """Return the ovs_dpdk enablement choice.

    :return: the configured value when truthy, otherwise None
    """
    return k8s_conf.get('enable_ovs_dpdk') or None
def unit_vector(v):
    """Return the unit vector of the point v = (a, b)."""
    a, b = v[0], v[1]
    length = (a * a + b * b) ** 0.5
    if length == 0:
        # Avoid division by zero for the null vector.
        length = 0.000000000000001
    return (a / length, b / length)
def getGlideinCpusNum(glidein):
    """Given the glidein data structure, get the GLIDEIN_CPUS configured.

    If GLIDEIN_CPUS is not configured or is set to auto, ASSUME it to be 1.
    """
    configured = str(glidein['attrs'].get('GLIDEIN_CPUS', 1))
    if configured.upper() == 'AUTO':
        return 1
    return int(configured)
def convert_seconds_to_human_readable_form(seconds: int) -> str:
    """Convert seconds to human readable time format, e.g. 02:30.

    **Keyword arguments:**
    - seconds (int) -- Seconds to convert

    **Returns:**
    Formatted "MM:SS" string ("00:00" for non-positive input).
    """
    if seconds <= 0:
        return "00:00"
    minutes, remainder = divmod(seconds, 60)
    return "{:02d}:{:02d}".format(minutes, remainder)
def hex2rgb(hexstr):
    """Convert a hex "#rrggbb" color string code to a tuple of (r, g, b).

    Channels are returned as floats in [0, 1].

    :raises ValueError: on non-string input or a malformed code
    """
    if not isinstance(hexstr, str):
        raise ValueError('I was expecting a string with the hex code color')
    # `is not` on literals compares identity, not value; use != instead
    # (the original checks were unreliable and raise SyntaxWarning).
    if hexstr[0] != '#':
        raise ValueError('Invalid hex color code: missing "#" at the begining')
    if len(hexstr) != 7:
        raise ValueError('Invalid hex color code: length of the string code is not 7')
    digits = hexstr[1:]
    return tuple(int(digits[i:i + 2], 16) / 255 for i in (0, 2, 4))
def getDomainFromFP(fp):
    """Returns domain number from file path.

    Paths look like ``.../LSBU_<timestep>_<subdomain>.vtu``; the strict
    two-way splits raise ValueError on unexpected shapes, as before.
    """
    _path, file_info = fp.split("LSBU_")
    _timestep, domain = file_info.split("_")
    # Drop the trailing ".vtu" extension.
    return domain[:-4]
def ParseRegisterNotices(notices):
    """Parses registration notices.

    Args:
        notices: list of notices (lowercase-strings).

    Returns:
        Pair (public privacy ack: bool, hsts ack: bool).
    """
    if not notices:
        return False, False
    privacy_ack = 'public-contact-data-acknowledgement' in notices
    hsts_ack = 'hsts-preloaded' in notices
    return privacy_ack, hsts_ack
def find_string_anagrams(s, pattern):
    """Find all anagrams of pattern in the given string, s.

    >>> find_string_anagrams("ppqp", "pq")
    [1, 2]
    >>> find_string_anagrams("abbcabc", "abc")
    [2, 3, 4]
    """
    hits = []
    window = len(pattern)
    if window > len(s):
        return hits
    # Outstanding count of each pattern character still needed.
    need = {}
    for ch in pattern:
        need[ch] = need.get(ch, 0) + 1
    satisfied = 0  # distinct chars whose outstanding need reached zero
    start = 0
    for end, ch in enumerate(s):
        if ch in need:
            need[ch] -= 1
            if need[ch] == 0:
                satisfied += 1
        if end < window - 1:
            continue
        if satisfied == len(need):
            hits.append(start)
        # Slide the window: give back its leftmost character.
        leaving = s[start]
        if leaving in need:
            if need[leaving] == 0:
                satisfied -= 1
            need[leaving] += 1
        start += 1
    return hits
def vectorproduct(a, b):
    """Return vector cross product of input 3-vectors a and b."""
    ax, ay, az = a
    bx, by, bz = b
    return [ay * bz - az * by,
            az * bx - ax * bz,
            ax * by - ay * bx]
def colorize(value: str, is_warning: bool) -> str:
    """Utility to set a color for the output string when it exceeds the
    threshold.

    Args:
        value: String to be output.
        is_warning: Whether it exceeds the threshold.

    Returns:
        colorized string output (unchanged when not a warning)
    """
    if not is_warning:
        return value
    color = get_config()["PRINT_THRESHOLDS"]["COLOR"]
    style = termcolors.make_style(fg=color)  # type: ignore
    return style(value)
def strahler_stream_order(start_arc_id, start_up_node, nodes_per_arc, arcs_per_node, stream_orders):
    """Calculate the Strahler stream order

    This function recursively computes the Strahler stream order using
    the algorithm described by Gleyzer et al. (2004). The sequence of
    stream orders for the starting arc and each upstream arc is returned
    in the dictionary `stream_orders`. To compute the Strahler order for
    the entire network, `start_arc_id` should be the arc ID for the
    stream arc closest to the catchment outlet and `start_up_node`
    should be the node ID at the upstream end of `start_arc_id`.

    Parameters
    ----------
    start_arc_id : int
        The integer ID of the current stream arc as defined in
        `nodes_per_arc`.
    start_up_node : int
        The integer ID of the upstream node for the current stream arc.
    nodes_per_arc : dict
        A dictionary containing an ordered tuple representing the
        upstream and downstream node IDs for each stream arc ID.
        e.g. {0 : (upstream_node, downstream_node)}
    arcs_per_node : dict
        A dictionary containing a list of the stream arc IDs for stream
        arcs adjacent to each node in the network.
    stream_orders : dict
        A dictionary with the (key, value) pairs representing the
        stream arc ID and associated Strahler order.

    Returns
    -------
    order : int
        The stream order of the stream arc described by `start_arc_id`.

    References
    ----------
    Alexander Gleyzer, Michael Denisyuk, Alon Rimmer and Yigal Salingar,
    2004. A Fast Recursive GIS Algorithm for Computing Strahler Stream
    Order in Braided and Nonbraided Networks. Journal of the American
    Water Resources Association (JAWRA) 40(4):937-946.
    """
    # Base case: the upstream node touches only this arc, so this arc is
    # a headwater (first-order) stream.
    if len(arcs_per_node[start_up_node]) == 1:
        stream_orders[start_arc_id] = 1
    else:
        upstream_orders = {}
        # Recurse into every other arc meeting at the upstream node.
        for arc_id in arcs_per_node[start_up_node]:
            if arc_id != start_arc_id:
                up_node, down_node = nodes_per_arc[arc_id]
                # Continue the recursion from whichever end of the arc is
                # NOT the shared junction node (arc orientation in
                # nodes_per_arc is not guaranteed relative to flow here).
                if up_node != start_up_node:
                    upstream_orders[arc_id] = strahler_stream_order(arc_id,
                                                                    up_node,
                                                                    nodes_per_arc,
                                                                    arcs_per_node,
                                                                    stream_orders)
                else:
                    upstream_orders[arc_id] = strahler_stream_order(arc_id,
                                                                    down_node,
                                                                    nodes_per_arc,
                                                                    arcs_per_node,
                                                                    stream_orders)
        max_order = 0
        max_order_count = 0
        # Scan the upstream orders (descending) to find the maximum and
        # how many upstream arcs attain it.
        up_orders = sorted(upstream_orders.values(), reverse=True)
        for order in up_orders:
            if order > max_order:
                max_order = order
                max_order_count += 1
            elif order == max_order:
                max_order_count += 1
        # Strahler rule: the order increases by one only when two or more
        # upstream arcs share the maximum order.
        if max_order_count > 1:
            stream_orders[start_arc_id] = max_order + 1
        else:
            stream_orders[start_arc_id] = max_order
    return stream_orders[start_arc_id]
def sample_service(name=None):
    """This is a sample service. Give it your name and prepare to be
    greeted!

    :param name: Your name
    :type name: basestring
    :return: A greeting or an error
    """
    if not name:
        return {"error": "what's your name?"}
    return {'hello': name}
def eir_to_rate(match_to, compounds):
    """Convert an effective interest rate to an equivalent per-period rate.

    :param match_to: effective rate to match (e.g. 0.21)
    :param compounds: number of compounding periods
    :return: per-period rate r with (1 + r) ** compounds == 1 + match_to
    """
    growth = match_to + 1
    return growth ** (1.0 / compounds) - 1
def has_callables(iterable):
    """Determine whether ``iterable`` contains at least one callable."""
    return any(callable(item) for item in iterable)
def squeeze_list(listA, val=-1):
    """Compact a list of lists into a single list.

    Squeezes (spaghettify) a list of lists into a single list. The lists
    are concatenated into a single one, and ``val`` is placed between
    them to mark the split locations for later unsqueezing.

    Parameters
    ----------
    listA : list
        List of lists.
    val : number, optional
        Value to separate the lists.

    Returns
    -------
    list
        A list with all lists concatenated into one.

    Examples
    --------
    >>> squeeze_list([[1, 2], [3], [4, 5, 6]], -1)
    [1, 2, -1, 3, -1, 4, 5, 6]
    """
    flattened = []
    for sub in listA:
        flattened.extend(sub)
        flattened.append(val)
    # Drop the trailing separator (no-op for empty input).
    return flattened[:-1]
def get_rgb_from_value(n):
    """Map a count ``n`` onto an (R, G, B) tuple via fixed bins."""
    # (exclusive upper bound, packed 0xRRGGBB color)
    bins = ((2, 0xFF0000), (4, 0xFFFF00), (8, 0x009900),
            (32, 0x0080FF), (128, 0x00FFFF))
    for upper_bound, color in bins:
        if n < upper_bound:
            k = color
            break
    else:
        k = 0xFFFFFF
    return ((k >> 16) & 0xFF, (k >> 8) & 0xFF, k & 0xFF)
def base16encode(string, layer=1, upper=False):
    """Hex-encode a string, optionally applying several encoding layers.

    >>> base16encode("string")
    '737472696e67'
    """
    from binascii import hexlify
    if layer > 0:
        encoded = hexlify(string.encode('UTF-8')).decode()
        # Recurse to apply the remaining layers, then optionally upcase.
        result = base16encode(encoded, layer - 1, upper)
        string = result.upper() if upper else result
    return string
def simple_closure(s, implications):
    """Compute the closure of attribute set ``s`` under ``implications``.

    Repeatedly applies every implication whose premise is contained in
    the growing closure, adding its conclusion, until a fixed point is
    reached.

    :param s: set of attributes
    :param implications: list of objects with ``premise`` and
        ``conclusion`` set attributes (e.g. fca.implication.Implication)
    :return: a new set, the closure of ``s`` with respect to implications
    """
    pending = implications[:]
    closure = s.copy()
    changed = True
    while changed:
        changed = False
        # Iterate over a snapshot: the original removed items from the
        # very list it was iterating, which silently skips the element
        # after each removal (only the outer while loop saved it).
        for imp in list(pending):
            if imp.premise <= closure:
                closure |= imp.conclusion
                changed = True
                pending.remove(imp)
    return closure
def config_bool(value):
    """Represent a boolean in a way compatible with configparser."""
    if value:
        return "true"
    return "false"
def serialize_header(value, style='simple', explode=False):  # noqa
    """Serialize a header according to
    https://swagger.io/docs/specification/serialization/.

    Parameters
    ----------
    value :
        Value to serialize.
    style : str ('simple')
        Serialization style.
    explode : bool
        Explode the object serialization.

    Returns
    -------
    str
        Serialized header.
    """
    # Exact type checks (not isinstance) are kept deliberately so
    # subclasses fall through to plain str().
    if type(value) is list:
        return ','.join(str(entry) for entry in value)
    if type(value) is dict:
        joiner = '=' if explode else ','
        return ','.join('{}{}{}'.format(key, joiner, val)
                        for key, val in value.items())
    return str(value)
def encode(string):
    """Run-length encode ``string`` (counts prefixed only for runs > 1)."""
    pieces = []
    pos = 0
    while pos < len(string):
        run_end = pos + 1
        while run_end < len(string) and string[run_end] == string[pos]:
            run_end += 1
        run_len = run_end - pos
        if run_len > 1:
            pieces.append(str(run_len))
        pieces.append(string[pos])
        pos = run_end
    return ''.join(pieces)
def leading_num_key(s):
    """Keys for sorting strings, based on leading multidigit numbers.

    A normal string comparison compares character by character, e.g.
    "101P" is less than "1P" because "0" < "P". `leading_num_key`
    generates keys so that sorting can consider the leading multidigit
    integer, e.g. "101P" > "1P" because 101 > 1.

    Parameters
    ----------
    s : string

    Returns
    -------
    keys : tuple
        `keys[0]` is the leading number (0 when there is none) and
        `keys[1]` is the rest of the string.
    """
    i = 0
    while i < len(s) and s[i].isdigit():
        i += 1
    # The original left stray digits in the suffix (e.g. "101P" -> "1P"
    # and all-digit strings kept their last digit); split cleanly so the
    # suffix is exactly the non-numeric remainder.
    prefix = int(s[:i]) if i else 0
    return prefix, s[i:]
def is_email(token):
    """Return True for e-mails.

    Very rough: any token containing "@" counts, so some other words,
    for instance duygu@SonyCenter, return True as well. However, most
    probably one doesn't want to process those token types anyway.

    Args:
        token: single token

    Returns:
        Boolean

    Raises:
        None
    """
    return token.find("@") != -1
def dragon_step(a):
    """Perform one "dragon curve" expansion step.

    Call the data you have at this point "a". Make a reversed copy "b",
    flip every 0 to 1 and every 1 to 0 in it, and return "a" followed by
    a single '0' followed by "b".
    """
    flipped = a[::-1].translate(str.maketrans('01', '10'))
    return '{}0{}'.format(a, flipped)
def filter_errors(e, select=None, ignore=None, **params):
    """Filter an error by select and ignore prefix options.

    :return bool: True when the error should be kept.
    """
    text = e['text']
    if select and any(text.startswith(prefix) for prefix in select):
        return True
    if ignore and any(text.startswith(prefix) for prefix in ignore):
        return False
    return True
def to_tuple(param, low=None, bias=None):
    """Convert input argument to a min-max tuple.

    Args:
        param (scalar, tuple or list of 2+ elements): Input value. A
            scalar becomes ``(-param, param)`` unless ``low`` is given,
            in which case the two are ordered into a pair.
        low: Second element of tuple can be passed as optional argument.
        bias: An offset factor added to each element (mutually exclusive
            with ``low``).
    """
    if low is not None and bias is not None:
        raise ValueError('Arguments low and bias are mutually exclusive')
    if param is None:
        return param
    if isinstance(param, (int, float)):
        if low is None:
            pair = (-param, param)
        else:
            pair = (min(low, param), max(low, param))
    elif isinstance(param, (list, tuple)):
        pair = tuple(param)
    else:
        raise ValueError('Argument param must be either scalar (int, float) or tuple')
    if bias is not None:
        return tuple(bias + element for element in pair)
    return pair
def to_int(s, default=0):
    """Return input converted into an integer, or ``default`` on failure.

    Examples::

        >>> to_int('1')
        1
        >>> to_int('')
        0
        >>> to_int(None, default='Empty')
        'Empty'
    """
    try:
        result = int(s)
    except (TypeError, ValueError):
        return default
    return result
def wrap_string_in_list(maybe_string):
    """Wrap ``maybe_string`` in a list when it is a string.

    Some functions return either a single string or multiple strings as
    a list; passing the result through this function makes it safe to
    iterate over either way.

    Args:
        maybe_string (obj): an object which may be a string

    Returns:
        ``[maybe_string]`` when it is a str (or subclass) instance,
        otherwise the original object.
    """
    return [maybe_string] if isinstance(maybe_string, str) else maybe_string
def _extended_gcd(a, b): """ Division in integers modulus p means finding the inverse of the denominator modulo p and then multiplying the numerator by this inverse (Note: inverse of A is B such that A*B % p == 1) this can be computed via extended Euclidean algorithm http://en.wikipedia.org/wiki/Modular_multiplicative_inverse#Computation """ x = 0 last_x = 1 y = 1 last_y = 0 while b != 0: quot = a // b a, b = b, a % b x, last_x = last_x - quot * x, x y, last_y = last_y - quot * y, y return last_x, last_y
def _normalize(obj): """ Normalize dicts and lists :param obj: :return: normalized object """ if isinstance(obj, list): return [_normalize(item) for item in obj] elif isinstance(obj, dict): return {k: _normalize(v) for k, v in obj.items() if v is not None} elif hasattr(obj, 'to_python'): return obj.to_python() return obj
def get_basis(vectors, vector_num, vector_len):
    """Get a basis of the given bit-vectors via Gaussian elimination.

    Args:
        vectors (:obj:`list` of :obj:`int`): The list of vectors.
        vector_num (int): The number of vectors in the list.
        vector_len (int): The length of vectors in the list.

    Returns:
        :rtype: (:obj:`list` of :obj:`int`, int): The list of basis
        vectors and the rank of the basis.
    """
    # Initial rank equals the current full rank.
    rank = min(vector_len, vector_num)
    for pivot_row in range(rank):
        # Re-sort so the largest remaining vectors come first.
        vectors = sorted(vectors, reverse=True)
        # Most significant set bit of the pivot vector.
        msb = len(bin(vectors[pivot_row])[2:]) - 1
        for row in range(vector_num):
            if row != pivot_row and vectors[row] & (1 << msb):
                vectors[row] ^= vectors[pivot_row]
    basis = [vectors[row] for row in range(rank) if vectors[row]]
    # The final rank equals the number of non-zero rows left.
    return (basis, len(basis))
def value_to_string(val, precision=3):
    """Convert a number to a human readable string."""
    # Non-floats (and zero) keep their default representation.
    if not isinstance(val, float) or val == 0:
        return str(val)
    too_big = abs(val) >= 10.0 ** precision
    too_small = abs(val) <= 10.0 ** (-precision)
    if too_big or too_small:
        return "{val:.{prec}e}".format(val=val, prec=precision)
    text = "{}".format(val)
    # Fall back to fixed precision when the default repr is too long.
    if len(text) > precision + 2 + (text[0] == '-'):
        text = "{val:.{prec}f}".format(val=val, prec=precision)
    return text
def quotes(q):
    """Report whether pdfcp() will append quotes around copied pdf text.

    Parameters
    ----------
    q : str ("y" or "")
        Quote append option user input specifying which message to print.

    Returns
    -------
    str
        "Quotes WILL be appended" if q was set to "y" by the user,
        "Quotes WON'T be appended" otherwise.
    """
    # `is` compares identity, not value: equal strings are not guaranteed
    # to be the same object, so the original `q is "y"` was unreliable.
    if q == "y":
        message = "Quotes WILL be appended"
    else:
        message = "Quotes WON'T be appended"
    # Notify the user on stdout as well as returning the message.
    print(message)
    return message
def GetTickTexts(ticks):
    """ GetTickTexts(ticks)

    Get tick labels of maximally 9 characters (plus sign char).

    All ticks will be formatted in the same manner, and with the same
    number of decimals. In exponential notation, the exponent is written
    with as less characters as possible, leaving more chars for the
    decimals. The algorithm is to first test for each tick the number of
    characters before the dot, the number of decimals, and the number of
    chars for the exponent. Then the ticks are formatted only without
    exponent if the first two chars (plus one for the dot) are less than 9.

    Examples are:
    xx.yyyyyy
    xxxxxxx.y
    x.yyyye+z
    x.yye+zzz
    """
    # For padding/unpadding exponent notation: strip leading zeros, then
    # right-justify back to i characters.
    def exp_pad(s, i=1):
        return s.lstrip('0').rjust(i, '0')

    # Round 1: determine amount of chars before dot, after dot, in exp
    minChars1, maxChars1 = 99999, 0
    maxChars2 = 0
    maxChars3 = 0
    for tick in ticks:
        # Make abs, our goal is to format the ticks such that without
        # the sign char, the string is smaller than 9 chars.
        tick = abs(tick)
        # Format with exponential notation and get exponent
        t = '%1.0e' % tick
        i = t.find('e')
        # Skip the exponent's sign character (i+2), keeping digits only.
        expPart = t[i + 2:]
        # Get number of chars before dot
        chars1 = int(expPart) + 1
        maxChars1 = max(maxChars1, chars1)
        minChars1 = min(minChars1, chars1)
        # Get number of chars in exponent
        maxChars3 = max(maxChars3, len(exp_pad(expPart)))
        # Get number of chars after the dot
        t = '%1.7f' % tick
        i = t.find('.')
        decPart = t[i + 1:]
        maxChars2 = max(maxChars2, len(decPart.rstrip('0')))
    # Round 2: Create actual texts
    ticks2 = []
    if maxChars1 + maxChars2 + 1 <= 9:
        # This one is easy: everything fits in plain fixed notation.
        chars2 = maxChars2
        f = '%%1.%if' % chars2
        for tick in ticks:
            # Format tick and store
            if tick == -0:
                tick = 0
            ticks2.append(f % tick)
    elif maxChars1 < 9:
        # Do the best we can: spend the remaining budget on decimals.
        chars2 = 9 - (maxChars1 + 1)
        f = '%%1.%if' % chars2
        for tick in ticks:
            # Format tick and store
            if tick == -0:
                tick = 0
            ticks2.append(f % tick)
    else:
        # Exponential notation
        chars2 = 9 - (4 + maxChars3)  # 0.xxxe+yy
        f = '%%1.%ie' % chars2
        for tick in ticks:
            # Format tick
            if tick == -0:
                tick = 0
            t = f % tick
            # Remove zeros in exp (keep sign at i+1, re-pad digits to the
            # common exponent width).
            i = t.find('e')
            t = t[:i + 2] + exp_pad(t[i + 2:], maxChars3)
            # Store
            ticks2.append(t)
    # Done
    return ticks2
def parse_room_config(config):
    """Normalize the room config to the dict-of-dicts format.

    Provides backwards compatibility with the previous room list as dict
    format by transforming entries like ``{"main": "#myroom:matrix.org"}``
    into ``{"main": {"alias": "#myroom:matrix.org"}}``.
    """
    normalized = {}
    for name, room in config["rooms"].items():
        if isinstance(room, dict):
            normalized[name] = room
        elif isinstance(room, str):
            normalized[name] = {'alias': room}
        else:
            raise TypeError("Elements of the room config dictionary must be strings or dicts")
    return normalized
def fade_copies_left(s):
    """Concatenate ``s`` with its left slices of decreasing sizes.

    Parameters
    ----------
    s : string
        String to be repeated and added together.

    Returns
    -------
    output : string
        e.g. "abc" -> "abc" + "ab" + "a" == "abcaba"
    """
    return "".join(s[:length] for length in range(len(s), 0, -1))
def trsplit(trin):
    """Split data segments with marked gaps (delegates to ``split()``)."""
    return trin.split()
def check_bad_next_streams(stream_array):
    """Make sure every next stream in the list starts within the current one.

    :return: False as soon as a stream's start exceeds the previous
        stream's end, otherwise True.
    """
    for current, following in zip(stream_array, stream_array[1:]):
        if following[0] > current[1]:
            return False
    return True
def indentify(stuff, rep=1, indent='\t'):
    """Indent every line of ``stuff`` by ``rep`` copies of ``indent``.

    From http://code.activestate.com/recipes/66055-changing-the-indentation-of-a-multi-line-string/#c4
    """
    prefix = rep * indent
    return "\n".join(prefix + line for line in stuff.splitlines())
def is_urllib_network_error(exc: BaseException) -> bool: """Is the provided exception from urllib a network-related error? This should be passed an exception which resulted from opening or reading a urllib Request. It returns True for any errors that could conceivably arise due to unavailable/poor network connections, firewall/connectivity issues, etc. These issues can often be safely ignored or presented to the user as general 'network-unavailable' states. """ import urllib.request import urllib.error import http.client import errno import socket if isinstance( exc, (urllib.error.URLError, ConnectionError, http.client.IncompleteRead, http.client.BadStatusLine, socket.timeout)): return True if isinstance(exc, OSError): if exc.errno == 10051: # Windows unreachable network error. return True if exc.errno in { errno.ETIMEDOUT, errno.EHOSTUNREACH, errno.ENETUNREACH, }: return True return False
def relu_grad(z): """ Relu derivative. g'(z) = 0 if g(z) <= 0 g'(z) = 1 if g(z) > 0 """ return 1*(z > 0)
def get_tasks(boards: dict) -> dict: """ Take boards dictionaries and return tasks dictionaries. Extract tasks dictionary from the all boards and merge them all together. Args: ---- boards (dict): A dictionary containing dictionaries of all the boards. Returns ------- A dictionary containing dictionaries of all the tasks. """ tasks = {} for board in boards.values(): tasks.update(board['tasks']) return tasks
def rm_party(senators, party): """ remoe party if no senator in the party """ if senators[party] == 0: del senators[party] return senators
def find_odd_occurred_number_sol2(nums): """ You are given an array of repeating numbers. All numbers repeat in even way, except for one. Find the odd occurring number. - This solution takes less space. - More details: https://youtu.be/bMF2fG9eY0A - Time: O(len(nums)) - Space: worst: O(len(nums)) """ values = set() for value in nums: if value in values: values.remove(value) else: values.add(value) return next(iter(values))
def check_node(left, right) -> int: """ Count 1 for each node found. (Unpacking directly in the parameters is faster) """ return 1 if left is None else 1 + check_node(*left) + check_node(*right)
def generate_managed_policy(resource_name: str, permissions): """Generate an IAM Managed Policy resource""" return { resource_name: { "Type": "AWS::IAM::ManagedPolicy", "Properties": { "PolicyDocument": {"Version": "2012-10-17", "Statement": permissions} }, } }
def integerize(value): """Convert value to integer. Args: value: Value to convert Returns: result: Value converted to iteger """ # Try edge case if value is True: return None if value is False: return None # Try conversion try: result = int(value) except: result = None # Return return result
def spo2SignleMeasurementHandler(data): """ For get this answer need to sent AB 05 FF 31 11 00 FF """ # Only for testing - no detailed parsing, just form answer return bytearray([0xAB, 0x04, 0xFF, 0x31, 0x11, 0x22]) pass
def find_method_label(method, local_align_method=None, srm_components=0,
                      srm_atlas=None, atlas_name="", ha_radius=5,
                      ha_sparse_radius=3, smoothing_fwhm=6):
    """Create a 'method_label' string used to name output files.

    Ensures every alignment configuration maps to a unique file path,
    avoiding naming clashes between derived results.

    Parameters
    ----------
    method: str
        Desired alignment method. One of ["anat_inter_subject",
        "smoothing", "pairwise", "template", "intra_subject", "srm", "HA"].
    local_align_method: str
        An alignment method recognized by fmralign (required for
        "pairwise" and "template").
    srm_components: int
        Requested number of SRM components (the hyperparameter _k_).
        Must be non-zero for SRM-family methods.
    srm_atlas: ndarray, optional
        Probabilistic or deterministic atlas on which to project the data.
    atlas_name: str, optional
        Optional name for the requested SRM atlas.
    ha_radius: int
        Searchlight sphere radius in voxels (default 5).
    ha_sparse_radius: int
        Radius supplied to scatter_neighborhoods, in voxels (default 3).
    smoothing_fwhm: int
        Smoothing kernel for the 'smoothing' baseline (default 6).

    Returns
    -------
    method_label: str

    Raises
    ------
    ValueError
        If a required companion argument for the chosen method is missing.
    """
    label = method

    if method in ("pairwise", "template"):
        if local_align_method is None:
            err_msg = ("Requested {} method ".format(method) +
                       "but local_align_method is undefined")
            raise ValueError(err_msg)
        label += "_{}".format(local_align_method)
    elif method == "intra_subject":
        label += "_ridge_cv"
    elif method == "smoothing":
        # Zero-padded to two digits so labels sort lexicographically.
        label += "_{:0>2d}".format(smoothing_fwhm)
    elif method in ("srm", "piecewise_srm", "mvica"):
        if not srm_components:
            err_msg = ("Requested SRM but srm_components is zero. Please " +
                       "request a non-zero number of components.")
            raise ValueError(err_msg)
        label += "_{}".format(srm_components)
        if srm_atlas is not None:
            label += "_{}".format(atlas_name)
    elif method == "HA":
        label += "rad_{}_sparse_{}".format(ha_radius, ha_sparse_radius)

    return label
def reduce_file_select_results(results): """Reduces a list of file select results to a single determining result.""" for (result, test), _rule in reversed(results): if result is not None: return result, test return None, None
def index(mask: int) -> int: """: indexw { mask n k n-k+1 index } 1 dup 2dup to n to k to n-k+1 to index 1 0 mask begin dup while 1 and if dup index + to index swap n * k / swap n * k 1 + dup to k / else over + swap n * n-k+1 dup 1 + to n-k+1 / swap then n 1 + to n mask 2/ dup to mask repeat 2drop index ; """ n, k, nmkp1, nCk, nCkm1, index = 1, 1, 1, 0, 1, 1 while mask: if mask & 1: index += nCk nCkm1 *= n nCkm1 //= k k += 1 nCk *= n nCk //= k else: nCk += nCkm1 nCkm1 *= n nCkm1 //= nmkp1 nmkp1 += 1 mask >>= 1 n += 1 return index
def booleanize(value): """ Convert a string to a boolean. :raises: ValueError if unable to convert. :param str value: String to convert. :return: True if value in lowercase match yes, true, or False if no or false. :rtype: bool """ valuemap = { 'true': True, 'yes': True, 'false': False, 'no': False, } casted = valuemap.get(value.lower(), None) if casted is None: raise ValueError(str(value)) return casted
def replace_all(text, replace_dict): """ Replace multiple strings in a text. .. note:: Replacements are made successively, without any warranty on the order \ in which they are made. :param text: Text to replace in. :param replace_dict: Dictionary mapping strings to replace with their \ substitution. :returns: Text after replacements. >>> replace_all("foo bar foo thing", {"foo": "oof", "bar": "rab"}) 'oof rab oof thing' """ for i, j in replace_dict.items(): text = text.replace(i, j) return text
def iou_score(bbox1, bbox2): """Jaccard index or Intersection over Union. https://en.wikipedia.org/wiki/Jaccard_index """ assert len(bbox1) == 4 assert len(bbox2) == 4 s1 = (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1]) s2 = (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1]) intersection_rows = max(min((bbox2[2], bbox1[2])) - max((bbox2[0], bbox1[0])), 0) intersection_cols = max(min((bbox2[3], bbox1[3])) - max((bbox2[1], bbox1[1])), 0) intersection = intersection_rows * intersection_cols union = s1 + s2 - intersection return 1.0 * intersection / union
def make_required_test_packages(): """Prepare extra packages needed for 'python setup.py test'.""" return [ 'apache-airflow>=1.10,<2', 'docker>=4.0.0,<5.0.0', # LINT.IfChange 'kfp>=0.1.30,<0.2; python_version >= "3.0"', # LINT.ThenChange( # testing/github/common.sh, # testing/github/ubuntu/image/image.sh, # testing/kubeflow/common.sh # ) 'pytest>=5.0.0,<6.0.0', 'tensorflow>=1.14,<2', 'tzlocal>=1.5,<2.0', ]
def flatten(elements): """Flatten a collection of collections""" if isinstance(elements, list): return [item for element in elements for item in flatten(element)] elif isinstance(elements, dict): return flatten(list(elements.values())) else: return [elements]
def flatten_list(tier_list): """ Given a list of lists, this returns a flat list of all items. :params list tier_list: A 2D list. :returns: A flat list of all items. """ if tier_list is None: return [] flat_list = [item for sublist in tier_list for item in sublist] return flat_list
def oppositeColour(colour): """ Returns the opposite colour to that given """ if colour == "RED": return "BLUE" elif colour == "BLUE": return "RED" else: return "NONE"
def most_recent_unbanned(booru_results): """ Get the first booru post from an artist that isn't banned. That's because posts from banned artists don't have the id field. :param booru_results: List<dict>: A list of booru results. :return: dict: The first booru result that is not banned. """ for result in booru_results: if "id" in result: return result return None
def set_abort_status(status): """Terminate MPI execution environment at Python exit. Terminate MPI execution environment at Python exit by calling ``MPI.COMM_WORLD.Abort(status)``. This function should be called within an ``except`` block. Afterwards, exceptions should be re-raised. """ import sys status = (status if isinstance(status, int) else 0 if status is None else 1) pkg = __package__ or __name__.rpartition('.')[0] mpi = sys.modules.get(pkg + '.MPI') if mpi is not None and status: # pylint: disable=protected-access mpi._set_abort_status(status) return sys.exc_info()
def remote_resources_data_to_metax(resources): """ Converts external resources from qvain light schema to metax schema. Arguments: data {object} -- External resources. Returns: object -- Object containing external resources array that complies with Metax schema. """ metax_remote_resources = [] for resource in resources: metax_remote_resources_object = {} metax_remote_resources_object["use_category"] = {} metax_remote_resources_object["access_url"] = {} metax_remote_resources_object["download_url"] = {} metax_remote_resources_object["title"] = resource["title"] metax_remote_resources_object["access_url"]["identifier"] = resource["accessUrl"] metax_remote_resources_object["download_url"]["identifier"] = resource["downloadUrl"] metax_remote_resources_object["use_category"]["identifier"] = resource["useCategory"]["value"] metax_remote_resources.append(metax_remote_resources_object) return metax_remote_resources
def vector_multiply(vector_in, scalar): """ Multiplies the vector with a scalar value. This operation is also called *vector scaling*. :param vector_in: vector :type vector_in: list, tuple :param scalar: scalar value :type scalar: int, float :return: updated vector :rtype: tuple """ scaled_vector = [v * scalar for v in vector_in] return tuple(scaled_vector)
def fans(shape): """Returns fan_in and fan_out according to the shape. Assumes the len of shape is 2 or 4. """ assert(len(shape) == 2 or len(shape) == 4) if len(shape) == 2: return shape[0], shape[1] else: S = shape[0] * shape[1] return S * shape[2], S * shape[3]
def split_command(command): """ :param command: the command introduced by the user :return: the command word and command parameters """ tokens = command.split(maxsplit=1) # splits the input in the command word and command parameters command_word, command_params = None, None command_word = tokens[0] if len(tokens) == 2: # the parameters might not exist command_params = tokens[1] command_word = command_word.strip().casefold() # trying to convert the command_word to small cases and to remove any additional spaces return command_word, command_params
def stretch_array(data, newlength): """Stretch an array to a new length.""" oldlength = len(data) assert oldlength <= newlength, "Can't shrink in stretch function" factor = float(newlength) / float(oldlength) result = bytearray(newlength) i = 0 offset = 0.0 for byte in data: offset += factor while offset >= 1.0: result[i] = byte i += 1 offset -= 1.0 return result
def group_codon_usages_by_ids(codon_usages, ids, field='GeneId'): """Sorts codon usages into 2 lists: non-matching and matching ids.""" result = [[],[]] for c in codon_usages: result[getattr(c, field) in ids].append(c) return result
def search_codetree_isleftsub(tword,codetree): """ Stored in codetree having any value terminal node (i.e., reaching terminal node) """ pos = 0 while True: s = tword[pos] if s not in codetree: return 0 elif pos==len(tword)-1: return 1 else: pos += 1 codetree = codetree[s][1]