def integer_as_number_of_bits(size):
    """Returns the minimum number of bits needed to fit given positive integer."""
    if size == 0:
        return 0
    else:
        return size.bit_length()
def _key(node, terminal):
    """
    Combine *node* and *terminal* to a key into the results set for the
    current template. Returns a string unique within the template that
    can be used in JSON.
    """
    return ":".join((str(node), str(terminal)))
def str2bool(v):
    """String to Boolean"""
    # because argparse does not support parsing "true, False" as a python
    # boolean directly
    return v.lower() in ("true", "t", "1")
def Ncv(c, H, D):
    """Vertical uplift factor for sand"""
    if c == 0:
        return 0
    return min(2 * H / D, 10)
def para_size_greater_than_n(para_list, n=0):
    """Given a list of texts, returns the paragraphs longer than n characters.

    Keyword arguments:
    para_list -- a list of texts
    n -- return paragraphs of size > n characters
    """
    # the original guarded on n > 0, which silently returned None for the
    # default n=0; the filter itself handles that case correctly
    return [para for para in para_list if len(para) > n]
def get_data_list_of_doubly_linked_list(sll):
    """Converts a DoublyLinkedList object into a Python list."""
    rval = []
    while sll:
        rval.append(sll.data)
        sll = sll.next
    return rval
def distinct_pos(s):
    """Returns the number of distinct positions that are present in the goal"""
    seen = set()
    pos = 0
    for atom in s:
        key = atom.val()
        if key not in seen:
            pos += 1
            seen.add(key)
    return pos
def _pass_through(req_line, permit_urls=False):
    """Identify unparsable lines."""
    if permit_urls:
        return (req_line.startswith('http://tarballs.openstack.org/') or
                req_line.startswith('-f'))
    else:
        return (req_line.startswith('http://tarballs.openstack.org/') or
                req_line.startswith('-e') or
                req_line.startswith('-f'))
def is_true(s):
    """Case insensitive string parsing helper.

    Return True for true (case insensitive matching), False otherwise.
    """
    return s.lower() == 'true'
def decode(s):
    """Decode string in format k[str] to str * k

    >>> decode('3[a2[c]]')
    'accaccacc'
    """
    stack = []
    for c in s:
        # use ==, not 'is': identity checks against string literals are
        # unreliable
        if c == ']':
            seg, num = '', ''
            # unwind the stack back to the matching '['
            while stack:
                cur = stack.pop()
                if cur == '[':
                    break
                seg = cur + seg
            # collect the repeat count preceding the '['
            while stack and stack[-1].isdigit():
                num = stack.pop() + num
            stack.append(seg * int(num))
        else:
            stack.append(c)
    return ''.join(stack)
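# A quick usage sketch for decode (illustrative assumption, not part of the
# original source): each ']' unwinds the stack to its matching '[' and the
# repeated segment is pushed back as a single string.
assert decode('3[a2[c]]') == 'accaccacc'
assert decode('2[abc]3[cd]ef') == 'abcabccdcdcdef'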
def countInversions(state):
    """
    Count the number of inversions in the given state

    :param state: Iterable (list of strings or ints expected, 'b' element accounted for)
    :return: number of inversions in the given state
    """
    n = len(state)
    invs = 0
    # Count inversions
    for i in range(n - 1):
        for j in range(i + 1, n):
            # Ignore blank tile 'b'
            if 'b' not in (state[i], state[j]):
                if state[i] > state[j]:
                    invs += 1
    return invs
def prepare_entity_html(entity_groupings, binary=True):
    """Creates a formatted HTML string, underlining detected entities."""
    string_groupings = []
    for entity, flag in entity_groupings:
        if flag:
            if binary:
                string_groupings.append(
                    f'<u style="background-color:DodgerBlue;color:white;">{entity}</u>'
                )
            else:
                string_groupings.append(
                    f'<u style="background-color:DodgerBlue;color:white;">{entity}</u> '
                    f'<span style="background-color:LightGray;">({flag})</span>'
                )
        else:
            string_groupings.append(entity)
    formatted_text = ' '.join(string_groupings)
    return formatted_text
def metric_passes(expected: float, got: float, tolerance: int) -> bool:
    """
    Determine if a test result meets a particular threshold.

    Compares the parsed value with the requested baseline for the same test
    and returns a boolean of whether or not it is greater than expected. If a
    tolerance of N is passed, any value no more than N percent below the
    baseline will still be marked as passing.

    Parameters
    ----------
    expected : float
        A ``float`` of the baseline value to compare against.
    got : float
        A ``float`` of the test result that was parsed.
    tolerance : int
        An ``int`` of the percentage below the threshold to still mark as
        passing.

    Returns
    -------
    bool
        Returns a ``boolean`` which evaluates to `True` when the parsed value
        is greater than the baseline and `False` otherwise.
    """
    if tolerance > 0:
        # With a 5% tolerance, scale the expected value down by 5% to get
        # the effective threshold.
        expected = (1 - tolerance / 100) * expected
    return got > expected
def in2s(nms):
    """
    Convert the list into a list of couples of two elements.

    ::

        >>> in2s([1, 2, 3, 4]) == [(1, 2), (3, 4)]
        True

    :param nms: list of indices
    """
    return list(zip(nms[::2], nms[1::2]))
def calcShiftAUC(auc, y0, tF):
    """Calculate the area under the curve minus the area below the starting OD"""
    return auc - (y0 * tF)
def unqualify(name: str) -> str:
    """Return an unqualified name given a qualified module/package name."""
    return name.rsplit(".", maxsplit=1)[-1]
def get_class_type(context):
    """extract the Class related to the type, in C++ format

    Args:
        context (dict): context (related to an interface description)

    Returns:
        str: The class extracted from the type

    Examples:
        >>> context = {'name': 'misc', 'type': 'std_msgs::String', 'desc': 'an interface'}
        >>> get_class_type(context)
        'String'
    """
    return context['type'].split("::")[1]
def secondsToMMSS(secs):
    """Convert number of seconds to the string ``mm:ss``.

    Note: If the number of minutes is greater than 100, it will
    be displayed as such.

    Args:
        secs (int): Number of seconds.

    Returns:
        str: String in the format of ``mm:ss``.
    """
    secs = int(secs)
    minutes, seconds = divmod(secs, 60)
    return '{:02d}:{:02d}'.format(minutes, seconds)
def myeval(e, g=None, l=None):
    """Like `eval` but returns only integers and floats"""
    r = eval(e, g, l)
    if type(r) in [int, float]:
        return r
    # wrap r in a one-tuple so %-formatting doesn't unpack tuple results
    raise ValueError('r=%r' % (r,))
def check_pid(pid):
    """
    Check that a pid is of type str. Pids are generated as uuid4, and this
    check is done to make sure the programmer has converted it to a str
    before attempting to use it with the DataONE client.

    :param pid: The pid that is being checked
    :type pid: str, int
    :return: Returns the pid as a str, or just the pid if it was already a str
    :rtype: str
    """
    if not isinstance(pid, str):
        return str(pid)
    else:
        return pid
def _get_unique_smoothie_responses(responses):
    """
    Find the number of truly unique responses from the smoothie, ignoring
    - Responses that are only different because of interjected \\r\\n
    - Responses that are only different by \\r or \\n replacing a single char

    Both of these errors are results of race conditions between smoothie
    terminal echo mode and responses, and do not indicate serial failures.
    """
    uniques = list(set(responses))  # Eliminate exact repetitions
    # eliminate "uniques" that are really just the second \r\n racing
    # with the smoothie's return values
    true_uniques = [uniques[0]]
    for unique in uniques[1:]:
        if len(unique) != len(uniques[0]):
            true_uniques.append(unique)
            continue
        for a, b in zip(uniques[0], unique):
            if a in '\r\n':
                continue
            elif b in '\r\n':
                continue
            elif a != b:
                true_uniques.append(unique)
                break
    return true_uniques
def is_filelike_object(fobj):
    """
    Check if an object is file-like in that it has a read method.

    :param fobj: the possible filelike-object.
    :returns: True if the object is filelike.
    """
    return hasattr(fobj, 'read')
def format_dict_where_to_sql(ColsSearch):
    """
    in:  {"Col1": 1, "Col2": 2}
    out: ("Col1 = %s AND Col2 = %s", [1, 2])
    """
    sqls = []
    values = []
    for col, value in ColsSearch.items():
        sqls.append("`{}` = %s".format(col))
        values.append(value)
    sql = " AND ".join(sqls)
    return (sql, values)
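# Usage sketch for format_dict_where_to_sql (assumed example, not from the
# source): the pieces plug straight into a parameterized query.
sql, values = format_dict_where_to_sql({"Col1": 1, "Col2": 2})
assert sql == "`Col1` = %s AND `Col2` = %s"
assert values == [1, 2]
# e.g. cursor.execute("SELECT * FROM t WHERE " + sql, values)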
def trim(data):
    """removes spaces from each string in a list"""
    for i in range(len(data)):
        data[i] = data[i].replace(' ', '')
    return data
def check_direction(lch, hs, f_dir):
    """
    Make sure that the path between the term and the lowest common head is
    in a certain direction

    :param lch: the lowest common head
    :param hs: the path from the lowest common head to the term
    :param f_dir: function of direction
    :return: True if any edge on the path is not in the given direction
    """
    return any(modifier not in f_dir(head)
               for head, modifier in zip([lch] + hs[:-1], hs))
def genotype_from_likelyhood_index(p, n, index):
    """
    Figure out the allele numbers corresponding to a likelihood position for
    ploidy P and N alternate alleles.
    https://samtools.github.io/hts-specs/VCFv4.3.pdf

    :param p: the ploidy as int
    :param n: alt alleles as int
    :param index: position in the likelihood array
    :return: list with genotype numbers as strings (it's how pyvcf has it)
    """
    def recursive_order(_p, _n, alleles, suffix=[]):
        for a in range(_n):
            if _p == 1:
                alleles.append([str(a)] + suffix)
            elif _p > 1:
                recursive_order(_p - 1, a + 1, alleles, [str(a)] + suffix)

    alleles_list = []
    recursive_order(p, n, alleles_list)
    return alleles_list[index]
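# Usage sketch (assumed example): for a diploid site (p=2) with allele ids
# 0 and 1 (n=2, since the range(_n) loop counts allele ids), VCF orders the
# likelihoods 0/0, 0/1, 1/1, so index 1 is the heterozygous genotype.
assert genotype_from_likelyhood_index(2, 2, 1) == ['0', '1']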
def tf_score(word, sentence):
    """
    Formula: (Number of times term w appears in a document) /
             (Total number of terms in the document).
    """
    word_frequency_in_sentence = 0
    words = sentence.split()
    for word_in_sentence in words:
        if word == word_in_sentence:
            word_frequency_in_sentence = word_frequency_in_sentence + 1
    # divide by the term count, not the character count, per the formula above
    tf = word_frequency_in_sentence / len(words)
    return tf
def bitmask(n):
    """Create a bitmask of `n` ones."""
    return (1 << n) - 1
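# Quick check (assumed example): four ones in binary is 0b1111 == 15.
assert bitmask(4) == 0b1111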
def parse_cnt(feature):
    """
    returns dict
    'id', 'category' - required keys
    'dist_meters' - distance from point in search
    """
    res = {
        'id': feature['id'],
        'category': 'adm_country',
        'cnt_name': feature['properties']['name'],
        'admin_center_name': feature['properties']['admin_center'],
        'admin_center_id': feature['properties']['admin_center_id'],
        'dist_meters': feature['properties']['dist_meters'],
    }
    try:
        res['geometry'] = feature['geometry']
    except KeyError:  # narrowed from a bare except: geometry is optional
        pass
    return res
def g(i: int, k: int) -> float:
    """
    Stage cost function for $\\delta_N^i$ and $\\delta_M^k$

    :return: cost of a transition
    """
    B = [0, 1]
    A = [14, 8, 14, 2, 17, 8, 1, 13, 3, 14, 11, 14, 19, 2, 19, 11,
         15, 10, 16, 6, 4, 8, 19, 12, 3, 14, 17, 8, 16, 11, 12, 11]
    return A[i - 1] + B[k - 1]
def analyze_arguments(arguments):
    """
    Checks given arguments and passes correct ones to the compilation script
    """
    accepted_arg_prefix = ['--use_openbabel', '--use_cython',
                           '--cython_compiler', '--overwrite']

    def _split(arg):
        pos = arg.find('=')
        prefix = arg[:pos]
        suffix = arg[pos + 1:]
        return (None, None) if prefix not in accepted_arg_prefix else (prefix, suffix)

    # Default compiler arguments
    use_openbabel = False
    use_cython = False
    cython_compiler = 'msvc'
    overwrite = False

    if len(arguments) != 0:
        # Arguments are given
        for entry in arguments:
            data = _split(entry)
            if data[0] == '--use_cython':
                use_cython = data[1]
            elif data[0] == '--cython_compiler':
                cython_compiler = data[1]
            elif data[0] == '--use_openbabel':
                use_openbabel = data[1]
            elif data[0] == '--overwrite':
                overwrite = data[1]

    return use_openbabel, use_cython, cython_compiler, overwrite
def mod_exp(val, exp, modulus):
    """Computes an exponent in a modulus.

    Raises val to power exp in the modulus without overflowing.

    Args:
        val (int): Value we wish to raise the power of.
        exp (int): Exponent.
        modulus (int): Modulus where computation is performed.

    Returns:
        A value raised to a power in a modulus.
    """
    return pow(int(val), int(exp), int(modulus))
def template_data_nested_json_to_flat_json(template_data_value):
    """
    Helper function to convert nested JSON of template data to flat JSON.
    """
    def process_value_parameter(data, parameter):
        # Just process the first layer of attributes. No value is needed
        # from any nested attributes as they must be virtual
        for attr in parameter.get('attributes', []):
            if 'virtual' not in attr:
                data.append(attr['value'])
        # Associated Field value appears before the value of the owner node
        data.append(parameter['value'])

    def process_members(data, members):
        for parameter in members:
            if 'value' in parameter:
                process_value_parameter(data, parameter)
            else:
                if 'factor' in parameter:
                    process_value_parameter(data, parameter['factor'])
                if 'members' in parameter:
                    if parameter['id'].startswith('1'):
                        # Replication
                        for members in parameter['members']:
                            process_members(data, members)
                    else:
                        process_members(data, parameter['members'])

    data_all_subsets = []
    for nested_subset_data in template_data_value:
        flat_subset_data = []
        process_members(flat_subset_data, nested_subset_data)
        data_all_subsets.append(flat_subset_data)
    return data_all_subsets
def cumarray_to_array(ar):
    """Convert cumulative array to normal array.

    Args:
        ar (List): List of numbers
    """
    ans = []
    for i, x in enumerate(ar):
        ans.append(x if i == 0 else (ar[i] - ar[i - 1]))
    return ans
def locate_registry(locate):
    """
    :param locate: 'factory.dev1.registry'
    :return: 'dev1'
    """
    # filter instead of calling remove() while iterating over the same list,
    # which skips elements
    paths = [k for k in locate.split('.') if k not in ('factory', 'registry')]
    return paths[0] if paths else []
def set_of_feeds_from_list_of_dics(connections_list_of_dics):
    """Return the set of 'from temp index' values of all connections whose
    'from type' is 'feed'."""
    list_of_feeds = []
    for connection_dic in connections_list_of_dics:
        if connection_dic['from type'] == 'feed':
            list_of_feeds.append(connection_dic['from temp index'])
    return set(list_of_feeds)
def transform_back_into_seq(true_indices):
    """
    Given the indices of the prime, returns a sequence of patterns.
    """
    reversed_indices = {}
    for key in true_indices:
        for val in true_indices[key]:
            reversed_indices[val] = key
    seq = list()
    keys = list(reversed_indices.keys())
    keys.sort()
    for val in keys:
        seq.append(reversed_indices[val])
    return seq
def chimera_elimination_order(m, n=None, t=None):
    """Provides a variable elimination order for a Chimera graph.

    A graph defined by `chimera_graph(m,n,t)` has treewidth :math:`max(m,n)*t`.
    This function outputs a variable elimination order inducing a tree
    decomposition of that width.

    Parameters
    ----------
    m : int
        Number of rows in the Chimera lattice.
    n : int (optional, default m)
        Number of columns in the Chimera lattice.
    t : int (optional, default 4)
        Size of the shore within each Chimera tile.

    Returns
    -------
    order : list
        An elimination order that induces the treewidth of chimera_graph(m,n,t).

    Examples
    --------
    >>> G = dnx.chimera_elimination_order(1, 1, 4)  # a single Chimera tile
    """
    if n is None:
        n = m
    if t is None:
        t = 4

    index_flip = m > n
    if index_flip:
        m, n = n, m

    def chimeraI(m0, n0, k0, l0):
        if index_flip:
            return m * 2 * t * n0 + 2 * t * m0 + t * (1 - k0) + l0
        else:
            return n * 2 * t * m0 + 2 * t * n0 + t * k0 + l0

    order = []

    for n_i in range(n):
        for t_i in range(t):
            for m_i in range(m):
                order.append(chimeraI(m_i, n_i, 0, t_i))

    for n_i in range(n):
        for m_i in range(m):
            for t_i in range(t):
                order.append(chimeraI(m_i, n_i, 1, t_i))

    return order
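# Usage sketch (assumed example, computed from the index formula above): for
# a single tile with shore size 2, one shore is eliminated before the other,
# each in index order.
assert chimera_elimination_order(1, 1, 2) == [0, 1, 2, 3]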
def pyro_obj_to_auto_proxy(obj):
    """reduce function that automatically replaces Pyro objects by a Proxy"""
    daemon = getattr(obj, "_pyroDaemon", None)
    if daemon:
        # only return a proxy if the object is a registered pyro object
        return daemon.proxyFor(obj)
    return obj
def format_list(items):
    """format a list into a space-separated string"""
    # parameter renamed from 'list' to avoid shadowing the builtin
    return " ".join(str(tok) for tok in items)
def sort_points_by_Y(list_of_points):
    """Given a list of 2D points (represented as Point objects), uses "sorted"
    with the "key" argument to create and return a list of the SAME (not
    copied) points sorted in decreasing order based on their Y coordinates,
    without modifying the original list."""
    return sorted(list_of_points, key=lambda point: point.getY(), reverse=True)
def packItem(call_name, method, tags, summary, description, params,
             query_metadata, extraMetadata):
    """Generate a swagger specification item using all the given parameters."""
    item = {
        'call_name': call_name,
        'method': method,
        'tags': tags,
        'summary': summary,
        'description': description,
        'params': params,
        'item_properties': None,
        'query': query_metadata['query'],
        'original_query': query_metadata.get('original_query',
                                             query_metadata['query'])
    }
    for extraField in extraMetadata:
        if extraField in query_metadata:
            item[extraField] = query_metadata[extraField]
    return item
def reduce_to_summary(cals, is_dict=False):
    """Reduce calendar json to summaries or a dict of summary:id pairs."""
    if is_dict:
        return {x['summary']: x['id'] for x in cals}
    else:
        # list comprehension instead of map() so both branches return a
        # concrete container rather than a lazy iterator
        return [d['summary'] for d in cals]
def reverse(items):
    """reverse reverses the items in a list of items

    Parameters:
        items: list of items (can be a string)
    """
    reverseList = []
    itemSize = len(items)
    for i in range(itemSize, 0, -1):
        reverseList.append(items[i - 1])
    return reverseList
def removeNCharsFromCol(data, n, col, start):
    """
    Removes n characters from the value of a given column for every row,
    either from the start or the end of the string

    :param data: The data to process
    :param n: The number of characters
    :param col: The index of the column to alter
    :param start: Remove from start (True) or end (False)
    """
    for i in range(len(data)):
        try:
            data[i][col] = data[i][col][n:] if start else data[i][col][:-n]
        except IndexError:
            pass  # Empty field
    return data
def _nx_lookup_attrs(to_set, record, graph):
    """
    Helper to get attrs based on set input.

    :param to_set: List of dictionaries.
    :param record: Mapping of alias to node.
    :param graph: networkx.Graph
    :returns: Dict.
    """
    attrs = {}
    for i, attr in enumerate(to_set):
        key = attr.get("key", i)
        value = attr.get("value", "")
        if not value:
            lookup = attr.get("value_lookup", "")
            if lookup:
                alias, lookup_key = lookup.split(".")
                node = record[alias]
                value = graph.node[node].get(lookup_key, "")
        attrs[key] = value
    return attrs
def comma_code(collection):
    """
    Take an iterable collection and return the collection as a string
    formatted using the Oxford comma

    :param collection: collection of values to convert to readable string
    """
    if len(collection) == 0:
        return ''
    elif len(collection) == 1:
        return str(collection[0])
    # list comprehension used to explicitly cast items to str; str() would
    # convert a slice to its repr, so this implementation allows a mixed list
    return f"{', '.join([str(item) for item in collection[:-1]])}, and {collection[-1]}"
def newton(n, error):
    """Newton's method with poor initial guess of n."""
    x = n
    while abs(x ** 2 - n) > error:
        x = x - (x ** 2 - n) / (2 * x)
    return x
def stringify_location(chrom, start, stop, strand, region=None):
    """Convert genome location to a string, optionally prefixing with region"""
    if region is not None:
        return '{0}:{1}:{2}-{3}:{4}'.format(region, chrom, start, stop, strand)
    else:
        return '{0}:{1}-{2}:{3}'.format(chrom, start, stop, strand)
def color_code_severity_widget(widget, name):
    """Utility function to colour-code Severity options"""
    for option, color in zip(widget, ["green", "yellow", "orange", "red"]):
        option[0].__setitem__("_style", "background-color:%s;" % color)
        option[0][0].__setitem__("_name", name)
    return widget
def _parse_hyphenated_string(s):
    """Parses a hyphenated range into a list of integers.

    In:  "2004-2007"
    Out: [2004, 2005, 2006, 2007]
    """
    # enumerate yields (index, bound); adding the index to the upper bound
    # makes the end of range() inclusive
    list_of_lists = [
        list(range(*[int(bound) + i for i, bound in enumerate(substring.split('-'))]))
        if '-' in substring else [int(substring)]
        for substring in s.split()
    ]
    return [item for sublist in list_of_lists for item in sublist]
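# Usage sketch (assumed example): ranges and single years can be mixed,
# separated by whitespace.
assert _parse_hyphenated_string("2004-2007 2010") == [2004, 2005, 2006, 2007, 2010]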
def islambda(func):
    """
    Test if the function func is a lambda ("anonymous" function)
    """
    # 'func_name' was the Python 2 attribute; Python 3 uses '__name__'
    return getattr(func, '__name__', False) == '<lambda>'
def _qualname(obj):
    """Get the fully-qualified name of an object (including module)."""
    return obj.__module__ + '.' + obj.__qualname__
def get_hda_paths(nodes):
    """Finds filesystem paths for specified HDAs."""
    hdas = []
    for node in nodes:
        d = node.type().definition()
        if d:
            path = d.libraryFilePath()
            if path != 'Embedded':
                hdas.append(path)
    return hdas
def undo_transforms(y, transformers):
    """Undoes all transformations applied."""
    # Note that transformers have to be undone in reversed order
    for transformer in reversed(transformers):
        if transformer.transform_y:
            y = transformer.untransform(y)
    return y
def base64encode(string, layer=1):
    """
    >>> base64encode('hello')
    'aGVsbG8='
    """
    from binascii import b2a_base64
    if layer > 0:
        return base64encode(
            b2a_base64(string.encode('UTF-8')).decode().replace("\n", ""),
            layer - 1)
    return string
def axis_label(label):
    """Replaces None with an empty string for axis labels."""
    return '' if label is None else label
def split_chunk_for_display(raw_bytes):
    """
    Given some raw bytes, return a display string

    Only show the beginning and end of largish (2x CONTENT_CHUNK_SIZE) arrays.

    :param raw_bytes:
    :return: display string
    """
    # Content repeats after chunks this big - used by echo client, too
    CONTENT_CHUNK_SIZE = 50
    if len(raw_bytes) > 2 * CONTENT_CHUNK_SIZE:
        result = (repr(raw_bytes[:CONTENT_CHUNK_SIZE]) + " ... " +
                  repr(raw_bytes[-CONTENT_CHUNK_SIZE:]))
    else:
        result = repr(raw_bytes)
    return result
def _is_short_form(acro: str, full: str) -> bool:
    """
    Check whether a given expansion has fewer words than the acronym has
    characters.

    :param acro: the acronym
    :param full: the candidate expansion
    :return: True if the expansion is too short to match the acronym
    """
    return len(full.split()) < len(acro)
def atLeastGate(argumentValues, k):
    """
    Method that evaluates the ATLEAST gate
    @ In, argumentValues, list, list of values
    @ In, k, float, minimum number of events required
    @ Out, outcome, float, calculated outcome of the gate
    """
    if argumentValues.count(1) >= k:
        outcome = 1
    else:
        outcome = 0
    return outcome
def get_dict(tokens, ngram, gdict=None):
    """
    get_dict
    """
    token_dict = {}
    if gdict is not None:
        token_dict = gdict
    tlen = len(tokens)
    for i in range(0, tlen - ngram + 1):
        ngram_token = "".join(tokens[i:(i + ngram)])
        if token_dict.get(ngram_token) is not None:
            token_dict[ngram_token] += 1
        else:
            token_dict[ngram_token] = 1
    return token_dict
def cycle_dist(x, y, n):
    """Find distance between x, y by means of a n-length cycle.

    Example:
        cycle_dist(1, 23, 24) = 2
        cycle_dist(5, 13, 24) = 8
        cycle_dist(0.0, 2.4, 1.0) = 0.4
        cycle_dist(0.0, 2.6, 1.0) = 0.4
    """
    dist = abs(x - y) % n
    if dist >= 0.5 * n:
        dist = n - dist
    return dist
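# Quick check (assumed example, matching the docstring): distances wrap
# around the cycle, so 1 and 23 on a 24-hour clock are only 2 apart.
assert cycle_dist(1, 23, 24) == 2
assert cycle_dist(5, 13, 24) == 8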
def build_merge(input_names, name, concat_axis):
    """function which adds concat merging of input_names"""
    return {
        "class_name": "Merge",
        "name": name,
        "config": {
            "output_mask": None,
            "output_mask_type": "raw",
            "dot_axes": -1,
            "output_shape": None,
            "concat_axis": concat_axis,
            "mode": "concat",
            "name": name,
            "output_shape_type": "raw",
            "arguments": {},
            "mode_type": "raw"
        },
        "inbound_nodes": [
            [[nm, 0, 0] for nm in input_names]
        ]
    }
def _format_servers_list_networks(networks):
    """Return a formatted string of a server's networks

    :param networks: a Server.networks field
    :rtype: a string of formatted network addresses
    """
    output = []
    for (network, addresses) in networks.items():
        if not addresses:
            continue
        addresses_csv = ', '.join(addresses)
        group = "%s=%s" % (network, addresses_csv)
        output.append(group)
    return '; '.join(output)
def path(index, height):
    """Return the path from a leaf to the root of a binary hash tree.

    Keyword arguments:
    index  -- the leaf's index, in range [0, 2^height - 1]
    height -- the height of the binary hash tree

    Returns:
    The path from the leaf at the given index to the root of the binary tree
    as a list of nodes. Each node is represented as a tuple consisting of the
    node's layer, and the node's index within this layer.
    """
    # Current layer
    layer = 0
    path = []
    while layer < height:
        path.append((layer, index))
        layer += 1
        index >>= 1
    return path
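# Usage sketch (assumed example): the path for leaf 5 in a height-3 tree
# climbs one layer at a time, halving the index at each step.
assert path(5, 3) == [(0, 5), (1, 2), (2, 1)]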
def mean(values):
    """Returns the mean of the values.

    Args:
        values: A list of values.

    Returns:
        The mean.
    """
    return sum(values) / len(values)
def gen_explicit_map_nn_sequential(_, args_pt):
    """
    Generate explicit_map for nn.Sequential.

    Args:
        args_pt (dict): Args for APIPt.

    Returns:
        dict, map between frames.
    """
    args = args_pt['*args']
    return {"*args": "[{}]".format(args)}
def gen_cube_vector(x, y, z, x_mult=1, y_mult=1, z_mult=1):
    """Generates a map of vector lengths from the center point to each coordinate

    x -- width of matrix to generate
    y -- height of matrix to generate
    z -- depth of matrix to generate
    x_mult -- value to scale x-axis by
    y_mult -- value to scale y-axis by
    z_mult -- value to scale z-axis by
    """
    cX = (x - 1) / 2.0
    cY = (y - 1) / 2.0
    cZ = (z - 1) / 2.0

    def vect(_x, _y, _z):
        return int(max(abs(_x - cX), abs(_y - cY), abs(_z - cZ)))

    return [[[vect(_x, _y, _z) for _z in range(z)] for _y in range(y)]
            for _x in range(x)]
def subt(intf, ints):
    """
    overpython.subt(intf, ints)

    Subtract intf with ints. Raises ValueError if intf/ints is a string.
    """
    try:
        return float(intf) - float(ints)
    except ValueError:
        raise ValueError("%s/%s is not a number" % (intf, ints))
def is_empty_list(l):
    """Check if a list only contains either empty elements or whitespace"""
    return all('' == s or s.isspace() for s in l)
def get_prior_scale(df, target_variance):
    """
    This function solves for the scale parameter needed for a scaled inverse
    chi-squared distribution, given degrees of freedom (df) and the desired
    variance (i.e. the mode of the distribution, as this function is intended
    to determine the prior over a Gaussian variance).

    The mode of a scaled-inverse chi-squared is defined
    (see Gelman, et al., Bayesian Data Analysis 2004):

        mode(theta) = df / (df + 2) * scale

    hence, if we set mode(theta) to our target, then the scale is

        scale = target_variance * (df + 2) / df
    """
    return target_variance * (df + 2) / df
def left(n):
    """Is node n a left descendant of the root in a zero-indexed binary tree?"""
    while n > 2:
        n = (n - 1) // 2
    return n == 1
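# Quick check (assumed example): in level order, node 0 is the root and
# nodes 1/2 are its children; 3 and 4 sit under 1, while 5 sits under 2.
assert left(3) and left(4) and not left(5)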
def _construct_resource_name(project_id, location, dataset_id, fhir_store_id,
                             resource_id):
    """Constructs a resource name."""
    return '/'.join(['projects', project_id,
                     'locations', location,
                     'datasets', dataset_id,
                     'fhirStores', fhir_store_id,
                     'resources', resource_id])
def usage15minTo1hTransform(usage_list):
    """Transforms series of water usage in 15 minute intervals into 1 hour
    intervals.

    :param usage_list: list of numbers
    :return: list of numbers (usage hourly)
    """
    usage = []
    for i in range(0, len(usage_list) - 1, 4):
        h = (float(usage_list[i]) + float(usage_list[i + 1]) +
             float(usage_list[i + 2]) + float(usage_list[i + 3]))
        usage.append(round(h, 2))
    return usage
def get_plan_quality(plan_code):
    """Define the quality of a plan as a function of its code.

    Args:
        plan_code (str): The plan's code like "bcmaea15_3".

    Return:
        quality (str): The plan's quality.
    """
    if plan_code[-2:] == "_2":
        quality = "Medium"
    elif plan_code[-2:] == "_3":
        quality = "High"
    else:
        quality = "Normal"
    return quality
def is_ascii(s):
    """checks if a string is ascii"""
    try:
        s.encode("ascii")
        return True
    except UnicodeEncodeError:
        return False
def make_url_given_id(expid):
    """
    Get the URL of the JSON file for an experiment, given its ID number

    :param expid: int with experiment ID number
    """
    return "https://neuroinformatics.nl/HBP/allen-connectivity-viewer/json/streamlines_{}.json.gz".format(
        expid
    )
def bucketFeatByCount(featCount):
    """Bucket the counts by powers of two."""
    for i in range(11):
        size = 2 ** i
        if featCount <= size:
            return size
    return -1
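# Usage sketch (assumed example): counts round up to the next power of two;
# anything above 2**10 falls outside the buckets and returns -1.
assert bucketFeatByCount(5) == 8
assert bucketFeatByCount(2000) == -1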
def code_span(text: str) -> str:
    """Returns the text surrounded by <code> tags.

    :param text: The text that should appear as a code span
    """
    return f'<code>{text}</code>'
def make_str_from_column(board, column_index):
    """ (list of list of str, int) -> str

    Return the characters from the column of the board with index
    column_index as a single string.

    >>> make_str_from_column([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 1)
    'NS'
    """
    str_col = ''
    for strList in board:
        str_col += strList[column_index]
    return str_col
def _check_shapes(in_shape, out_shape):
    """
    Check that valid shapes have been supplied by the caller.
    """
    try:
        iter(in_shape)
        iter(out_shape)
    except TypeError:  # narrowed from a bare except
        raise TypeError("Shape values must be iterables.")

    if len(in_shape) not in (2, 3, 4):
        raise ValueError("Shapes must be of length 2, 3 or 4 (c, x, [y], [z]).")

    # must be same dimensionality
    if len(in_shape) != len(out_shape):
        raise ValueError("Input and output shapes must be of the same number "
                         "of dimensions.")

    # must all be > 0
    if any(map(lambda x: x < 1, in_shape)) or any(map(lambda x: x < 1, out_shape)):
        raise ValueError("Input and output shapes must consist of entries > 0")

    return True
def quick_sort(array, low=0, high=None):
    """
    Divide and conquer sorting algorithm that is in place and not stable.
    O(n log n) time and O(log n) space in the average case
    """
    def partition(low, high):
        # the first element will be the pivot
        pivot = array[low]
        # swap elements left and right, based on the pivot's value
        left = low + 1
        right = high
        while True:
            # find an element >= pivot from the left
            while left <= right and array[left] < pivot:
                left += 1
            # find an element <= pivot from the right
            while left <= right and array[right] > pivot:
                right -= 1
            if left > right:
                break
            array[left], array[right] = array[right], array[left]
            left += 1
            right -= 1
        # move pivot into the sorted position
        array[low], array[right] = array[right], array[low]
        # return the new location of the pivot
        return right

    # init the subrange over the whole array
    if high is None:
        high = len(array) - 1

    # base case: zero or one elements
    if high - low < 1:
        return array

    # divide & conquer: partition elements around the pivot
    pivot = partition(low, high)

    # continue: sort both sides of the (now fixed) pivot
    quick_sort(array, low, pivot - 1)
    quick_sort(array, pivot + 1, high)
    return array
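# Usage sketch (assumed example): the sort is in place, and the same list is
# returned for convenience.
data = [5, 1, 4, 2, 3]
assert quick_sort(data) == [1, 2, 3, 4, 5]
assert data == [1, 2, 3, 4, 5]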
def clean(items):
    """trim trailing blanks in items

    items is a dictionary of numeric or string value keys and associated
    values"""
    keys = list(items.keys())
    # guard against an empty dict before probing the first key's type
    if keys and isinstance(keys[0], str):  # types must be homogeneous
        return {key.rstrip(): value for key, value in items.items()}
    else:
        return items
def rekey(batch, key_map):
    """Rekeys a batch using key_map.

    key_map specifies new_key: old_key pairs.

    Args:
        batch: a dictionary to modify.
        key_map: a dictionary that maps new keys to old keys. So if you want
            your new dataset to have keys 'inputs' and 'targets', where the
            old task just had 'targets', pass
            {'inputs': 'targets', 'targets': 'targets'}.

    Returns:
        a new batch dict.
    """
    return {key: batch[value] for key, value in key_map.items()}
def listify(maybe_list):
    """Make maybe_list a list if it is not.

    :param maybe_list: A variable that may be a list.
    :returns: A list."""
    return [maybe_list] if not isinstance(maybe_list, list) else maybe_list
def first(iterator, default=None):
    """Return first member of an `iterator`

    Example:
        >>> def it():
        ...     yield 1
        ...     yield 2
        ...     yield 3
        ...
        >>> first(it())
        1
    """
    return next(iterator, default)
def getUserRoles(username, password, authProfile="", timeout=60000):
    """Fetches the roles for a user from the Gateway.

    This may not be the currently logged in user. Requires the password for
    that user. If the authentication profile name is omitted, then the
    current project's default authentication profile is used.

    Args:
        username (str): The username to fetch roles for.
        password (str): The password for the user.
        authProfile (str): The name of the authentication profile to run
            against. Optional. Leaving this out will use the project's
            default profile.
        timeout (int): Timeout for client-to-gateway communication. Optional.
            (default: 60,000ms)

    Returns:
        tuple[str]: A list of the roles that this user has, if the user
            authenticates successfully. Otherwise, returns None.
    """
    print(username, password, authProfile, timeout)
    return "Administrator", "Developer"
def omega(delta_lambda):
    """Calculate the Buchdahl chromatic coordinate."""
    return delta_lambda / (1 + 2.5 * delta_lambda)
def flatten_json(json):
    """
    Flatten nested json to return a dict without nested values.

    Lists without nested values will be ignored, and lists of dicts will only
    return the first key value pair for each key. Useful for passing nested
    json to validation methods.
    """
    out = {}

    def flatten(x, name=''):
        if type(x) is dict:
            for k, v in x.items():
                flatten(v, k)
        elif type(x) is list:
            for a in x:
                flatten(a)
        elif name != '' and name not in out:
            out[name] = x

    flatten(json)
    return out
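# Usage sketch (assumed example): nested keys are hoisted to the top level;
# the first occurrence of a key wins and list nesting loses its parent key.
nested = {'a': 1, 'b': {'c': 2, 'd': [{'e': 3}]}}
assert flatten_json(nested) == {'a': 1, 'c': 2, 'e': 3}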
def genwhctrs(anchor):
    """Return width, height, x center, and y center for an anchor (window)."""
    base_w = anchor[2] - anchor[0] + 1  # 15 + 1
    base_h = anchor[3] - anchor[1] + 1
    x_ctr = anchor[0] + 0.5 * (base_w - 1)
    y_ctr = anchor[1] + 0.5 * (base_h - 1)
    return base_w, base_h, x_ctr, y_ctr
def divides(i, j):
    """True if j divides i"""
    # use ==, not 'is': identity checks against literals are unreliable
    if j == 0:
        return False
    return i % j == 0
def predictors_validate(predictors, data=None):
    """Validates the predictors and ensures that they are type list(str)

    Optionally checks that the predictors are columns in the data set. Only
    performs this check if the data parameter is not None.

    Parameters
    ----------
    predictors: list(str) or str
        the predictor(s) to validate
    data : pd.DataFrame or None, optional
        the data set to validate the predictors are in

    Returns
    -------
    list(str)
        validated predictors

    Raises
    ------
    ValueError
        if a predictor is named 'all' or 'none'
        if a predictor is not a column in the data set

    Examples
    --------
    >>> predictors_validate('famhistory')
    ['famhistory']
    >>> predictors_validate(['famhistory', 'marker'])
    ['famhistory', 'marker']
    >>> predictors_validate('all')
    Traceback (most recent call last):
    ...
    ValueError: predictor cannot be named 'all' or 'none'
    """
    if isinstance(predictors, str):  # single predictor
        predictors = [predictors]  # convert to list

    # can't use 'all' or 'none' columns as predictors
    for predictor in predictors:
        if predictor in ['all', 'none']:
            raise ValueError("predictor cannot be named 'all' or 'none'")

    # check that predictors are columns in the data
    if data is not None:
        for predictor in predictors:
            if predictor not in data.columns:
                raise ValueError("predictor must be a column in the dataframe")

    return predictors
def topk(l, k, key=lambda x: x):
    """Returns a sublist with the top k elements from a given list. Accepts key"""
    idx, _ = zip(*sorted(enumerate(l), key=lambda x: key(x[1]), reverse=True))
    return [l[i] for i in idx[0:k]]
def fuzzer_and_benchmark_to_key(fuzzer: str, benchmark: str) -> str:
    """Returns the key representing |fuzzer| and |benchmark|."""
    return fuzzer + ' ' + benchmark
def get_david_education_variable(x):
    """
    Code education the way David did, for the purpose of replicating his
    results.
    """
    # gen byte educat = 1 if v00edcv==0 | v00edcv==1
    # replace educat = 2 if v00edcv==2
    # replace educat = 3 if v00edcv>=3 & v00edcv<=5
    # label define educat 1 "<=HS" 2 "Some college" 3 "College grad"
    mapping = {'0: Less than high school graduate': '1:<=HS',
               '1: High school graduate': '1:<=HS',
               '2: Some college': '2:Some college',
               '3: College graduate': '3:College grad',
               '4: Some graduate school': '3:College grad',
               '5: Graduate degree': '3:College grad'}
    return mapping[x]
def msec2time(msec):
    """Convert milliseconds to human readable time strings"""
    if msec >= 0:
        hours = int(msec / 3600000)
        remainder = msec % 3600000
        minutes = int(remainder / 60000)
        remainder = remainder % 60000
        secs = remainder / 1000
        return "%02d:%02d:%05.2f" % (hours, minutes, secs)
    else:
        return "NONE"
def convert_time(time):
    """
    This function converts time given as an integer number of seconds to the
    hh:mm:ss format

    Returns a string | Ex: 01:32:34
    """
    time_list = [60 * 60, 60, 1]
    output_list = []
    for i in time_list:
        amount, remainder = divmod(time, i)
        if len(str(amount)) < 2:
            output_list.append(f'0{str(amount)}')
        else:
            output_list.append(str(amount))
        time = remainder
    return ':'.join(output_list)
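# Quick check (assumed example): 5554 seconds is 1 hour, 32 minutes, 34 seconds.
assert convert_time(5554) == '01:32:34'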
def polariser_trans(cells1, cells2):
    """Transmission of polariser cell."""
    try:
        result = cells2[2] / cells1[2] * 100
    except ZeroDivisionError:
        result = float('Inf')
    return result
def access(tree, seq):
    """Return tree[seq[0]][seq[1]][...] with the side-effect of creating all
    nodes in the path."""
    if len(seq) == 0:
        return None
    if len(seq) == 1:
        return tree[seq[0]]
    return access(tree[seq[0]], seq[1:])
def find_cep(cep, re_format):
    """
    :param cep: number of the CEP.
    :param re_format: CEP return format (json|xml|PIPED)
    :info: https://viacep.com.br/
    """
    path = f"{cep}/{re_format}"
    return path