def toVTKString(text):
    """This method is deprecated. It converted a unicode string into a VTK
    string, but since VTK now assumes that all strings are UTF-8 and all
    strings in Slicer are UTF-8, too, conversion is no longer necessary.
    The method is only kept for backward compatibility and will be removed
    in the future.
    """
    import logging
    logging.warning("toVTKString is deprecated! Conversion is no longer necessary.")
    return text

def get_reducer(article: str, num_reducers: int) -> int:
    """We map an article title to a reducer. This is done via hashing."""
    return (sum(ord(s) for s in article) + len(article)) % num_reducers

def name_to_filename(name: str) -> str:
    """Convert a name to a filename: keep only alphanumerics and spaces,
    then replace spaces with underscores."""
    alnum_name = "".join(c for c in name.strip() if c.isalnum() or c == " ")
    return alnum_name.replace(" ", "_")

def CreateMatrix(rows: list):
    """Create a matrix."""
    assert len(rows) > 0, "matrix must not be empty"
    rl = len(rows[0])
    assert rl > 0, "matrix must not be empty"
    for i in range(1, len(rows)):
        assert rl == len(rows[i]), "all rows must have the same size"
    rowSet = []
    for r in rows:
        rowSet.append(tuple(r))
    return tuple(rowSet)

def __get_version(versions, build_version, is_version):
    """Evaluates the entered build_version and returns it if supported.

    Args:
        versions: A list of dictionaries describing supported build versions.
        build_version: The user-entered build version.
        is_version: Boolean: whether the provided build_version is a version
            or a label.

    Returns:
        The matching build version.

    Raises:
        ValueError: In the event of an unsupported build version or label.
    """
    if is_version:
        # Check if the supplied build matches a version
        if any(v['version'] == int(build_version) for v in versions):
            return build_version
        raise ValueError('Build version not supported')
    # Check if the supplied build version matches a label
    build_version = next(
        (v['version'] for v in versions if v['label'] == str(build_version)),
        None)
    if build_version:
        return build_version
    raise ValueError('Build label not supported')

def getVerticesFaces(faceVertices):
    """Given a list with all the vertex indices for each face, returns, for
    each vertex, which faces are connected to it.

    Args:
        faceVertices (list): list of vertex indices per face

    Returns:
        list: list of face sets connected to each vertex, ordered by vertex index
    """
    # Collect into a dict so we don't need to know the vertex count up front
    vertexFaces = dict()
    for f, fVtx in enumerate(faceVertices):
        for vtx in fVtx:
            vertexFaces.setdefault(vtx, set()).add(f)
    # Convert the dict to a list ordered by vertex index
    result = []
    for idx in sorted(vertexFaces.keys()):
        result.append(vertexFaces[idx])
    return result

def isPalindrome(x):
    """
    :type x: int
    :rtype: bool
    """
    if x < 0:
        return False
    y = str(x)
    l = len(y)
    for i in range(l // 2):
        if y[i] != y[l - i - 1]:
            return False
    return True

def map_dict(fn, d):
    """Takes a dictionary and applies the function to every value."""
    return type(d)(map(lambda kv: (kv[0], fn(kv[1])), d.items()))

def is_image(path):
    """OpenCV supported image formats"""
    extensions = [
        'bmp', 'dib', 'jpeg', 'jpg', 'jpe', 'jp2', 'png', 'webp',
        'pbm', 'pgm', 'ppm', 'pxm', 'pnm', 'pfm', 'sr', 'ras',
        'tiff', 'tif', 'exr', 'hdr', 'pic',
    ]
    return any(path.lower().endswith('.' + i) for i in extensions)

def _flatten(list_of_lists):
    """Flattens the provided list of lists."""
    return [val for sublist in list_of_lists for val in sublist]

def convert(f, x: float) -> float:
    """Returns the value of an expression at x; f may be a string in terms
    of x (evaluated with eval) or a callable."""
    if type(f) is str:
        return eval(f)
    return f(x)

def next_cma(new_value, list_len, old_cma):
    """Calculate the next cumulative moving average.

    'list_len' is the length of the list being averaged before adding
    the new value.
    """
    return (new_value + list_len * old_cma) / (list_len + 1)

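# Usage sketch (added for illustration, not part of the original source):
# applying next_cma repeatedly reproduces the plain mean of a growing list.
values = [3.0, 5.0, 10.0]
cma = 0.0
for n, v in enumerate(values):
    cma = next_cma(v, n, cma)
assert cma == sum(values) / len(values)  # -> 6.0
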
def freq_by_location(d):
    """
    Takes in a dictionary of novel objects mapped to relative frequencies.
    Returns a dictionary with those frequencies binned into lists keyed by
    publication location (e.g. 'United Kingdom', 'United States'); documents
    without a country_publication attribute are skipped.

    :param d: dictionary
    :return: dictionary

    >>> from gender_analysis import document
    >>> from pathlib import Path
    >>> from gender_analysis import common
    >>> from gender_analysis.analysis.gender_frequency import freq_by_location
    >>> novel_metadata = {'author': 'Austen, Jane', 'title': 'Persuasion', 'date': '1818',
    ...                   'country_publication': 'United Kingdom', 'filename': 'austen_persuasion.txt',
    ...                   'filepath': Path(common.TEST_DATA_PATH, 'sample_novels', 'texts', 'austen_persuasion.txt')}
    >>> austen = document.Document(novel_metadata)
    >>> novel_metadata2 = {'author': 'Hawthorne, Nathaniel', 'title': 'Scarlet Letter', 'date': '1900',
    ...                    'country_publication': 'United States', 'filename': 'hawthorne_scarlet.txt',
    ...                    'filepath': Path(common.TEST_DATA_PATH, 'sample_novels', 'texts', 'hawthorne_scarlet.txt')}
    >>> scarlet = document.Document(novel_metadata2)
    >>> d = {scarlet: 0.5, austen: 0.3}
    >>> freq_by_location(d)
    {'United States': [0.5], 'United Kingdom': [0.3]}
    """
    data = {}
    for k, v in d.items():
        location = getattr(k, 'country_publication', None)
        if location is None:
            continue
        if location not in data:
            data[location] = []
        data[location].append(v)
    return data

def _get_name_info(name_index, name_list):
    """Helper to get optional details about named references.

    Returns the dereferenced name as both value and repr if the name list
    is defined. Otherwise returns the name index and its repr().
    """
    argval = name_index
    if name_list is not None:
        argval = name_list[name_index]
        argrepr = argval
    else:
        argrepr = repr(argval)
    return argval, argrepr

def article_xml_from_filename_map(filenames):
    """Given a list of file names, return the article xml file name."""
    for file_name in filenames:
        if file_name.endswith(".xml"):
            return file_name
    return None

def compare_lists(llist1, llist2):
    """Compares two singly linked lists to determine if they are equal."""
    # If one list is empty and the other is not, they are not equal.
    if not llist1 and llist2:
        return 0
    if not llist2 and llist1:
        return 0
    # Start from the head of each list and walk both in lockstep.
    node1 = llist1
    node2 = llist2
    while node1 and node2:
        # If the data in each node is not equal, the lists are not equal.
        if node1.data != node2.data:
            return 0
        # If one list has a next node and the other does not, the lists
        # have different lengths and are not equal.
        if node1.next and not node2.next:
            return 0
        if node2.next and not node1.next:
            return 0
        node1 = node1.next
        node2 = node2.next
    # Return 1 denoting they are equal.
    return 1

def check_auth(username, password):
    """This function is called to check if a username / password
    combination is valid.
    """
    return username == 'menrva' and password == 'menrva'

def parse_date(d):
    """Parse date string to (yyyy, MM, dd)

    :param d: the date string to parse
    :returns: parsed date as tuple or None on error
    """
    date_fields = d.split('-')
    if len(date_fields) != 3:
        return None
    try:
        return (int(date_fields[0]), int(date_fields[1]), int(date_fields[2]))
    except ValueError:
        return None

def d_theta_inv(y, alpha):
    """
    (theta')^(-1) (y) = alpha * y / (1 - |y|)

    Alternatives:
    In Baus et al 2013, b(beta) = (theta')^(-1) (beta*eta) [eq 12]
    In Nikolova et al 2013, b(y) = (theta')^(-1) (y) [eq 12]
    In Nikolova et al 2014, xi(t) = (theta')^(-1) (t) [eq 4]

    Baus et al 2013, table 1, Theta_2 with b(beta) given in table 2.
    Nikolova et al 2014, table 1, theta_2.
    """
    assert -1 < y < 1 and alpha > 0
    return alpha * y / (1 - abs(y))

def dict_list_to_bins(dict_list: list, bin_key: str, comparison_key=None) -> dict:
    """
    A dictionary binning function which reduces a set of data to a set of bins.

    :param dict_list: a list of dictionaries
    :param bin_key: a key for binning
    :param comparison_key: a key for counting
    :return: a dictionary
    """
    dict_bins = {}
    for item in dict_list:
        bin_val = item[bin_key]
        # Membership test rather than truthiness, so bins whose running
        # total happens to be 0 are not re-seeded.
        if bin_val in dict_bins:
            if comparison_key:
                dict_bins[bin_val] += float(item[comparison_key])
            else:
                dict_bins[bin_val] += 1
        else:
            if comparison_key:
                dict_bins[bin_val] = float(item[comparison_key])
            else:
                dict_bins[bin_val] = 1
    return dict_bins

def _is_string_like(obj):
    """Check whether obj behaves like a string."""
    try:
        obj + ''
    except (TypeError, ValueError):
        return False
    return True

def gen_data_source_filter(data_sources):
    """Generates a SPARQL FILTER clause aimed at limiting the possible
    values of a ?source variable."""
    filter_clause = ''
    if len(data_sources) > 0:
        filter_clause = 'FILTER ( \n'
        for ds in data_sources:
            filter_clause += ' (str(lcase(?source)) = "' + ds.lower() + '" ) || \n'
        # Drop the trailing '|| ' left after the last alternative
        k = filter_clause.rfind("|| ")
        filter_clause = filter_clause[:k]
        filter_clause += '\n) .'
    return filter_clause

def prep_for_jinja(images):
    """Prepares svg `images` for jinja rendering.

    Parameters
    ----------
    images : list-of-str

    Returns
    -------
    outputs : list-of-tuple
    """
    outputs = []
    for im in images:
        with open(im, 'r') as src:
            content = src.read()
        outputs.append((im, content))
    return outputs

def chain(_input, funcs):
    """Execute a recursive function chain on input and return it.

    Side Effects:
        Mutates input, funcs.

    Args:
        _input: Input of any data type to be passed into functions.
        funcs: Ordered list of funcs to be applied to the input.

    Returns:
        Recursive call if any functions remain, else the altered input.
    """
    return chain(funcs.pop(0)(_input), funcs) if funcs else _input

def make_query(specific_table, offset):
    """Generate a query to retrieve data from a database.

    :param specific_table: Name of table to retrieve data from.
    :param offset: Optional offset to start from.
    """
    query = 'select DISTINCT * from `{}`'.format(specific_table)
    if isinstance(offset, int):
        # workaround for MySQL requiring a limit when using offset
        unlimited = "18446744073709551615"
        query += " LIMIT {}, {};".format(str(offset), unlimited)
    return query

def make_present_participles(verbs):
    """Make the list of verbs into present participles

    E.g.:
        empower -> empowering
        drive -> driving
    """
    res = []
    for verb in verbs:
        parts = verb.split()
        if parts[0].endswith("e"):
            parts[0] = parts[0][:-1] + "ing"
        else:
            parts[0] = parts[0] + "ing"
        res.append(" ".join(parts))
    return res

def switchAndTranslate(gm1, gm2, v1, v2, wrapModulo):
    """
    Transforms [v1, v2] and returns it such that it is in the same order
    and has the same middle interval as [gm1, gm2].
    """
    assert v1 < v2
    # keep the same middle of the interval
    if wrapModulo:
        gmMiddle = float(gm1 + gm2) / 2.0
        half = float(v2 - v1) / 2.0
        v1 = gmMiddle - half
        v2 = gmMiddle + half
    # if gm margins are increasing and dataset bounds are decreasing,
    # or the other way around, switch them
    if (gm1 - gm2) * (v1 - v2) < 0:
        v1, v2 = v2, v1
    return [v1, v2]

def flatten_lists(*lists):
    """Flatten several lists into one list.

    Examples
    --------
    >>> flatten_lists([0, 1, 2], [3, 4, 5])
    [0, 1, 2, 3, 4, 5]

    Parameters
    ----------
    lists : an arbitrary number of iterable collections
        The type of the collections is not limited to lists; they can also
        be sets, tuples, etc.

    Returns
    -------
    flat_list : a flattened list
    """
    return [x for sublist in lists for x in sublist]

def mean(my_list):
    """Return the mean of a list.

    Parameters
    ----------
    my_list : list
        of numbers to take average of

    Returns
    -------
    average : flt
        The mean of the list

    Examples
    --------
    >>> mean([1.0, 2.0, 3.0])
    2.0
    """
    if not isinstance(my_list, list):
        raise TypeError("Mean: {} is not a list".format(my_list))
    if len(my_list) == 0:
        raise ZeroDivisionError("Mean: the input list contains no elements")
    average = sum(my_list) / len(my_list)
    return average

def data_str(v):
    """Get a string representation of a data value: v itself if not list,
    dict, or tuple; len(v) otherwise.

    Args:
        v (Any): value to print

    Returns:
        str: string for v
    """
    return v if not isinstance(v, (list, dict, tuple)) else "{} items".format(len(v))

def _reconstruct_path(came_from, start, goal):
    """Construct the path from the start edge to the goal edge."""
    current = goal
    path = []
    while current != start:
        path.append(current)
        current = came_from.get(current)
    path.append(start)
    path.reverse()
    return path

def map_range_constrained_int(x, in_min, in_max, out_min, out_max):
    """Map value from one range to another - constrain input range."""
    if x < in_min:
        x = in_min
    elif x > in_max:
        x = in_max
    return int(
        (x - in_min) * (out_max - out_min) // (in_max - in_min) + out_min
    )

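# Illustrative check (values chosen for this sketch): a 10-bit ADC reading
# mapped onto 0-255, with out-of-range inputs clamped to the input range.
assert map_range_constrained_int(512, 0, 1023, 0, 255) == 127
assert map_range_constrained_int(2000, 0, 1023, 0, 255) == 255
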
def CombineIntervallsLarge(intervalls):
    """Combine intervals. Overlapping intervals are merged into larger
    intervals.
    """
    if not intervalls:
        return []
    new_intervalls = []
    intervalls.sort()
    first_from, last_to = intervalls[0]
    for this_from, this_to in intervalls[1:]:
        if this_from > last_to:
            # No overlap: close the current interval and start a new one
            new_intervalls.append((first_from, last_to))
            first_from, last_to = this_from, this_to
            continue
        if last_to < this_to:
            last_to = this_to
    new_intervalls.append((first_from, last_to))
    return new_intervalls

def mergeDictsItems(final, aux):
    """
    Adds elements from the aux dictionary which are not in the final
    dictionary to it.

    @param {Object.<*>} final Final dictionary.
    @param {Object.<*>} aux Auxiliary dictionary.
    @return {Object.<*>} Merged dictionary.
    """
    for key in aux.keys():
        if key not in final:
            final[key] = aux[key]
        elif type(final[key]) == dict:
            final[key] = mergeDictsItems(final[key], aux[key])
    return final

def has_no_e(word, x):
    """Check for the presence of the given letter in the given word.

    Return True if none, else return False.

    word = word to check
    x = letter to check for
    """
    for char in word:
        if char == x:
            return False
    return True

def split_scopes(scopes):
    """Splits "foo, bar, baz" into ["foo", "bar", "baz"]."""
    return list(map(lambda s: s.strip(), scopes.split(', ')))

def recursive_update(original_dict: dict, new_dict: dict) -> dict:
    """Recursively update original_dict with new_dict."""
    for new_key, new_value in new_dict.items():
        if isinstance(new_value, dict):
            original_dict[new_key] = recursive_update(
                original_dict.get(new_key, {}), new_value
            )
        else:
            original_dict[new_key] = new_value
    return original_dict

def dmka(D, Ds):
    """Multi-key value assign.

    Parameters
    ----------
    D : dict
        main dict.
    Ds : dict
        sub dict whose items are assigned into D.
    """
    for k, v in Ds.items():
        D[k] = v
    return D

def red_cube_path(blue_path):
    """Return the corresponding red cube path matched to a blue cube path."""
    start = blue_path.rfind('blue')
    red_path = blue_path[:start] + 'red' + blue_path[start + 4:]
    return red_path

def DetermineType(value):
    """Determines the type of value, returning a "full path" string.

    For example:
        DetermineType(5) -> __builtin__.int
        DetermineType(Foo()) -> com.google.bar.Foo

    Args:
        value: Any value, the value is irrelevant as only the type metadata
            is checked

    Returns:
        Type path string. None if type cannot be determined.
    """
    object_type = type(value)
    if not hasattr(object_type, '__name__'):
        return None
    type_string = getattr(object_type, '__module__', '')
    if type_string:
        type_string += '.'
    type_string += object_type.__name__
    return type_string

def ex_obj_to_inq(objType):
    """
    Return the ex_inquiry string corresponding to the specified objType.
    This can be passed to the ex_inquiry_map() function to get the number
    of objects of the specified objType.
    """
    entity_dictionary = {
        'EX_ASSEMBLY': 'EX_INQ_ASSEMBLY',
        'EX_BLOB': 'EX_INQ_BLOB',
        'EX_EDGE_BLOCK': 'EX_INQ_EDGE_BLK',
        'EX_FACE_BLOCK': 'EX_INQ_FACE_BLK',
        'EX_ELEM_BLOCK': 'EX_INQ_ELEM_BLK',
        'EX_NODE_SET': 'EX_INQ_NODE_SETS',
        'EX_EDGE_SET': 'EX_INQ_EDGE_SETS',
        'EX_FACE_SET': 'EX_INQ_FACE_SETS',
        'EX_ELEM_SET': 'EX_INQ_ELEM_SETS',
        'EX_SIDE_SET': 'EX_INQ_SIDE_SETS',
        'EX_NODE_MAP': 'EX_INQ_NODES',
        'EX_EDGE_MAP': 'EX_INQ_EDGE',
        'EX_FACE_MAP': 'EX_INQ_FACE',
        'EX_ELEM_MAP': 'EX_INQ_ELEM',
    }
    return entity_dictionary.get(objType, -1)

def _comma_separator(i, length):
    """A separator for an entirely comma-separated list given the current
    item index `i` and total list length `length`. `None` if there should
    be no separator (last item).
    """
    if length == 1:
        return None
    elif i != length - 1:
        return ", "
    else:
        return None

def pixel_scale_from_instrument(instrument):
    """
    Returns the pixel scale from an instrument type based on real observations.

    These options are representative of VRO, Euclid, HST, over-sampled HST
    and Adaptive Optics imaging.

    Parameters
    ----------
    instrument : str
        A string giving the resolution of the desired instrument
        (vro | euclid | hst | hst_up | ao).
    """
    if instrument == "vro":
        return (0.2, 0.2)
    elif instrument == "euclid":
        return (0.1, 0.1)
    elif instrument == "hst":
        return (0.05, 0.05)
    elif instrument == "hst_up":
        return (0.03, 0.03)
    elif instrument == "ao":
        return (0.01, 0.01)
    else:
        raise ValueError("An invalid instrument was entered - ", instrument)

def stop_if_mostly_diverging(errdata):
    """This is an example stop condition that asks Relay to quit if the
    error difference between consecutive samples is increasing more than
    half of the time.

    It's quite sensitive and designed for the demo, so you probably
    shouldn't use this in a production setting.
    """
    n_increases = sum([
        abs(y) - abs(x) > 0
        for x, y in zip(errdata, errdata[1:])])
    if len(errdata) * 0.5 < n_increases:
        # most of the time, the next sample is worse than the previous sample
        # relay is not healthy
        return 0
    else:
        # most of the time, the next sample is better than the previous sample
        # relay is in a healthy state
        return -1

def sort_by_index_with_for_loop(index, array):
    """
    Sort the array with the given index and return a list of
    (<element>, <original index>, <new index>) tuples.

    Parameters:
        index: List of length n that contains integers 0 to n-1.
        array: List of length n.

    Returns:
        A list of length n containing (<element>, <original index>,
        <new index>) tuples or an empty list if n is 0.

    >>> sort_by_index_with_for_loop([0, 4, 2, 3, 1], ["zero", "four", "two", \
    "three", "one"])
    [('zero', 0, 0), ('one', 4, 1), ('two', 2, 2), ('three', 3, 3), \
    ('four', 1, 4)]
    """
    sorted_list = []
    for i in range(0, len(array)):
        sorted_list.append((array[index[i]], index[i], i))
    return sorted_list

def is_void(data):
    """Detect nulls in other types."""
    if data in ('', 0, []):
        return 'is empty'
    else:
        return 'have data'

def lande_g(L, S, J):
    """Calculates the Lande g factor given L, S and J.

    .. math::
        g = 1 + \\frac{J(J+1) + S(S+1) - L(L+1)}{2J(J+1)}

    Reference: Sobel'man, I.I. Introduction to the Theory of Atomic
    Spectra. 1972. pp. 277

    Args:
        L (float): L number
        S (float): S number
        J (float): J number

    Returns:
        float: Lande g factor
    """
    return 1. + (J*(J+1.) + S*(S+1.) - L*(L+1.)) / (2.*J*(J+1.))

def bbox2center(bbox):
    """Return the integer center point [x, y] of a bounding box given as
    [x1, y1, x2, y2]."""
    return [int((bbox[0] + bbox[2]) / 2), int((bbox[1] + bbox[3]) / 2)]

def asbool(s):
    """Convert a string to its boolean value."""
    if s.lower() == 'true':
        return True
    elif s.lower() == 'false':
        return False
    elif s.isdigit():
        return bool(int(s))
    else:
        raise ValueError('must be integer or boolean: %r' % s)

def is_int(text: str) -> bool:
    """Does the given type represent an int?"""
    return text == "i64"

def parse_chunks(chunks):
    """Parse chunks and extract information on individual streams."""
    streams = []
    for chunk in chunks:
        if chunk["tag"] == 2:  # stream header chunk
            streams.append(dict(stream_id=chunk["stream_id"],
                                name=chunk.get("name"),  # optional
                                type=chunk.get("type"),  # optional
                                source_id=chunk.get("source_id"),  # optional
                                created_at=chunk.get("created_at"),  # optional
                                uid=chunk.get("uid"),  # optional
                                session_id=chunk.get("session_id"),  # optional
                                hostname=chunk.get("hostname"),  # optional
                                channel_count=int(chunk["channel_count"]),
                                channel_format=chunk["channel_format"],
                                nominal_srate=float(chunk["nominal_srate"])))
    return streams

def intWithUnit(s):
    """Convert string to int, allowing unit suffixes.

    This is used as 'type' for argparse.ArgumentParser.
    """
    if len(s) == 0:
        return int(s)
    index = "BkMGTPE".find(s[-1])
    if index >= 0:
        return int(float(s[:-1]) * (1 << (index * 10)))
    else:
        return int(s)

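# Illustrative calls (the suffix table "BkMGTPE" uses powers of 1024):
assert intWithUnit("2k") == 2048
assert intWithUnit("1M") == 1 << 20
assert intWithUnit("123") == 123
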
def parse_version(version):
    """Convert a version string into a tuple of integers suitable for
    doing comparisons on."""
    return tuple(int(x) for x in version.split('.'))

def encode_ignore(text):
    """
    Decode bytes into a str with utf-8, ignoring errors.
    See also tostring.
    """
    if not isinstance(text, str):
        text = text.decode('utf-8', 'ignore')
    return text

def cmp(x, y):
    """
    Replacement for the built-in function cmp that was removed in Python 3.

    Compare the two objects x and y and return an integer according to the
    outcome. The return value is negative if x < y, zero if x == y and
    strictly positive if x > y.
    """
    return (x > y) - (x < y)

def representable(val: int, bits: int, signed: bool = True, shift: int = 0) -> bool:
    """Checks if the value is representable with the given number of bits.

    Will return True if it is possible to encode the value in 'val' with the
    number of bits given in 'bits'. Additionally, it can be specified if a
    sign has to be encoded or if the value can be right-shifted before
    encoding. If encoding the value is not possible, the function will
    return False.

    Parameters:
        val (int): The value to be encoded.
        bits (int): The amount of bits available for encoding.
        signed (bool): Does the encoding include a sign or can we encode
            the unsigned value.
        shift (int): By which amount is the value supposed to be
            right-shifted before encoding.

    Returns:
        bool: Can the value be encoded with the given bits and parameters.
    """
    if val % (1 << shift) != 0:
        return False
    val >>= shift
    if signed:
        return -2**(bits - 1) <= val < 2**(bits - 1)
    else:
        return 0 <= val < 2**bits

def compute_input_and_target_lengths(inputs_length, noise_density, mean_noise_span_length):
    """This function is a copy of `random_spans_helper
    <https://github.com/google-research/text-to-text-transfer-transformer/blob/84f8bcc14b5f2c03de51bd3587609ba8f6bbd1cd/t5/data/preprocessors.py#L2466>`__ .

    Training parameters to avoid padding with random_spans_noise_mask.
    When training a model with random_spans_noise_mask, we would like to set
    the other training hyperparameters in a way that avoids padding. This
    function helps us compute these hyperparameters. We assume that each
    noise span in the input is replaced by extra_tokens_per_span_inputs
    sentinel tokens, and each non-noise span in the targets is replaced by
    extra_tokens_per_span_targets sentinel tokens. This function tells us
    the required number of tokens in the raw example (for split_tokens())
    as well as the length of the encoded targets. Note that this function
    assumes the inputs and targets will have EOS appended and includes that
    in the reported length.

    Args:
        inputs_length: an integer - desired length of the tokenized inputs sequence
        noise_density: a float
        mean_noise_span_length: a float

    Returns:
        tokens_length: length of original text in tokens
        targets_length: an integer - length in tokens of encoded targets sequence
    """
    def _tokens_length_to_inputs_length_targets_length(tokens_length):
        num_noise_tokens = int(round(tokens_length * noise_density))
        num_nonnoise_tokens = tokens_length - num_noise_tokens
        num_noise_spans = int(round(num_noise_tokens / mean_noise_span_length))
        # inputs contain all nonnoise tokens, sentinels for all noise spans
        # and one EOS token.
        _input_length = num_nonnoise_tokens + num_noise_spans + 1
        _output_length = num_noise_tokens + num_noise_spans + 1
        return _input_length, _output_length

    tokens_length = inputs_length
    while _tokens_length_to_inputs_length_targets_length(tokens_length + 1)[0] <= inputs_length:
        tokens_length += 1
    inputs_length, targets_length = _tokens_length_to_inputs_length_targets_length(tokens_length)
    # minor hack to get the targets length to be equal to inputs length
    # which is more likely to have been set to a nice round number.
    if noise_density == 0.5 and targets_length > inputs_length:
        tokens_length -= 1
        targets_length -= 1
    return tokens_length, targets_length

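# Worked example (numbers computed from the function above; they match the
# commonly used T5 defaults): a desired input length of 512 with 15% noise
# and mean span length 3 requires 568 raw tokens and yields 114 target tokens.
assert compute_input_and_target_lengths(512, 0.15, 3.0) == (568, 114)
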
def create_headers(bearer_token):
    """Returns an authorization header.

    Args:
        bearer_token: Bearer token
    """
    headers = {"Authorization": "Bearer {}".format(bearer_token)}
    return headers

def _is_hex_str(value, chars=40):
    # type: (str, int) -> bool
    """Check if a string is a hex-only string of exactly :param:`chars`
    characters length. This is useful to verify that a string contains a
    valid SHA, MD5 or UUID-like value.

    >>> _is_hex_str('0f1128046248f83dc9b9ab187e16fad0ff596128f1524d05a9a77c4ad932f10a', 64)
    True
    >>> _is_hex_str('0f1128046248f83dc9b9ab187e16fad0ff596128f1524d05a9a77c4ad932f10a', 32)
    False
    >>> _is_hex_str('0f1128046248f83dc9b9ab187e1xfad0ff596128f1524d05a9a77c4ad932f10a', 64)
    False
    >>> _is_hex_str('ef42bab1191da272f13935f78c401e3de0c11afb')
    True
    >>> _is_hex_str('ef42bab1191da272f13935f78c401e3de0c11afb'.upper())
    True
    >>> _is_hex_str('ef42bab1191da272f13935f78c401e3de0c11afb', 64)
    False
    >>> _is_hex_str('ef42bab1191da272f13935.78c401e3de0c11afb')
    False
    """
    if len(value) != chars:
        return False
    try:
        int(value, 16)
    except ValueError:
        return False
    return True

def laglongToCoord(theta: float, phi: float):
    """Convert latitude and longitude to an xyz coordinate."""
    from math import cos, sin, pi
    theta, phi = theta / 180 * pi, phi / 180 * pi
    return sin(theta) * cos(phi), sin(phi), cos(theta) * cos(phi)

def subcloud_status_db_model_to_dict(subcloud_status):
    """Convert subcloud status db model to dictionary."""
    if subcloud_status:
        result = {"subcloud_id": subcloud_status.subcloud_id,
                  "sync_status": subcloud_status.sync_status}
    else:
        result = {"subcloud_id": 0,
                  "sync_status": "unknown"}
    return result

def count_frequency(word_list):
    """Counts the frequency of each word and returns a dictionary mapping
    each word to its frequency."""
    D = {}
    for new_word in word_list:
        if new_word in D:
            D[new_word] = D[new_word] + 1
        else:
            D[new_word] = 1
    return D

def getBorders(faces):
    """
    Arguments:
        faces ([[vIdx, ...], ...]): A face representation

    Returns:
        set: A set of vertex indexes along the border of the mesh
    """
    edgePairs = set()
    for face in faces:
        for f in range(len(face)):
            # face[f-1] wraps around to the last vertex when f == 0
            edgePairs.add((face[f], face[f - 1]))
    borders = set()
    for ep in edgePairs:
        # An edge whose reverse never appears belongs to a single face,
        # i.e. it lies on the border
        if (ep[1], ep[0]) not in edgePairs:
            borders.update(ep)
    return borders

def _tags_from_list(tags):
    """
    Returns a list of tags from a tag list.

    Each tag in the list may be a string of comma-separated tags, with
    empty strings ignored.
    """
    if tags is None:
        return []
    result = []
    for tag in tags:
        result.extend(t for t in tag.split(",") if t != "")
    return result

def reorganize_data(texts):
    """
    Reorganize data to contain tuples of all signs combined and all trans
    combined.

    :param texts: sentences in format of tuples of (sign, tran)
    :return: data reorganized
    """
    data = []
    for sentence in texts:
        signs = []
        trans = []
        for sign, tran in sentence:
            signs.append(sign)
            trans.append(tran)
        data.append((signs, trans))
    return data

def get_scanner(time, height):
    """Returns the position of the scanner in a layer with a given depth
    after a specified number of picoseconds has passed."""
    # Use a triangle wave to determine the position
    offset = time % ((height - 1) * 2)
    if offset > height - 1:
        position = 2 * (height - 1) - offset
    else:
        position = offset
    return position

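# Illustrative trace (height 4 gives a triangle wave with period 6):
# time:     0 1 2 3 4 5 6
# position: 0 1 2 3 2 1 0
assert [get_scanner(t, 4) for t in range(7)] == [0, 1, 2, 3, 2, 1, 0]
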
def raise_skip_event(events, event_name, *event_args):
    """Execute all functions defined for an event of a parser.

    If a function returns ``False``, this function will return ``True``,
    meaning that an event is trying to skip the associated function.

    Args:
        events (dict): Dictionary with all events defined.
        event_name (str): Name of the event whose functions will be executed.
        *event_args: Arguments propagated to the event functions.

    Returns:
        bool: ``True`` if an event function returns ``False``, ``False``
        otherwise.
    """
    try:
        pre_events = events[event_name]
    except KeyError:
        return False
    skip = False
    for event in pre_events:
        if event(*event_args) is False:
            skip = True
    return skip

def get_thresholds(threshs_d: dict) -> tuple:
    """
    Parameters
    ----------
    threshs_d : dict
        Thresholds configs

    Returns
    -------
    names : list
        Names for the threshold
    thresh_sam : int
        Samples threshold
    thresh_feat : int
        Features threshold
    """
    names = []
    if 'names' in threshs_d:
        names = threshs_d['names']
    thresh_sam = 0
    if 'samples' in threshs_d:
        thresh_sam = threshs_d['samples']
    thresh_feat = 0
    if 'features' in threshs_d:
        thresh_feat = threshs_d['features']
    return names, thresh_sam, thresh_feat

def is_complex(setting_value):
    """Returns True if the setting is 'complex'."""
    return '!!' in setting_value

def countBits(value):
    """
    Count number of bits needed to store a (positive) integer number.

    >>> countBits(0)
    1
    >>> countBits(1000)
    10
    >>> countBits(44100)
    16
    >>> countBits(18446744073709551615)
    64
    """
    assert 0 <= value
    count = 1
    bits = 1
    # Grow the step size geometrically while the value still needs more bits
    while (1 << bits) <= value:
        count += bits
        value >>= bits
        bits <<= 1
    # Shrink the step size again to count the remaining bits
    while 2 <= value:
        if bits != 1:
            bits >>= 1
        else:
            bits -= 1
        while (1 << bits) <= value:
            count += bits
            value >>= bits
    return count

def strides_from_shape(ndim, shape, itemsize, layout):
    """Calculate strides of a contiguous array. Layout is 'C' or 'F'
    (Fortran)."""
    if ndim == 0:
        return ()
    if layout == 'C':
        strides = list(shape[1:]) + [itemsize]
        for i in range(ndim - 2, -1, -1):
            strides[i] *= strides[i + 1]
    else:
        strides = [itemsize] + list(shape[:-1])
        for i in range(1, ndim):
            strides[i] *= strides[i - 1]
    return strides

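# Sanity check (illustrative): strides of a 3x4 array of 8-byte items,
# row-major ('C') vs. column-major ('F').
assert strides_from_shape(2, (3, 4), 8, 'C') == [32, 8]
assert strides_from_shape(2, (3, 4), 8, 'F') == [8, 24]
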
def only_given_keys(dictionary, keys):
    """
    Outputs a dictionary with the key:values of an original dictionary,
    but only with items whose keys are specified as a parameter.
    """
    res = dict(dictionary)
    for key in dictionary:
        if key not in keys:
            del res[key]
    return res

def stripExtra(name):
    """This function removes a parenthesized suffix from a string.

    *Can later be implemented for other uses like removing other characters
    from a string.

    Args:
        name (string): character's name

    Returns:
        string: character's name without the parenthesized part
    """
    startIndexPer = name.find('(')
    start = 0
    if startIndexPer != -1:
        start = startIndexPer
    if start == 0:
        return name
    # Drop the '(' suffix along with the space preceding it
    return name[0:start - 1]

def int_nthstr(n):
    """
    Formats an ordinal. Doesn't handle negative numbers.

    >>> int_nthstr(1)
    '1st'
    >>> int_nthstr(0)
    '0th'
    >>> [int_nthstr(x) for x in [2, 3, 4, 5, 10, 11, 12, 13, 14, 15]]
    ['2nd', '3rd', '4th', '5th', '10th', '11th', '12th', '13th', '14th', '15th']
    >>> [int_nthstr(x) for x in [91, 92, 93, 94, 99, 100, 101, 102]]
    ['91st', '92nd', '93rd', '94th', '99th', '100th', '101st', '102nd']
    >>> [int_nthstr(x) for x in [111, 112, 113, 114, 115]]
    ['111th', '112th', '113th', '114th', '115th']
    """
    assert n >= 0
    if n % 100 in [11, 12, 13]:
        return '%sth' % n
    return {1: '%sst', 2: '%snd', 3: '%srd'}.get(n % 10, '%sth') % n

def armstrong_number(number):
    """Check if number is an Armstrong number, i.e. equal to the sum of its
    digits each raised to the power of the number of digits."""
    n_digits = len(str(number))
    calc = number
    sum_ = 0
    while calc > 0:
        dig = calc % 10
        sum_ += dig ** n_digits
        calc //= 10
    return number == sum_

def bisect(sequence, value, key=None, side='left'):
    """
    Uses binary search to find the index where, if the given value were
    inserted, the order of items would be preserved. The collection of
    items is assumed to be sorted in ascending order.

    Args:
        sequence: list or tuple
            Collection of items ordered by searched value.
        value: int or float
            Value to be searched.
        key: callable or None
            Function to be used to get specific value from item.
        side: str
            If 'left' is used, index of the first suitable location is
            returned. If 'right' is used, the last such index is returned.

    Returns:
        int
            Index of the exact or next higher item.
    """
    has_key = key is not None
    lo = 0
    hi = len(sequence)
    if side == 'left':
        while lo < hi:
            mid = (lo + hi) // 2
            if value <= (key(sequence[mid]) if has_key else sequence[mid]):
                hi = mid
            else:
                lo = mid + 1
    elif side == 'right':
        while lo < hi:
            mid = (lo + hi) // 2
            if value < (key(sequence[mid]) if has_key else sequence[mid]):
                hi = mid
            else:
                lo = mid + 1
    else:
        message = "Unknown side specified! -> '%s'" % side
        raise ValueError(message)
    return lo

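# Illustrative usage mirroring the stdlib bisect_left/bisect_right split:
data = [1, 2, 4, 4, 5]
assert bisect(data, 4, side='left') == 2
assert bisect(data, 4, side='right') == 4
assert bisect(data, 3) == 2  # insertion point for a value not present
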
def isDict(val):
    """Returns true if the passed value is a python dictionary type,
    otherwise false.

    **Parameters:**
    * val - value to test

    **Returns:**
    True if the passed value is a dictionary, otherwise false.
    """
    return isinstance(val, dict)

def unpack(cursor):
    """Returns data in a database cursor object as a list."""
    return [data for data in cursor]

def format_regex_stack(regex_stack):
    """Format a list or tuple of regex url patterns into a single path."""
    import re
    formatted = ''.join(regex_stack)
    # Collapse groups down to just their named captures, e.g. (<pk>...) -> <pk>
    formatted = re.sub(r'\([^<]*(<[^>]*>).*?\)', r'\1', formatted)
    formatted = formatted.replace('^$', '/')
    formatted = formatted.replace('^', '/')
    formatted = formatted.replace('?$', '/')
    formatted = formatted.replace('$', '/')
    formatted = formatted.replace('//', '/')
    return formatted

def to_param(m):
    """Converts testkit parameter format to driver (python) parameter."""
    value = m["data"]["value"]
    name = m["name"]
    if name == "CypherNull":
        return None
    if name == "CypherString":
        return str(value)
    if name == "CypherBool":
        return bool(value)
    if name == "CypherInt":
        return int(value)
    if name == "CypherFloat":
        return float(value)
    if name == "CypherBytes":
        return bytearray([int(byte, 16) for byte in value.split()])
    if name == "CypherList":
        return [to_param(v) for v in value]
    if name == "CypherMap":
        return {k: to_param(value[k]) for k in value}
    raise Exception("Unknown param type " + name)

def pack_params(params):
    """Returns a tuple to be hashed, converting functions into their names."""
    lp = []
    for x in params:
        if callable(x):
            lp.append(x.__name__)
        else:
            lp.append(x)
    return tuple(lp)

def _strip_unbalanced_punctuation(text, is_open_char, is_close_char):
    """Remove unbalanced punctuation (e.g. parentheses or quotes) from text.

    Removes each opening punctuation character for which it can't find a
    corresponding closing character, and vice versa. It can only handle one
    type of punctuation (e.g. it could strip quotes or parentheses but not
    both). It takes functions (is_open_char, is_close_char), instead of the
    characters themselves, so that we can determine from nearby characters
    whether a straight quote is an opening or closing quote.

    Args:
        text (string): the text to fix
        is_open_char: a function that accepts the text and an index,
            and returns true if the character at that index is
            an opening punctuation mark.
        is_close_char: same as is_open_char for closing punctuation mark.

    Returns:
        The text with unmatched punctuation removed.
    """
    # lists of unmatched opening and closing characters
    opening_chars = []
    unmatched_closing_chars = []
    for idx, c in enumerate(text):
        if is_open_char(text, idx):
            opening_chars.append(idx)
        elif is_close_char(text, idx):
            if opening_chars:
                # this matches a character we found earlier
                opening_chars.pop()
            else:
                # this doesn't match any opening character
                unmatched_closing_chars.append(idx)
    char_indices = [i for (i, _) in enumerate(text)
                    if not (i in opening_chars or i in unmatched_closing_chars)]
    stripped_text = "".join([text[i] for i in char_indices])
    return stripped_text

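# Illustrative call with hypothetical predicates for parentheses: the
# unmatched trailing ')' is removed while the balanced pair survives.
is_open = lambda text, idx: text[idx] == '('
is_close = lambda text, idx: text[idx] == ')'
assert _strip_unbalanced_punctuation("a (b) c)", is_open, is_close) == "a (b) c"
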
def funcname(func):
    """Get the name of a function."""
    while hasattr(func, "func"):
        func = func.func
    try:
        return func.__name__
    except Exception:
        return str(func)

def calc_sum(num):
    """Returns the sum of a list."""
    sum_num = 0
    for t in num:
        sum_num = sum_num + t
    return sum_num

def findClosestMultipleAboveThreshold(num: int, threshold: int) -> int:
    """Returns the smallest multiple of num at or above the threshold."""
    while True:
        if threshold % num == 0:
            return threshold
        threshold += 1

def getInitial(name):
    """Get initial from name, e.g. "Jane" >> "J. "

    Parameters
    ----------
    name : str
        Name to retrieve initial from

    Returns
    -------
    str
        Initialised name
    """
    return name[0] + '. '

def get_threshold_multiplier(total_nobs, nob_limits, multiplier_values):
    """
    Find the highest value of i such that total_nobs is greater than
    nob_limits[i] and return multiplier_values[i].

    :param total_nobs: total number of neighbour observations
    :param nob_limits: list containing the limiting numbers of observations
        in ascending order; first element must be zero
    :param multiplier_values: list containing the associated multiplier values.
    :type total_nobs: integer
    :type nob_limits: List[integer]
    :type multiplier_values: List[float]
    :return: the multiplier value
    :rtype: float

    This routine is used by the buddy check. It's a bit niche.
    """
    assert len(nob_limits) == len(multiplier_values), \
        "length of input lists are different"
    assert min(multiplier_values) > 0, \
        "multiplier value less than zero"
    assert min(nob_limits) == 0, \
        "nob_limit of less than zero given"
    assert nob_limits[0] == 0, \
        "lowest nob_limit not equal to zero"
    if len(nob_limits) > 1:
        for i in range(1, len(nob_limits)):
            assert nob_limits[i] > nob_limits[i - 1], \
                "nob_limits not in ascending order"
    multiplier = -1
    if total_nobs == 0:
        multiplier = 4.0
    for i in range(0, len(nob_limits)):
        if total_nobs > nob_limits[i]:
            multiplier = multiplier_values[i]
    assert multiplier > 0, "output multiplier less than or equal to zero"
    return multiplier

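# Illustrative call (limits chosen for this sketch): with limits [0, 5, 15]
# and multipliers [4.0, 3.0, 2.0], 10 observations exceed the 5-limit but
# not the 15-limit, so the middle multiplier is returned.
assert get_threshold_multiplier(10, [0, 5, 15], [4.0, 3.0, 2.0]) == 3.0
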
def get_polygons_neighborhood(polygons):
    """Returns for each polygon a set of intersecting polygons."""
    nb_polygons = len(polygons)
    neighborhoods = [set() for c in range(nb_polygons)]
    for ix1, p1 in enumerate(polygons):
        for ix2 in range(ix1 + 1, nb_polygons):
            p2 = polygons[ix2]
            if p1.intersects(p2):
                neighborhoods[ix1].add(ix2)
                neighborhoods[ix2].add(ix1)
    return neighborhoods

def create_array(data_list):
    """
    Returns all texts from a given soup.

    :param data_list: Soup array with all headlines/conversations embedded
        as text.
    :return: Array of headlines/conversations, retrieved from the soup.
    """
    result_array = []
    for li in data_list:
        if li.text != "":
            # Removes tabs and newlines, and gets text from the HTML
            result_array.append(str(' '.join(li.text.split())))
    return result_array

def _warning_on_one_line(message, category, filename, lineno, file=None, line=None):
    """Formats warning messages to appear on one line."""
    return '%s:%s: %s: %s\n' % (filename, lineno, category.__name__, message)

def write_ready_analogy(list_analogies, description, file):
    """
    Print a ready analogy to a file.

    :param list_analogies: list of lists of 4 strings
    :param description: string describing the analogy category
    :param file: file to print to
    :return: number of printed analogies
    """
    # Helper variable for printing format
    line = "{}\n{}\n{}\n{}\n\n"
    # Get analogy parameters
    num_ops = len(list_analogies)
    # Print to file
    with open(file, "a") as f:
        f.write("\n\n# " + description + ": " + str(num_ops) + "\n\n")
        for i in range(num_ops):
            f.write(
                line.format(
                    list_analogies[i][0],
                    list_analogies[i][1],
                    list_analogies[i][2],
                    list_analogies[i][3],
                )
            )
    # Return number of generated analogies
    return num_ops

def set_recursive(config, recursive):
    """Set the global recursive setting in config."""
    config['recursive'] = recursive
    return True

def year_to_rating(movie_map):
    """Given a dictionary, returns a new dictionary that maps release year
    to a list of IMDb ratings for each of those years.

    Parameter:
        movie_map: a dictionary that maps movieIDs to genre(s), release
            year, IMDb rating, female character count and male character
            count.

    Returns:
        year_to_rating_map: a dictionary that maps release year to a list
            of IMDb ratings for each of those years.
    """
    year_to_rating_map = {}
    movie_id = movie_map.keys()
    for movie in movie_id:
        # Locates release year and IMDb rating for a given movie
        year = movie_map[movie]['release_year']
        rating = movie_map[movie]['imbd_rating']
        # Creates a new mapping from release year to an empty list
        # if the release year is not in the dictionary yet
        if year not in year_to_rating_map:
            year_to_rating_map[year] = []
        # Locates the rating list mapped to the given release year,
        # then appends the rating to it
        rating_list = year_to_rating_map[year]
        rating_list.append(rating)
    return year_to_rating_map

def error_max_retry(num_retries, err_string):
    """Format the max-retry error message."""
    return "Unable to retrieve artifact after {} retries: {}".format(
        num_retries, err_string)

def get_vcs_uri_for_branch(data, branch=None):
    """
    @param data: rosdoc manifest data
    @param branch: source branch type ('devel' or 'release')
    """
    if branch:
        branch_data = data.get('rosinstalls', None)
        if branch_data:
            ri_entry = branch_data.get(branch, None)
            if ri_entry:
                vcs_type = list(ri_entry.keys())[0]
                return ri_entry[vcs_type]['uri']
    return data.get('vcs_uri', '')

def retrieve_uuid(uuid_appended_id):
    """Retrieves the uuid of a uuid-appended id.

    Parameters
    ----------
    uuid_appended_id : str
        A uuid-appended id of the form uuid|base_id.

    Returns
    -------
    str
        The uuid portion of the appended id.
    """
    if uuid_appended_id.find('|') < 0:
        return None
    id_split = uuid_appended_id.split('|')
    if len(id_split) != 2:
        raise ValueError('A uuid appended protocol id should be of the form uuid|base_id')
    return_uuid = id_split[0]
    return return_uuid

def lorentzian(x, x0, gamma):
    """Complex Lorentzian peak.

    lorentzianAmp(x, x0, gamma) = lorentzian(x, x0, gamma)
                                  * conj(lorentzian(x, x0, gamma))
    """
    return 1 / (1 + 1j * (x - x0) / gamma)

def flatten(l):
    """
    Flatten a nested list.
    https://stackoverflow.com/questions/952914/how-to-make-a-flat-list-out-of-list-of-lists

    Parameters
    ----------
    l : array-like
    """
    return [item for sublist in l for item in sublist]

def is_image_file(filename):
    """
    Return true if the file is an image.

    Parameters
    ----------
    filename : str
        the name of the image file

    Return
    ------
    bool : bool
        True if **file** is an image.
    """
    return any(filename.lower().endswith(extension)
               for extension in ['.png', '.jpg', '.bmp', '.mat'])

def union(probs):
    """
    Calculates the union of a list of probabilities [p_1, p_2, ..., p_n]:
    p = p_1 U p_2 U ... U p_n, assuming independent events.
    """
    while len(probs) > 1:
        if len(probs) % 2:
            # Odd length: fold the first element into the next one
            p, probs = probs[0], probs[1:]
            probs[0] = probs[0] + p - probs[0] * p
        # Combine adjacent pairs via P(A or B) = P(A) + P(B) - P(A)P(B)
        probs = [probs[i - 1] + probs[i] - probs[i - 1] * probs[i]
                 for i in range(1, len(probs), 2)]
    return probs[0]

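# Quick check (illustrative): for independent events the result follows
# inclusion-exclusion, e.g. the union of three fair coin flips is 1 - 0.5**3.
assert union([0.5, 0.5]) == 0.75
assert union([0.5, 0.5, 0.5]) == 0.875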