def geometry_to_list(geometry):
    """
    Converts the_geom to a dictionary containing SRID and a list of coordinate points
    :param geometry: the_geom in string format
    :return: A dictionary containing srid as an int and coordinates as a 2D list with float values
    """
    coordinates_list = []
    points = geometry.split(";")
    srid, coordinates = points[0], points[1]
    srid = srid[5:]
    points = coordinates[11:len(coordinates)-1].split(",")
    for pair in points:
        coord_pair = []
        for number in pair.split(" "):
            coord_pair.append(float(number))
        coordinates_list.append(coord_pair)
    return {"srid": int(srid), "coordinates": coordinates_list}
def sign(x):
    """Returns the sign of x -> -1, 0, 1"""
    if x < 0:
        return -1
    if x > 0:
        return 1
    return 0
def normalize_whitespace(text):
    """Merges multiple consecutive whitespace characters, converting them to a single space (` `).
    Also strips whitespace from the start and end of the text."""
    return " ".join(text.split())
def problem_from_var_dict(var_dict: dict) -> dict:
    """
    Initially used by the SALib framework; we keep the problem dict for all methods

    Parameters
    ----------
    var_dict: dict
        var_dict where keys are variable names and values are the names of the categories

    Returns
    -------
    problem: dict
        problem formulation of SALib: dict with useful keys: num_vars (number of variables),
        names (variable names) and bounds (bounds of uniform distribution of variables)
    """
    problem = {
        "num_vars": len(var_dict.keys()),
        "names": sorted(list(var_dict.keys())),
        "bounds": [[0.0, 1.0]] * len(var_dict.keys()),
    }
    return problem
def find_page(cache, refer_bit, page, frame_num):
    """
    Check whether the cache holds this page number.
    If it has the page, set the reference bit and return True.
    If not, return False, which means we should replace one of the pages in the cache.
    :param cache: list of pages currently held in the cache frames
    :param refer_bit: list of reference bits, one per frame
    :param page: page number to look for
    :param frame_num: number of frames in the cache
    :return: True if the page is in the cache, otherwise False
    """
    for frame in range(frame_num):
        if cache[frame] == page:
            refer_bit[frame] = 1
            return True
    return False
def _new_in_rhs(lhs_list, rhs_list):
    """Compare the lhs and rhs lists to see if the rhs contains elements not in the lhs"""
    added = []
    lhs_codes = tuple(n.label_id for n in lhs_list)
    for node in rhs_list:
        if node.label_id not in lhs_codes:
            added.append(node)
    return added
def https_url(url):
    """Make sure the url is in https"""
    return url.replace("http://", "https://")
def fake_train(lrate, batch_size, arch):
    """Optimum: lrate=0.2, batch_size=4, arch='conv'."""
    f1 = (lrate - 0.2) ** 2 + (batch_size - 4) ** 2 + (0 if arch == "conv" else 10)
    return f1
def find_category_name(id, categories):
    """Return a tuple of the category and name for a given id"""
    for c in categories:
        for i in categories[c]:
            if i == id:
                return (c, categories[c][i])
def steps(current, target, max_steps):
    """
    Steps between two values.
    :param current: Current value (0.0-1.0).
    :param target: Target value (0.0-1.0).
    :param max_steps: Maximum number of steps.
    """
    if current < 0 or current > 1.0:
        raise ValueError("current value %s is out of bounds (0.0-1.0)" % current)
    if target < 0 or target > 1.0:
        raise ValueError("target value %s is out of bounds (0.0-1.0)" % target)
    return int(abs((current * max_steps) - (target * max_steps)))
def repair_citation(text, citation):
    """Adjust article citations to match the "usual" pattern ... [3].

    Args:
        text (string): article text
        citation (string): citation text

    Returns:
        string: adjusted text
    """
    text = text.rsplit(citation, 1)[0]
    add_space = False
    if text.endswith(' '):
        text = text.rstrip()
        add_space = True
    if (not text.endswith(' al.') and not text.endswith(' ref.') and text.endswith('.')) \
            or text.endswith(',') or text.endswith(';'):
        cut_off = text[-1]
        text = text[:-1]
        text += ' ' + '[' + citation + ']' + cut_off
        if add_space:
            text += ' '
    else:
        if add_space:
            text += ' '
        text += '[' + citation + ']'
    return text
def string_to_int(num):
    """Convert an integer or hex integer string to an int

    :returns: converted integer

    especially helpful for using ArgumentParser()
    """
    if num.find("0x") != -1:
        return int(num, 16)
    else:
        return int(num)
def _CustomMachineTypeString(cpu, memory_mib):
    """Creates a custom machine type from the CPU and memory specs.

    Args:
        cpu: the number of cpu desired for the custom machine type
        memory_mib: the amount of ram desired in MiB for the custom machine type instance

    Returns:
        The custom machine type name for the 'instance create' call
    """
    machine_type = 'db-custom-{0}-{1}'.format(cpu, memory_mib)
    return machine_type
def binary_search(array, low, high, element) -> int:
    """Binary search algorithm that returns the index of the element to look for, or -1 if not found"""
    if high >= low:
        mid = int((high + low) / 2)
        if array[mid] == element:
            return mid
        elif array[mid] > element:
            # element is smaller than the midpoint, so search the left half
            return binary_search(array, low, mid - 1, element)
        else:
            # element is larger than the midpoint, so search the right half
            return binary_search(array, mid + 1, high, element)
    else:
        return -1
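# Illustrative use of binary_search above (not from the original source); the
# input list must already be sorted in ascending order:
data = [1, 3, 5, 7, 9, 11]
assert binary_search(data, 0, len(data) - 1, 7) == 3
assert binary_search(data, 0, len(data) - 1, 4) == -1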
def find_closing_bracket(line: str, openbracketid: int) -> int:
    """Returns the index of the closing bracket for a given opening bracket.

    Looks for the closing bracket in string for an opening bracket that is
    pointed by the ``openbracketid``.

    Args:
        line (str): a single line from the EDIF file
        openbracketid (int): the index of the opening bracket for which the
            closing bracket should be found

    Returns:
        int: index for the closing bracket or -1 if not found
    """
    opencount = 0
    finid = openbracketid
    for c in line[openbracketid:]:
        if c == '(':
            opencount += 1
        elif c == ')':
            opencount -= 1
            if opencount == 0:
                return finid
        finid += 1
    return -1
def calc_scale1fb(xs, sum_weights):
    """
    Given xs (in pb) and sum of gen weights, calculate scale1fb.
    :param xs: cross section (in pb)
    :type xs: float
    :param sum_weights: sum of gen weights
    :type sum_weights: float
    :return: scale1fb
    :rtype: float
    """
    if xs <= 0:
        return -1
    else:
        return (xs * 1000.) / sum_weights
def invalid_environment(expected_environment, default_environment):
    """invalid_environment message"""
    return "'{}' is not a valid target environment, defaulting to '{}'."\
        .format(expected_environment, default_environment)
def is_unicode(obj):
    """Helper method to see if the object is a unicode string"""
    return type(obj) is str
def tr_oct_coord_test(x, y, z):  # dist2 = 2
    """Test for coordinate in truncated octahedron grid"""
    return ((z % 4 == 0 and x % 4 and y % 4 and (x-y) % 2) or
            (y % 4 == 0 and z % 4 and x % 4 and (z-x) % 2) or
            (x % 4 == 0 and y % 4 and z % 4 and (y-z) % 2))
def baseTenToOther(num: int, base: int, numerals: str = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
    """Converts a base-10 number to any other base.

    Args:
        num (int): your base-10 number.
        base (int): the base you want for the output.
        numerals (str, optional): digits and letters used for the conversion.
            Defaults to "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ".

    Returns:
        str: the base-10 input converted to the requested base
    """
    return ((num == 0) and numerals[0]) or (
        baseTenToOther(num // base, base, numerals).lstrip(numerals[0]) + numerals[num % base])
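# Illustrative usage of baseTenToOther (not from the original source; assumes
# the function above is in scope):
assert baseTenToOther(255, 16) == "FF"
assert baseTenToOther(10, 2) == "1010"
assert baseTenToOther(0, 8) == "0"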
def calculate_parameters_in_pixels(
    x_pixel_um,
    y_pixel_um,
    z_pixel_um,
    soma_diameter_um,
    max_cluster_size_um3,
    ball_xy_size_um,
    ball_z_size_um,
):
    """
    Convert the command-line arguments from real (um) units to pixels
    :param x_pixel_um: pixel size in x (um)
    :param y_pixel_um: pixel size in y (um)
    :param z_pixel_um: pixel size in z (um)
    :param soma_diameter_um: soma diameter (um)
    :param max_cluster_size_um3: maximum cluster size (um^3)
    :param ball_xy_size_um: ball size in the xy plane (um)
    :param ball_z_size_um: ball size in z (um)
    :return: soma_diameter, max_cluster_size, ball_xy_size and ball_z_size in pixels
    """
    mean_in_plane_pixel_size = 0.5 * (x_pixel_um + y_pixel_um)
    voxel_volume = x_pixel_um * y_pixel_um * z_pixel_um
    soma_diameter = int(round(soma_diameter_um / mean_in_plane_pixel_size))
    max_cluster_size = int(round(max_cluster_size_um3 / voxel_volume))
    ball_xy_size = int(round(ball_xy_size_um / mean_in_plane_pixel_size))
    ball_z_size = int(round(ball_z_size_um / z_pixel_um))
    return soma_diameter, max_cluster_size, ball_xy_size, ball_z_size
def unserialize_dimensions(dims):
    """
    >>> unserialize_dimensions("")
    {}
    >>> unserialize_dimensions("time=foo,elevation=100m")
    {'time': 'foo', 'elevation': '100m'}
    """
    if not dims:
        return {}
    return dict(kv.split('=', 1) for kv in dims.split(','))
def get_normal_points(cx, cy, cos_t, sin_t, length):
    """
    For a line passing through (*cx*, *cy*) and having an angle *t*, return
    locations of the two points located along its perpendicular line at the
    distance of *length*.
    """
    if length == 0.:
        return cx, cy, cx, cy

    cos_t1, sin_t1 = sin_t, -cos_t
    cos_t2, sin_t2 = -sin_t, cos_t

    x1, y1 = length*cos_t1 + cx, length*sin_t1 + cy
    x2, y2 = length*cos_t2 + cx, length*sin_t2 + cy

    return x1, y1, x2, y2
def obj_is_multiple(obj_multiple_instances: str) -> bool:
    """
    Maps the XML value of an Object's "MultipleInstances" to a boolean that represents
    whether the object can be instantiated multiple times. This is useful to optimize
    memory usage of the generated code.
    :param obj_multiple_instances: value of the "MultipleInstances" XML field
    :return: True if the value is "Multiple" (case-insensitive)
    """
    return obj_multiple_instances.upper() == "MULTIPLE"
def strip_ext(filename):
    """Remove the extension from a filename."""
    return filename.rsplit('.', 1)[0]
def max_sub_array(nums, max_sum=None, current_sum=0, current_index=0):
    """Returns the max subarray sum of the given list of numbers.
    Returns 0 if nums is None or an empty list.

    Time Complexity: O(n)
    Space Complexity: O(n) (recursion depth)
    """
    if nums is None or len(nums) == 0:
        return 0
    if max_sum is None:
        max_sum = nums[0]
    if current_index == len(nums):
        return max_sum
    # Kadane's algorithm: either extend the running subarray or start a new one here.
    current_sum = max(nums[current_index], current_sum + nums[current_index])
    max_sum = max(max_sum, current_sum)
    return max_sub_array(nums, max_sum, current_sum, current_index + 1)
def expand_parameters_from_remanence_array(magnet_parameters, params, prefix):
    """
    Return a new parameters dict with the magnet parameters in the form
    '<prefix>_<magnet>_<segment>', with the values from 'magnet_parameters'
    and other parameters from 'params'.

    The length of the array 'magnet_parameters' must be equal to the sum of
    the number of segments in both cylinders. The first n_II elements refer
    to the inner magnet, and the remaining elements to the outer magnet.
    """
    params_expanded = params.copy()

    n_II = params["n_II"]
    for i in range(0, n_II):
        params_expanded["%s_II_%d" % (prefix, i + 1,)] = magnet_parameters[i]

    n_IV = params["n_IV"]
    for j in range(0, n_IV):
        k = j + n_II  # the first n_II elements refer to magnet II
        params_expanded["%s_IV_%d" % (prefix, j + 1,)] = magnet_parameters[k]

    return params_expanded
def use(block):
    """Variables that are read before they are written in the block."""
    defined = set()  # Locally defined.
    used = set()
    for i in block:
        used.update(v for v in i.get('args', []) if v not in defined)
        if 'dest' in i:
            defined.add(i['dest'])
    return used
def UnitStep(x):
    """Eq 1.12 from Fedkiw Osher book"""
    return 1 if x > 0 else 0
def sum_multiple(k, n):
    """Return the sum of all positive integer multiples of k less than or equal
    to n, i.e. sum_multiple(k, n) = k + 2k + ... + k * floor(n/k)

    >>> sum_multiple(1, 10)
    55
    >>> sum_multiple(3, 10)
    18
    >>> sum_multiple(3, 9)
    18
    """
    N = n // k
    return k * N * (N + 1) // 2
def _GetPercentageChange(value1, value2):
    """Returns the percentage change between the specified values."""
    difference = value2 - value1
    return 0 if value1 == 0 else difference / value1 * 100
def max_intensity(bitdepth: int, count: bool = False):
    """
    Get maximum intensity for a given bitdepth.
    To get the number of possible intensities, set `count` to True.
    """
    mi = 2 ** bitdepth
    if not count:
        mi -= 1
    return mi
def key_right(pos: int, page: int, page_len: int, pages: int, total_pkgs: int):
    """
    Scroll the package list forward one page.
    :param pos: Current cursor position
    :param page: Current page
    :param page_len: Number of packages listed on a page
    :param pages: Total number of pages
    :param total_pkgs: Total number of packages
    :return: int(position), int(page)
    """
    if page == pages:
        pass
    else:
        if total_pkgs < page_len + (page_len * (page + 1)):
            page += 1
            pos = total_pkgs
        else:
            page += 1
            pos += page_len
    return pos, page
def get_class_name(cls):
    """Get the class full path name."""
    return "{}.{}".format(cls.__module__, cls.__name__)
def get_run_name_nr(_run_name, _run_nr):
    """
    :param _run_name: [str], e.g. 'runA'
    :param _run_nr: [int], e.g. 1
    :return: _run_name_nr: [str], e.g. 'runA-1'
    """
    return f"{_run_name}-{_run_nr}"
def parse_fromaddr(fromaddr):
    """Generate an RFC 822 from-address string.

    Simple usage::

        >>> parse_fromaddr('[email protected]')
        '[email protected]'

        >>> parse_fromaddr(('from', '[email protected]'))
        'from <[email protected]>'

    :param fromaddr: string or tuple
    """
    if isinstance(fromaddr, tuple):
        fromaddr = "%s <%s>" % fromaddr
    return fromaddr
def checkProjectNameOption(projectNameOption):
    """
    Function to set the default value for projectNameOption

    Args:
        projectNameOption: name for the project directory (root directory)

    Returns:
        name for the project directory
    """
    if projectNameOption is None:
        return 'RAAD'
    else:
        return projectNameOption
def count(seq):
    """Count the number of items in seq

    Like the builtin ``len`` but works on lazy sequences.

    Not to be confused with ``itertools.count``

    See also:
        len
    """
    if hasattr(seq, '__len__'):
        return len(seq)
    return sum(1 for i in seq)
def calculate_intensity_avg_no_bg(bg, intensity_ave):
    """
    Get intensity after subtracting the background.
    :param bg: a float of calculated background
    :param intensity_ave: 1D list of averaged intensity
    :return: 1D list of intensity with background subtracted
    """
    intensity_ave_no_bg = [i - bg for i in intensity_ave]
    for index in range(len(intensity_ave_no_bg)):
        intensity_ave_no_bg[index] = 0 if intensity_ave_no_bg[index] < 0 else intensity_ave_no_bg[index]
    return intensity_ave_no_bg
def clean_path(path):
    """
    Removes extra space and leading slash at the beginning of a path

    Args:
        path(str): Path to clean
    Returns:
        str: A cleaned up path
    """
    clean_key = path.strip()
    if clean_key.startswith('/'):
        clean_key = clean_key[1:]
    return clean_key
def filter_hdf_paths(chrompairs, select, which):
    """
    :param chrompairs:
    :param select:
    :param which:
    :return:
    """
    if which == 'target':
        load_paths = [t[2] for t in chrompairs if t[1] == select]
    elif which == 'query':
        load_paths = [t[2] for t in chrompairs if t[0] == select]
    else:
        raise ValueError('Reference has to be specified as target or query, not as {}'.format(which))
    return load_paths
def parse_string(string: str) -> str:
    """Re-encodes strings from AFIP's weird encoding to UTF-8."""
    try:
        return string.encode("latin-1").decode()
    except UnicodeDecodeError:
        # It looks like SOME errors are plain UTF-8 text.
        return string
def make_anagrams_dict(l):
    """
    1. Create a reference key (sorted tuple of characters) out of each item from a list of words.
    2. Search the list of words for reference anagrams.
    3. Return a dictionary with references as keys and a list of their anagrams as values.
    """
    anagrams_dict_raw = {}
    anagrams_dict = {}
    for word in l:
        word_to_list = list(word)
        word_to_list.sort()
        ref = tuple(word_to_list)
        if ref not in anagrams_dict_raw:
            anagrams_dict_raw[ref] = [word]
        else:
            anagrams_dict_raw[ref].append(word)
    for key in anagrams_dict_raw:
        if len(anagrams_dict_raw[key]) >= 2:
            anagrams_dict[key] = anagrams_dict_raw[key]
    return anagrams_dict
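# Illustrative use of make_anagrams_dict (not from the original source):
# only groups with two or more anagrams are kept; keys are sorted-character tuples.
words = ["listen", "silent", "enlist", "hello"]
assert make_anagrams_dict(words) == {tuple("eilnst"): ["listen", "silent", "enlist"]}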
def is_num(val):
    """check if a string is numeric"""
    try:
        float(val)
    except ValueError:
        return False
    return True
def pickaparcannot(files):
    """Return the sorted lh/rh aparc.annot files"""
    aparcs = []
    for s in files:
        if 'lh.aparc.annot' in s:
            aparcs.append(s)
        elif 'rh.aparc.annot' in s:
            aparcs.append(s)
    aparcs = sorted(aparcs)
    return aparcs
def create_points_list(lists):
    """Transforms two lists of values into a list of couples of values"""
    created = list()
    for i in range(len(lists[0])):  # typically i in range(2)
        point = list()
        for l in lists:  # for each coordinate
            point.append(l[i])
        created.append(point)
    return created
def num_lines(text):
    """Return the number of lines for a multiline text string."""
    return len(text.strip().splitlines())
def format_wd_stig(wd, stig_x, stig_y):
    """Return a formatted string of focus parameters."""
    return ('WD/STIG_XY: '
            + '{0:.6f}'.format(wd * 1000)  # wd in metres, show in mm
            + ', {0:.6f}'.format(stig_x)
            + ', {0:.6f}'.format(stig_y))
def is_number(s):
    """returns true if token is a number"""
    try:
        float(s) if '.' in s else int(s)
        return True
    except ValueError:
        return False
def CHLSLOPE(YUP, YDN, elev_HT=None, elev_CNR=None, chl=None):
    """Statement function for top slope of river"""
    return abs((YUP + elev_HT) - (YDN + elev_CNR)) / chl
def _replace_special_string(string, old_list, new_list):
    """Replace special string"""
    for i in range(0, len(old_list)):
        string = str(string).replace(old_list[i], new_list[i])
    return string
def has_edge(e, f):
    """Check if an edge `e` is in a face `f`.

    Example:
        >>> e = (0, 1)
        >>> f = (1, 2, 3, 0)
        >>> has_edge(e, f)
        True  # because 0, 1 are adjacent in f (first and last)
    """
    for pair in zip(f, f[1:] + (f[0],)):
        if e == pair or e == pair[::-1]:
            return True
    return False
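# Illustrative use of has_edge (not from the original source); the face is
# treated as a closed cycle, so the wrap-around edge (0, 1) also matches:
f = (1, 2, 3, 0)
assert has_edge((2, 3), f) is True
assert has_edge((1, 3), f) is False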
def splits(text, L=20):
    """Return a list of all possible (first, rest) pairs, len(first)<=L."""
    return [(text[:i+1], text[i+1:])
            for i in range(min(len(text), L))]
def split_dataset(dataset, word_mapping, label_mapping, percent_testing=0.2, shuffle=False):
    """
    Given a dataset, break it up into a training and dev set.

    :param dataset: NLTK tagging dataset
    :param word_mapping: map from word to index
    :param label_mapping: map from label to index
    :param percent_testing: how much data should be held out for testing? float from 0-1, default: 0.2
    :param shuffle: Should we shuffle the data? Boolean: default: False
    :return: training_words (list of all training indices), training_labels (list of all training labels),
        testing_words (list of all testing indices), testing_labels (list of all testing labels)
    """
    training_words, training_labels, testing_words, testing_labels = None, None, None, None
    return training_words, training_labels, testing_words, testing_labels
def _get_edit_distance_matrix(x: str, y: str) -> list:
    """Returns a len(x) + 1 by len(y) + 1 matrix, where the first row and
    column hold the base-case distances 0..len(y) and 0..len(x), and the rest
    is filled with -1s."""
    matrix = [[-1 for _ in range(len(y) + 1)] for _ in range(len(x) + 1)]

    for j in range(len(matrix[0])):
        matrix[0][j] = j

    for i, _ in enumerate(matrix):
        matrix[i][0] = i

    return matrix
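# Illustrative output of _get_edit_distance_matrix (not from the original source):
# for x = "ab", y = "c" the 3 x 2 matrix holds the base cases and -1 elsewhere.
assert _get_edit_distance_matrix("ab", "c") == [[0, 1], [1, -1], [2, -1]]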
def get_first_package_name(name):
    """
    Returns first package name.

    From `a.b.c` it returns `a`.

    :param str name: Full module name
    :return: First package name.
    :rtype: str
    """
    return name.split(".")[0]
def unnest(tup):
    """
    Unnests a nested list. If an element is inside nested single-item lists,
    the function returns the element, otherwise the list is returned.

    Parameters
    ----------
    tup : list
        Nested lists.

    Returns
    -------
    object
        Element or list without unnecessary nesting.
    """
    while True:
        if not isinstance(tup, list):
            return tup
        if len(tup) != 1:
            return tup
        tup = tup[0]
def load_capcodes_filter_dict(filename):
    """Load capcodes ignore or match data to dictionary."""
    capcodes = dict()
    try:
        print("Loading data from '{}'".format(filename))
        with open(filename, "r") as text_file:
            lines = text_file.readlines()
        for item in lines:
            if item[0] == "#":
                continue
            fields = item.split(",")
            if len(fields) == 2:
                capcodes[fields[0].strip()] = fields[1].strip()
            elif len(fields) == 1:
                capcodes[fields[0].strip()] = 'NO DESCR'
        print("{} records loaded".format(len(capcodes)))
        return capcodes
    except KeyError:
        print(f"Could not parse file contents of: {filename}")
    except OSError:
        print(f"Could not open/read file: {filename}, ignore filter")
    return capcodes
def join_logs(schema_log, data_log):
    """Join logs strings into single string."""
    return ('\n> SCHEMA VALIDATION\n\n' + schema_log + '\n\n'
            '\n> DATA VALIDATION\n\n' + data_log + '\n')
def previous_number_in_loop(start: int, loop_size: int) -> int:
    """Step back one position in a loop: if at position 0, wraps around and returns loop_size - 1."""
    if start - 1 < 0:
        result = loop_size - 1
    else:
        result = start - 1
    return result
def impute_negatives(items):
    """
    If an item in the list is negative, replace it with the previous item's value.
    If it's the first item in the list, replace it with zero.
    """
    for i, item in enumerate(items):
        if item < 0:
            if i == 0:
                items[i] = 0
            else:
                items[i] = items[i-1]
    return items
def inclusive(dictionary, *keys):
    """Returns a new dictionary containing only the given keys."""
    return {key: value for key, value in dictionary.items() if key in keys}
def isen_nozzle_choked_mass_flow(A_t, p_0, h_0, gamma_var):
    """
    Calculates mass flow through a nozzle which is isentropically expanding
    a given flow and is choked (Mach number at throat is 1.0)

    Input variables:
        A_t : nozzle throat area
        gamma_var : ratio of specific heats
        p_0 : stagnation chamber pressure
        h_0 : stagnation enthalpy
    """
    # Not yet implemented:
    # m_dot = ((gamma_var * p_0) * (()**())) * (()**())
    return None
def post(tokens):
    """
    post-process output from NLTK tokenizer

    Args:
        tokens: a list containing a tokenized text
    Returns:
        processed tokens
    """
    out = []
    for t in tokens:
        if t[-1] == ".":
            out.append(t[:-1])
        else:
            out.append(t)
    return out
def beta_model(r3d_kpc, n0, r_c, beta):
    """
    Compute a beta model

    Parameters
    ----------
    - r3d_kpc: array of radius in kpc
    - r_c : core radius parameter
    - n_0 : normalization
    - beta : slope of the profile

    Outputs
    --------
    - beta model profile as a function of the input radius vector
    """
    return n0 * (1 + (r3d_kpc / r_c)**2)**(-3.0*beta/2.0)
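# Illustrative use of beta_model with NumPy (not from the original source);
# the parameter values below are arbitrary example numbers:
import numpy as np
r = np.linspace(0.0, 500.0, 6)  # radii in kpc
profile = beta_model(r, n0=1e-2, r_c=100.0, beta=0.6)
# n(r) = n0 * (1 + (r/r_c)**2) ** (-3*beta/2), so profile[0] == 1e-2 at r = 0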
def add_arrays(arr1, arr2):
    """
    Adds two arrays element-wise.
    Returns a new array with the result, or None if the lengths differ.
    """
    if len(arr1) == len(arr2):
        return [arr1[i] + arr2[i] for i in range(len(arr1))]
    return None
def spltime(tseconds):
    """This gets the time in hours, mins and seconds"""
    hours = tseconds // 3600
    minutes = int(tseconds / 60) % 60
    seconds = tseconds % 60
    return hours, minutes, seconds
def extract_total_order(X):
    """Extract cells and total order of the cells

    Parameters
    ----------
    X: cell complex.
        List of dictionaries, one per dimension d. The size of the dictionary
        is the number of d-cells. The dictionary's keys are sets corresponding
        to the name of the cells in X. The dictionary's values are the indexes
        of the cells in the boundary and chain homotopy matrices.

    Returns
    -------
    tot_order: cell complex X stored as one dictionary.
        The dictionary's keys are sets corresponding to the name of the cells
        in X. The dictionary's values are the indexes of the cells in the
        chain homotopy matrices phi and psi.
    """
    tot_order = dict()
    k = 0
    for d in range(len(X)):
        for cell in X[d].keys():
            tot_order[cell] = k
            k = k + 1
    return tot_order
def isascii(s):
    """Returns True if str s is entirely ASCII characters.

    (Compare to Python 3.7 `str.isascii()`.)
    """
    try:
        s.encode("ascii")
    except UnicodeEncodeError:
        return False
    return True
def append(l, elem):
    """Append list with element and return the list modified"""
    if elem is not None:
        l.append(elem)
    return l
def floating_number(value, minimum=None, maximum=None, cut=False, pad=False):
    """Returns back a float from the given value"""
    value = float(value)
    if minimum is not None and value < minimum:
        if pad:
            return minimum
        raise ValueError(
            "Provided value of {} is below specified minimum of {}".format(value, minimum)
        )
    if maximum is not None and value > maximum:
        if cut:
            return maximum
        raise ValueError(
            "Provided value of {} is above specified maximum of {}".format(value, maximum)
        )
    return value
def drop_matches(list1, list2):
    """Returns the count and a sorted list of the elements not in common
    between the two lists (note: both input lists are sorted in place)."""
    list1.sort()
    list2.sort()
    matches = []
    i = j = 0
    lenLst1 = len(list1)
    lenLst2 = len(list2)
    while i < lenLst1 and j < lenLst2:
        if list1[i] < list2[j]:
            matches.append(list1[i])
            i += 1
        elif list1[i] > list2[j]:
            matches.append(list2[j])
            j += 1
        else:  # they are the same
            i += 1
            j += 1
    while i < lenLst1:
        matches.append(list1[i])
        i += 1
    while j < lenLst2:
        matches.append(list2[j])
        j += 1
    return len(matches), matches
def get_species_counts(data):
    """Count the number of individuals of each species"""
    species_counts = dict()
    for record in data:
        species = record[1]
        count = int(record[2])
        species_counts[species] = species_counts.get(species, 0) + count
    return species_counts
def tag_to_string(gallery_tag, simple=False):
    """
    Takes gallery tags and converts them to a string.
    If simple is set to True, returns a CSV string, else a dict-like string.
    """
    assert isinstance(gallery_tag, dict), "Please provide a dict like this: {'namespace':['tag1']}"
    string = ""
    if not simple:
        for n, namespace in enumerate(sorted(gallery_tag), 1):
            if len(gallery_tag[namespace]) != 0:
                if namespace != 'default':
                    string += namespace + ":"

                # find tags
                if namespace != 'default' and len(gallery_tag[namespace]) > 1:
                    string += '['
                for x, tag in enumerate(sorted(gallery_tag[namespace]), 1):
                    # if we are at the end of the list
                    if x == len(gallery_tag[namespace]):
                        string += tag
                    else:
                        string += tag + ', '
                if namespace != 'default' and len(gallery_tag[namespace]) > 1:
                    string += ']'

                # if we aren't at the end of the list
                if not n == len(gallery_tag):
                    string += ', '
    else:
        for n, namespace in enumerate(sorted(gallery_tag), 1):
            if len(gallery_tag[namespace]) != 0:
                if namespace != 'default':
                    string += namespace + ","

                # find tags
                for x, tag in enumerate(sorted(gallery_tag[namespace]), 1):
                    # if we are at the end of the list
                    if x == len(gallery_tag[namespace]):
                        string += tag
                    else:
                        string += tag + ', '

                # if we aren't at the end of the list
                if not n == len(gallery_tag):
                    string += ', '
    return string
def roundPrecision(number, precision=3):
    """Rounds the given floating point number to a certain precision, for output."""
    return float(('{:.' + str(precision) + 'E}').format(number))
def _short_tag(tag):
    """Helper method to remove any namespaces from the XML tag"""
    return tag[tag.rfind('}')+1:len(tag)]
def minimumBelow(requestContext, seriesList, n):
    """
    Takes one metric or a wildcard seriesList followed by a constant n.
    Draws only the metrics with a minimum value below n.

    Example:

    .. code-block:: none

        &target=minimumBelow(system.interface.eth*.packetsSent,1000)

    This would only display interfaces which at one point sent less than 1000 packets/min.
    """
    result = []
    for series in seriesList:
        if min(series) <= n:
            result.append(series)
    return result
def test(azote, phosphore, potassium):
    """3 values between 0 and 1, summing to 1.

    Error is the distance between the given 3d point and the expected 3d point.
    """
    expected = 0.3, 0.2, 0.5
    return (
        1 - (
            (azote - expected[0]) ** 2
            + (phosphore - expected[1]) ** 2
            + (potassium - expected[2]) ** 2
        ) ** 0.5
    )
def _get_a(elm, a):
    """helper function, get attribute if element exists"""
    return elm.get(a) if elm is not None else elm
def flip(f, b, a):
    """
    flip :: (a -> b -> c) -> b -> a -> c

    flip(f) takes its (first) two arguments in the reverse order of f.
    """
    return f(a, b)
def convert_from_missing_indexer_tuple(indexer, axes):
    """Create a filtered indexer that doesn't have any missing indexers."""
    def get_indexer(_i, _idx):
        return (axes[_i].get_loc(_idx['key'])
                if isinstance(_idx, dict) else _idx)

    return tuple([get_indexer(_i, _idx) for _i, _idx in enumerate(indexer)])
def create_combinations_from_array(array):
    """
    Takes an array and creates a list of pair combinations from it.
    Unlike itertools.combinations, this creates both orderings of each pair.

    example:
    >>> list(itertools.combinations(['A', 'B', 'C'], 2))
    [('A', 'B'), ('A', 'C'), ('B', 'C')]
    >>> create_combinations_from_array(['A', 'B', 'C'])
    [('A', 'B'), ('A', 'C'), ('B', 'A'), ('B', 'C'), ('C', 'A'), ('C', 'B')]
    """
    return [
        (item_1, item_2)
        for item_1 in array
        for item_2 in array
        if item_1 is not item_2]
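# Illustrative note on create_combinations_from_array (not from the original
# source): for distinct items it matches itertools.permutations of length 2.
import itertools
assert create_combinations_from_array(['A', 'B', 'C']) == list(itertools.permutations(['A', 'B', 'C'], 2))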
def get_pairs(word):
    """Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
def tic2beat(t, div):
    """Convert `t` from midi tics to beats given beat division `div`

    Parameters
    ----------
    t : number
    div : number

    Returns
    -------
    number
    """
    return t / float(div)
def get_n_points(strokes):
    """Get number of points in a drawing."""
    n_points = 0
    for x, y in strokes:
        n_points += len(x)
    return n_points
def _generate_token_cmd(path, username, password):
    """
    Builds the curl command used to request an authentication token.

    :type path: str
    :param path: API path

    :type username: str
    :param username: User name

    :type password: str
    :param password: Password of the user

    :rtype: str
    :returns: The curl command as a string.
    """
    body = '{"username": "%s", "password": "%s"}' % (username, password)
    return "curl -X POST -H 'Content-Type: application/json' -d '%s' %s" % (body, path)
def sm_switcher(sm):
    """Return the username-parsing rules for a given social-media site.

    Args:
        sm (str): name of the social network (e.g. "facebook", "twitter")

    Returns:
        dict: a dict with 'condition' and 'separator' entries for the site,
        or the string "Not a social media" if the site is unknown.
    """
    switcher = {
        "facebook": {'condition': '".php" in user_name or user_name=="name"',
                     'separator': " /"},
        "twitter": {'condition': '"%" in user_name',
                    'separator': " /"},
        "pinterest": {'condition': 'user_name=="pin"',
                      'separator': " /"},
        "linkedin": {'condition': '"linkedin" in row["label"].lower() and len(row["label"].lower().split(" /")) < 2',
                     'separator': ".../"},
    }
    return switcher.get(sm.lower(), "Not a social media")
def is_key_complete(key):
    """Returns true if the key is complete. Complete keys are marked with a
    blank symbol at the end of the string. A complete key corresponds to a
    full word, incomplete keys cannot be mapped to word IDs.

    Args:
        key (string): The key

    Returns:
        bool. Return true if the last character in ``key`` is blank.
    """
    return key and key[-1] == ' '
def choose_most_elec_neg_potential(elem_lst, potential_dict):
    """Return the potential of the most electronegative element in a list of elements"""
    # if the element list is empty, return None
    if not elem_lst:
        return None
    # if the structure only contains 1 element, return None
    if len(potential_dict) == 1:
        return None
    # since the elements are already ranked by decreasing electronegativity
    # the first element in the list is by default the most electronegative one
    return potential_dict[elem_lst[0]]
def get_max_val_and_indexes(results_matrix, f1_sample_values, f3_sample_values):
    """
    Returns the maximum value of the given input matrix together with the
    f1 and f3 sample values at the indexes where that maximum occurs.
    """
    x = len(results_matrix)
    y = len(results_matrix[0])
    x_coord_of_max = 0
    y_coord_of_max = 0
    curr_max = results_matrix[0][0]
    for i in range(x):
        for j in range(y):
            if results_matrix[i][j] > curr_max:
                curr_max = results_matrix[i][j]
                x_coord_of_max = i
                y_coord_of_max = j
    return (curr_max, f1_sample_values[x_coord_of_max], f3_sample_values[y_coord_of_max])
def select_thumbnail(snippet):
    """Select the thumbnail of a YouTube video"""
    thumbnails = sorted(
        snippet["snippet"]["thumbnails"].values(), key=lambda x: -x["width"]
    )
    for thumbnail in thumbnails:
        if thumbnail["width"] / thumbnail["height"] == 16. / 9:
            return thumbnail["url"]
    return ""
def is_iterable(f):
    """Returns True if an object can be iterated over by using iter(obj)"""
    try:
        iter(f)
        return True
    except TypeError:
        return False
def apply_ticket_permissions(env, req, tickets):
    """Apply permissions to a set of milestone tickets as returned by
    get_tickets_for_milestone()."""
    return [t for t in tickets
            if 'TICKET_VIEW' in req.perm('ticket', t['id'])]
def date_to_cisco_date(date):
    """
    This function gets a date and returns it according to the standard of Cisco Email Security.

    Args:
        date: YYYY-MM-DD hh:mm:ss.

    Returns:
        The date according to the standard of Cisco Email Security - YYYY-MM-DDThh:mm:ss.000Z.
    """
    return date.replace(' ', 'T') + '.000Z'
def is_pandigital(digit: str):
    """
    We shall say that an n-digit number is pandigital if it makes use of all
    the digits 1 to n exactly once; for example, the 5-digit number, 15234,
    is 1 through 5 pandigital.

    A number is said to be pandigital if it contains each of the digits from
    0 to 9 (and whose leading digit must be nonzero). However, "zeroless"
    pandigital quantities contain the digits 1 through 9. Sometimes
    exclusivity is also required so that each digit is restricted to appear
    exactly once. For example, 6729/13458 is a (zeroless, restricted)
    pandigital fraction and 1023456789 is the smallest (zerofull) pandigital
    number. The first few zerofull restricted pandigital numbers are
    1023456789, 1023456798, 1023456879, 1023456897, 1023456978, ...

    :param digit:
    :return:
    """
    digit_str = str(digit)
    digit_set = set(int(d) for d in digit_str)
    if len(digit_set) != len(digit_str):
        return False
    pattern = set(n for n in range(min(digit_set), max(digit_set) + 1))
    return digit_set == pattern
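# Illustrative use of is_pandigital (not from the original source); note the
# implementation checks that the digits form a contiguous, repeat-free run
# starting at the smallest digit present:
assert is_pandigital("15234") is True
assert is_pandigital("15224") is False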
def execute(code):
    """Execute the given Python code."""
    try:
        return eval(code, globals())
    except SyntaxError:
        # statements (e.g. assignments) cannot be eval'd, so fall back to exec
        return exec(code, globals())
def chaotic_inertia_weight(c1, c2, z, max_iter, current_iter):
    """Introduced by Feng et al. 2008"""
    z = 4 * z * (1 - z)
    return (c1 - c2) * ((max_iter - current_iter) / max_iter) + (c2 * z)
def parse_regions(region_str):
    """Split a comma-separated region string into a list."""
    if region_str is None:
        return []
    return region_str.split(',')
def normalize_module_name(layer_name):
    """Normalize a module's name.

    PyTorch lets you parallelize the computation of a model, by wrapping a
    model with a DataParallel module. Unfortunately, this changes the
    fully-qualified name of a module, even though the actual functionality of
    the module doesn't change. Many times, when we search for modules by name,
    we are indifferent to the DataParallel module and want to use the same
    module name whether the module is parallel or not. We call this module
    name normalization, and this is implemented here.
    """
    if layer_name.find("module.") >= 0:
        return layer_name.replace("module.", "")
    return layer_name.replace(".module", "")
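# Illustrative behaviour of normalize_module_name (not from the original source;
# the layer names below are made-up examples):
assert normalize_module_name("module.features.0.conv") == "features.0.conv"
assert normalize_module_name("features.0.conv") == "features.0.conv"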
def generarValorB(valor):
    """
    Extracts the b component from a colour string formatted as "L/a/b".
    param: valor: colour string
    """
    L, a, b = valor.split("/")
    return float(b)