def figsize(relwidth=1, aspect=.618, refwidth=6.5):
    """
    Return figure dimensions from a relative width (to a reference width)
    and aspect ratio (default: 1/golden ratio).
    """
    width = relwidth * refwidth
    return width, width*aspect
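# Usage sketch (illustrative, not from the original source):
w, h = figsize()                        # full reference width, golden-ratio height
assert (w, h) == (6.5, 6.5 * .618)
w, h = figsize(relwidth=0.5, aspect=1)  # half-width, square panel
assert (w, h) == (3.25, 3.25)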
def extract_simple_type_restrictions(properties: dict) -> dict:
    """Extract Simple Type restrictions."""
    restrictions = {}
    keys = [
        "minLength", "maxLength", "pattern", "minimum", "maximum",
        "length", "totalDigits", "fractionDigits",
        "exclusiveMinimum", "exclusiveMaximum",
    ]
    for key in keys:
        value = properties.get(key)
        if value is not None:
            restrictions[key] = value
    return restrictions
def is_not_None(obj):
    """Return True if the object is not None."""
    return obj is not None
def write_file(file_name, data, line_length):
    """
    Writes the results to a text file using a name based on file_name.

    input: string, list
    returns: int
    """
    pos = file_name.rfind('.')
    fn_o = file_name[:pos] + '.OUT' + file_name[pos:]
    with open(fn_o, "w") as f:
        for fsn, sequence in data:
            f.write(fsn + '\n')
            l_length = len(sequence) if line_length == 0 else line_length
            for p in range(0, len(sequence), l_length):
                f.write(sequence[p:p+l_length] + '\n')
    return len(data)
def _apply_phase_constraints(alleles, phasesets, antiphasesets):
    """Apply phase constraints to alleles."""
    if phasesets:
        # If any adjacent allele belongs to an already selected phase set,
        # then allow only those alleles. Otherwise, all alleles are valid.
        alleles = [allele for allele in alleles
                   if allele.phase in phasesets] or alleles

    if antiphasesets:
        # Remove any allele belonging to an anti-phaseset
        alleles = [allele for allele in alleles
                   if allele.phase not in antiphasesets]

    return alleles
def format_date(dt):
    """Returns the formatted date, or None if dt is None."""
    if dt is None:
        return dt
    return dt.strftime("%Y-%m-%d")
def get_ordering_field(view, method):
    """
    If the API has the LIST method, add the ordering field for the view of
    the LIST method so users can order the results.
    """
    if 'list' in method and view.serializer_class:
        model_fields = [field.name for field in view.queryset.model._meta.fields]
        serializer_fields = [
            field.source or field_name
            for field_name, field in view.serializer_class().fields.items()
            if not getattr(field, 'write_only', False)]
        valid_fields = list(set(model_fields).intersection(set(serializer_fields)))
        return valid_fields
    else:
        return None
def correspondences(a, b):
    """
    Return pairwise correspondences:
    - fast.forwards -> slow.forwards
    - fast.backwards -> slow.backwards
    """
    return list(zip(a, b))
def getGlobValue(globs, path):
    """
    Returns the value of the glob, where path matches

    :Parameters:
     - `globs`: The glob list (``[(glob, associated value)]``)
     - `path`: The path to match

    :Types:
     - `globs`: sequence
     - `path`: ``str``

    :return: The matched value or ``None``
    :rtype: any
    """
    import fnmatch
    result = None
    for glob in globs:
        if fnmatch.fnmatchcase(path, glob[0]):
            result = glob[1]
            break
    return result
def get_sorted_mocap_vertex_data(mocap_vertex_data):
    """Returns the data sorted by vertex index, as a list of (string, int) tuples."""
    data = [(key, int(mocap_vertex_data[key]["vertexInd"]))
            for key in mocap_vertex_data.keys()]
    data = sorted(data, key=lambda tup: tup[1])
    return data
def _convert_float(value):
    """Convert an "exact" value to a ``float``.

    Also works recursively if ``value`` is a list.

    Assumes a value is one of the following:

    * :data:`None`
    * an integer
    * a string in C "%a" hex format for an IEEE-754 double precision number
    * a string fraction of the format "N/D"
    * a list of one of the accepted types (incl. a list)

    Args:
        value (Union[int, str, list]): Values to be converted.

    Returns:
        Union[float, list]: The converted value (or list of values).
    """
    if value is None:
        return None
    elif isinstance(value, list):
        return [_convert_float(element) for element in value]
    elif isinstance(value, int):
        return float(value)
    elif value.startswith("0x") or value.startswith("-0x"):
        return float.fromhex(value)
    else:
        numerator, denominator = value.split("/")
        return float(numerator) / float(denominator)
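# Usage sketch for _convert_float (illustrative inputs):
assert _convert_float(None) is None
assert _convert_float(3) == 3.0
assert _convert_float("0x1.8p1") == 3.0          # C "%a" hex format: 1.5 * 2^1
assert _convert_float("3/4") == 0.75             # fraction "N/D"
assert _convert_float([1, "1/2"]) == [1.0, 0.5]  # recurses into lists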
def secDhms(seconds):
    """Convert seconds to a duration of days/hours/minutes/secs in string format.

    seconds: float, >=0  - seconds to be converted

    return  duration: str  - resulting duration in the format:
        [<days:int>d][<hours:int>h][<minutes:int>m][<seconds:float>]

    >>> secDhms(10)
    '10'
    >>> secDhms(60)
    '1m'
    >>> secDhms(65.7818934)
    '1m5.782'
    >>> secDhms(3725)
    '1h2m5'
    >>> secDhms(50*3600)
    '2d2h'
    >>> secDhms(24*3600+2)
    '1d0h0m2'
    """
    days = int(seconds // (24 * 3600))
    seconds -= days * 24 * 3600
    hours = int(seconds // 3600)
    seconds -= hours * 3600
    mins = int(seconds // 60)
    secs = seconds - mins * 60

    res = '' if not days else str(days) + 'd'
    if not (hours or mins or secs):
        return res
    if res or hours:
        res += str(hours) + 'h'
    if not (mins or secs):
        return res
    if res or mins:
        res += str(mins) + 'm'
    return res if not secs else '{}{:.4g}'.format(res, secs)
def mean(values):
    """Get the mean of a list of values

    Args:
        values (iterable of float): A list of numbers

    Returns:
        float
    """
    return sum(values) / len(values)
def count_rule_conditions(rule_string: str) -> int:
    """
    Counts the number of conditions in a rule string.

    Parameters
    ----------
    rule_string : str
        The standard Iguanas string representation of the rule.

    Returns
    -------
    int
        Number of conditions in the rule.
    """
    n_conditions = rule_string.count("X['")
    return n_conditions
def monotonically_increasing(l):
    """
    Check whether a list of values is monotonically (strictly) increasing.

    Parameters:
        l (list, array): list of values.

    Returns:
        bool: True if every element is smaller than its successor.
    """
    return all(x < y for x, y in zip(l, l[1:]))
def get_trunk(py):
    """Helper function that returns the name of the df column for a
    particular percentile (py) of latency."""
    p_col_name = {
        "P50_latency(ms)": "P50ms",
        "P90_latency(ms)": "P90ms",
        "P95_latency(ms)": "P95ms",
        "P99_latency(ms)": "P99ms",
    }
    return p_col_name[py]
def infer_type_from_keys(keys: list):
    """Infer the ddf data type from the primary key."""
    if len(keys) == 1:
        if keys[0] == 'concept':
            return 'concepts'
        else:
            return 'entities'
    if 'synonym' in keys:
        return 'synonyms'
    else:
        return 'datapoints'
def run(result):
    """Function to test empty list return."""
    ret = []
    return ret
def is_valid_coordinate(x0: float, y0: float) -> bool:
    """Validate a latitude/longitude decimal-degree coordinate pair
    (x0 is latitude, y0 is longitude)."""
    if isinstance(x0, float) and isinstance(y0, float):
        if -90 <= x0 <= 90:
            if -180 <= y0 <= 180:
                return True
    return False
def get_md5_digest(arg):
    """Returns the MD5 digest of the specified argument.

    The digest is a string containing only hexadecimal digits.

    Args:
        arg (string): The string to hash.
    """
    import hashlib
    # md5() requires bytes, so encode the documented str input first.
    return hashlib.md5(arg.encode()).hexdigest()
def ParseCodePoint(s):
    """Parses the pua string representation.

    The format of the input is either:
    - empty string
    - hexadecimal integer
    - hexadecimal integer leading '>'

    We're not interested in empty string nor '>'-leading codes, so returns
    None for them. Note that a '>'-leading code means it is a "secondary"
    code point to represent the glyph (in other words, it has an alternative
    (primary) code point, which doesn't lead '>' and that's why we'll
    ignore it).
    """
    if not s or s[0] == '>':
        return None
    return int(s, 16)
def list_to_dict(recs, key):
    """Convert a list of dictionaries into a dictionary keyed on the
    specified dictionary key."""
    keys = [rec[key] for rec in recs]
    return dict(zip(keys, recs))
def get_best_indexes(logits, n_best_size):
    """
    Get the n-best logits from a list.

    :param logits: list
    :param n_best_size: int
    :return: list of best indexes.
    """
    index_and_score = sorted(
        enumerate(logits[1:], 1), key=lambda x: x[1], reverse=True)
    best_indexes = []
    for i in range(len(index_and_score)):
        if i >= n_best_size:
            break
        best_indexes.append(index_and_score[i][0])
    return best_indexes
def _get_magnification(r1, r2):
    """
    Calculate magnification factor of the object.

    Parameters
    ----------
    r1 : float
        Source to object distance.
    r2 : float
        Object to detector distance.

    Returns
    -------
    float
        Magnification factor.
    """
    return (r1 + r2) / r1
def _translate_keyname(inp):
    """Map key names in settings file to key names in HotKeys."""
    convert = {
        'Equal': '=', 'Escape': 'Esc', 'Delete': 'Del', 'Return': 'Enter',
        'BackSpace': 'Backspace', 'PageUp': 'PgUp', 'PageDown': 'PgDn',
        'space': 'Space', 'Keypad*': 'Num*', 'Keypad+': 'Num+',
        'Keypad-': 'Num-', 'Keypad/': 'Num/',
    }
    return convert.get(inp, inp)
def mk_neg_expr(expr):
    """Return a negated expression, where expr is the expression to be negated."""
    return {"type": "neg", "expr": expr}
def factorial(n):
    """
    input: positive integer 'n'
    returns the factorial of 'n' (n!)
    """
    # precondition
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    ans = 1  # this will be returned.
    for factor in range(1, n+1):
        ans *= factor
    return ans
def delete_between_line_no(md, to_delete):
    """Delete content between the passed pair of line numbers."""
    start, end = to_delete
    if end < start:
        raise ValueError('Starting line must be lower '
                         f'than end line, got: {to_delete}')
    lines = md.splitlines()
    return '\n'.join(lines[:start - 1] + lines[end:])
def basic_str(obj):
    """Handy for writing quick and dirty __str__() implementations."""
    return obj.__class__.__name__ + ': ' + repr(obj)
def cos2sin(x: float) -> float:
    """Return Sin[ArcCos[x]] for -1 <= x <= 1; the angle ArcCos[x] lies in
    [0, pi], where sin is non-negative."""
    return ((x + 1) * (-x + 1)) ** 0.5
def bucket_sort(m):
    """
    bucketSort(arr[], n)
    1) Create n empty buckets (or lists).
    2) Do the following for every array element arr[i]:
       a) Insert arr[i] into bucket[n*array[i]]
    3) Sort individual buckets using insertion sort.
    4) Concatenate all sorted buckets.
    """
    n = 4  # number of buckets to use
    buckets = [[] for _ in range(n)]
    for x in m:
        pos = (x // n) - 1
        # clamp the bucket index to the valid range [0, n-1]
        if pos >= n:
            pos = n - 1
        elif pos < 0:
            pos = 0
        buckets[pos].append(x)
    result = []
    for bucket in buckets:
        result += sorted(bucket)
    return result
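# Quick sanity check (illustrative). Values land in buckets covering
# x < 8, 8-11, 12-15 and >= 16, so concatenating sorted buckets is sorted:
assert bucket_sort([21, 3, 40, 9, 15, 0]) == [0, 3, 9, 15, 21, 40]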
def detect_file_type_stream(stream):
    """Read a file and guess if it's a fasta or gtf file.

    Parameters
    ----------
    stream: file name or file object to examine

    Returns
    -------
    str
        fasta, gtf, or unrecognized
    """
    head = stream.read(1024)
    if head[0] == '>':
        return 'fasta'
    elif '\t' in head:
        return 'gtf'
    else:
        return 'unrecognized'
def find_donor(string_name, donor_list):  # Tested
    """Create donor key in dictionary if it doesn't exist already."""
    if string_name not in donor_list:
        donor_list[string_name] = []
    return donor_list[string_name]
def get_scientific_name_from_row(r):
    """
    r: a dataframe that's really a row in one of our taxonomy tables
    """
    if 'canonicalName' in r and len(r['canonicalName']) > 0:
        scientific_name = r['canonicalName']
    else:
        scientific_name = r['scientificName']
    return scientific_name
def angle_diff(b1, b2):
    """Return the smallest difference (clockwise positive, in (-180, 180])
    from b1 to b2."""
    r = (b2 - b1) % 360
    if r > 180:
        return r - 360
    return r
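# Illustrative checks (degrees; a positive result means clockwise from b1 to b2):
assert angle_diff(350, 10) == 20
assert angle_diff(10, 350) == -20
assert angle_diff(0, 180) == 180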
def _merge_meta(epi_ref, meta_list):
    """Prepare a tuple of EPI reference and metadata."""
    return (epi_ref, meta_list[0])
def get_player_position(ram):
    """Given the ram state, get the position of the player."""
    def _getIndex(address):
        assert type(address) == str and len(address) == 2
        row, col = tuple(address)
        row = int(row, 16) - 8
        col = int(col, 16)
        return row * 16 + col

    def getByte(ram, address):
        # Return the byte at the specified emulator RAM location
        idx = _getIndex(address)
        return ram[idx]

    # return the player position at a particular state
    x = int(getByte(ram, 'aa'))
    y = int(getByte(ram, 'ab'))
    return x, y
def compute_pareto_front(costs: list):
    """
    Find rows of entries in the Pareto front.

    Parameters
    ----------
    costs: list of arrays
        List of arrays of costs.

    Returns
    -------
    front_ids: list of ints
        List of row indices of elements from the pareto front.
    """
    front_ids = []
    n_points = len(costs)
    for ind1 in range(n_points):
        pareto = True
        for ind2 in range(n_points):
            r11, r12 = costs[ind1]
            r21, r22 = costs[ind2]
            if ind1 != ind2:
                if (r21 > r11 and r22 >= r12) or (r21 >= r11 and r22 > r12):
                    pareto = False
                    break
        if pareto:
            front_ids.append(ind1)
    return front_ids
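# Illustrative check: this dominance test keeps points that are not dominated
# by any point with larger entries, i.e. a maximisation-style front:
points = [[1, 5], [2, 2], [3, 1], [2, 4]]
assert compute_pareto_front(points) == [0, 2, 3]   # [2, 2] is dominated by [2, 4]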
def canonical_order(match):
    """
    Before defining a new interaction, we must check to see if an
    interaction between these same 4 atoms has already been created
    (perhaps listed in a different, but equivalent order).
    If we don't check for this, we will create many unnecessary redundant
    interactions (which can slow down the simulation).
    To avoid this, I define a "canonical_order" function which sorts the
    atoms and bonds in a way which is consistent with the symmetry of the
    interaction being generated...  Later the re-ordered list of atom and
    bond ids will be tested against the list of atom/bond ids in the
    matches-found-so-far, before it is added to the list of interactions
    found so far.

    Note that the energy of a dihedral interaction is a function of the
    dihedral-angle.  The dihedral-angle is usually defined as the angle
    between planes formed by atoms 0,1,2 & 1,2,3.  This angle does not
    change when reversing the order of the atoms.  So it does not make
    sense to define a separate dihedral interaction between atoms 0,1,2,3
    AS WELL AS between 3,2,1,0.  So we sort the atoms so that the first
    atom has a lower atomID than the last atom.  (Later we will check to
    see if we have already defined an interaction between these 4 atoms.
    If not then we create a new one.)
    """
    # match[0][0:3] contains the ID numbers of the 4 atoms in the match
    atom0 = match[0][0]
    atom1 = match[0][1]
    atom2 = match[0][2]
    atom3 = match[0][3]
    # match[1][0:2] contains the ID numbers of the 3 bonds
    bond0 = match[1][0]
    bond1 = match[1][1]
    bond2 = match[1][2]
    if atom0 < atom3:
        # return ((atom0, atom1, atom2, atom3), (bond0, bond1, bond2))
        # same as:
        return match
    else:
        return ((atom3, atom2, atom1, atom0), (bond2, bond1, bond0))
def uBR(v):
    """uBR series selector."""
    return "BC" in v["version"]
def key_from_string(key):
    """Return a list of byte values parsed from a hex string."""
    return [int(key[i:i + 2], 16) for i in range(0, len(key), 2)]
def has_errors(build):
    """Checks if there are errors present.

    Args:
        build: the whole build object

    Returns:
        True if it has errors, else False
    """
    return "errors" in build and len(build["errors"]) > 0
def chunks(l, n, o):
    """Return successive n-sized chunks with o-sized overlap from l."""
    return [l[i: i + n] for i in range(0, len(l), n-o)]
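# Illustrative use: the window advances by n - o, so consecutive chunks
# share o elements:
assert chunks([1, 2, 3, 4, 5], n=3, o=1) == [[1, 2, 3], [3, 4, 5], [5]]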
def boo(n1, n2, n3):
    """boo(n1, n2, n3) - returns the middle value when n1, n2, and n3 are
    sorted from lowest to highest"""
    values = [n1, n2, n3]
    values.sort()
    return values[1]
def fix_arg_name(name):
    """
    Append an underscore to arg names that collide with Python reserved
    words or common builtins.

    :param name:
    :return:
    """
    if name in ['pass', 'break', 'continue', 'except', 'try', 'for',
                'while', 'do', 'def', 'class', 'in', 'isinstance',
                'tuple', 'list', 'set', 'None']:
        return '%s_' % name
    return name
def find_text_idx(sentence):
    """Return the index of the # text line or -1."""
    for idx, line in enumerate(sentence):
        if line.startswith("# text"):
            return idx
    return -1
def get_wc1(conds):
    """Extract the where-column (wc) from each condition in
    [ [wc, wo, wv], [wc, wo, wv], ... ].
    """
    wc1 = []
    for cond in conds:
        wc1.append(cond[0])
    return wc1
def arch_handler(value, **kwargs):
    """Return a Package URL qualifier for the arch."""
    return {'qualifiers': 'arch={}'.format(value)}
def dimension(dim: float, tol: int = 0, step: float = 0.4) -> float:
    """
    Given a dimension, round down to the next multiple of `step`.
    An additional parameter `tol` can be specified to add `tol` extra
    steps as a tolerance to accommodate for shrinking.
    """
    # Add a small value to reduce the risk of the remainder being zero.
    dim += 1e-10
    return (dim // step) * step + tol * step
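# Illustrative values with the default step of 0.4 (floating-point safe
# comparisons):
assert abs(dimension(1.0) - 0.8) < 1e-6          # rounds down to a multiple of 0.4
assert abs(dimension(1.0, tol=2) - 1.6) < 1e-6   # plus two tolerance steps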
def validate_maximum_water_depth(cls, value):
    """Return 0.0 for waterdepth in case of invalid values.

    Invalid values are:
    - everything lower than 0.01m (including -9999, -inf)
    - everything above 50m as it's unrealistic (including inf)
    """
    # if the value is a string like "100.0" we won't catch it
    if isinstance(value, (float, int)):
        if value < 0.01:
            value = 0.0
        elif value > 50.0:
            value = 0.0
    return value
def remove_all(lst, item):
    """Remove all occurrences of *item* in *lst*."""
    while item in lst:
        lst.remove(item)
    return lst
def simpson_index(H, P):
    """
    # ========================================================================
    SIMPSON INDEX

    PURPOSE
    -------
    Calculates the Simpson index.

    INPUT
    -----
    [INT] [H]
        The number of haplotypes.

    [FLOAT] [P]
        A list of (relative) frequencies.

    RETURN
    ------
    [FLOAT]
        The Simpson index.
    # ========================================================================
    """
    index = 0
    for i in range(0, H):
        index += float(P[i]) * float(P[i])
    return index
def wrap(char, wrapper):
    """Wrap a sequence in a custom string."""
    return wrapper.format(char=char)
def validate_values(config, values):
    """
    Return True if all values are given and have the desired type.

    Parameters
    ----------
    config : dict
        configuration dictionary
    values : list
        list of (str, type) tuples of values and value types expected in config

    Returns
    -------
    True if config is valid.

    Raises
    ------
    Exception if value is not found or has the wrong type.
    """
    if not isinstance(config, dict):
        raise TypeError("config must be a dictionary")
    for value, vtype in values:
        if value not in config:
            raise ValueError("%s not given" % value)
        if not isinstance(config[value], vtype):
            if config[value] is None:
                raise ValueError("%s not given" % value)
            raise TypeError("%s must be %s, not %s" % (value, vtype, config[value]))
    return True
def add(a, b=0):
    """Returns the sum of a and b."""
    print(a + b)
    return a + b
def getID_FromLongestTerm(text, lookupDict):
    """
    Given a span of text, this method will tokenize and then identify the
    set of entities that exist in the text. It will prioritise long terms
    first in order to reduce ambiguity.

    Example: For the text "the cheese pizza is good" and the lookup dict of
    {("cheese","pizza"):1, ("pizza",):2}, the algorithm will identify
    "cheese pizza" as an entity with ID 1. Those tokens will then be
    removed, and so "pizza" will not be identified.

    :param text: The text span to be searched. It will be tokenized using
        the very naive method of splitting by whitespace.
    :param lookupDict: Dictionary that maps tokenized terms to a particular ID
    :type text: str
    :type lookupDict: dict with tuple keys
    :returns: IDs of all entities identified in text
    :rtype: set
    """
    terms = set()

    # Lowercase and do very naive tokenization of the text
    np = text.lower().split()

    # The length of each search string will decrease from the full length
    # of the text down to 1
    for l in reversed(range(1, len(np)+1)):
        # We move the search window through the text
        for i in range(len(np)-l+1):
            # Extract that window of text
            s = tuple(np[i:i+l])

            # Search for it in the dictionary
            if s in lookupDict:
                sTxt = " ".join(np[i:i+l])
                # If found, save the ID(s) in the result set
                for wordlistid, tid in lookupDict[s]:
                    terms.add((wordlistid, sTxt))

                # And blank it out
                np[i:i+l] = ["" for _ in range(l)]

    return terms
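# Usage sketch; note that each lookup value must be an iterable of
# (wordlistid, termid) pairs to match the inner loop above (assumed format):
lookup = {("cheese", "pizza"): [(0, 1)], ("pizza",): [(0, 2)]}
assert getID_FromLongestTerm("the cheese pizza is good", lookup) == {(0, "cheese pizza")}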
def merge_list(lst, *to_merged_list):
    """
    Merge multiple lists into the first list.

    :param lst:
    :param to_merged_list:
    :return:
    """
    for item in to_merged_list:
        lst.extend(item)
    return lst
def has_valid_type(l, t):
    """Check if every element in a two-dimensional list has the same type."""
    for sub in l:
        for e in sub:
            if type(e) != t:
                return False
    return True
def get_options(validators):
    """Given a validator chain, if one has .options, return them."""
    options = None
    if validators:
        if not isinstance(validators, (list, tuple)):
            validators = [validators]
        for item in validators:
            if hasattr(item, "options"):
                options = item.options
                break
    if callable(options):
        options = options()
    return options
def _flatten_args(xs):
    """Flatten the input into a tuple of variables.

    In the typical case, `xs` is a tuple or list of objects where each
    object is either a variable, list, or tuple. In the case where it is
    a list or tuple, the objects in the list or tuple could also be either
    a variable, list or tuple. Although the non-list and non-tuple items
    are typically an instance of variable, any object other than list or
    tuple is allowed.

    This function simply flattens the hierarchical lists/tuples so that all
    objects that are deeply contained in `xs` that are non-list and
    non-tuple will be returned in a single tuple.

    Args:
        xs:

    Returns:
        The flattened tuple, along with the indices and count so that the
        items can be unflattened later (i.e., by calling
        `_unflatten_args()`).

    fixme: does not work if xs is a variable only.
    """
    inds = []
    ys = []
    i = 0
    if not isinstance(xs, (list, tuple)):
        inds.append(('s',))
        return (xs,), inds, 0
    for x in xs:
        if isinstance(x, (list, tuple)):
            x, sub_inds, total = _flatten_args(x)
            inds.append(('i', i, i+total, sub_inds))
            i += total
        else:
            x = [x]
            inds.append(('f', i))
            i += 1
        ys.extend([y for y in x])
    return tuple(ys), inds, i
def get_closest_readlength(estimated_readlength):
    # type: (int) -> int
    """Find the predefined readlength closest to the estimated readlength.
    In the case of a tie, choose the shortest readlength."""
    readlengths = [36, 50, 75, 100]
    differences = [abs(r - estimated_readlength) for r in readlengths]
    min_difference = min(differences)
    index_of_min_difference = [i for i, d in enumerate(differences)
                               if d == min_difference][0]
    return readlengths[index_of_min_difference]
def _generate_anchor_configs(min_level, max_level, num_scales, aspect_ratios):
    """Generates mapping from output level to a list of anchor configurations.

    A configuration is a tuple of (num_anchors, scale, aspect_ratio).

    Args:
        min_level: integer number of minimum level of the output feature pyramid.
        max_level: integer number of maximum level of the output feature pyramid.
        num_scales: integer number representing intermediate scales added on
            each level. For instance, num_scales=2 adds two additional anchor
            scales [2^0, 2^0.5] on each level.
        aspect_ratios: list of tuples representing the aspect ratio anchors
            added on each level. For instance, aspect_ratios =
            [(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.

    Returns:
        anchor_configs: a dictionary with keys as the levels of anchors and
            values as a list of anchor configurations.
    """
    anchor_configs = {}
    for level in range(min_level, max_level + 1):
        anchor_configs[level] = []
        for scale_octave in range(num_scales):
            for aspect in aspect_ratios:
                anchor_configs[level].append(
                    (2 ** level, scale_octave / float(num_scales), aspect))
    return anchor_configs
def convert_int_out(value: bytes) -> int:
    """Make sure Integer values are actually of type int."""
    return int(float(value))
def gen_combos(challbs):
    """Generate natural combinations for challbs."""
    # completing a single DV challenge satisfies the CA
    return tuple((i,) for i, _ in enumerate(challbs))
def validate_status(value):
    """Validates a status value.

    :param value: A candidate value for status
    :type value: string
    :return: True if value is among the status choices, otherwise False
    :rtype: bool
    """
    return value in ('new', 'started', 'paused', 'won', 'lost')
def template_interpolation(source, template_fill):
    """
    :param source:
    :param template_fill: tuple of tuple pairs, eg (("a", 1), ("b", 2)).
        Hashability needed for context_dependent_memoize dictates the type.
    """
    return source % dict(template_fill)
def _build_common_goservice_conf(instance_name, instance_path):
    """Helper: return common settings for golang CS/PS/sciond."""
    return {
        'general': {
            'ID': instance_name,
            'ConfigDir': instance_path,
            'ReconnectToDispatcher': True,  # XXX(matzf): for CS, topology/go.py this is False. Why?
        },
        'logging': {
            'file': {
                'Path': 'logs/%s.log' % instance_name,
                'Level': 'debug',
            },
            'console': {
                'Level': 'crit',
            },
        },
        'TrustDB': {
            'Backend': 'sqlite',
            'Connection': 'gen-cache/%s.trust.db' % instance_name,
        }
    }
def pig_step_properties(script, arguments=None, input=None, output=None):
    """
    Create pig step properties.

    :param script: the script path of step
    :type script: string

    :param arguments: arguments for the step
    :type arguments: string

    :param input: the input path of step
    :type input: string

    :param output: the output path of step
    :type output: string

    :return:
    :rtype: dict
    """
    pig_step = {
        'script': script
    }
    if arguments is not None:
        pig_step['arguments'] = arguments
    if input is not None:
        pig_step['input'] = input
    if output is not None:
        pig_step['output'] = output
    return pig_step
def format_file_size(value):
    """Convert a file size in bytes into a human-readable format."""
    for unit in ['B', 'KB', 'MB', 'GB']:
        if abs(value) < 1024.0:
            return f'{value:.2f}{unit}'
        value /= 1024.0
    return f'{value:.2f}TB'
def make_unique(value, existing, suffix="_%s", counter=2, case=False):
    """Returns a unique string, appending suffix % counter as necessary.

    @param   existing  collection of existing strings to check
    @param   case      whether uniqueness should be case-sensitive
    """
    result, is_present = value, (lambda: result in existing)
    if not case:
        existing = [x.lower() for x in existing]
        is_present = lambda: result.lower() in existing
    while is_present():
        result, counter = value + suffix % counter, counter + 1
    return result
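# Illustrative use:
assert make_unique("log", ["log", "log_2"]) == "log_3"
assert make_unique("Log", ["log"], case=True) == "Log"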
def no_c(my_string):
    """Remove all characters c and C from a string."""
    copy = [x for x in my_string if x != 'c' and x != 'C']
    return "".join(copy)
def extract_diamonds(s):
    """Diamonds extractor: count complete '<>' pairs, allowing nesting."""
    d = 0
    o = 0
    for c in s:
        if c == '<':
            o += 1
        elif (c == '>') and (o > 0):
            d += 1
            o -= 1
    return d
def calculate_over_under(rolled_up):
    """
    >>> calculate_over_under([{'name': 'Ximena', 'paid': 165}, {'name': 'Clara', 'paid': 159}, {'name': 'Cassidy', 'paid': 216}])
    [{'name': 'Ximena', 'paid': 165, 'over_under': 15.0, 'owed': 180.0, 'percent_paid': 0.9166666666666666}, {'name': 'Clara', 'paid': 159, 'over_under': 21.0, 'owed': 180.0, 'percent_paid': 0.8833333333333333}, {'name': 'Cassidy', 'paid': 216, 'over_under': -36.0, 'owed': 180.0, 'percent_paid': 1.2}]
    """
    owed = sum([rup["paid"] for rup in rolled_up]) / len(rolled_up)
    for rup in rolled_up:
        rup["over_under"] = owed - rup["paid"]
        rup["owed"] = owed
        rup["percent_paid"] = rup["paid"] / rup["owed"]
    return rolled_up
def psri(b3, b4, b6):
    """
    Plant Senescence Reflectance Index (Merzlyak et al., 1999).

    .. math:: PSRI = (b4 - b3)/b6

    :param b3: Green.
    :type b3: numpy.ndarray or float

    :param b4: Red.
    :type b4: numpy.ndarray or float

    :param b6: Red-edge 2.
    :type b6: numpy.ndarray or float

    :returns PSRI: Index value

    .. Tip::
        Merzlyak, M.N.; Gitelson, A.A.; Chivkunova, O.B.; Rakitin, V.Y. 1999.
        Non-destructive optical detection of pigment changes during leaf
        senescence and fruit ripening. Physiologia Plantarum 106, 135-141.
        doi:10.1034/j.1399-3054.1999.106119.x.
    """
    PSRI = (b4 - b3)/b6
    return PSRI
def ordered_json_str(ordered_dict):
    """Dump a dict to a minified JSON string with ordered keys."""
    kv_strs = []
    # Keys must be iterated over in alphabetical order in order to have
    # deterministic string dump functionality.
    for key, val in sorted(ordered_dict.items()):
        kv_strs.append(
            '"{k}":{q}{v}{q}'.format(
                k=key,
                v=val,
                # Numeric values don't have quotes around them. Note that this
                # falls apart for JSON objects or arrays, but these keyfile
                # attributes only map to strings and ints, so we can take this
                # shortcut.
                q=("" if isinstance(val, int) else '"'),
            )
        )
    return "{" + ",".join(kv_strs) + "}"
def repoNameToPath(repoName):
    """Extract the repo name from a full path."""
    return repoName.split("/")[-1]
def init_board():
    """Initialize the new empty board of the game."""
    board = []
    for _ in range(3):
        board_in = []
        for _ in range(3):
            board_in.append(".")
        board.append(board_in)
    return board
def convert(atom_snippet):
    """Convert an Atom snippet to an UltiSnips snippet.

    :data: Atom snippet
    :returns: UltiSnips snippet
    """
    snippet = ('snippet %(trigger)s "%(description)s"\n' % {
                   'trigger': (atom_snippet['displayText']).replace(" ", ""),
                   'description': (atom_snippet['description']).replace("\"", "\'")
               }
               + '%s\n' % atom_snippet['snippet']
               + 'endsnippet')
    return snippet
def modifyNames(sheetNames):
    """I change the names of the sheets in the xlsx file to match the
    actual csv ones."""
    full_names = {
        "TotalAnnualMaxCapacityInvestmen": "TotalAnnualMaxCapacityInvestment",
        "TotalAnnualMinCapacityInvestmen": "TotalAnnualMinCapacityInvestment",
        "TotalTechnologyAnnualActivityLo": "TotalTechnologyAnnualActivityLowerLimit",
        "TotalTechnologyAnnualActivityUp": "TotalTechnologyAnnualActivityUpperLimit",
        "TotalTechnologyModelPeriodActLo": "TotalTechnologyModelPeriodActivityLowerLimit",
        "TotalTechnologyModelPeriodActUp": "TotalTechnologyModelPeriodActivityUpperLimit",
    }
    return [full_names.get(name, name) for name in sheetNames]
def dot(v1, v2):
    """The dot product of two vectors."""
    x1, y1, z1 = v1
    x2, y2, z2 = v2
    return x1 * x2 + y1 * y2 + z1 * z2
def convert_output_key(name):
    """
    Convert an output name into an IE-like name.

    :param name: output name to convert
    :return: IE-like output name
    """
    if not isinstance(name, tuple):
        return name
    if len(name) != 2:
        raise Exception('stats name should be a string name or a 2-element tuple '
                        'with a string as the first item and the port number as the second')
    return '{}.{}'.format(*name)
def get_consensusbase(bases, mincov=3):
    """
    :param mincov:
    :type bases: list
    """
    bases = "".join(bases)
    a = bases.count('A')
    t = bases.count('T')
    c = bases.count('C')
    g = bases.count('G')
    n = bases.count("N") + bases.count('-')
    counts = [(a, 'A'), (t, 'T'), (c, 'C'), (g, 'G')]
    s_dic = sorted(counts, key=lambda x: x[0], reverse=True)
    best = s_dic[0]
    if best[0] < mincov:
        return "N"
    return best[1]
def mndbi(b8, b12):
    """
    Modified Normalized Difference Built-up Index
    (Shingare, Hemane, and Dandekar, 2014).

    .. math:: MNDBI = (b12 - b8)/(b12 + b8)

    :param b8: NIR.
    :type b8: numpy.ndarray or float

    :param b12: SWIR 2.
    :type b12: numpy.ndarray or float

    :returns MNDBI: Index value

    .. Tip::
        Shingare, P.P., Hemane, P.M., Dandekar, D.S., 2014. Fusion
        classification of multispectral and panchromatic image using
        improved decision tree algorithm. in: International Conference
        on Signal Propagation and Computer Technology (ICSPCT) 2014,
        pp. 598-603. doi:10.1109/ICSPCT.2014.6884944.
    """
    MNDBI = (b12 - b8)/(b12 + b8)
    return MNDBI
def issn_is_in_data(data, issn: str, equal: bool):
    """Check if the issn param is in any of the regular data ISSNs
    (print, electronic, link).

    If equal is True, compare exactly against the value of the issn param.

    Normally data has this structure:
    {
        ...
        issn: {
            p: "XXXX-YYYY",
            e: "XXXX-YYYY",
            l: "XXXX-YYYY"
        }
        ...
    }

    :param data: data JSON of journals
    :param issn: string to compare
    :param equal: bool
    :rtype: bool
    """
    if issn and 'issn' in data:
        if equal:
            issn_p = issn.lower() == data['issn']['p'].lower() if 'p' in data['issn'] else False
            issn_e = issn.lower() == data['issn']['e'].lower() if 'e' in data['issn'] else False
            issn_l = issn.lower() == data['issn']['l'].lower() if 'l' in data['issn'] else False
        else:
            issn_p = issn.lower() in data['issn']['p'].lower() if 'p' in data['issn'] else False
            issn_e = issn.lower() in data['issn']['e'].lower() if 'e' in data['issn'] else False
            issn_l = issn.lower() in data['issn']['l'].lower() if 'l' in data['issn'] else False
        return issn_p or issn_e or issn_l
    return False
def cigar(individual):
    """Cigar test objective function:
    f(x) = x_0^2 + 1e6 * sum(x_i^2 for i >= 1).
    """
    return individual[0]**2 + 1e6 * sum(gene * gene for gene in individual[1:])
def convert_classes_to_indexes(labels, classes):
    """
    Convert a list of labels representing classes to their corresponding
    indexes.

    More precisely, convert TripletDataset labels to the index of the class
    in the dataset, while keeping the current label for a FolderDataset
    dataset.

    :param labels: list of labels to convert.
    :param classes: list of all the classes in the dataset.
    """
    if all([l in classes for l in labels]):
        labels = [classes.index(label) for label in labels]
    return labels
def min4(x):
    """
    >>> min4(-2)
    -2
    """
    return min(1, 2.0, x, 14)
def GetProblemIndexFromKey(problems, problem_key):
    """Get a problem's index given its key and a problem list.

    Args:
        problems: Iterable of problems in the current contest.
        problem_key: String with the problem key that must be searched.

    Returns:
        The index of the requested problem in the problem list. If the
        problem is not found this method returns None.
    """
    # Look at all the problems and return the position of the first problem
    # whose key matches the looked-up key.
    for i, problem in enumerate(problems):
        if problem['key'] == problem_key:
            return i
    return None
def parse_number(f):
    """Parse numbers like 123.456 or 1,234M."""
    if f[-1] == 'M':
        return int(float(f[:-1].replace(',', '.')) * 1e6)
    return int(f.replace('.', ''))
def mult_polynoms_x8_1_for_sbox(a, b):
    """Multiply two polynoms in F2[X] modulo X^8+1

    NB. This is NOT constant-time and leaks secret values in timing
    differences. DO NOT USE THIS CODE TO IMPLEMENT SECURE APPLICATIONS
    """
    m = 0
    assert 0 <= a < 0x100
    assert 0 <= b < 0x100
    while b:
        if b & 1 == 1:
            m ^= a
        a <<= 1
        if a & 0x100:
            a ^= 0x101
        b >>= 1
    assert 0 <= m < 0x100
    return m
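# Sanity checks (carry-less multiplication modulo X^8 + 1, illustrative):
assert mult_polynoms_x8_1_for_sbox(1, 0x57) == 0x57   # 1 is the identity
assert mult_polynoms_x8_1_for_sbox(2, 0x80) == 0x01   # X * X^7 = X^8 = 1 (mod X^8+1)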
def estimate_bias_randomized_response_bool(prior, p):
    """Estimates the bias of randomized response when the probability of
    returning the true answer is `p`, and the likelihood that each answer
    is given is held in `prior`.

    For example, say you have a prior that your survey question will be
    answered "yes" 90% of the time. You run randomized response with
    p = 0.5. Then on average, the randomized responses will exhibit a bias
    of -0.2. As in, randomized responses will be False 20% more often than
    in the real data.

    :returns the bias of the randomized response
    """
    assert 0 <= prior <= 1
    assert 0 <= p <= 1
    expectation = p * prior + (1 - p) / 2
    return expectation - prior
def is_in_parents_props(prop_name: str, parent_props) -> bool:
    """For every prop in parent, check if that prop is the same as the
    passed prop; if so, stop searching and return found."""
    found = False
    i = 0
    if len(parent_props) <= 0:
        return found
    while not found and i < len(parent_props):
        p = parent_props[i]
        if p.get("value", None) == prop_name or p.get("left", None) == prop_name:
            found = True
        i += 1
    return found
def int_to_roman(input):
    """Convert an integer to a Roman numeral."""
    if not isinstance(input, int):
        raise TypeError("expected integer, got %s" % type(input))
    if input == 0:
        return '0'
    if not 0 < input < 4000:
        raise ValueError(f"Argument must be between 1 and 3999 {input}")
    ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
    nums = ('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
    result = []
    for i in range(len(ints)):
        count = int(input / ints[i])
        result.append(nums[i] * count)
        input -= ints[i] * count
    return ''.join(result)
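# Illustrative checks:
assert int_to_roman(1994) == 'MCMXCIV'
assert int_to_roman(9) == 'IX'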
def simple_hexdump(bytes_, row_size=16):
    """Convert a bytestring into hex characters.

    This is called "simple" because it doesn't print the index in the
    leftmost column or the printable characters in the rightmost column
    (as the CLI ``hexdump -C`` does).

    Args:
        bytes_ (bytes): The bytestring to convert.
        row_size (int): The number of bytes that should go in each row of
            output. If ``row_size`` is ``-1``, then all output will go in
            a single row.

    Returns:
        str: The hexdump of ``bytes_``.
    """
    # NOTE: This utilizes the fact that iterating over a bytestring produces
    #       the corresponding integers for each character.
    if row_size == -1:
        return " ".join(f"{c:02x}" for c in bytes_)
    rows = []
    for i in range(0, len(bytes_), row_size):
        rows.append(" ".join(f"{c:02x}" for c in bytes_[i : i + row_size]))
    return "\n".join(rows)
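# Illustrative use:
print(simple_hexdump(b"hexdump", row_size=4))
# 68 65 78 64
# 75 6d 70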
def get_smallest_prime_factor(n):
    """
    Returns the smallest integer > 1 that is a factor of `n`.
    If `n` is a prime number, `None` is returned.

    Parameters
    ----------
    n : int
        The integer to be factored.

    Returns
    -------
    int or None
        The smallest integer that is a factor of `n`, or None if `n` is
        a prime.

    Examples
    --------
    >>> get_smallest_prime_factor(7)
    >>> get_smallest_prime_factor(8)
    2
    >>> get_smallest_prime_factor(9)
    3
    """
    for i in range(2, n):
        if (n % i) == 0:
            return i
    return None
def is_prefix(pre_path, path):
    """Return True if pre_path is a path-prefix of path."""
    pre_path = pre_path.strip('.')
    path = path.strip('.')
    return not pre_path or path.startswith(pre_path + '.')
def website_addr_for_run(run_id: str) -> str:
    """Return the autumn-data.com URL for a given run_id."""
    app, region, ts, sha = run_id.split('/')
    return f"http://autumn-data.com/app/{app}/region/{region}/run/{ts}-{sha}.html"
def parse_bbox(bbox):
    """Given PDFMiner bbox info as a comma-delimited string, return it as
    a list of floats."""
    return list(map(float, bbox.split(',')))
def optional_column(
    dictionary: dict, key: str, column_width: int = 0, alignment: str = "<"
):
    """Adds a value to a column if the key exists in the dictionary, and
    adds spaces of the appropriate width if not.

    Args:
        dictionary (dict): Dictionary with data inside
        key (str): Key of the data that is to be checked and added if present
        column_width (int): Number of spaces to be returned instead if the
            key is not present
        alignment (str): Specified alignment of column

    Returns:
        entry (str): Either the value of the entry to be put into the table,
            or column_width number of spaces
    """
    if key in dictionary:
        entry_string = str(dictionary[key])
        if column_width > 0:
            entry = f"{entry_string:{alignment}{column_width}}"
        elif column_width == 0:
            entry = entry_string
        else:
            raise ValueError("Column width for optional column must be non-negative")
    else:
        entry = " " * column_width
    return entry
def _extractRGBFromHex(hexCode):
    """
    Extract RGB information from a hexadecimal color code.

    Parameters:
        hexCode (string): a hexadecimal color code

    Returns:
        A tuple containing Red, Green and Blue information
    """
    hexCode = hexCode.lstrip('#')  # Remove the '#' from the string
    # Convert each two-character byte into decimal, store it in a tuple and return
    return tuple(int(hexCode[i:i+2], 16) for i in (0, 2, 4))
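# Illustrative use:
assert _extractRGBFromHex("#ff8000") == (255, 128, 0)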