def _reverse_sort_by_order(name_to_order_node):
    """Sorts map of name_to_order_node nodes in reverse order.

    The output is such that the nodes in name_to_order_node are sorted in
    descending order of the "order" field.

    Args:
      name_to_order_node: Map from name to {order, node}. Output of
        graph_compute_order.get_compute_order().

    Returns:
      sorted_name_to_order_node: Sorted version of the input, in descending order.
    """
    return sorted(name_to_order_node.items(), key=lambda x: -x[1].order)
def get_shrinked_checkpoint_content(content, lines_to_keep) -> str:
    """Shrink checkpoint content to its last ``lines_to_keep`` lines, keeping the header line."""
    i = 0
    original_lines = content.splitlines()
    newlines = []
    for line in reversed(original_lines):
        if i < lines_to_keep:
            newlines.append(line)
            i += 1
        else:
            break  # reached max lines to keep
    if len(newlines) < len(original_lines):
        newlines.append(original_lines[0])  # Add header line
    new_content = "\n".join(reversed(newlines))
    return new_content
def list_first_second_neigh(node, dict_neighbours):
    """
    Find the set of first and second neighbours of a node.

    :param node: A node in the graph
    :param dict_neighbours: dict mapping each node to the set of its neighbours
    :return: Set of first and second neighbours
    """
    neigh = dict_neighbours[node].copy()
    for n in list(neigh):
        set1 = dict_neighbours[n].copy()
        neigh = neigh.union(set1)
    return neigh
def to_precision(x, p):
    """
    Returns a string representation of x formatted with a precision of p

    Based on the webkit javascript implementation taken from here:
    https://code.google.com/p/webkit-mirror/source/browse/JavaScriptCore/kjs/number_object.cpp
    Implemented in https://github.com/randlet/to-precision
    """
    import math

    x = float(x)

    if x == 0.:
        return "0." + "0" * (p - 1)

    out = []
    if x < 0:
        out.append("-")
        x = -x

    e = int(math.log10(x))
    tens = math.pow(10, e - p + 1)
    n = math.floor(x / tens)

    if n < math.pow(10, p - 1):
        e = e - 1
        tens = math.pow(10, e - p + 1)
        n = math.floor(x / tens)

    if abs((n + 1.) * tens - x) <= abs(n * tens - x):
        n = n + 1

    if n >= math.pow(10, p):
        n = n / 10.
        e = e + 1

    m = "%.*g" % (p, n)

    if e < -2 or e >= p:
        out.append(m[0])
        if p > 1:
            out.append(".")
            out.extend(m[1:p])
        out.append('e')
        if e > 0:
            out.append("+")
        out.append(str(e))
    elif e == (p - 1):
        out.append(m)
    elif e >= 0:
        out.append(m[:e + 1])
        if e + 1 < len(m):
            out.append(".")
            out.extend(m[e + 1:])
    else:
        out.append("0.")
        out.extend(["0"] * -(e + 1))
        out.append(m)

    return "".join(out)
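# A hedged usage sketch for to_precision; the exact strings below assume the
# WebKit-style rounding implemented above and are illustrative, not from the
# original source.
# to_precision(3.14159, 3)   -> '3.14'
# to_precision(0.000123, 2)  -> '1.2e-4'
# to_precision(0, 4)         -> '0.000'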
def drop_lengthy_samples(words, prons, enc_maxlen, dec_maxlen):
    """We only include such samples less than maxlen."""
    _words, _prons = [], []
    for w, p in zip(words, prons):
        if len(w.split()) + 1 > enc_maxlen:
            continue
        if len(p.split()) + 1 > dec_maxlen:
            continue  # 1: <EOS>
        _words.append(w)
        _prons.append(p)
    return _words, _prons
def powerset_table(alphabet):
    """
    As most FSA tools don't support symbols larger than one character,
    we have to use a one-symbol representation.
    Eg.: {a,b} is 0, {a,c} is 1, etc.
    This function generates a list. l[0] = 'ab', l[1] = 'ac', ...
    """
    # Sort for a deterministic table, as the docstring example implies.
    alphabet = sorted(set(alphabet))
    table = []
    for i in range(len(alphabet)):
        for j in range(i + 1, len(alphabet)):
            table.append(alphabet[i] + alphabet[j])
    return table
def _best_rect_ratio_match(bricks, size, margin):
    """
    Sort and filter a list of bricks based on target ratio and margin.

    'margin' determines how easily a brick passes filtering.
    Results are sorted from best matching to worst before returning the list.
    """
    def _ratio(_size):
        if _size[1] == 0:
            return 0
        else:
            return _size[0] / _size[1]

    target_ratio = _ratio(size)
    bricks = filter(lambda x: abs(_ratio(x['dimensions']) - target_ratio) < target_ratio * margin,
                    bricks)
    return sorted(bricks, key=lambda x: abs(_ratio(x['dimensions']) - target_ratio))
def obj_in_list(target_list, obj):
    """
    >>> l = [1,2,3,4]
    >>> obj_in_list(l, 2)
    True
    >>> obj_in_list(l, 5)
    False
    """
    return obj in target_list
def lower(word):
    """Sets all characters in a word to their lowercase value"""
    return word.lower()
def render_bulk_string_decode(value, completers=None):
    """Only for server group commands; the value is displayed without double
    quotes, decoded as UTF-8.
    """
    decoded = value.decode()
    splitted = "\n".join(decoded.splitlines())
    return splitted
def get_warp(transforms):  # grab the warp from the forward transforms
    """Return the transform1Warp.nii.gz file"""
    for name in transforms:
        if 'transform1Warp.nii.gz' in name:
            return name
    raise ValueError('transform1Warp.nii.gz not found')
def is_palindrome(n):
    """Tests if a number is a palindrome

    Args:
        n (Integer): Integer to test for being a palindrome

    Returns:
        Boolean: True if n is a palindrome, False otherwise
    """
    # Turn the integer into a list of characters
    chars = list(str(n))
    length = len(chars)
    # For the first half of characters and check for palindrome quality
    # Important: I know that I said half, and normally would have done that,
    # except that the division operation AND bit shifting
    # is more expensive than simply checking each character (for small n)
    for i in range(length):
        if chars[i] != chars[length - i - 1]:
            return False
    return True
def get_unique_fields(fld_lists):
    """Get unique namedtuple fields, despite potential duplicates in lists of fields."""
    flds = []
    fld_set = set([f for flst in fld_lists for f in flst])
    fld_seen = set()
    # Add unique fields to list of fields in order that they appear
    for fld_list in fld_lists:
        for fld in fld_list:
            # Add fields if the field has not yet been seen
            if fld not in fld_seen:
                flds.append(fld)
                fld_seen.add(fld)
    assert len(flds) == len(fld_set)
    return flds
def rgba_to_hex(rgba):
    """Convert an RGBA tuple into an 8-digit hexadecimal RGBA number.

    Args:
        rgba (Tuple[ :obj:`int` ]): Tuple to convert to hex.

    Returns:
        :obj:`int`: RGBA hex.
    """
    if not all(isinstance(n, int) and 0 <= n < 256 for n in rgba) or len(rgba) != 4:
        raise ValueError(f"{rgba!r} is not an RGBA tuple.")
    r, g, b, a = rgba
    return r << 24 | g << 16 | b << 8 | a
def severity_string_to_int(severity):
    """
    Converts a severity string to DBot score representation

    :type severity: ``str``
    :param severity: String representation of a severity

    :return: DBot score representation of the severity
    :rtype ``int``
    """
    if severity in ('Critical', 'High'):
        return 3
    elif severity in ('Medium', 'Low'):
        return 2
    return 0
def is_even(number):
    """
    Check whether a number is even.
    Returns: True if even, False otherwise
    """
    return (number % 2 == 0)
def _date_range(data):
    """Return the first and last dates in data"""
    try:
        first = data[0]["date"]
        last = data[0]["date"]
    except KeyError:
        # /recent has no dates
        return None, None
    for row in data:
        date = row["date"]
        if date < first:
            first = date
        elif date > last:
            last = date
    return first, last
def _extract_language(locale_string):
    """
    Extracts language from locale string.

    :param locale_string: Something like language_COUNTRY.encoding
    :return: language
    """
    return locale_string.split("_")[0].lower()
def classic_factorial(value: int) -> int:
    """We always check base cases first"""
    if value < 2:
        return 1
    else:
        # Recursive case(s) follow
        p1 = value - 1
        r1 = classic_factorial(p1)
        return value * r1
def str_constraint(constraint, check_value, tolerance=0.1):
    """Validates a numeric constraint described by a string.

    The string can specify fixed value constraints such as "0.0"
    or range constraints such as "<3.0" or ">=10.0"
    """
    check_greater_eq = ">=" in constraint
    check_less_eq = "<=" in constraint
    check_greater = ">" in constraint and not check_greater_eq
    check_less = "<" in constraint and not check_less_eq

    value = float(constraint.strip(">").strip("<").strip("="))

    if check_greater:
        if check_value > value:
            return True
    elif check_less:
        if check_value < value:
            return True
    elif check_greater_eq:
        if check_value >= value:
            return True
    elif check_less_eq:
        if check_value <= value:
            return True
    else:
        if abs(check_value - value) < tolerance:
            return True
    return False
def merge_dicts(*dict_args):
    """Given any number of dicts, shallow copy and merge into a new dict,
    precedence goes to key value pairs in latter dicts.

    :param dict_args: List of dictionaries
    :return: Single dictionary created after merging all
    """
    result = {}
    for dictionary in dict_args:
        result.update(dictionary)
    return result
def sort_results_by_unique(results):
    """Sort validation errors by number of unique occurrences."""
    return sorted(results, reverse=True, key=lambda r: r['unique'])
def GetFirst(parameters, key, default=None):
    """Returns the first value of the given key.

    Args:
      parameters: A dictionary of lists, {key: [value1, value2]}
      key: name of parameter to retrieve
      default: value to return if the key isn't found

    Returns:
      The first value in the list, or default.
    """
    if key in parameters:
        if parameters[key]:
            return parameters[key][0]
    return default
def is_printable(char: str) -> bool:
    """Determine whether a character is printable for our purposes.

    We mainly use Python's definition of printable (i.e. everything that
    Unicode does not consider a separator or "other" character). However,
    we also treat U+F8FF as printable, which is the private use codepoint
    used for the Apple logo character.
    """
    return char.isprintable() or char == "\uf8ff"
def human_time(seconds):
    """
    Returns a human-friendly time string that is always exactly 6
    characters long.

    Depending on the number of seconds given, can be one of::

        1w 3d
        2d 4h
        1h 5m
        1m 4s
          15s

    Will be in color if console coloring is turned on.

    Parameters
    ----------
    seconds : int
        The number of seconds to represent

    Returns
    -------
    time : str
        A human-friendly representation of the given number of seconds
        that is always exactly 6 characters.
    """
    units = [
        ('y', 60 * 60 * 24 * 7 * 52),
        ('w', 60 * 60 * 24 * 7),
        ('d', 60 * 60 * 24),
        ('h', 60 * 60),
        ('m', 60),
        ('s', 1),
    ]

    seconds = int(seconds)

    if seconds < 60:
        return '   {0:2d}s'.format(seconds)

    for i in range(len(units) - 1):
        unit1, limit1 = units[i]
        unit2, limit2 = units[i + 1]
        if seconds >= limit1:
            return '{0:2d}{1}{2:2d}{3}'.format(
                seconds // limit1, unit1,
                (seconds % limit1) // limit2, unit2)
    return '  ~inf'
def site_tag_line(request, registry, settings):
    """Expose the site tag line from the ``tm.site_tag_line`` config variable to templates.

    This is used on the default front page to catch the attention of the audience.
    """
    return settings["tm.site_tag_line"]
def _TransformCluster(resource):
    """Get Cluster ID from backup name."""
    # backup name is in the format of:
    # projects/{}/instances/{}/clusters/{}/backups/{}
    backup_name = resource.get('name')
    results = backup_name.split('/')
    cluster_name = results[-3]
    return cluster_name
def combine_paths(paths, as_list=True):
    """combines path strings into a single string"""
    combined = ""
    first = True
    for path in paths:
        if not first:
            combined += " "
        combined += path
        first = False
    if as_list:
        return [combined]
    else:
        return combined
def arithmetic_mean(vals, freqs=None):
    """Returns arithmetic mean of vals."""
    if freqs is None:
        return sum(vals) / float(len(vals))
    else:
        return sum([v * i for v, i in zip(vals, freqs)]) / sum(freqs)
def int_to_32bytes(i: int) -> bytes:
    # used because int can't fit as bytes function's input
    """
    :param i: integer to encode
    :return: big-endian 32-byte representation of ``i``
    """
    o = [0] * 32
    for x in range(32):
        o[31 - x] = i & 0xFF
        i >>= 8
    return bytes(o)
def wordlists(*wl):
    """
    Input is an arbitrary number of lists of strings.
    Output is one dictionary where each string is a key and
    the count of those strings is the value.
    """
    word_dict = {}
    for i in wl:
        for x in i:
            if x in word_dict:
                word_dict[x] += 1
            else:
                word_dict[x] = 1
    return word_dict
def is_shot(event):
    """
    Verify whether or not the event is a shot. Sometimes, a play action can
    continue after a shot if the team gains again the ball. We account for
    this case by looking at the next events of the game.

    Parameters
    ----------
    event: dict
        a dictionary describing the event

    Returns
    -------
    True if the event is a shot
    False otherwise
    """
    event_id = event['eventName']
    return event_id == 10
def add_readgroups_to_header(header, readgroups):
    """Given a BAM header and a list of read groups, merge the read groups
    into the input BAM header and return the merged BAM header.

    ...doctest:
        >>> header = {'HD': {'SO': 'coordinate'}, 'SQ': [{'SN':'chr1', 'LN':100}, {'SN':'chr2', 'LN':200}]}
        >>> new_header = add_readgroups_to_header(header, [{'ID': 'rg1', 'SM': 'sample1', 'PU': 'movie1'}])
        >>> rg = new_header['RG'][0]
        >>> rg['ID'], rg['SM'], rg['PU']
        ('rg1', 'sample1', 'movie1')
        >>> new_header['HD'], new_header['SQ'][0], new_header['SQ'][1]
        ({'SO': 'coordinate'}, {'LN': 100, 'SN': 'chr1'}, {'LN': 200, 'SN': 'chr2'})
    """
    if 'RG' not in header:
        header['RG'] = []
    header['RG'].extend(readgroups)
    return header
def find_place_num(n, m):
    """Recurrence f(n, m) = f(n-1, m) + f(n, m-1) with f(n, 1) = f(1, m) = 1,
    i.e. the number of monotone lattice paths to cell (n, m) of a grid."""
    if n == 1 or m == 1:
        return 1
    else:
        return find_place_num(n - 1, m) + find_place_num(n, m - 1)
def is_riff(file_bytes):
    """Whether or not the given bytes represent a RIFF file.

    :param bytes file_bytes: The bytes of the file to check.

    :rtype: bool
    :return: ``True`` if the bytes represent a RIFF file, ``False`` else.
    """
    return file_bytes.startswith(b"RIFF")
def indent(text: bytes, prefix: bytes):
    """Add 'prefix' to the beginning of all non-empty lines in 'text'

    Like textwrap.indent, but for bytes.
    """
    return b"".join(
        (prefix + line if not line.isspace() else line)
        for line in text.splitlines(True)
    )
def escape(str):
    """Escape a string in a manner suitable for XML/Pango."""
    return str.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
def oxford_comma(items, conjunction='and'):
    """Given a list of items, properly comma and 'and' or 'or' them together

    Expects `items` to be a list of strings
    """
    result = ''
    if len(items) == 0:
        result = ''
    elif len(items) == 1:
        result = items[0]
    elif len(items) == 2:
        result = (' %s ' % conjunction).join(items)
    elif len(items) > 2:
        result = (', %s ' % conjunction).join([', '.join(items[:-1]), items[-1]])
    else:
        raise Exception('oxford_comma: Illegal arguments')
    return result
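# Hedged usage sketch for oxford_comma; the outputs below follow directly from
# the branches above and are illustrative only.
# oxford_comma([])                                 -> ''
# oxford_comma(['tea'])                            -> 'tea'
# oxford_comma(['tea', 'coffee'])                  -> 'tea and coffee'
# oxford_comma(['tea', 'coffee', 'juice'], 'or')   -> 'tea, coffee, or juice'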
def removeElements(head, val):
    """
    :type head: ListNode
    :type val: int
    :rtype: ListNode
    """
    current = head
    previous = None
    while current is not None:
        if current.val == val:
            if previous is None:
                head = current.next
            else:
                previous.next = current.next
            current = current.next
        else:
            previous = current
            current = current.next
    return head
def sub_domains(l):
    """check the subdomains"""
    l = str(l)
    if l.count('.') <= 3:
        return 0
    else:
        return 1
def _nbf(n, sigma):
    """
    Internal helper function.

    Calculates the number of basis functions **nbf** from the half-image
    width **n** and the basis width parameter **sigma**.
    """
    return int(round(n / sigma))
def escape(s):
    """Escape Discord formatting."""
    # Backslashes are escaped so the literals are valid escape sequences.
    return (
        s.replace("_", "\\_")
        .replace("~", "\\~")
        .replace("*", "\\*")
        .replace("`", "\\`")
    )
def update_config(default, variant):
    """Performs deep update on all dict structures from ``variant``, updating only
    individual fields. Any field in ``variant`` must be present in ``default``,
    else raises ``KeyError`` (helps prevent mistakes). Operates recursively to
    return a new dictionary."""
    new = default.copy()
    for k, v in variant.items():
        if k not in new:
            raise KeyError(f"Variant key {k} not found in default config.")
        if isinstance(v, dict) != isinstance(new[k], dict):
            raise TypeError(f"Variant dict structure at key {k} mismatched with"
                            " default.")
        new[k] = update_config(new[k], v) if isinstance(v, dict) else v
    return new
def get_ptx_surface_access(geom_ptx):
    """
    Operand b is a scalar or singleton tuple for 1d surfaces; is a two-element
    vector for 2d surfaces; and is a four-element vector for 3d surfaces, where
    the fourth element is ignored. Coordinate elements are of type .s32.

    For 1d surface arrays, operand b has type .v2.b32. The first element is
    interpreted as an unsigned integer index (.u32) into the surface array, and
    the second element is interpreted as a 1d surface coordinate of type .s32.

    For 2d surface arrays, operand b has type .v4.b32. The first element is
    interpreted as an unsigned integer index (.u32) into the surface array, and
    the next two elements are interpreted as 2d surface coordinates of type
    .s32. The fourth element is ignored.
    """
    access_reg = {
        "1d":  "{%r{{[0-9]}}}",
        "2d":  "{%r{{[0-9]}}, %r{{[0-9]}}}",
        "3d":  "{%r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}}",
        "a1d": "{%r{{[0-9]}}, %r{{[0-9]}}}",
        "a2d": "{%r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}, %r{{[0-9]}}}",
    }
    return access_reg[geom_ptx]
def vsdi(b2, b4, b11):
    """
    Visible and Shortwave Infrared Drought Index (Zhang et al., 2013).

    .. math:: VSDI = 1 - ((b11 - b2) + (b4 - b2))

    :param b2: Blue.
    :type b2: numpy.ndarray or float
    :param b4: Red.
    :type b4: numpy.ndarray or float
    :param b11: SWIR 1.
    :type b11: numpy.ndarray or float

    :returns VSDI: Index value

    .. Tip::
        Zhang, N., Hong, Y., Qin, Q., Liu, L. 2013. VSDI: a visible and
        shortwave infrared drought index for monitoring soil and vegetation
        moisture based on optical remote sensing. International Journal of
        Remote Sensing 34(13), 4585-4609. doi:10.1080/01431161.2013.779046.
    """
    VSDI = 1 - ((b11 - b2) + (b4 - b2))
    return VSDI
def slicify(slc, dim):
    """
    Force a slice to have defined start, stop, and step from a known dim.

    Start and stop will always be positive. Step may be negative.

    There is one exception: when the step is negative and the stop would
    overflow past the lower bound, stop defaults to -1. This is the only case
    of a negative start/stop value.

    Parameters
    ----------
    slc : slice or int
        The slice to modify, or int to convert to a slice

    dim : int
        Bound for slice
    """
    if isinstance(slc, slice):

        # default limits
        start = 0 if slc.start is None else slc.start
        stop = dim if slc.stop is None else slc.stop
        step = 1 if slc.step is None else slc.step

        # account for negative indices
        if start < 0:
            start += dim
        if stop < 0:
            stop += dim

        # account for over-flowing the bounds
        if step > 0:
            if start < 0:
                start = 0
            if stop > dim:
                stop = dim
        else:
            if stop < 0:
                stop = -1
            if start > dim:
                start = dim - 1

        return slice(start, stop, step)

    elif isinstance(slc, int):
        if slc < 0:
            slc += dim
        return slice(slc, slc + 1, 1)

    else:
        raise ValueError("Type for slice %s not recognized" % type(slc))
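# Hedged usage sketch for slicify; the results follow from the normalization
# above and are illustrative only.
# slicify(slice(None, None, None), 5)  -> slice(0, 5, 1)
# slicify(slice(-3, None), 10)         -> slice(7, 10, 1)
# slicify(2, 10)                       -> slice(2, 3, 1)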
def parse_s3_url(url):
    """Break a url into bucket and key"""
    from urllib.parse import urlparse
    up = urlparse(url)
    bucket = up.netloc
    key = up.path.lstrip('/')
    return bucket, key
def hamming_distance(lhs, rhs):
    """Returns the Hamming Distance of Two Equal Sequence/Strings"""
    return len([(x, y) for x, y in zip(lhs, rhs) if x != y])
def is_upper(val):
    """Checks all upper case in the string"""
    return val.upper() == val
def get_thumbnail_path(book_id: int, gpx_name: str) -> str:
    """
    Returns the path to the thumbnail file.

    Example: 'map/static_map/42/super_track.jpg'
    """
    return "map/static_map/" + "/".join([str(book_id), gpx_name]) + ".jpg"
def lamost_default_dr(dr=None):
    """
    Check if dr argument is provided, if none then use default

    :param dr: data release
    :type dr: Union(int, NoneType)
    :return: data release
    :rtype: int
    :History: 2018-May-13 - Written - Henry Leung (University of Toronto)
    """
    # enforce dr5 restriction
    if dr is None:
        dr = 5
        print(f'dr is not provided, using default dr={dr}')
    elif dr == 5:
        pass
    else:
        raise ValueError('Only LAMOST DR5 is supported')

    return dr
def get_route_and_nexthops_from_output(output, route, route_type):
    """
    Library function to get the show dump for a route in the command
    "show ip route/show ipv6 route/show rib".

    :param output : Output of either of the show commands
                    "show ip route/show ipv6 route/show rib"
    :type output : string
    :param route : Route which is of the format "Prefix/Masklen"
    :type route : string
    :param route_type : Route type which can be "static/BGP"
    :type route_type : string

    :return: string
    """
    found_route = False
    found_nexthop = False
    # Split the output in lines
    lines = output.split('\n')
    # Output buffer for storing the route and its next-hops
    route_output = ''
    # Walk through all the lines for the output of
    # "show ip route/show ipv6 route/show rib"
    for line in lines:
        # If the route ("prefix/mask-length") is not found in the output
        # then try to find the route in the output. Otherwise the route
        # was already found and now try to check whether the next-hop
        # is of type 'route_type'
        if not found_route:
            # If the route ("prefix/mask-length") is found in the line
            # then set 'found_route' to 'True' and add the line to the
            # output buffer
            if route in line:
                found_route = True
                route_output = route_output + line + '\n'
        else:
            # If the route_type occurs in the next-hop line,
            # then add the next-hop line into the output buffer.
            if 'via' in line and route_type in line:
                route_output = route_output + line + '\n'
                found_nexthop = True
            else:
                # If the next-hop is not of type 'route_type',
                # then reset 'found_route' to 'False'
                if not found_nexthop:
                    found_route = False
                    route_output = ''
                else:
                    break
    # Return the output buffer to caller
    return route_output
def is_python_file(fpath):  # pragma: no cover
    """Naive Python module filterer"""
    return fpath.endswith(".py") and "__" not in fpath
def generate_ngram_successors_solution(text, N):
    """Splitting the .txt file into a list of sublists of size N (N-Grams)

    :param text: Parsed twitter messages as String
    :param N: Number of N
    :returns ngram_successors:
    """
    # Initialize ngram_successors as a list
    ngram_successors = []
    # Splitting up the input file into a list of strings
    words = text.split(" ")
    # Splitting all words into sublists of length N
    # and appending these to ngram_successors
    for i in range(len(words) - N + 1):
        ngram_successors.append(words[i : i + N])
    return ngram_successors
def getstatusoutput(cmd):
    """Return (status, output) of executing cmd in a shell."""
    import os
    pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
    text = pipe.read()
    sts = pipe.close()
    if sts is None:
        sts = 0
    if text[-1:] == '\n':
        text = text[:-1]
    return (sts, text)
def has_hex_prefix(hex_string: str) -> bool:
    """
    Check if a hex string starts with hex prefix (0x).

    Parameters
    ----------
    hex_string :
        The hexadecimal string to be checked for presence of prefix.

    Returns
    -------
    has_prefix : `bool`
        Boolean indicating whether the hex string has 0x prefix.
    """
    return hex_string.startswith("0x")
def expand_fqdn(fqdn: str):
    """
    Moaaar recursive fuckery =D

    :param fqdn: A fully qualified domain name (this.is.sparta.com)
    :return: A tuple of fqdns based on the original
        (sparta.com, is.sparta.com, this.is.sparta.com)
    """
    zones = str(fqdn).split('.')
    if len(zones) == 2:
        return ['.'.join((zones.pop(0), zones.pop(0)))]
    else:
        new_fqdn = '.'.join((x for x in zones[1:]))
        zones.pop(0)
        return expand_fqdn(new_fqdn) + [fqdn]
def merge_list_dictionaries(*dictionaries):
    """Merges dictionary list values from given dictionaries"""
    for addendum in dictionaries[1:]:
        for key in addendum:
            if key in dictionaries[0]:
                dictionaries[0][key] += addendum[key]
            else:
                dictionaries[0][key] = addendum[key]
    return dictionaries[0]
def calculate_cho(slope, intercept, power, cho_list):
    """function to calculate the CHO consumption"""
    # Calculate CHO consumption based on linear function
    cho = slope * power + intercept
    # scaled down from CHO per day to 1 hour
    cho = cho / 24
    # Add the calculated value to list
    cho_list.append(round(cho))
    # Scale down to recording interval of 1s
    cho = cho / 60 / 60
    # Return the cho consumption per s
    return cho
def frequency_maker(list_pair, hf_to_lf_ratio, order=1):
    """Allows the frequency distribution to be counterbalanced by item"""
    part_one, part_two = (0, 1) if order == 1 else (1, 0)
    local_list = []
    local_list.extend(list_pair[part_one] * hf_to_lf_ratio)
    local_list.extend(list_pair[part_two])
    return local_list
def removeObstacle(numRows, numColumns, lot):
    """
    See shortestMazePath for more info.
    This is similar to shortestMazePath with slightly different conditions.
    1 <= numRows, numColumns <= 1000
    """
    possible_paths = {
        'left': [-1, 0],
        'right': [1, 0],
        'up': [0, 1],
        'down': [0, -1]
    }
    numRows, numColumns, dist = len(lot), len(lot[0]), 0
    queue = [(0, 0, lot[0][0])]  # (x, y, val)
    visited = set()  # Points already explored
    while queue:
        next_level = []
        for x, y, val in queue:
            if val == 9:
                return dist
            if (x, y) not in visited:
                for x1, y1 in possible_paths.values():
                    nextX, nextY = x + x1, y + y1
                    if 0 <= nextX < numRows and 0 <= nextY < numColumns:
                        next_level.append((nextX, nextY, lot[nextX][nextY]))
                visited.add((x, y))
        queue = next_level
        dist += 1
    return -1
def lon_to_xindex(lon, res=1):
    """
    For a given longitude return the x index in a 1x1x5-day global grid

    :param lon: Longitude of the point
    :param res: resolution of the grid
    :type lon: float
    :type res: float
    :return: grid box index
    :rtype: integer

    The routine assumes that the structure of the SST array is a grid that is
    360 x 180 x 73 i.e. one year of 1degree lat x 1degree lon data split up
    into pentads. The west-most box is at 180degrees W with index 0 and the
    northern most box also has index zero. Inputs on the border between grid
    cells are pushed east.
    """
    if res == 1:
        inlon = lon
        if inlon >= 180.0:
            inlon = -180.0 + (inlon - 180.0)
        if inlon < -180.0:
            inlon = inlon + 360.
        xindex = int(inlon + 180.0)
        while xindex >= 360:
            xindex -= 360
        return int(xindex)
    else:
        inlon = lon
        if inlon >= 180.0:
            inlon = -180.0 + (inlon - 180.0)
        if inlon < -180.0:
            inlon = inlon + 360.
        xindex = int((inlon + 180.0) / res)
        while xindex >= 360 / res:
            xindex -= 360 / res
        return int(xindex)
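# Hedged usage sketch for lon_to_xindex at 1-degree resolution; values follow
# from the wrapping logic above and are illustrative only.
# lon_to_xindex(-180.0)  -> 0     (west-most box)
# lon_to_xindex(0.0)     -> 180
# lon_to_xindex(179.5)   -> 359   (east-most box)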
def main(x, y):
    """Return the sum of x and y."""
    result = x + y
    return result
def validate_str(str):
    """
    This removes all the spaces, new lines and tabs from a given string

    :param str: input string
    :return: the string with all whitespace removed
    """
    return ''.join(str.split())
def getFrequencyAccuracy(l1):
    """
    Returns accuracy as a percentage from a given list L1, such that accuracy
    is the frequency at which the majority item in the list occurs
    """
    count_zeros = sum([1 for i in range(0, len(l1)) if l1[i] == 0])
    count_ones = sum([1 for i in range(0, len(l1)) if l1[i] == 1])
    return max(count_zeros / float(len(l1)), count_ones / float(len(l1)))
def to_msg_definition(identifier):
    """Create message definition."""
    return "SBP_" + identifier
def geometry_axis_bound(geometry, axis, bound):
    """Return value of axis-bound for given geometry.

    Args:
        geometry (arcpy.Geometry, None): Geometry to evaluate.
        axis (str): Axis of the extent to read, e.g. 'x' or 'y'.
        bound (str): Which bound of the extent to read, 'min' or 'max'.

    Returns:
        float
    """
    if not geometry:
        return None
    return getattr(geometry.extent, axis.upper() + bound.title())
def backslashedp(c):
    """
    BACKSLASHEDP char
    BACKSLASHED? char
    """
    ## outputs TRUE if the input character was originally entered into
    ## Logo with a backslash (\) before it or within vertical bars (|)
    ## to prevent its usual special syntactic meaning, FALSE
    ## otherwise. (Outputs TRUE only if the character is a
    ## backslashed space, tab, newline, or one of ()[]+-*/=<>\":;\\~?| )
    ## @@: doesn't make sense for us.
    return False
def _strip_prefix(cmd_line):
    """Strip an OS command-interpreter prefix from a command line."""
    if cmd_line.startswith('cmd.exe /c '):
        return cmd_line[11:].strip('"')
    if cmd_line.startswith('cmd /c '):
        return cmd_line[7:].strip('"')
    if cmd_line.startswith('/bin/bash -c '):
        return cmd_line[13:]
    if cmd_line.startswith('/bin/sh -c '):
        return cmd_line[11:]
    return cmd_line
def modelTemplate(promoter, decay=False):
    """Nsteps basic linear pathway defined using tellurium"""
    antinom = ''
    if promoter is not None:
        if decay:
            antinom += """
            model Prom_Upstream_Model()
            """
        else:
            antinom += """
            model Prom_Model()
            """
    else:
        antinom += """
        model Noprom_Model()
        """
    antinom += """
    // Compartments and Species:
    compartment Cell;
    species Substrate in Cell, Product in Cell, Enzyme in Cell;
    """
    if promoter is not None:
        antinom += """
        species Inducer in Cell;
        """
    antinom += """
    species Activated_promoter in Cell;
    // species Growth in Cell;

    // Biomass: Growth -> Substrate; Cell*Kgf*Growth - Cell*Kgr*Substrate
    // Decay: Growth -> ; Cell*Kd*Growth
    """
    if decay:
        antinom += """
        Substrate -> ; Cell*Kd*Substrate;
        """
    antinom += """
    // Reactions:
    //Induc: => Inducer; Cell*Constant_flux__irreversible(1);
    // See doi: https://doi.org/10.1101/360040 for modeling the induction using the Hill function
    """
    if promoter is not None:
        antinom += """
        // Induction: Inducer => Activated_promoter; Cell*Hill_Cooperativity(Inducer, Induction_Shalve, Induction_Vi, Induction_h);
        Induction: Inducer => Activated_promoter; Cell*Hill_Coop2(Inducer, Activated_promoter, Induction_n, Induction_kf1, Induction_kr1);
        """
    antinom += """
    Expression: Activated_promoter => Enzyme; Copy_number*Cell*Expression_k1*Activated_promoter;
    Leakage: => Enzyme; Cell*Constant_flux__irreversible(Leakage_vl);
    Degradation: Enzyme => ; Cell*Degradation_k2*Enzyme;
    Catalysis: Substrate => Product; Cell*Henri_Michaelis_Menten__irreversible(Substrate, Enzyme, Catalysis_Km, Catalysis_kcat);

    // Species initializations:
    Substrate = 0.5*1e-9;
    Product = 0;
    Enzyme = 0;
    """
    if promoter is not None:
        antinom += """
        Inducer = 1e-2;
        """
    if decay:
        antinom += """
        Kd = 1e-4;
        """
    antinom += """
    Activated_promoter = 0;
    Copy_number = 1;

    // Compartment initializations:
    Cell = 1;
    // Growth = 1;pathSim.PlotResponse()

    // Variable initializations:
    // Induction_Shalve = 1e-1;
    // Induction_Vi = 1e7;
    // Induction_h = 1.85;
    Induction_n = 1.85;
    Induction_kf1 = 1e3;
    Induction_kr1 = 1e-1;
    Expression_k1 = 1e6;
    Leakage_vl = 0;
    Degradation_k2 = 1e-6;
    Catalysis_Km = 0.1;
    Catalysis_kcat = 0.1;
    Kgf = 5;
    Kgr = 1;

    // Other declarations:
    const Cell;
    end
    """
    return antinom
def _publications_urls(request, analyses):
    """Return set of publication URLS for given analyses.

    Parameters
    ----------
    request
        HTTPRequest
    analyses
        seq of Analysis instances

    Returns
    -------
    Set of absolute URLs (strings)
    """
    # Collect publication links, if any
    publication_urls = set()
    for a in analyses:
        surface = a.related_surface
        if surface.is_published:
            pub = surface.publication
            pub_url = request.build_absolute_uri(pub.get_absolute_url())
            publication_urls.add(pub_url)
    return publication_urls
def get_neighbour_list(r, c, i, j):
    """
    :param r: max row
    :param c: max col
    :param i: current row
    :param j: current col
    :return: list of (row, col) tuples
    """
    neighbours = []
    # up : i - 1, j
    if 0 <= i - 1 < r and 0 <= j < c:
        neighbours.append((i - 1, j))
    # down : i + 1, j
    if 0 <= i + 1 < r and 0 <= j < c:
        neighbours.append((i + 1, j))
    # left : i, j - 1
    if 0 <= i < r and 0 <= j - 1 < c:
        neighbours.append((i, j - 1))
    # right : i, j + 1
    if 0 <= i < r and 0 <= j + 1 < c:
        neighbours.append((i, j + 1))
    return neighbours
def cxSet(ind1, ind2):
    """Apply a crossover operation on input sets. The first child is the
    intersection of the two sets, the second child is the difference of the
    two sets."""
    temp = set(ind1)  # Used in order to keep type
    ind1 &= ind2      # Intersection (inplace)
    ind2 ^= temp      # Symmetric Difference (inplace)
    return ind1, ind2
def max_safe(iterable, default=0):
    """Helper wrapper over builtin max() function.

    This function is just a wrapper over builtin max() w/o ``key`` argument.
    The ``default`` argument specifies an object to return if the provided
    ``iterable`` is empty. Also it filters out the None type values.

    :param iterable: an iterable
    :param default: 0 by default
    """
    try:
        return max(x for x in iterable if x is not None)
    except ValueError:
        # TypeError is not caught here as that should be thrown.
        return default
def flatten(lst, out=None):
    """
    @return: a flat list containing the leaves of the given nested list.
    @param lst: The nested list that should be flattened.
    """
    if out is None:
        out = []
    for elt in lst:
        if isinstance(elt, (list, tuple)):
            flatten(elt, out)
        else:
            out.append(elt)
    return out
def convert_all(string: str) -> int:
    """
    Input is the binary representation of a number.
    R or B means 1. L or F means 0.
    Return the value of this binary representation
    """
    return int("".join("1" if char in ("R", "B") else "0" for char in string), 2)
def second_insertion_sort(numbers):
    """Non recursive second implementation of insertion sort.

    This one contains the first optimization, which checks if the previous
    element is greater than the current one. An iteration counter is present
    to show the difference between each implementation.

    Args:
        numbers (list): list of integers to be sorted.

    Returns:
        numbers (list): sorted list.
        iterations (int): number of iterations the algorithm took to sort the list.
    """
    size = len(numbers)
    iterations = 0
    for i in range(size):
        iterations += 1
        j = i
        while j > 0 and numbers[j] < numbers[j - 1]:
            iterations += 1
            numbers[j], numbers[j - 1] = numbers[j - 1], numbers[j]
            j -= 1
    return numbers, iterations
def build_message(cmd, data):
    """
    Gets command name (str) and data field (str) and creates a valid protocol message

    Returns: str, or None if an error occurred
    """
    if len(cmd) > 16 or len(data) > 9999:
        return None
    spaces = ' ' * 16  # pad cmd to a fixed-width 16-character field
    zeros = "0000"     # zero-pad the 4-digit length field
    full_msg = (cmd + spaces[0:16 - len(cmd)] + "|"
                + zeros[0:4 - len(str(len(data)))] + str(len(data)) + "|" + data)
    return full_msg
def parse_paired(target):
    """Based on Makefile target name, parse whether the data is paired-end"""
    return target.startswith('r12')
def _create_arguments_dictionary(parameter_extractors, environment, context):
    """Creates a dictionary of arguments (to pass to an action function) using the
    parameters and the parameter_extractor_mapping as a guide and extracts the
    values from environment and context.

    Arguments:
        parameter_extractors -- A list of functions used to extract and prepare
            the parameters. Each of these functions should:
            1. Accept 2 arguments: the TrailEnvironment and context. If there is
               no context, None will be passed as an argument.
            2. They must return a dictionary whose key is one of the parameters.
            The dictionary created from all of the parameter_extractors is then
            used as kwargs to the function_or_method. See example usage for more
            clarity.
        environment -- The TrailEnvironment object passed to an action function.
        context -- The user-defined context object passed to the action function.

    Returns:
        A dictionary containing the values retrieved from environment and context
        using each of the functions from parameter_extractors.
        The dictionary is of the form:
        {
            '<Parameter>': <Value>,
        }
    """
    arguments = {}
    for parameter_extractor in parameter_extractors:
        arguments.update(parameter_extractor(environment, context))
    return arguments
def get_job_locations_from_db(loc_list):
    """Get the number of jobs by country as a dictionary."""
    countries = {}
    for loc in loc_list:
        country = loc.split()[-1].lower()
        if country == 'usa' or country == 'states' or country == 'us':
            countries.setdefault('usa', 0)
            countries['usa'] += 1
        elif country == 'uk' or country == 'kingdom' or country == 'england':
            countries.setdefault('uk', 0)
            countries['uk'] += 1
        else:
            countries.setdefault(country, 0)
            countries[country] += 1
    return countries
def _cuda_dot_product(a, b):
    """Calculate the dot product between two 1D arrays of length 3.

    Parameters
    ----------
    a : numba.cuda.cudadrv.devicearray.DeviceNDArray
    b : numba.cuda.cudadrv.devicearray.DeviceNDArray

    Returns
    -------
    float
    """
    return a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
def connect_string(config):
    """return connect string"""
    return "DATABASE={};HOSTNAME={};PORT={};PROTOCOL=TCPIP;UID={};PWD={};".format(
        config['db_name'], config['server'], config['server_port'],
        config['username'], config['password'])
def str_to_frame_data(s) -> list:
    """Convert string to frame data."""
    return [int(s[i : i + 2], 16) for i in range(0, len(s), 2)]
def _encode(data, name='data'):
    """Call data.encode("latin-1") but show a better error message."""
    try:
        return data.encode("latin-1")
    except UnicodeEncodeError as err:
        raise UnicodeEncodeError(
            err.encoding,
            err.object,
            err.start,
            err.end,
            "%s (%.20r) is not valid Latin-1. Use %s.encode('utf-8') "
            "if you want to send it encoded in UTF-8." %
            (name.title(), data[err.start:err.end], name)) from None
def _clean_up_pool_args(args):
    """A helper function to clean up common arguments in conv and pooling ops."""
    assert isinstance(args, dict)
    if 'stride_h' in args and 'stride_w' in args:
        assert 'stride' not in args and 'strides' not in args
        args['strides'] = [args['stride_h'], args['stride_w']]
        args.pop('stride_h')
        args.pop('stride_w')
    elif 'stride' in args:
        args['strides'] = [args['stride'], args['stride']]
        args.pop('stride')

    # rename 'kernel', 'kernels', to 'kernel_shape'
    if 'kernel_h' in args and 'kernel_w' in args:
        assert 'kernel' not in args and 'kernels' not in args
        args['kernel_shape'] = [args['kernel_h'], args['kernel_w']]
        args.pop('kernel_h')
        args.pop('kernel_w')
    elif 'kernel' in args:
        args['kernel_shape'] = [args['kernel'], args['kernel']]
        args.pop('kernel')
    elif 'kernels' in args:
        args['kernel_shape'] = args['kernels']
        args.pop('kernels')

    if 'pad_t' in args and 'pad_l' in args and 'pad_b' in args and 'pad_r' in args:
        assert 'pad' not in args and 'pads' not in args
        args['pads'] = [
            args['pad_t'], args['pad_l'], args['pad_b'], args['pad_r']
        ]
        for pad in ['pad_t', 'pad_l', 'pad_b', 'pad_r']:
            args.pop(pad)
    elif 'pad' in args:
        args['pads'] = [args['pad'], args['pad']]
        args.pop('pad')

    if 'dilation_h' in args and 'dilation_w' in args:
        assert 'dilation' not in args and 'dilations' not in args
        args['dilations'] = [args['dilation_h'], args['dilation_w']]
        args.pop('dilation_h')
        args.pop('dilation_w')
    elif 'dilation' in args:
        args['dilations'] = [args['dilation'], args['dilation']]
        args.pop('dilation')

    return args
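# Hedged usage sketch for _clean_up_pool_args; the resulting dict follows from
# the renaming branches above and is illustrative only.
# _clean_up_pool_args({'stride': 2, 'kernel': 3, 'pad': 1})
#   -> {'strides': [2, 2], 'kernel_shape': [3, 3], 'pads': [1, 1]}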
def check_claim_attrs(claim_attrs, expected_claim):
    """
    Check if field 'attrs' in gotten claim matches with expected claim json.

    :param claim_attrs: value of field 'attrs' in gotten claim.
    :param expected_claim:
    :return: True or False.
    """
    for key in expected_claim.keys():
        if claim_attrs[key] != expected_claim[key][0]:
            return False
    return True
def _get_hex_id(replay_id):
    """
    from a replay id (64-bit int) get the appropriate hex notation
    (zero-padded 16 char int) regardless of decimal or hex input
    """
    # replay_id is 64bit int (can be decimal or hex)
    # check length to find out, hex should be 16 chars (64 bit)
    if len(replay_id) == 16:
        replay_id = int(replay_id, 16)
    else:
        replay_id = int(replay_id)
    # convert to hex
    replay_id_hex = "{:016x}".format(replay_id)
    return replay_id_hex
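# Hedged usage sketch for _get_hex_id; outputs follow from the length check
# above and are illustrative only.
# _get_hex_id("255")               -> '00000000000000ff'   (decimal input)
# _get_hex_id("00000000000000ff")  -> '00000000000000ff'   (16-char hex input)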
def clearBit(int_type: int, offset: int) -> int:
    """clearBit() returns an integer with the bit at 'offset' cleared."""
    mask = ~(1 << offset)
    return int_type & mask
def faction_type(faction):
    """Determines single character representation of faction."""
    if faction == 'autobots':
        return 'A'
    elif faction == 'decepticons':
        return 'D'
def find_next_square2(sq: int) -> int:
    """This version is just more compact."""
    sqrt_of_sq = sq ** (1 / 2)
    return -1 if sqrt_of_sq % 1 != 0 else int((sqrt_of_sq + 1) ** 2)
def capitalise_first_letter(old_string):
    """
    Really simple method to capitalise the first character of a string.

    Args:
        old_string: The string to be capitalised

    Returns:
        new_string: The capitalised string
    """
    if len(old_string) > 0:
        return old_string[0].upper() + old_string[1:]
    else:
        return old_string
def inverse_dict(dic):
    """Accepts dictionary, returns dictionary where keys become values, and values become keys"""
    new_dict = {}
    for k, v in dic.items():
        new_dict[v] = k
    return new_dict
def contains_tlvs(sub_tlvs, tlv_types):
    """Verify if all types of tlv in a list are included in a sub-tlv list."""
    return all((any(isinstance(sub_tlv, tlv_type) for sub_tlv in sub_tlvs))
               for tlv_type in tlv_types)
def stringcomp(fx, fy):
    """
    Return a number within C{0.0} and C{1.0} indicating the similarity between
    two strings. A perfect match is C{1.0}, no match at all is C{0.0}.

    This is an implementation of the string comparison algorithm (also known as
    "string similarity") published by Qi Xiao Yang, Sung Sam Yuan, Li Zhao,
    Lu Chun and Sun Peng in a paper called "Faster Algorithm of String
    Comparison" ( http://front.math.ucdavis.edu/0112.6022 ). Please note,
    however, that this implementation presents some relevant differences that
    will lead to different numerical results (read the comments for more
    details).

    @param fx: A C{string}.
    @param fy: A C{string}.
    @return: A float with the value of the comparison between C{fx} and C{fy}.
        C{1.0} indicates a perfect match, C{0.0} no match at all.
    @rtype: C{float}
    """
    # get the smaller of 'n' and 'm', and of 'fx' and 'fy'
    n, m = len(fx), len(fy)
    if m < n:
        (n, m) = (m, n)
        (fx, fy) = (fy, fx)

    # Sum of the Square of the Number of the same Characters
    ssnc = 0.

    # My implementation presents some relevant differences to the pseudo-code
    # presented in the paper by Yang et al., which in a number of cases will
    # lead to different numerical results (and, while no empirical tests have
    # been performed, I expect this to be slower than the original).
    # The differences are due to two specific characteristics of the original
    # algorithm that I consider undesirable for my purposes:
    #
    # 1. It does not take into account the probable repetition of the same
    #    substring inside the strings to be compared (such as "you" in "where
    #    do you think that you are going?") because, as far as I was able to
    #    understand, it counts only the first occurrence of each substring
    #    found.
    # 2. It does not seem to consider the high probability of having more
    #    than one pattern of the same length (for example, comparing between
    #    "abc1def" and "abc2def" seems to consider only one three-character
    #    pattern, "abc").
    #
    # Demonstrating the differences between the two algorithms (or, at least,
    # between my implementation of the original and the revised one):
    #
    #   "abc1def" and "abc2def"
    #   Original: 0.534
    #   Current:  0.606
    for length in range(n, 0, -1):
        while True:
            length_prev_ssnc = ssnc
            for i in range(len(fx) - length + 1):
                pattern = fx[i:i + length]
                pattern_prev_ssnc = ssnc
                fx_removed = False
                while True:
                    index = fy.find(pattern)
                    if index != -1:
                        ssnc += (2. * length) ** 2
                        if not fx_removed:
                            fx = fx[:i] + fx[i + length:]
                            fx_removed = True
                        fy = fy[:index] + fy[index + length:]
                    else:
                        break
                if ssnc != pattern_prev_ssnc:
                    break
            if ssnc == length_prev_ssnc:
                break

    return (ssnc / ((n + m) ** 2.)) ** 0.5
def use_list_comprehension(letters):
    """
    >>> import string
    >>> use_list_comprehension(string.ascii_letters)  # doctest: +ELLIPSIS
    [97, 98, 99, ..., 88, 89, 90]
    """
    return [ord(i) for i in letters]
def fib(n, i=0, a=0, b=1):
    """Return the nth fibonacci number.

    >>> fib(6)
    8
    >>> [fib(n) for n in range(6)]
    [0, 1, 1, 2, 3, 5]
    >>> fib(-1)
    Traceback (most recent call last):
        ...
    ValueError: n must be >= 0
    """
    if not n >= 0:
        raise ValueError("n must be >= 0")
    if i < n:
        return fib(n, i + 1, b, a + b)
    return a
def api_url(domain):
    """Returns the Freshbooks API URL for a given domain.

    >>> api_url('billing.freshbooks.com')
    'https://billing.freshbooks.com/api/2.1/xml-in'
    """
    return "https://%s/api/2.1/xml-in" % (domain, )
def check_lost(positions):
    """Checks if any blocks are above the screen."""
    for pos in positions:
        x, y = pos
        if y < 1:
            return True
    return False
def to_deg(value, loc):
    """convert decimal coordinates into degrees, minutes and seconds tuple

    Keyword arguments:
        value is float gps-value, loc is direction list ["S", "N"] or ["W", "E"]

    return: tuple like (25, 13, 48.343, 'N')
    """
    if value < 0:
        loc_value = loc[0]
    elif value > 0:
        loc_value = loc[1]
    else:
        loc_value = ""
    abs_value = abs(value)
    deg = int(abs_value)
    t1 = (abs_value - deg) * 60
    min = int(t1)
    sec = round((t1 - min) * 60, 5)
    return (deg, min, sec, loc_value)
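# Hedged usage sketch for to_deg; the numbers follow from the degree/minute/second
# split above and are illustrative only (floating-point rounding may vary slightly).
# to_deg(50.123456, ["S", "N"])   -> approximately (50, 7, 24.4416, 'N')
# to_deg(-0.5, ["W", "E"])        -> (0, 30, 0.0, 'W')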