content
stringlengths
42
6.51k
def convert_dictionary_list(dict_list):
    """Convert a list of dictionaries to separate column and row collections.

    The union of all keys (in first-seen order) becomes the column list;
    each input dictionary becomes one row, with None filling any column
    that dictionary does not define.

    dict_list[in]      Dictionary with a list of items to convert

    Returns tuple - (columns, rows)
    """
    # Collect the union of keys, preserving first-appearance order.
    columns = []
    for entry in dict_list:
        for name in entry.keys():
            if name not in columns:
                columns.append(name)
    # Build one row per dictionary; missing columns become None.
    rows = [[entry.get(name) for name in columns] for entry in dict_list]
    return (columns, rows)
def image_proc_normalize( image ):
    """Normalize image pixel component values to the 0-1 range.

    Scalar pixels (int/float) are divided by 255; any other pixel is
    treated as an iterable of components, each divided by 255.
    """
    normalized = []
    for source_row in image:
        scaled_row = []
        for pixel in source_row:
            if isinstance(pixel, (float, int)):
                scaled_row.append(pixel / 255)
            else:
                scaled_row.append([component / 255 for component in pixel])
        normalized.append(scaled_row)
    return normalized
def remove_multiple_newlines(content):
    """Collapse every run of consecutive newlines down to a single newline.

    mdoc format gives a warning if multiple newlines are put into a
    document. To suppress this warning, we strip multiple newlines.

    The previous chain of fixed-size str.replace calls could leave extra
    newlines behind for long runs (e.g. 64 consecutive newlines collapsed
    to 3); a single regex substitution handles any run length.

    :returns: str
    """
    import re  # local import keeps this fix self-contained
    return re.sub(r'\n{2,}', '\n', content)
def istradiationalfloat(value):
    """Return True when *value* parses as a standard floating point literal.

    Does not allow for fortran style floats, i.e -2.34321-308 — only
    standard floats, since float() rejects the fortran exponent form.
    """
    try:
        float(value)
    except ValueError:
        return False
    return True
def get_option_usage_string(name, option):
    """Return the option's usage string, synthesizing one when absent.

    The generated form is ``-o option_name OPTIONNAME`` where the
    placeholder is the name uppercased with underscores removed.
    """
    existing = option.get("usage")
    if existing:
        return existing
    placeholder = name.replace('_', '').upper()
    return f"-o {name} {placeholder}"
def convert_idx(text, tokens):
    """
    Calculate the (start, end) character span of each token within *text*.

    Tokens are located left to right, so repeated tokens map to successive
    occurrences.

    :param text: The text to extract spans from.
    :param tokens: The tokens of that text.
    :return: A list of (start, end) spans, one per token.
    :raises ValueError: if a token cannot be found in the remaining text.
    """
    current = 0
    spans = []
    for token in tokens:
        current = text.find(token, current)
        if current < 0:
            # Raise a meaningful, catchable error instead of the previous
            # print + bare Exception() (ValueError is still an Exception,
            # so existing broad handlers keep working).
            raise ValueError("Token {} cannot be found".format(token))
        spans.append((current, current + len(token)))
        current += len(token)
    return spans
def load_custom_overrides(_, __, value):
    """Load custom overrides from user

    Parameters
    ----------
    value : list
        list of str (override_name=override_value)

    Returns
    -------
    dict
        user overrides
    """
    user_overrides = {}
    for override in value:
        # Split on the first '=' only, so override values may themselves
        # contain '=' characters (previously this raised ValueError).
        override_name, override_value = override.split('=', 1)
        user_overrides[override_name] = override_value
    return user_overrides
def query_position(record, gene: str, reference_position: int):
    """
    Given a position on the reference, return the same position but relative to
    the full query sequence.

    record: mapping with alignment columns for *gene*:
        "<gene>_germline_start", "<gene>_sequence_start",
        "<gene>_sequence_alignment", "<gene>_germline_alignment".
        Starts appear to be 1-based and '-' marks gaps — TODO confirm
        against the producer of these records.

    Returns the query position (0-based), or None when
    *reference_position* is never reached within the alignment.
    """
    # Convert the 1-based start columns to 0-based walking positions.
    ref_pos = record[gene + "_germline_start"] - 1
    query_pos = record[gene + "_sequence_start"] - 1
    # Iterate over alignment columns
    if ref_pos == reference_position:
        return query_pos
    sequence_alignment = record[gene + "_sequence_alignment"]
    germline_alignment = record[gene + "_germline_alignment"]
    for ref_c, query_c in zip(germline_alignment, sequence_alignment):
        # A gap ('-') does not advance that sequence's coordinate.
        if ref_c != "-":
            ref_pos += 1
        if query_c != "-":
            query_pos += 1
        if ref_pos == reference_position:
            return query_pos
    # Reference position lies beyond the aligned region.
    return None
def normalize (bbox):
    """Normalize EPSG:4326 bbox order.

    Returns the normalized bbox and whether it was flipped on the
    horizontal axis.
    """
    flip_h = False
    bbox = list(bbox)
    # Shift the west edge into [-180, inf) by whole-world (360 deg)
    # increments, moving the east edge in step so the width is unchanged.
    while bbox[0] < -180.:
        bbox[0] += 360.
        bbox[2] += 360.
    # Antimeridian crossing: unwrap the east edge past +180 instead of
    # swapping the corners (see the commented-out alternative).
    if bbox[0] > bbox[2]:
        bbox = (bbox[0],bbox[1],bbox[2]+360,bbox[3])
        #bbox = (bbox[2],bbox[1],bbox[0],bbox[3])
    # South edge above the north edge: swap latitudes and report the flip.
    if bbox[1] > bbox[3]:
        flip_h = True
        bbox = (bbox[0],bbox[3],bbox[2],bbox[1])
    return bbox, flip_h
def error_porcentual(aceptado: float, experimental: float) -> str:
    """Return the percent error of an experimental result relative to the
    accepted value, formatted like ``'5.00%'``."""
    relative_error = (aceptado - experimental) / aceptado
    return "{:.2f}%".format(relative_error * 100)
def successors(x, y, X, Y):
    """
    Return a dict of {state: action} pairs describing what can be reached
    from the (x, y) state, and how. Note, tuple is hashable.

    Two-glasses pouring puzzle. When two moves produce the same state,
    the later dict entry overwrites the earlier one, so the literal's
    insertion order decides which action label is reported.
    """
    assert x <= X and y <= Y  # (x, y) is glass levels; X and Y are glass sizes
    # 1. x pour to y, post-state: (0, y+x)       (or y fills up first)
    # 2. y pour to x, post-state: (y+x, 0)       (or x fills up first)
    # 3. fill x,  post-state: (X, y)
    # 4. empty x, post-state: (0, y)
    # 5. fill y,  post-state: (x, Y)
    # 6. empty y, post-state: (x, 0)
    return {((0, y+x) if y + x <= Y else (x-(Y-y), y+(Y-y))):'X->Y',
            ((x+y, 0) if x + y <= X else (x+(X-x), y-(X-x))):'X<-Y',
            (X, y): 'fill X', (x, Y): 'fill Y',
            (0, y): 'empty X', (x, 0): 'empty Y'}
def get_region(page_number, polygon, text):
    """Build the region JSON attributes for one recognized field.

    :param page_number: OCR page number
    :param polygon: The VOTT polygon value for the field
    :param text: The recognized text for the field
    :return: The populated json attributes
    """
    return {
        "page": page_number,
        "text": text,
        "boundingBoxes": [polygon],
    }
def make_franken_headers(environ):
    """Takes a WSGI environ, returns a dict of HTTP headers.

    https://www.python.org/dev/peps/pep-3333/#environ-variables
    """
    # HTTP_* entries become headers with the prefix stripped.
    headers = [(key[5:], val) for key, val in environ.items()
               if key.startswith(b'HTTP_')]
    # CONTENT_TYPE / CONTENT_LENGTH live un-prefixed in the environ.
    for key in (b'CONTENT_TYPE', b'CONTENT_LENGTH'):
        headers.append((key, environ.get(key, None)))
    return {key.replace(b'_', b'-'): val for key, val in headers
            if val is not None}
def get_lower(somedata):
    """Lowercase *somedata*, decoding UTF-8 bytes first when necessary.

    Handles Python 2/3 differences in argv encoding: bytes input is
    decoded as UTF-8 before lowercasing; str input (no .decode) and
    undecodable bytes fall back to the object's own lower().
    """
    try:
        return somedata.decode("utf-8").lower()
    except (AttributeError, UnicodeDecodeError):
        # Narrowed from a bare except: str has no .decode (AttributeError),
        # invalid UTF-8 raises UnicodeDecodeError; anything else is a bug
        # and should surface.
        return somedata.lower()
def get_octagonals(num):
    """Return the first *num* octagonal numbers: n*(3n-2) for n = 1..num."""
    octagonals = []
    for n in range(1, num + 1):
        octagonals.append(n * (3 * n - 2))
    return octagonals
def length_units_from_node(node):
    """Returns standard length units string based on node text, or 'unknown'.

    A missing node, missing text, or whitespace-only text all map to
    'unknown'. Previously text such as '  ' slipped past the explicit
    ''/'\\n' checks and returned an empty string, and None text crashed.
    """
    if node is None or node.text is None or not node.text.strip():
        return 'unknown'
    return node.text.strip()
def snake_to_camel(snake_str):
    """Convert a snake_case identifier to camelCase.

    :param snake_str: identifier using underscore separators
    :return: camelCase version (the first component is left untouched)
    """
    first, *rest = snake_str.split('_')
    return first + ''.join(part.title() for part in rest)
def add_content_below(text: str, path: str, line=3) -> bool:
    """Add additional content (like binder button) to existing rst file

    :param text: Text that will be added inside rst file
    :type text: str
    :param path: Path to modified file
    :type path: str
    :param line: Line number that content will be added. Defaults to 3.
    :type line: int
    :returns: Informs about success or failure in modifying file
    :rtype: bool
    """
    try:
        with open(path, "r+", encoding="utf-8") as rst_file:
            lines = rst_file.readlines()
            # Slice-assigning the string splices its characters in at the
            # insertion point; writelines re-joins them unchanged.
            lines[line:line] = text
            rst_file.seek(0)
            rst_file.writelines(lines)
    except FileNotFoundError:
        return False
    return True
def _process_content_range(content_range): """Convert a 'Content-Range' header into a length for the response. Helper for :meth:`Response.length`. :type content_range: str :param content_range: the header value being parsed. :rtype: int :returns: the length of the response chunk. """ _, _, range_spec = content_range.partition(' ') byte_range, _, _ = range_spec.partition('/') start, _, end = byte_range.partition('-') return int(end) - int(start) + 1
def noneorstr(s):
    """Turn empty or 'none' string to None."""
    return None if s.lower() in ('', 'none') else s
def update_age(row):
    """Update age with time passed since baseline (bl) to current visit.

    Baseline visits ("ses-M00") keep their recorded AGE unchanged.
    """
    from datetime import datetime

    if row["session_id"] == "ses-M00":
        return row["AGE"]
    exam = datetime.strptime(row["EXAMDATE"], "%Y-%m-%d")
    baseline = datetime.strptime(row["EXAMDATE_bl"], "%Y-%m-%d")
    elapsed_years = (exam - baseline).days / 365.25
    return round(float(row["AGE"]) + elapsed_years, 1)
def is_ob(x):
    """filter for valid 80-char observation records"""
    # A record whose first element is an Exception marks a failed parse.
    first = x[0]
    return not isinstance(first, Exception)
def simplify_alphabet(sequence):
    """Replace ambiguous amino acids.

    Some sequences are encoded with 'U' (selenocysteine); C is
    arbitrarily chosen as the residue to replace any U.

    Parameters
    ----------
    sequence: string, peptide sequences
    """
    return sequence.translate(str.maketrans("U", "C"))
def ping(*args, **kw):
    """Ping the lamps.

    Placeholder implementation: the actual blink command would be issued
    here; currently always reports success.
    """
    return True
def remain(a, b):
    """Find remainder of two lists, sequences, etc., after intersection.

    Returns a list that includes repetitions if they occur in the inputs.
    """
    leftover = []
    for item in a:
        if item not in b:
            leftover.append(item)
    return leftover
def get_case(word, other_tag="O"):
    """Detects the case of a given word.

    Parameters
    ----------
    word: str
        A string representing a word.
    other_tag: str
        Class name for words that are not ALL_CAPS or FIRST_CAP
        (default: 'O').

    Returns
    -------
    str
        A character representing the case of the word.
    """
    if word.isupper():
        return "A"  # ALL_CAPS
    if word.istitle():
        return "F"  # FIRST_CAP
    return other_tag
def data_to_n(data):
    """Read initial one-, four- or eight-unit value from graph6
    integer sequence.

    Return (value, rest of seq.)

    data: sequence of 6-bit integer units (0..63). A value <= 62 encodes
    itself; 63 is the escape marker selecting the next longer encoding —
    presumably the graph6 N(n) rule; confirm against the graph6 spec.
    """
    # Single unit: the value encodes itself.
    if data[0] <= 62:
        return data[0], data[1:]
    # One escape unit, then three 6-bit units -> up to 18-bit value.
    if data[1] <= 62:
        return (data[1] << 12) + (data[2] << 6) + data[3], data[4:]
    # Two escape units, then six 6-bit units -> up to 36-bit value.
    return (
        (data[2] << 30)
        + (data[3] << 24)
        + (data[4] << 18)
        + (data[5] << 12)
        + (data[6] << 6)
        + data[7],
        data[8:],
    )
def createFeature(element):
    """ Trim dictionary representation of feature from featureXML output """
    key_map = [
        ("FWHM", "FWHM"),
        ("charge", "Charge"),
        ("intensity", "Intensity"),
        ("overallquality", "Overallquality"),
        ("label", "Label"),
        ("spectrum_index", "spectrum_index"),
        ("spectrum_native_id", "spectrum_native_id"),
    ]
    feature = {new: element[old] for old, new in key_map}
    # position[0] carries retention time, position[1] the m/z value.
    feature["RT"] = element["position"][0]["position"]
    feature["mz"] = element["position"][1]["position"]
    # RT extent comes from the x coordinates of the first convex hull.
    hull_rts = [point["x"] for point in element["convexhull"][0]["pt"]]
    feature["RTmin"] = min(hull_rts)
    feature["RTmax"] = max(hull_rts)
    return feature
def compute_percent_id(seq_1, seq_2):
    """Return % identity for two equal-length aligned sequences.

    Positions containing 'N' or '-' in either sequence are excluded from
    the match count, and the denominator ignores N/- in seq_1.
    """
    assert len(seq_1) == len(seq_2)  # otherwise it is a bug
    ignored = ("N", "-")
    matches = 0
    for a, b in zip(seq_1, seq_2):
        if a == b and a not in ignored and b not in ignored:
            matches += 1
    length = len(seq_1.replace("N", "").replace("-", ""))  # we ignore N's
    return matches * 100 / length if length != 0 else 0
def get_tool_names(tool_list):
    """
    SUMMARY: creates a list (set) of unique tool names for searching,
             auto-suggestion, etc.
    INPUT: a list of two-item tuple (tool, platform)
    OUTPUT: a de-duplicated list (set) of tool names
    """
    return {entry[0] for entry in tool_list}
def diff(value_a, value_b, position_a, position_b):
    """Return the position of the smaller value and the absolute gap.

    Sentinel results: (101, 0) when either value is falsy (missing/zero),
    (100, 0) when the values are equal.
    """
    if not (value_a and value_b):
        return 101, 0
    if value_a == value_b:
        return 100, 0
    gap = round(abs(value_a - value_b), 2)
    winner = position_a if value_a < value_b else position_b
    return winner, gap
def cstr2str(cs, encoding='utf8'):
    """Convert a null-terminated C-string bytes to a Python string"""
    terminator = cs.find(b'\0')
    if terminator >= 0:
        cs = cs[:terminator]
    return cs.decode(encoding)
def calGasHourlySpaceVelocity(VoFlRa, ReVo):
    """
    cal: gas hourly space velocity [1/h]

    args:
        VoFlRa: volumetric flowrate [m^3/h]
        ReVo: reactor volume [m^3]

    returns:
        GHSV [1/h], or None when the inputs are invalid (e.g. zero
        volume); the error is printed, keeping the module's best-effort
        style. Previously None was returned implicitly, which hid the
        failure mode from readers.
    """
    try:
        return VoFlRa / ReVo
    except Exception as e:  # best-effort: report and signal failure explicitly
        print(e)
        return None
def swap_count(list1: list, list2: list) -> int:
    """Count the number of swaps required to transform list1 into list2.

    Each element of list1 is located in a shrinking working copy of
    list2; its index there is the number of positions it must move.
    Return annotation corrected from float to int (the count is integral).

    :raises ValueError: if an element of list1 is missing from list2.
    """
    working = list(list2)
    swaps = 0
    for element in list(list1):
        position = working.index(element)
        working.pop(position)
        swaps += position
    return swaps
def nested_dict_get(dictionary, *keys, **kargs):
    """Get an item from a nested dictionary.

    Checks if an item exists in a nested level by keys in a dictionary
    and if yes returns it. Otherwise return default.

    Parameters
    ----------
    dictionary : dict
        The dictionary that is going to be parsed.
    keys : list(hashable)
        A list of keys.
    default : object, default=None
        The value to return, if the element is not found.

    Returns
    -------
    dict_elem : object
        Returns either the dictionary element or the value in default.
    """
    # Bug fix: the old test was `len(kargs) == 1 and "default"` — the
    # string literal is always truthy, so a single *misnamed* keyword
    # argument raised KeyError instead of the intended TypeError.
    if len(kargs) == 1 and "default" in kargs:
        default = kargs["default"]
    elif len(kargs) == 0:
        default = None
    else:
        raise TypeError('optional argument can only be "default"')
    element = dictionary
    for k in keys:
        if (k in element):
            element = element[k]
        else:
            return default
    return element
def stack_peek(vrt_stack):
    """Return the top (last) element of a VRT stack without removing it."""
    return vrt_stack[len(vrt_stack) - 1]
def find_next_list_property(search_dict, fields_to_check):
    """Find first object in a dictionary where object[field] is a list.

    Returns (key, field) of the first match in dict-iteration order,
    or (None, None) when nothing matches.
    """
    matches = (
        (key, field)
        for key, candidate in search_dict.items()
        for field in fields_to_check
        if field in candidate and isinstance(candidate[field], list)
    )
    return next(matches, (None, None))
def extract_id_from_uri(id_or_uri):
    """
    Extract ID from the end of the URI

    Args:
        id_or_uri: ID or URI of the OneView resources.

    Returns:
        str: The string found after the last "/" (the input unchanged
        when it contains no "/").
    """
    # rsplit with maxsplit=1 yields the tail after the final '/', and the
    # whole string when no separator is present.
    return id_or_uri.rsplit('/', 1)[-1]
def _filter_id(dict_dirty: dict): """Remove id fields from resultset.""" return dict( filter( lambda elem: elem[0][-3:] != "_id" and elem[0] != "id", dict_dirty.items() ) )
def _format_servers_list_power_state(state): """Return a formatted string of a server's power state :param state: the power state number of a server :rtype: a string mapped to the power state number """ power_states = [ 'NOSTATE', # 0x00 'Running', # 0x01 '', # 0x02 'Paused', # 0x03 'Shutdown', # 0x04 '', # 0x05 'Crashed', # 0x06 'Suspended' # 0x07 ] try: return power_states[state] except Exception: return 'N/A'
def is_sklearn_pipeline(pipeline):
    """
    checks whether pipeline is a sklearn pipeline

    :param pipeline:
    :return:
    """
    # Compared as strings, not isinstance, to avoid a dependency on sklearn.
    cls = type(pipeline)
    return (cls.__name__, cls.__module__) == ('Pipeline', 'sklearn.pipeline')
def log_minor_tick_formatter(y: int, pos: float) -> str:
    """ Provide reasonable minor tick formatting for a log y axis.

    Provides ticks on the 2, 3, and 5 for every decade.

    Args:
        y: Tick value.
        pos: Tick position.
    Returns:
        Formatted label.
    """
    ret_val = ""
    # The positions of major ticks appear to be skipped, so the numbering starts at 2.
    # Thus, to label the 2, 3, and 5 ticks, we need to return the label for the 0th, 1st, and
    # 3rd labels.
    values_to_plot = [0, 1, 3]
    # The values 2 - 9 are available for the minor ticks, so we take the position mod 8 to
    # ensure that we are repeating the same labels over multiple decades.
    if (pos % 8) in values_to_plot:
        # "g" auto formats to a reasonable presentation for most numbers.
        ret_val = f"{y:g}"
    return ret_val
def rot_code(data):
    """
    The rot_code function encodes/decodes data using a rot-13 mapping.

    Letters a-z / A-Z are rotated 13 places (case preserved); every
    other character passes through unchanged.

    :param data: A string
    :return: The rot-13 encoded/decoded string
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    rotated = alphabet[13:] + alphabet[:13]
    table = str.maketrans(alphabet + alphabet.upper(),
                          rotated + rotated.upper())
    return data.translate(table)
def format_timedelta(days: int = 0, hours: int = 0, minutes: int = 0, seconds: int = 0) -> str:
    """Returns a simplified string representation of the given timedelta.

    Zero components are omitted; seconds are shown when positive or when
    every other component is absent (so the result is never empty).
    """
    parts = []
    if days != 0:
        parts.append(f'{days:d}d')
    if hours > 0:
        parts.append(f'{hours:d}h')
    if minutes > 0:
        parts.append(f'{minutes:d}min')
    if seconds > 0 or not parts:
        parts.append(f'{seconds:d}sec')
    return ' '.join(parts)
def _GetHandlerFromRequest(request): """Safely extracts a request handler from a Request. Args: request: A webapp2.Request instance. Returns: The handler that corresponds to the given Request (which can be a class or method), or None if there is no such handler (e.g. 404's). """ route = getattr(request, 'route', None) if route is not None: return getattr(route, 'handler', None)
def fileparts(file):
    """
    :param file: a filepath
    :return: the root, name, and extension of the file
    """
    import os.path
    root, ext = os.path.splitext(file)
    name = os.path.split(root)[1]
    # A bare filename has no directory part, so root equals name; report
    # an empty root in that case.
    return ('', name, ext) if root == name else (root, name, ext)
def safe_int(val, default=None):
    """
    Returns int() of val; if val is not convertible to int, returns
    default instead.

    :param val:
    :param default:
    """
    try:
        return int(val)
    except (ValueError, TypeError):
        return default
def convert_to_hexbytes(i, sep=None):
    """
    Convert an integer input to a hexadecimal string.

    Optionally separate converted bytes with a selected delimiter. When a
    separator is requested the hex string is zero-padded to an even
    length first, so an odd-width value such as 0x100 renders as '01:00'
    (previously the pairwise zip silently dropped the final nibble).
    """
    hexval = f"{i:x}"
    if not sep:
        return hexval
    if len(hexval) % 2:
        hexval = "0" + hexval
    return sep.join(hexval[j:j + 2] for j in range(0, len(hexval), 2))
def get_vars(triple):
    """Get variables in a triple pattern"""
    # Variables are the mapping values that begin with '?'.
    return {value for value in triple.values() if value.startswith('?')}
def strip_and_get(val, fallback=""):
    """
    Return the enclosing text or provided fallback value

    :param val: item to get text and strip whitespace
    :param fallback: fallback when val is None
    :return: stripped text value
    """
    if not val:
        return fallback
    # Strip the same explicit whitespace set as before (space/tab/CR/LF).
    return val.text.strip(' \t\n\r')
def get_value(obj, key):
    """Retrieve a value by key if exists, else return None."""
    try:
        return obj[key]
    except KeyError:
        return None
def li(list_, c):
    """
    Returns an item of a list_, or False if this element doesn't exist.
    """
    if c < len(list_):
        return list_[c]
    return False
def d(value: bytes) -> str:
    """
    Decode a bytestring for interpolating into an error message.
    """
    # UTF-8 with backslashreplace, matching bytes.decode's defaults.
    return str(value, "utf-8", "backslashreplace")
def get_floating_latest(api_versions_list, preview_mode):
    """Get the floating latest, from a random list of API versions.

    In preview mode (or when only previews exist) the overall latest
    wins; otherwise the latest known stable version is returned.
    Versions compare lexicographically, as before.
    """
    versions = list(api_versions_list)
    absolute_latest = max(versions)
    stable_versions = [v for v in versions if "preview" not in v]
    if preview_mode or not stable_versions:
        return absolute_latest
    return max(stable_versions)
def ExpandUidPool(uid_pool):
    """Expands a uid-pool definition to a list of uids.

    @param uid_pool: a list of integer pairs (lower, higher range boundaries)
    @return: a list of integers (deduplicated across overlapping ranges)

    """
    uids = set()
    for lower, higher in uid_pool:
        uids |= set(range(lower, higher + 1))
    return list(uids)
def get_managed_instance_group(project_id, name, healthcheck, instance_template_name, base_instance_name, zone):
    """ Generate a managed instance group resource. """
    properties = {
        'project': project_id,
        'instanceTemplate': f'$(ref.{instance_template_name}.selfLink)',
        'baseInstanceName': base_instance_name,
        'zone': zone,
        'targetSize': 1,
        'autoHealingPolicies': [{
            'healthCheck': f'$(ref.{healthcheck}.selfLink)',
            'initialDelaySec': 120,
        }],
    }
    return {
        'name': name,
        # https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroupManagers
        'type': 'gcp-types/compute-v1:instanceGroupManagers',
        'properties': properties,
    }
def performance_info_table(channel, tx, ctle, dfe, jitter, plotting, total) -> str:
    """Return a string html table of the performance of each element from the last run."""
    row_sep = ' </TR>\n <TR align="right">\n'
    parts = [
        "<H2>Performance by Component</H2>\n"
        '<TABLE border="1">\n'
        ' <TR align="center">\n'
        " <TH>Component</TH><TH>Performance (Msmpls./min.)</TH>\n"
    ]
    component_rows = [
        ("Channel", channel),
        ("Tx Preemphasis", tx),
        ("CTLE", ctle),
        ("DFE", dfe),
        ("Jitter Analysis", jitter),
        ("Plotting", plotting),
    ]
    for label, value in component_rows:
        parts.append(row_sep)
        parts.append(f' <TD align="center">{label}</TD><TD>{value}</TD>\n')
    parts.append(row_sep)
    # TOTAL keeps the historical %6.3f fixed-width formatting.
    parts.append(' <TD align="center"><strong>TOTAL</strong></TD><TD><strong>%6.3f</strong></TD>\n' % (total))
    parts.append(" </TR>\n</TABLE>\n")
    return "".join(parts)
def _convert_to_json(tweet_list): """ Convert a list of tweepy tweet objects to json format. Args: tweet_list (list): list of tweepy tweet objects Returns: modified list in json format """ tweet_list = [i._json for i in tweet_list] return tweet_list
def reverse_int(integer: int) -> int:
    """Reverse an integer's decimal digits, preserving sign.

    Returns 0 when the reversed value overflows signed 32-bit range.
    Bounds are now inclusive: the valid range is [-2**31, 2**31 - 1],
    whereas the previous strict comparisons wrongly rejected the exact
    extremes (e.g. a reversal equal to 2147483647 returned 0).
    """
    reversed_int = int(str(abs(integer))[::-1])
    if integer < 0:
        reversed_int *= -1
    return reversed_int if -(2**31) <= reversed_int <= 2**31 - 1 else 0
def get_binary_indexes(coordinates):
    """Generate binary coordinates.

    Each 3-component coordinate becomes a tuple of three 8-bit binary
    strings.
    """
    bin_coordinates = []
    for coord in coordinates:
        bin_coordinates.append(
            tuple("{:08b}".format(coord[axis]) for axis in range(3))
        )
    return bin_coordinates
def joinParameter(*args):
    """Joins dictionaries in a consistent way

    For multiple occurrences of a key the value is defined by the first
    key : value pair.

    Arguments:
        *args: list of parameter dictionaries

    Returns:
        dict: the joined dictionary
    """
    joined = {}
    for parameter in args:
        for key, value in parameter.items():
            # setdefault keeps the first-seen value for duplicate keys.
            joined.setdefault(key, value)
    return joined
def time_at_timestamp(timestamp_ms: int) -> float:
    """Convert millisecond Epoch timestamp to Python float time value.

    Args:
        timestamp_ms: Epoch timestamp in milliseconds.

    Returns:
        Python time value in float.
    """
    # True division by a float yields a float directly.
    return timestamp_ms / 1000.0
def get_split_up_spoon(matches):
    """Split up spoon code.

    Joins each regex match's group with one trailing space (including
    after the last match).
    """
    return "".join("{} ".format(match.group()) for match in matches)
def get_bed_len(bed_line):
    """Return the summed exon (block) lengths of a BED12 line.

    Field 10 (0-based) holds comma-separated block sizes, possibly with a
    trailing comma.
    """
    block_sizes_field = bed_line.split("\t")[10]
    return sum(int(size) for size in block_sizes_field.split(",") if size)
def psy_const_of_psychrometer(psychrometer, atmos_pres):
    """
    Calculate the psychrometric constant for different types of
    psychrometer at a given atmospheric pressure.

    Based on FAO equation 16 in Allen et al (1998).

    :param psychrometer: Integer between 1 and 3 which denotes type of
        psychrometer:
        1. ventilated (Asmann or aspirated type) psychrometer with
           an air movement of approximately 5 m/s
        2. natural ventilated psychrometer with an air movement
           of approximately 1 m/s
        3. non ventilated psychrometer installed indoors
    :param atmos_pres: Atmospheric pressure [kPa]. Can be estimated using
        ``atm_pressure()``.
    :return: Psychrometric constant [kPa degC-1].
    :rtype: float
    """
    # Coefficient by ventilation type of the wet bulb; equality-based
    # lookup mirrors the original behavior for any comparable input.
    for ptype, coefficient in ((1, 0.000662), (2, 0.000800), (3, 0.001200)):
        if psychrometer == ptype:
            return coefficient * atmos_pres
    raise ValueError(
        'psychrometer should be in range 1 to 3: {0!r}'.format(psychrometer))
def parantheses_delta(line):
    """
    Return the number of opening minus the number of closing parantheses
    in LINE. Don't count those inside strings or chars.
    """
    escaped = False
    in_squote = False
    in_dquote = False
    delta = 0
    for c in line:
        if escaped:
            escaped = False
        elif in_dquote:
            if c == '\\':
                escaped = True
            elif c == '"':
                in_dquote = False
        elif in_squote:
            if c == '\\':
                escaped = True
            elif c == "'":
                in_squote = False
        elif c == '(':
            delta += 1
        elif c == ')':
            delta -= 1
        elif c == '"':
            in_dquote = True
        elif c == "'":
            # Fixed typo: was `in_squote -= True`, which only worked by
            # accident because False - True == -1 is truthy.
            in_squote = True
    return delta
def get_game_url(season, game):
    """
    Gets the url for a page containing information for specified game from NHL API.

    :param season: int, the season
    :param game: int, the game

    :return: str, https://statsapi.web.nhl.com/api/v1/game/[season]0[game]/feed/live
    """
    return f'https://statsapi.web.nhl.com/api/v1/game/{season:d}0{game:d}/feed/live'
def format_error_report(shift_errors):
    """ Format the error report for Slack """
    sections = []
    for shift in shift_errors:
        lines = ['Shift: {}\n'.format(shift['shift']['start_dt'])]
        lines.extend(' {}\n'.format(error) for error in shift['errors'])
        lines.extend(' {}\n'.format(warning) for warning in shift['warnings'])
        lines.append('\n')
        sections.append(''.join(lines))
    return ''.join(sections)
def Heaviside(x):
    """Heaviside function Theta(x): 0 for x < 0, 1 for x > 0, 0.5 at 0."""
    if x < 0:
        return 0
    if x > 0:
        return 1
    return 0.5
def parse_resolution(resolution_string):
    """
    Parse and raise ValueError in case of wrong format

    @param resolution_string string representing a resolution, like "128x128"
    @return resolution as a tuple of integers
    """
    tokens = resolution_string.split('x')
    if len(tokens) != 2:
        # Include the offending value so failures are diagnosable
        # (previously a bare, message-less ValueError was raised).
        raise ValueError(f"invalid resolution format: {resolution_string!r}")
    return tuple(int(t) for t in tokens)
def pretty_print_cmdline(cmdline):
    """Pretty print a command line.

    Take a command line suitable to be passed to a Popen-like call and
    return a string that represents it in a way that preserves the
    structure of arguments and can be passed to bash as is: every item
    is wrapped in single quotes (embedded single quotes escaped with the
    '"'"' trick) and the items are joined with spaces.
    """
    quoted = []
    for argument in cmdline:
        escaped = argument.replace("'", "'\"'\"'")
        quoted.append("'" + escaped + "'")
    return " ".join(quoted)
def human_readable_stat(timedelta_seconds, stat_locale: dict = None):
    """
    Transform a timedelta expressed in seconds into a human readable string

    Parameters
    ----------
    timedelta_seconds
        Timedelta expressed in seconds
    stat_locale
        Optional dict mapping unit keys ("year", "month", "day", "hour",
        "minute", "second") to display suffixes

    Returns
    ----------
    string
        Human readable string
    """
    # None default instead of a mutable {} default (which is shared
    # across calls); behavior is otherwise unchanged.
    if stat_locale is None:
        stat_locale = {}
    timedelta_seconds = int(float(timedelta_seconds))
    years = timedelta_seconds // 31104000
    months = timedelta_seconds // 2592000
    days = timedelta_seconds // 86400
    hours = timedelta_seconds // 3600 % 24
    minutes = timedelta_seconds // 60 % 60
    seconds = timedelta_seconds % 60
    # Report only the single most significant non-zero unit.
    if years > 0:
        return str(years) + stat_locale.get("year", "Y")
    if months > 0:
        return str(months) + stat_locale.get("month", "MO")
    if days > 0:
        return str(days) + stat_locale.get("day", "D")
    if hours > 0:
        return str(hours) + stat_locale.get("hour", "h")
    if minutes > 0:
        return str(minutes) + stat_locale.get("minute", "m")
    return str(seconds) + stat_locale.get("second", "s")
def time_independent_HMAC_compare(a, b):
    """
    Compare two strings (case-insensitively) in time independent of
    their contents.

    No-one likes timing attacks. This function should probably not be
    part of the public API, and thus will be deprecated in a future
    release to be replaced with an internal function.
    """
    if len(a) != len(b):
        return False
    diff_bits = 0
    for x, y in zip(a.lower(), b.lower()):
        # Accumulate differences without short-circuiting, so the loop's
        # duration does not depend on where the first mismatch occurs.
        diff_bits |= ord(x) ^ ord(y)
    return diff_bits == 0
def split_list(self):
    """Divide a list by two and return two lists."""
    midpoint = len(self) // 2
    return self[:midpoint], self[midpoint:]
def compute_output(t0, t1):
    """Compute the network's output based on the "time to first spike" of
    the two output neurons."""
    if t0 is None or t1 is None:
        # Neither output neuron fired within the allotted time; give a
        # response which produces a large error.
        return -1.0
    # 1 when the spikes are within 1.0 ms of each other, 0 when more than
    # 11 ms apart, with linear interpolation in between; clamp to [0, 1].
    response = 1.1 - 0.1 * abs(t0 - t1)
    return min(1.0, max(0.0, response))
def get_line_ending(line):
    """Return line ending (the trailing whitespace of *line*, or '')."""
    trailing = len(line) - len(line.rstrip())
    return line[-trailing:] if trailing else ''
def get_processed_paths(image_id, paths_list):
    """Get processed paths from dictionary.

    Returns the first entry whose 'id' matches *image_id*; raises
    KeyError when no entry matches.
    """
    for item in paths_list:
        if item['id'] == image_id:
            return item
    # The value was not found anywhere in the dataset.
    raise KeyError(f"The item {image_id} was not found in the dataset.")
def _is_safe_size(n): """ Is the size of FFT such that FFTPACK can handle it in single precision with sufficient accuracy? Composite numbers of 2, 3, and 5 are accepted, as FFTPACK has those """ n = int(n) for c in (2, 3, 5): while n % c == 0: n /= c return (n <= 1)
def fourier_number(
    conductive_length_scale: float,
    conductivity: float,
    density: float,
    heat_capacity: float,
    time_scale: float,
) -> float:
    """
    Calculates the Fourier number from the parameters passed in:

        fourier_number = (conductivity * time_scale)
                         / (conductive_length_scale^2 * density * heat_capacity)

    :param conductive_length_scale: Length scale over which conduction
        occurs, measured in meters.
    :param conductivity: Thermal conductivity of the material, measured
        in Watts per meter Kelvin.
    :param density: Density of the medium, measured in kilograms per
        meter cubed.
    :param heat_capacity: Heat capacity of the conductive medium,
        measured in Joules per kilogram Kelvin.
    :param time_scale: Time scale of the simulation being run, measured
        in seconds.
    :return: The Fourier number based on these values.
    """
    numerator = conductivity * time_scale          # [W/m*K] * [s]
    denominator = (
        heat_capacity                              # [J/kg*K]
        * density                                  # [kg/m^3]
        * conductive_length_scale ** 2             # [m]^2
    )
    return numerator / denominator
def reverseString(st):
    """ Reverses a string """
    reversed_value = st[::-1]
    # str() keeps the historical behavior of always returning a string,
    # even if a non-string sequence is passed in.
    return str(reversed_value)
def blendConcaveInc(d=0.0, u=1.0, s=1.0, h=1.0):
    """
    blending function increasing concave

    d = delta x = xabs - xdr
    u = uncertainty radius of xabs estimate error
    s = tuning scale factor

    eq 3.12

    returns blend
    """
    d, u, s = float(d), float(u), float(s)
    denominator = (2 * s - u - d)
    # Degenerate denominator or delta at/beyond the scale: saturate at h.
    if denominator == 0 or d >= s:
        return h
    return h * (s - u) / denominator
def count_cigar_ops(cigar):
    """
    Tally operation lengths in a CIGAR string of <count><op> pairs.

    For curious people: regexes are very slow for parsing CIGAR strings.

    cigar: Unicode

    Returns (num_m, num_i, num_d, total_len): summed counts of match-like
    ops (M, =, X), insertions (I) and deletions (D), plus their total;
    counts for any other op characters are silently ignored.
    """
    b = 0  # start index of the digit run for the current op
    num_m, num_i, num_d = 0, 0, 0
    for i in range(len(cigar)):
        # Digits are all <= '9'; keep scanning until the op character.
        if cigar[i] <= '9':
            continue
        # Check if there are no digits before the op char.
        assert(b < i)
        count = int(cigar[b:i])
        op = cigar[i]
        b = i + 1
        if op == 'D':
            num_d += count
        elif op == 'I':
            num_i += count
        elif op in ['M', '=', 'X']:
            num_m += count
        else:  # pragma: no cover
            pass  # pragma: no cover
    # Check if there are dangling ops (trailing digits with no op char).
    assert(b == len(cigar))
    total_len = num_d + num_i + num_m
    return num_m, num_i, num_d, total_len
def cswap(swap, x_2, x_3):
    """shall be implemented in constant time (so NOT like it is now)"""
    return (x_3, x_2) if swap else (x_2, x_3)
def filter(case_ids, to_filter):
    """Filter cases.

    NOTE: this intentionally keeps its historical name, which shadows
    the builtin ``filter`` within this module.

    :param case_ids: Parameter list for pytest.mark.parametrize.
    :param to_filter: List of parameters to filter from case_ids.
    :return: Filtered case_ids.
    """
    kept = []
    for case in case_ids:
        if case not in to_filter:
            kept.append(case)
    return kept
def bellman_quality_equation(reward, gamma, next_state_value):
    """
    Bellman quality equation, simplified version:

        Q(s,a) = R(s,a) + gamma * sigma(T(s, a, s') * V(s'))
    """
    discounted_future = gamma * next_state_value
    return reward + discounted_future
def exactly_one_constraint(literals, encoding):
    """Given a list of literals and encoding information, returns a list
    of lines that implement the 'exactly one' constraint on the list of
    literals (that can then be inserted into a CNF file).

    Line 1 is the at-least-one clause; the remaining lines are the
    pairwise at-most-one clauses.
    """
    lines = ['{} 0'.format(' '.join(literals))]
    for i, first in enumerate(literals):
        for second in literals[i + 1:]:
            lines.append('-{} -{} 0'.format(first, second))
    return lines
def decapitalize_first(string):
    """Returns string with its first character decapitalized (if any)"""
    if not string:
        return string
    return string[0].lower() + string[1:]
def _sort_key_min_confidence_sd(sample): """Samples sort key by the min. confidence_sd.""" min_confidence_sd = float("+inf") for inference in sample["inferences"]: confidence_sd = inference.get("confidence_sd", float("+inf")) if confidence_sd < min_confidence_sd: min_confidence_sd = confidence_sd return min_confidence_sd
def rotcon2pmi(rotational_constant: float):
    """Convert rotational constants in units of MHz to Inertia, in units
    of amu A^2.

    The conversion factor is adapted from:
    Oka & Morino, JMS (1962) 8, 9-21
    This factor comprises h / pi^2 c.

    Parameters
    ----------
    rotational_constant: Corresponding rotational constant in MHz

    Returns
    -------
    Rotational constant converted to units of amu A^2
    """
    # NOTE(review): factor taken on trust from the original source; the exact
    # division order is preserved for bit-identical float results.
    conversion_factor = 134.901
    return 1 / (rotational_constant / conversion_factor)
def part1(data):
    """Score the first illegal closing character of each corrupted line.

    Chunks open with one of ( [ { < and must close with the matching
    ) ] } >. A corrupted line is one whose first mismatched closer is
    scored as: ) = 3, ] = 57, } = 1197, > = 25137. Incomplete lines
    (openers left unclosed) score nothing.

    :param data: iterable of chunk strings
    :return: total syntax error score over all corrupted lines
    """
    pairs = {
        "(": ")",
        "[": "]",
        "{": "}",
        "<": ">",
    }
    scores = {
        ")": 3,
        "]": 57,
        "}": 1197,
        ">": 25137,
    }
    total = 0
    for line in data:
        stack = []
        for ch in line:
            if ch in pairs:
                stack.append(ch)
            elif ch == pairs[stack[-1]]:
                stack.pop()
            else:
                # First illegal closer: report, score, and stop this line.
                print(f"Expected close to {stack[-1]} but found {ch}")
                total += scores[ch]
                break
    return total
def stringToBool( string ):
    """
    Converts a string with the contents 'true' or 'false' to the
    appropriate boolean value (case-sensitive).

    Examples:

    >>> stringToBool( 'true' )
    True

    >>> stringToBool( 'false' )
    False

    >>> stringToBool( 'True' )
    Traceback (most recent call last):
        ...
    ValueError: can't convert to boolean: True
    """
    lookup = { "true": True, "false": False }
    try:
        return lookup[string]
    except KeyError:
        raise ValueError( "can't convert to boolean: %s" % string )
def calculate_labels(first_pass):
    """Map each label from first-pass code to its instruction address.

    :param first_pass: iterable of (label, instruction) pairs; label may be
                       falsy for unlabeled instructions
    :return: dict mapping label -> address (cumulative instruction lengths)
    """
    labels = {}
    offset = 0
    for label, instruction in first_pass:
        if label:
            labels[label] = offset
        # Every instruction advances the address, labeled or not.
        offset += len(instruction)
    return labels
def quick_sort(values):
    """Sort a list via recursive quicksort (three-way partition).

    The original folded duplicates of the pivot into the "less" side and
    patched around an empty "more" side by popping an element across —
    causing duplicates to be redundantly re-sorted on every level. A
    three-way partition places all pivot-equal elements directly, which
    also guarantees both recursive calls shrink.

    :param values: list of mutually comparable items
    :return: new sorted list (input is not mutated)
    """
    if len(values) <= 1:
        return values
    pivot = values[0]
    smaller = [x for x in values if x < pivot]
    equal = [x for x in values if x == pivot]
    larger = [x for x in values if x > pivot]
    # `equal` is already in final position; only the flanks recurse.
    return quick_sort(smaller) + equal + quick_sort(larger)
def _int_to_bin(i, n): """ Convert integer i to a list of {0,1} of length n """ return [int(x) for x in list('{0:0{1}b}'.format(i,n))]
def verify_metadata_version(metadata, version=None):
    """
    Utility function to verify that the metadata has the correct version number.
    If no version number is passed, it will just extract the version number and return it.

    :param metadata: the content of an export archive metadata.json file
    :param version: string version number that the metadata is expected to have
    """
    try:
        found_version = metadata['export_version']
    except KeyError:
        raise ValueError("metadata is missing the 'export_version' key")

    # No expectation given: act as a plain getter.
    if version is None:
        return found_version

    if found_version != version:
        raise ValueError(
            f"expected export file with version {version} but found version {found_version}")

    return None
def factorial(n: int) -> int:
    """Compute the factorial of natural number n.

    The original recursed forever on negative input (until RecursionError)
    and hit the recursion limit for large n; this validates the input and
    iterates instead.

    :param n: non-negative integer
    :return: n!
    :raises ValueError: if n is negative
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
def prepare_forecast_ready_message(event: dict):
    """
    Prepare a message to notify users that forecasts are ready.

    :param event: event dict; the "dataset_group_name" key names the group
                  (missing key yields the literal text "None")
    :return: notification message string
    """
    group_name = event.get("dataset_group_name")
    return f"Forecast for {group_name} is ready!"
def count_vulns(data):
    """Count number of vulnerabilities in depscan output.

    CRITICAL findings are folded into the high count.

    Args:
        data (str): String representation of depscan "json" output

    Returns:
        [high, med, low]: Number of high/med/low severity vulnerabilities
    """
    def tally(severity):
        # Substring match against the raw text — no JSON parsing needed.
        return data.count('"severity": "{}"'.format(severity))

    high = tally("HIGH") + tally("CRITICAL")
    return high, tally("MEDIUM"), tally("LOW")
def extract_content(messages, msg_type):
    """Extract content from messages received from a kernel.

    :param messages: iterable of message dicts with 'header' and 'content'
    :param msg_type: only messages whose header 'msg_type' equals this
                     value contribute their content
    :return: list of matching 'content' values, in original order
    """
    contents = []
    for message in messages:
        if message['header']['msg_type'] == msg_type:
            contents.append(message['content'])
    return contents
def get_zhang_aspect_ratio(aspect_ratio):
    """Compute an equivalent aspect ratio according to Zhang.

    A cubic fit mapping a cylindrical fiber's aspect ratio to the
    equivalent ellipsoid aspect ratio.

    Parameters
    ----------
    aspect_ratio : float
        Aspect ratio of a cylindrical fiber.

    Returns
    -------
    float
        Equivalent aspect ratio for an ellipsoid.

    References
    ----------
    .. [1] Zhang, D.; Smith, D.E.; Jack, D.A.; Montgomery-Smith, S.,
       'Numerical Evaluation of Single Fiber Motion for Short-Fiber-Reinforced
       Composite Materials Processing.'
       J. Manuf. Sci. Eng. 2011, 133, 51002.
       https://doi.org/10.1115/1.4004831
    """
    # Cubic-fit coefficients from [1]; term order kept for bit-identical results.
    c3, c2, c1, c0 = 0.000035, -0.00467, 0.764, 0.404
    return (
        c3 * aspect_ratio ** 3
        + c2 * aspect_ratio ** 2
        + c1 * aspect_ratio
        + c0
    )