content
stringlengths
42
6.51k
def _get_connect_string(backend, user="openstack_citest", passwd="openstack_citest", database="openstack_citest"): """Return connect string. Try to get a connection with a very specific set of values, if we get these then we'll run the tests, otherwise they are skipped. """ if backend == "postgres": backend = "postgresql+psycopg2" return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % {'backend': backend, 'user': user, 'passwd': passwd, 'database': database})
def round_complex(elem: complex, precision: int) -> complex:
    """Round the real and imaginary parts of *elem* to *precision* digits."""
    real_part = round(elem.real, precision)
    imag_part = round(elem.imag, precision)
    return complex(real_part, imag_part)
def _filter(names, spec): """Remove names from list of names.""" if isinstance(spec, str): names.remove(spec) elif isinstance(spec, list): for item in spec: names.remove(item) elif isinstance(spec, dict): for item in spec.values(): names.remove(item) return names
def gen_edges(col_num, row_num):
    """Generate the names of the outer edges of a grid network.

    Parameters
    ----------
    col_num : int
        number of columns in the grid
    row_num : int
        number of rows in the grid

    Returns
    -------
    tuple of (list of str, list of str)
        column-side (left/right) and row-side (bot/top) edge names
    """
    edges_col = []
    for i in range(col_num):
        edges_col.append('left%d_%d' % (row_num, i))
        edges_col.append('right0_%d' % i)
    edges_row = []
    for i in range(row_num):
        edges_row.append('bot%d_0' % i)
        edges_row.append('top%d_%d' % (i, col_num))
    return edges_col, edges_row
def countsort(xs, key = lambda x: x, reverse = False):
    """
    Performs counting sort on the provided list

    Args:
        xs (List[T]): a list of objects to sort
        key (Callable([T], Q)): a function to produce a comparable object
        reverse (bool): True if the sorting order should be reversed, False by default

    Returns:
        The reference to the new list. The new list is sorted.
    """
    # NOTE(review): keys must be integers — they are used below as list
    # offsets (y - lower); confirm callers only pass integer keys.
    N = len(xs)
    if N == 0 or N == 1:
        return xs
    # ys: the sort key of each element; zs: output buffer, pre-sized so it
    # can be filled by index (initial values are overwritten).
    ys, zs = [key(x) for x in xs], list(range(N))
    lower, upper = min(ys), max(ys)
    # cs[k+1] counts occurrences of key value (lower + k); the extra slot
    # shifts counts so the prefix sums below yield start offsets.
    cs = [0 for i in range(0, upper - lower + 2)]
    for y in ys:
        cs[y - lower + 1] += 1
    # Prefix sums: cs[k] becomes the first output index for key (lower + k).
    for i in range(1, len(cs)):
        cs[i] += cs[i - 1]
    if reverse:
        # Place from the far end to produce descending order.
        for i in range(N):
            ci = ys[i] - lower
            zs[N - 1 - cs[ci]] = xs[i]
            cs[ci] += 1
    else:
        # Stable ascending placement: equal keys keep input order.
        for i in range(N):
            ci = ys[i] - lower
            zs[cs[ci]] = xs[i]
            cs[ci] += 1
    return zs
def order_people_heights(heights, in_fronts):
    """Order people so each has exactly its `in_front` count of taller
    people standing before it.

    heights = [5, 3, 2, 6, 1, 4]
    people_in_front = [0, 1, 2, 0, 3, 2]

    Process people shortest-first: all still-unplaced people are taller,
    so a person with constraint k must occupy the k-th currently-empty
    slot.

    Fixed: the original first branch assigned into `people` (the sorted
    list of (height, in_front) tuples) instead of `ordered_people`,
    corrupting the input and leaving unfilled slots in the result.

    :param heights: list of heights
    :param in_fronts: list of taller-people-in-front counts, aligned with heights
    :return: list of heights in an order satisfying every constraint
    """
    people = sorted(zip(heights, in_fronts), key=lambda p: p[0])
    ordered_people = [None] * len(people)
    for height, in_front in people:
        # The in_front-th empty slot is where this person must stand.
        empty_slots = [i for i, h in enumerate(ordered_people) if h is None]
        ordered_people[empty_slots[in_front]] = height
    return ordered_people
def get_total_time(input_time):
    """Convert a track duration from milliseconds to whole seconds."""
    milliseconds = int(input_time)
    return int(milliseconds / 1000)
def _cmplx_rdiv_ ( s , o ) : """divide complex values >>> r = other / v """ return o * ( 1.0 / complex ( s ) )
def _is_ros_binary_type(field_type): """ Checks if the field is a binary array one, fixed size or not list(bytearray(de(encoded_data))) _is_ros_binary_type("uint8") >>> False _is_ros_binary_type("uint8[]") >>> True _is_ros_binary_type("uint8[3]") >>> True _is_ros_binary_type("char") >>> False _is_ros_binary_type("char[]") >>> True _is_ros_binary_type("char[3]") >>> True """ return field_type.startswith('uint8[') or field_type.startswith('char[')
def url2pid(url):
    """Convert a URL to a product id.

    The id is the last two path segments joined by '_', with any double
    underscore collapsed.  A single trailing slash is ignored.

    :param url: url to convert
    :return: product id for url
    """
    if url.endswith('/'):
        url = url[:-1]
    segments = url.split('/')
    pid = segments[-2] + '_' + segments[-1]
    return pid.replace('__', '_')
def reflect_mpo_2site(mpo_2site):
    """Spatial reflection of a 2-site MPO: swap the site order."""
    return tuple(mpo_2site[::-1])
def dist(a, b):
    """Manhattan (L1) distance between 2-D points *a* and *b*."""
    dx = abs(a[0] - b[0])
    dy = abs(a[1] - b[1])
    return dx + dy
def year2Century(year):
    """Return the century a year belongs to.

    Years 1-99 map to century 1; exact multiples of 100 belong to the
    century they close (100 -> 1, 1700 -> 17); all other years round up.

    Fixed: the 3-digit non-century branch read ``int (str-year[0])`` —
    a typo for ``str_year`` that raised NameError/TypeError at runtime.

    Parameters
    ----------
    year : int

    Returns
    -------
    int : the century number
    """
    str_year = str(year)
    if len(str_year) < 3:
        return 1
    elif len(str_year) == 3:
        if str_year[1:3] == "00":  # 100, 200, ..., 900
            return int(str_year[0])
        else:  # 190, 250, 370, ...
            return int(str_year[0]) + 1
    else:  # 1700, 1800, 1900
        if str_year[2:4] == "00":
            return int(str_year[:2])
        else:  # 1705, 1810, 1930
            return int(str_year[:2]) + 1
def get_validated_value(key, value):
    """Prefix validator endpoint values with 'tcp://' when the scheme is missing."""
    is_validator_key = key in ("VALIDATOR_ENDPOINT", "VALIDATOR")
    if is_validator_key and not value.startswith("tcp://"):
        return "tcp://" + value
    return value
def get_state_ranges(states):
    """From a list of characters, finds all regions with similar values.

    Returns regions with respective values as:
    {(start, end): value, (start, end): value}
    """
    ranges = {}
    # values accumulates the indices of the currently-open run.
    values = []
    for i in range(len(states)):
        if i == 0:
            # Start position
            values.append(i)
        elif i == len(states)-1:
            # End position, stop range
            # NOTE(review): the final range is always closed with the
            # value of states[i-1], even if states[i] differs — confirm
            # this tail behavior is intended.
            values.append(i)
            ranges[(min(values), i)] = states[i-1]
        elif states[i] == states[i-1]:
            # Extend range
            values.append(i)
            continue
        elif states[i] != states[i-1]:
            # Stop existing range and begin new range
            if len(values) == 0:
                # Previous run was a single element right after a close.
                ranges[i-1, i] = states[i-1]
            else:
                ranges[(min(values), i)] = states[i-1]
            values = []
    return ranges
def remove_prepending(seq):
    """Collapse consecutive duplicate ASes (prepending) in an AS path.

    Returns ``(deduped_path, is_loopy)``; *is_loopy* is True when an AS
    reappears non-consecutively, i.e. a routing loop.
    """
    collapsed = []
    previous = None
    for asn in seq:
        if asn != previous:
            previous = asn
            collapsed.append(asn)
    is_loopy = len(set(seq)) != len(collapsed)
    return collapsed, is_loopy
def get_query_string(context):
    """Return the urlencoded GET query string from the request in *context*.

    Yields '' when no request is present.
    """
    request = context.get('request')
    if request is None:
        return ''
    return request.GET.urlencode()
def get_reverse(sequence):
    """Return *sequence* in reverse order; an empty input yields ''."""
    return sequence[::-1] if sequence else ""
def _get_resource_type(api_version: str, kind: str) -> str: """ >>> _get_resource_type("v1", "Pod") "pod" >>> _get_resource_type("batch/v1", "Job") "job.v1.batch" """ if '/' in api_version: api_group, version = api_version.split('/') return f'{kind}.{version}.{api_group}'.lower() else: return kind.lower()
def makeBundlesDictFromList(bundleList):
    """Convert a list of MetricBundles into a dict keyed by fileRoot.

    Raises NameError when two bundles share a fileRoot (alerts to
    potential filename duplication).

    Parameters
    ----------
    bundleList : list of MetricBundles
    """
    bDict = {}
    for bundle in bundleList:
        root = bundle.fileRoot
        if root in bDict:
            raise NameError('More than one metricBundle is using the same fileroot, %s' % (root))
        bDict[root] = bundle
    return bDict
def load_identifier(value):
    """Interpret the flag string 'y' as True; anything else is False."""
    return value == "y"
def get_subnets(pool):
    """Wrap each IP range of *pool* in a {'network': ...} dict.

    :param pool: list
    :return: list of dicts
    """
    subnets = []
    for network in pool:
        subnets.append({"network": network})
    return subnets
def _backtrack(table, source, target, i, j): """Backtracks the Longest Common Subsequence table to reconstruct the LCS. Args: table: Precomputed LCS table. source: List of source tokens. target: List of target tokens. i: Current row index. j: Current column index. Returns: List of tokens corresponding to LCS. """ if i == 0 or j == 0: return [] if source[i - 1] == target[j - 1]: # Append the aligned token to output. return _backtrack(table, source, target, i - 1, j - 1) + [target[j - 1]] if table[i][j - 1] > table[i - 1][j]: return _backtrack(table, source, target, i, j - 1) else: return _backtrack(table, source, target, i - 1, j)
def get_y_from_vw_example(vw_example):
    """Extract the regression label (the text before the first '|')
    from a Vowpal Wabbit example line."""
    label_part, _, _ = vw_example.partition('|')
    return float(label_part)
def gen_auth_resp(chall_list):
    """Generate a dummy authorization response (class name + domain per challenge)."""
    responses = []
    for chall in chall_list:
        responses.append(f"{chall.__class__.__name__}{chall.domain}")
    return responses
def _parse_field_value(line): """ Parse the field and value from a line. """ if line.startswith(':'): # Ignore the line return None, None if ':' not in line: # Treat the entire line as the field, use empty string as value return line, '' # Else field is before the ':' and value is after field, value = line.split(':', 1) # If value starts with a space, remove it. value = value[1:] if value.startswith(' ') else value return field, value
def no_set_ad(on=0):
    """Remove Active Desktop options from the Settings menu.

    (Docstring translated from Portuguese.)

    DESCRIPTION
        This entry removes the Active Desktop options from Settings in
        the Start Menu.

    COMPATIBILITY
        All Windows versions.

    MODIFIED VALUES
        NoSetActiveDesktop : dword : 00000000 = disable restriction;
        00000001 = enable restriction.
    """
    # Returns a Windows .reg snippet; the value written depends on `on`.
    if on :
        return '''[HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\\
CurrentVersion\\Policies\\Explorer]
"NoSetActiveDesktop"=dword:00000001'''
    else :
        return '''[HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\\
CurrentVersion\\Policies\\Explorer]
"NoSetActiveDesktop"=dword:00000000'''
def parseOtrFingerprintRecord(value):
    """Extract fingerprints ('f:'-prefixed, space-separated parts) from an
    OTR fingerprint record's value."""
    fingerprints = []
    for part in value.split(" "):
        if part.startswith("f:"):
            fingerprints.append(part[2:])
    return fingerprints
def parse_firewall_msg(msg):
    """Parse a syslog message from the firewall program into a python dictionary.

    Improvement: the original 16-branch if/elif chain is replaced with a
    data-driven prefix map; unmatched fields keep their defaults.

    :param msg: firewall msg from syslog
    :return: a dictionary of firewall related key value pairs
    """
    # Output key -> (message token prefix, default when the token is absent).
    field_map = {
        'src_ip': ('SRC=', -1),
        'dest_ip': ('DST=', -1),
        'proto': ('PROTO=', ''),
        'source_port': ('SPT=', -1),
        'destination_port': ('DPT=', -1),
        'mac_address': ('MAC=', ''),
        'out': ('OUT=', ''),
        'len': ('LEN=', -1),
        'tos': ('TOS=', -1),
        'proc': ('PREC=', -1),
        'ttl': ('TTL=', -1),
        'id': ('ID=', -1),
        'mark': ('MARK=', -1),
        'seq': ('SEQ=', -1),
        'code': ('CODE=', -1),
    }
    result = {'action': 'allow'}
    result.update({key: default for key, (_, default) in field_map.items()})
    for word in msg.split(' '):
        if word.startswith('DROP'):
            result['action'] = 'drop'
            continue
        for key, (prefix, _) in field_map.items():
            if word.startswith(prefix):
                result[key] = word.split('=')[1]
                break
    return result
def flatten(l):
    """Flatten one level of nesting: a list of lists becomes a single list."""
    flat = []
    for sublist in l:
        flat.extend(sublist)
    return flat
def index_map(idx_lines):
    """Map each integer orbital index to a tuple of its descriptive
    quantum numbers.

    :param idx_lines: lines defining the index -> orbital key
    """
    mapping = dict()
    for line in idx_lines:
        fields = line.split()
        mapping[int(fields[0])] = tuple(fields[1:])
    return mapping
def ListTestFolders(dname):
    """Return the folders under *dname* that contain a test.py file.

    Folders inside '.svn' trees and degenerate (length <= 1) paths are
    skipped.

    Input:
        dname - Root folder
    """
    import os
    found = []
    for root, dirs, files in os.walk(dname):
        if '.svn' in root or len(root) <= 1:
            continue
        if os.path.isfile(root + '/test.py'):
            found.append(root)
    return found
def get_region_string(coords, region_size):
    """Return the four corners of a square region as a GEE-format string.

    Parameters
    ==========
    coords: list of floats, [longitude, latitude] of the centre
    region_size: float, side length of the region, in degrees

    Returns
    =======
    region_string: str, string representation of the four corner
        coordinates of the region.
    """
    half = region_size / 2
    lon, lat = coords[0], coords[1]
    left, right = lon - half, lon + half
    bottom, top = lat - half, lat + half
    corners = [[left, top], [right, top], [right, bottom], [left, bottom]]
    return str(corners)
def text_in_bytes(text, binary_data, encoding="utf-8"):
    """Return True when *text* occurs in the decoded *binary_data*."""
    decoded = binary_data.decode(encoding)
    return text in decoded
def _is_within_close_bracket(s, index, node): """Fix to include right ']'.""" if index >= len(s) - 1: return False return s[index] == ']' or s[index + 1] == ']'
def dBm_to_watt(p_dBm):
    """Convert a power level from dBm to Watt."""
    milliwatt = 10 ** (p_dBm / 10)
    return milliwatt * 1e-3
def familyClustering(backwardEq, forwardEq, groupsContent):
    """Cluster family members into groups per the groupsContent
    redirection predicate.

    Args:
        backwardEq (set): set of backward equivalent states pairs
        forwardEq (set): set of forward equivalent states pairs
        groupsContent (set): redirection rules

    Returns:
        dict: dict of dicts with keys 'B' and 'F' holding backward and
        forward equivalent state pairs per group.
    """
    clusters = {frozenset(group): {'B': set(), 'F': set()} for group in groupsContent}
    # Assign each pair to every group that contains its first state.
    for tag, pairs in (('B', backwardEq), ('F', forwardEq)):
        for r, s in pairs:
            for group in groupsContent:
                if r in group:
                    clusters[frozenset(group)][tag].add(frozenset({r, s}))
    return clusters
def _get_main_message_object(message_no, main_messages): """Returns the main message object reference if the main message in Norwegian is in the list of main messages. :param message_no: :param main_messages: :return: """ for m in main_messages: if message_no == m.main_message_no: return m return None
def monotonic(values, mode="<", atol=1.e-8):
    """
    Returns False if values are not monotonic (decreasing|increasing).
    mode is "<" for a decreasing sequence, ">" for an increasing sequence.
    Two numbers are considered equal if they differ less that atol.

    .. warning:
        Not very efficient for large data sets.

    >>> values = [1.2, 1.3, 1.4]
    >>> monotonic(values, mode="<")
    False
    >>> monotonic(values, mode=">")
    True
    """
    if len(values) == 1:
        return True
    if mode not in ("<", ">"):
        raise ValueError("Wrong mode %s" % mode)
    for prev, cur in zip(values, values[1:]):
        if abs(cur - prev) <= atol:
            continue  # within tolerance: treated as equal
        if mode == ">" and cur <= prev:
            return False
        if mode == "<" and cur >= prev:
            return False
    return True
def get_clockwise(pnts):
    """Order four points clockwise.

    :param pnts: [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]
    :return: points ordered left-top, right-top, right-bottom, left-bottom
    """
    by_y = sorted(pnts, key=lambda pt: pt[1])
    top_pair, bottom_pair = by_y[:2], by_y[2:]
    # Top two points: smaller x first (left-top then right-top).
    if top_pair[0][0] < top_pair[1][0]:
        ordered = [top_pair[0], top_pair[1]]
    else:
        ordered = [top_pair[1], top_pair[0]]
    # Bottom two points: larger x first (right-bottom then left-bottom).
    if bottom_pair[0][0] > bottom_pair[1][0]:
        ordered += [bottom_pair[0], bottom_pair[1]]
    else:
        ordered += [bottom_pair[1], bottom_pair[0]]
    return ordered
def oddify(num):
    """
    Return ``num`` unchanged when odd, else the next odd number.

    Examples
    --------
    >>> oddify(1)
    1

    >>> oddify(4)
    5
    """
    if num % 2 == 0:
        return num + 1
    return num
def _find_prev_change(changes, current_row, wrap): """Find the previous line before current row in changes. Arguments: changes (list): The list of first lines of changed hunks. current_row (int): The row to start searching from. wrap (bool): If True continue with first change after end of file. Returns: int: The previous line in changes. """ return next( (change for change in reversed(changes) if change < current_row), changes[-1] if wrap else changes[0])
def _dittus_boelter(Re, Pr, consts): """Return the Nussult number evaluated by the Dittus-Boelter correlation""" return consts[0] * (Re**consts[1] * Pr**consts[2]) + consts[3]
def _ch_disp_name(ch): """Convert channel names like 'proc_Plasmid' to 'Plasmid'""" return '_'.join(ch.split('_')[1:])
def shout_echo(word1, echo=1):
    """Repeat *word1* *echo* times and append three exclamation marks."""
    repeated = word1 * echo
    return repeated + '!!!'
def isfloat(value) -> bool:
    """Return True when *value* can be converted with float().

    Fixed: float() raises TypeError (not ValueError) for values of an
    unconvertible type such as None or a list; those now return False
    instead of propagating.
    """
    try:
        float(value)
    except (ValueError, TypeError):
        return False
    return True
def create_hero_lookup(hero_records: list):
    """Index the OpenDota hero records by their 'id' field.

    :param hero_records: List of the Hero info data structure returned by OpenDotaApi
    :return: dictionary of hero records indexed by hero ID
    """
    return {record['id']: record for record in hero_records}
def descend(pos, wiggle, stds):
    """Find the best sample boundary near *pos*: within +/- *wiggle*
    entries, pick the one with the smallest std and return its stored
    position (third tuple element)."""
    window = stds[pos - wiggle:pos + wiggle]
    best = min(window, key=lambda entry: entry[0])
    return best[2]
def GenerateFilename(dimension, element_type, internal_format, format, type):
    """Generate a lowercase test filename from the texture parameters.

    Note: element_type is accepted for signature compatibility but not
    used in the filename.
    """
    stem = "-".join(("tex-" + dimension + "d", internal_format, format, type))
    return (stem + ".html").lower()
def split_imperative(lst):
    """
    Imperatively splits the lst into two equal
    (unless len(lst) is odd) halves with a single scan of lst
    """
    # l: the left half being built; r: the right half, used as a shifting
    # buffer whose oldest element is promoted to l when l falls behind.
    # NOTE(review): raises IndexError for an empty lst — confirm callers
    # guarantee non-empty input.
    l = [lst[0]]
    r = []
    i = 1  # number of elements committed to the left half
    j = 0  # number of elements appended directly to the right buffer
    for item in lst[1:]:
        if i < j + 1:
            # Left half is behind: move the oldest right element over and
            # push the new item onto the right buffer.
            l.append(r[0])
            r = r[1:] + [item]
            i += 1
        else:
            r.append(item)
            j += 1
    return (l, r)
def seq_score(seq, matrix, start_index=0):
    """
    Score >seq< by multiplying matrix[e][i] over its elements, with
    positions counted from *start_index*.  Returns a float (None for an
    empty sequence).
    """
    score = None
    for position, element in enumerate(seq, start=start_index):
        value = matrix[element][position]
        score = value if score is None else score * value
    return score
def create_image_slug(name: str, reverse: bool=False):
    """Create an image slug

    Example
    -------
        an_image_slug.jpg

    Parameters
    ----------
        reverse: from an image slug, guess the name of the image:
        an_image_slug becomes in that case 'An image slug'
    """
    if reverse:
        if '_' in name:
            spaced_name = name.split('_')
            # The chunk containing '.' carries the extension; keep only
            # its stem.  NOTE(review): assumes some chunk contains '.';
            # an extensionless slug raises IndexError here — confirm.
            cleaned_name = [name.split('.') for name in spaced_name if '.' in name][0][0]
            spaced_name.pop(-1)
            spaced_name.append(cleaned_name)
            return ' '.join(spaced_name).capitalize()
        # NOTE(review): a slug without '_' falls through and is
        # re-slugged below instead of being reversed — verify intended.
    image_name = '_'.join(name.split(' '))
    return f'{image_name.strip().lower()}.jpg'
def findInDoubleMatrix(lst, val):
    """Find every occurrence of *val* in the 2-D list *lst*.

    Returns a list of [row, col] index pairs; useful for looking up
    values in the mapping given above.
    """
    return [[i, j]
            for i, row in enumerate(lst)
            for j, cell in enumerate(row)
            if cell == val]
def is_multisig(policy):
    """Return True when *policy* describes a p2wsh multisig wallet
    (type mentions p2wsh and m, n and cosigners are present)."""
    if 'type' not in policy or 'p2wsh' not in policy['type']:
        return False
    return all(key in policy for key in ('m', 'n', 'cosigners'))
def quick_sort(lst):
    """Create a sorted copy of the given list using quick sort.

    Fixed: the pivot (lst[0]) was partitioned into `left` together with
    the rest of the list, which both duplicated elements in the output
    and made the recursion never shrink (RecursionError on e.g. [2, 1]).
    Only lst[1:] is partitioned now.
    """
    if len(lst) <= 1:
        return lst
    pivot = lst[0]
    left = [x for x in lst[1:] if x <= pivot]
    right = [x for x in lst[1:] if x > pivot]
    return quick_sort(left) + [pivot] + quick_sort(right)
def diff(a: str, b: str, a_name: str, b_name: str) -> str:
    """Return a unified diff string between strings `a` and `b`.

    Borrowed from black.
    """
    import difflib
    a_lines = [f"{line}\n" for line in a.splitlines()]
    b_lines = [f"{line}\n" for line in b.splitlines()]
    delta = difflib.unified_diff(a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5)
    return "".join(delta)
def elapsed_time(start, end):
    """Format the seconds between *start* and *end* as HH:MM:SS.ss."""
    total_seconds = end - start
    hours, rem = divmod(total_seconds, 3600)
    mins, secs = divmod(rem, 60)
    return "{:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(mins), secs)
def _df_instance_to_path(df_inst): """Convert a df instance name to a mountpoint""" # df_root is not a dynamic file system. Ignore that one. if df_inst == 'df_root': return '/' else: # For all others replace all '-' with '/' return('/' + df_inst[3:].replace('-', '/'))
def UsersOwnersOfHotlists(hotlists):
    """Returns a set of all users who are owners in the given hotlists."""
    owners = set()
    for hotlist in hotlists:
        owners |= set(hotlist.owner_ids)
    return owners
def get_geom_steps_manu_hdu(manu, amp):
    """Get x and y steps (+1 or -1) to convert between readout and
    physical orientation for a particular amp.

    Parameters
    ----------
    manu : `str`
        Manufacturer, 'ITL' or 'E2V'
    amp : `int`
        HDU index

    Returns
    -------
    (step_x, step_y) : tuple of `int`
        Steps to take in x and y to go from readout to physical order

    Raises
    ------
    ValueError : for an unknown manufacturer
    """
    if manu == 'ITL':
        flip_y = -1
    elif manu == 'E2V':
        flip_y = 1
    else:
        raise ValueError("Unknown CCD type %s" % manu)
    # The first 8 amps share one orientation; the rest are mirrored in x
    # and flipped in y per the manufacturer.
    if amp <= 8:
        return (1, -1)
    return (-1, flip_y)
def days_in_month(year, month):
    """
    Input:
        :int year
        :int month
    Output:
        :int : number of days in the month (Gregorian leap-year rules
               for February)
    """
    if month in {1, 3, 5, 7, 8, 10, 12}:
        return 31
    if month in {4, 6, 9, 11}:
        return 30
    # February: leap years get 29 days.
    is_leap = (year % 4 == 0 and year % 100 != 0) or year % 400 == 0
    return 29 if is_leap else 28
def is_collections_type(type_):
    """
    Checks if the given type is a ``collections`` module type

    :param type_: The type to check
    :return: True if the type is part of the ``collections`` module,
        otherwise False
    :rtype: bool
    """
    if not isinstance(type_, type):
        return False
    return getattr(type_, "__module__", None) == "collections"
def convert_to_hexadecimal(bits, padding):
    """
    Converts bits to a hexadecimal character with padding.

    E.g. Converts [False, False, False, True], 0 to "1".
         Converts [True, False, False, False], 2 to "08"

    Args:
        bits: List of boolean values.
        padding: Integer of number of 0 padded places.

    Returns:
        string: Zero padded hexadecimal number.
    """
    bit_string = "".join("1" if bit else "0" for bit in bits)
    value = int(bit_string, 2)
    return format(value, "0%dx" % padding).upper()
def _nin(val1, val2) -> float: """Distance computation for 'not in'""" if val1 not in val2: return 0.0 return 1.0
def topRange(l, s):
    """
    Given a list of values, determines a range in which the values in the
    top s% must lie.

    Fixed: the s == 100 shortcut used ``s is 100`` — an identity test on
    an int literal that only worked by CPython small-int interning (and
    never for 100.0).  It is now a value comparison.

    Args:
        l: A list of values
        s: A percentage
    Return:
        A tuple (lowest value that could be in top s%, max value in l)
    """
    mx = max(l)
    mn = min(l)
    if s == 100:
        return (mn, mx)
    dx = (mx - mn) / 100.0  # width of 1% of the range covered by l's values
    min_thresh = mx - (s * dx)
    return (min_thresh, mx)
def _list_str(lst,sep): """ Returns the list as a string with the given separator and no brackets. """ ret = '' for x in lst: ret += str(x)+sep return ret[:-1*len(sep)] # remove extra separator
def get_object_info(header_dict, orca_class_name):
    """Collect all card dicts with the given class name from an ORCA header.

    Each matching card is annotated in place with a 'Crate' key holding
    its crate number.  Prints a warning when nothing matches.
    """
    matches = []
    for crate in header_dict["ObjectInfo"]["Crates"]:
        for card in crate["Cards"]:
            if card["Class Name"] != orca_class_name:
                continue
            card["Crate"] = crate["CrateNumber"]
            matches.append(card)
    if not matches:
        print('OrcaDecoder::get_object_info(): Warning: no object info '
              'for class name', orca_class_name)
    return matches
def has_dependencies(op, dag):
    """Checks if the node appears downstream of any node in the dag."""
    return any(op in spec.downstream for spec in dag.values())
def _ne(field, value, document): """ Returns True if the value of document[field] is not equal to a given value """ return document.get(field, None) != value
def find_block_length(encryption_oracle):
    """Returns the length of a block for the block cipher used by the
    encryption_oracle.

    Encrypt increasingly longer plaintexts until the ciphertext length
    jumps; the jump equals one block.
    """
    base_length = len(encryption_oracle(''))
    probe = ''
    while True:
        probe += 'A'
        current_length = len(encryption_oracle(probe))
        if current_length != base_length:
            return current_length - base_length
def rematch(offsets):
    """rematch bert token

    Map each token offset pair to the list of its character positions;
    a (0, 0) offset marks a special token and maps to an empty list.
    """
    mapping = []
    for start, end in offsets:
        if start == 0 and end == 0:
            mapping.append([])
        else:
            mapping.append(list(range(start, end)))
    return mapping
def _viewitems(obj): """Python2/3 compatible iteration over ditionary.""" func = getattr(obj, "viewitems", None) if not func: func = obj.items return func()
def get_uv_5(x, y):
    """Velocity field for exercise 6.1.5: u = x(2 - x - y), v = x - y."""
    u = x * (2 - x - y)
    v = x - y
    return u, v
def successors_of(state):
    """Generate all legal moves for the pile-splitting game.

    A pile of size p (p > 2) can be split into unequal parts
    (count, p - count) for every 1 <= count < p / 2; piles of size 1 or
    2 cannot be split.  Each move yields the new state sorted in
    descending order.

    Fixed: removed the leftover debug ``print(valid_moves)``.

    :param state: list of pile sizes
    :return: list of (count, new_state) pairs
    """
    valid_moves = []
    for i in range(len(state)):
        if state[i] == 1 or state[i] == 2:
            continue  # piles of 1 or 2 cannot be split into unequal parts
        count = 1
        while count < state[i] / 2:
            new_state = state.copy()
            new_state[i] = new_state[i] - count
            new_state.append(count)
            new_state.sort(reverse=True)
            valid_moves.append((count, new_state))
            count += 1
    return valid_moves
def audit_certs_issuer(certs, trusted_ca_list):
    """Check the root certificate's issuing organization against the
    trusted CA list.

    The TLS certs arrive in order with the root cert at the end.  Returns
    a concern string when the CA is missing or untrusted, otherwise None.
    """
    try:
        issuer_entries = certs[-1]["issuer"]
    except KeyError:
        return None
    org_name = None
    for entry in issuer_entries:
        if "organizationName" in entry:
            org_name = entry["organizationName"]
            break
    if org_name is None:
        # Didn't find an organizationName in the root cert.
        return "CA is none"
    if org_name not in trusted_ca_list:
        return 'CA not trusted: ' + org_name
    return None
def serialize_forma(analysis, type):
    """Convert the output of the forma250 analysis to json"""
    attributes = {
        'areaHa': analysis.get('area_ha'),
        'areaHaLoss': analysis.get('area_ha_loss'),
        'alertCounts': analysis.get('alert_counts'),
    }
    return {'id': None, 'type': type, 'attributes': attributes}
def get_colours(encoding=False):
    """get colour code

    Returns the ANSI colour table; every value is the empty string when
    *encoding* is False.
    """
    codes = {
        "default": "\033[;0m",
        "white_text": "\033[;035m",
        "white": "\033[;45m",
        "red": "\033[;41m",
        "cyan": "\033[;46m",
        "green": "\033[;42m",
        "magenta": "\033[;44m",
        "yellow": "\033[;43m",
    }
    if encoding:
        return codes
    return {name: "" for name in codes}
def take_first(it):
    """
    Retrieve the first value from an iterable object.

    Parameters
    ----------
    it : iterable
        Iterable object to retrieve the first value from.

    Returns
    -------
    first : number or string or ndarray or None
        First value from the iterable object. Will be None when the
        iterable is empty.
    """
    return next(iter(it), None)
def determine_label_yolo(label, labels):
    """
    Definition: Converts label to index in label set

    Parameters:
        label - label from file
        labels - list of labels
    Returns:
        index of label in labels (in str format)
    """
    index = labels.index(label)
    return str(index)
def assign_to_date(commits, dates):
    """Add each commit to a list according to date

    @param commits: list of commit dicts
    @param dates: dict of commit dicts by date '%Y-%m-%d'
    @returns: dict of lists by dates
    """
    for commit in commits:
        day = commit['ts'].strftime('%Y-%m-%d')
        dates.setdefault(day, []).append(commit)
    return dates
def seedsIndex(filename, filenameSeeds):
    """Find the index of each seed name within the relevant-name list.

    (Docstring translated from Portuguese.)
    Input: two lists of names — the relevant names and the seed names.
    Output: the indices of the seeds in the relevant-name list.
    """
    index = []
    for name in filenameSeeds:
        # aux flags whether this seed was found among the relevant names.
        aux = -1
        for i in range(0, len(filename)):
            # Match on the frame-number suffix ('frame<N>.jpg') and on the
            # identifier before the first underscore of the basename.
            # NOTE(review): assumes both name lists follow the
            # '<id>_..._<frame>' naming convention — confirm upstream.
            if(name.split('_')[-1] == 'frame'+filename[i].split('_')[-1]+'.jpg') and (name.split('/')[-1].split('_')[0] == filename[i].split('_')[0]):
                aux=1
                index.append(i)
                break
        if(aux==-1):
            print(name)
            print("Seed not in relevant set!")
    return(index)
def secant(f, x1=-12345, x2=6789, maxIter=10000, tolVal=1e-10):
    """Find a root of *f* with the secant method.

    x1 and x2 are crucial for selecting which root is found.  Returns
    (root, iterations); exits the process when maxIter is exhausted.
    """
    for iteration in range(maxIter):
        step = (x2 - x1) / (f(x2) - f(x1)) * f(x2)
        xnew = x2 - step
        if abs(xnew - x2) < tolVal:
            break
        x1, x2 = x2, xnew
    else:
        print("Calculation exceeded maximum number of iterations!")
        exit()
    return xnew, iteration
def kelvin_to_celsius(arg1):
    """
    Function to convert kelvin to celsius. Takes a single float as an
    argument; note it returns the result as a string formatted to two
    decimal places.
    """
    celsius = arg1 - 273.15
    return "{:.2f}".format(celsius)
def quotestrip(word):
    """Remove quotes and/or double quotes around identifiers.

    Strips matching (possibly nested) quote pairs; returns None for
    falsy input.
    """
    if not word:
        return None
    while word and word[0] == word[-1] and word[0] in ("'", '"'):
        word = word[1:-1]
    return word
def are_keys_binary_strings(input_dict):
    """Check if the input dictionary keys are binary strings.

    Args:
        input_dict (dict): dictionary.

    Returns:
        bool: whether every key consists only of the characters '0'/'1'.
    """
    return all(set(key) <= {'0', '1'} for key in input_dict)
def angleSum(a, b):
    """Add two angles in degrees, returning a value mod 360."""
    total = a + b
    return total % 360
def get_offload_snapshot(module, array):
    """Return Snapshot (active or deleted) or None

    Searches the protection group's snapshots on the offload target for
    the name '<name>.<suffix>'.

    :param module: module object carrying 'name', 'suffix' and 'offload'
        params — presumably an Ansible module; confirm against callers.
    :param array: storage array client exposing get_pgroup().
    """
    try:
        snapname = module.params['name'] + "." + module.params['suffix']
        for snap in array.get_pgroup(module.params['name'], snap=True, on=module.params['offload']):
            if snap['name'] == snapname:
                return snapname
    except Exception:
        # Broad catch: any API or lookup failure is treated as not-found.
        return None
    # NOTE(review): falls through to an implicit None when no snapshot
    # matches — same observable result as the error path.
def _ci_to_hgvs_coord(s, e): """ Convert continuous interbase (right-open) coordinates (..,-2,-1,0,1,..) to discontinuous HGVS coordinates (..,-2,-1,1,2,..) """ def _ci_to_hgvs(c): return c + 1 if c >= 0 else c return (None if s is None else _ci_to_hgvs(s), None if e is None else _ci_to_hgvs(e) - 1)
def PS(n):
    """Returns powerset of range(n) (excluding null set) as tuples,
    ordered by subset size."""
    from itertools import combinations
    subsets = []
    for size in range(1, n + 1):
        subsets.extend(combinations(range(n), size))
    return subsets
def parse_statlog(string):
    """
    Statlog files are separated by variable number of spaces: split into
    rows of non-empty fields, dropping blank lines.
    """
    rows = []
    for raw_line in string.split("\n"):
        if not raw_line.strip():
            continue  # skip empty lines
        fields = [field for field in raw_line.split(" ") if field]
        rows.append(fields)
    return rows
def make_dict(keys, values):
    """Convert two lists or two variables into a dictionary.

    Raises ValueError when the two sequences differ in length.
    """
    if not isinstance(keys, (list, tuple)):
        return {keys: values}
    if len(keys) != len(values):
        raise ValueError("keys and values have different length.")
    return dict(zip(keys, values))
def process_single(word):
    """
    Process a single word, whether it's identifier, number or symbols.

    A word that starts with a digit must parse as an integer.

    :param word: str, the word to process
    :return: str, the input
    :raises ValueError: when a digit-led word is not a valid integer
    """
    starts_with_digit = word[0].isnumeric()
    if starts_with_digit:
        try:
            int(word)
        except ValueError:
            raise ValueError("Expression {} not valid".format(word))
    return word
def normalize_url_prefix(url_prefix):
    """Enforce a consistent URL representation.

    The normalized prefix begins and ends with '/'; None, '' and '/'
    all normalize to '/'.

    Examples:
        None        -> '/'
        'example'   -> '/example/'
        '/example'  -> '/example/'
        'example/'  -> '/example/'
        '/example/' -> '/example/'

    Args:
        url_prefix (str): The prefix

    Returns:
        str: The normalized prefix
    """
    if url_prefix in (None, "", "/"):
        return "/"
    prefix = url_prefix
    if not prefix.startswith("/"):
        prefix = "/" + prefix
    if not prefix.endswith("/"):
        prefix = prefix + "/"
    return prefix
def replace_short_forms(note: str, long_forms: list, span: list) -> str:
    """Replace each short form in `note` with its corresponding long form.

    Each entry of `span` is an (start, end) index pair locating a short
    form in `note`; the matching entry of `long_forms` is spliced in its
    place. Replacement runs right-to-left so that earlier spans remain
    valid while the string length changes.
    """
    result = note
    for expansion, idx_pair in zip(reversed(long_forms), reversed(span)):
        result = result[:idx_pair[0]] + expansion + result[idx_pair[1]:]
    return result
def is_password_parameter(data_model, source_type: str, parameter: str) -> bool:
    """Return whether the parameter of the source type is a password."""
    parameters = data_model["sources"][source_type]["parameters"]
    # A parameter missing from the data model (e.g. removed in a newer
    # version) is treated as a password to err on the safe side.
    fallback = dict(type="password")
    return str(parameters.get(parameter, fallback)["type"]) == "password"
def calc_confusion_counts(actual, predicted, sensitive, unprotected_vals, positive_pred):
    """
    Return confusion-matrix counts (TP, TN, FP, FN) separately for the
    unprotected (privileged) and protected groups, where C is the
    predicted classification.

    All sensitive values in `unprotected_vals` are treated as privileged;
    every other sensitive value is considered protected (and equivalent).
    Assumes `actual`, `predicted` and `sensitive` have the same length.

    :param actual: ground-truth labels
    :param predicted: predicted labels
    :param sensitive: sensitive-attribute value per instance
    :param unprotected_vals: collection of privileged sensitive values
    :param positive_pred: the label counted as the positive class
    :return: dict mapping "<group>_<TP|TN|FP|FN>" to float counts
    """
    # NOTE: the original version emitted a debug print of unprotected_vals
    # on every call; removed as leftover debugging output.
    counts = {
        "unprotected_TP": 0.0, "unprotected_TN": 0.0,
        "unprotected_FP": 0.0, "unprotected_FN": 0.0,
        "protected_TP": 0.0, "protected_TN": 0.0,
        "protected_FP": 0.0, "protected_FN": 0.0,
    }
    positive = str(positive_pred)
    for actual_val, predicted_val, sensitive_val in zip(actual, predicted, sensitive):
        group = "unprotected" if sensitive_val in unprotected_vals else "protected"
        predicted_pos = str(predicted_val) == positive
        actual_pos = str(actual_val) == positive
        if predicted_pos and actual_pos:
            outcome = "TP"
        elif predicted_pos:
            outcome = "FP"
        elif actual_pos:
            outcome = "FN"
        else:
            outcome = "TN"
        counts["{}_{}".format(group, outcome)] += 1
    return counts
def get_missed_cleavages(sequences: list, n_missed_cleavages: int) -> list:
    """
    Combine adjacent cleaved sequences to get sequences with missed cleavages.

    A peptide containing `n_missed_cleavages` missed cleavage sites is the
    concatenation of `n_missed_cleavages + 1` consecutive fully cleaved
    fragments.

    Args:
        sequences (list of str): the list of cleaved sequences, no missed
            cleavages are there.
        n_missed_cleavages (int): the number of missed cleavage sites.

    Returns:
        list (of str): the sequences with missed cleavages.
    """
    missed = []
    # BUGFIX: the slice previously started at k-1, which produced an empty
    # string for k == 0 (negative-index wraparound) and skipped the final
    # valid combination. Joining k .. k+n_missed_cleavages (inclusive)
    # yields every run of n+1 consecutive fragments.
    for k in range(len(sequences) - n_missed_cleavages):
        missed.append(''.join(sequences[k:k + n_missed_cleavages + 1]))
    return missed
def split_index(index, n=8):
    """Split a conversions index, which is a list of tuples
    (file position, number of lines, alignment position), one for each read,
    into `n` approximately equal parts.

    This function is used to split the conversions CSV for multiprocessing.

    :param index: index
    :type index: list
    :param n: number of splits, defaults to `8`
    :type n: int, optional

    :return: list of parts, where each part is a list of
        (file position, number of lines, alignment position) tuples
    :rtype: list
    """
    total_lines = sum(entry[1] for entry in index)
    # Adding one prevents integer division from producing a zero target.
    per_part = total_lines // n + 1

    parts = []
    pending = []
    pending_lines = 0
    # Greedily accumulate entries until a part reaches the target size.
    for entry in index:
        pending.append(entry)
        pending_lines += entry[1]
        if pending_lines >= per_part:
            parts.append(pending)
            pending = []
            pending_lines = 0
    if pending:
        parts.append(pending)
    return parts
def imageMetadata(dicomImageMetadata, sampleBidsEntities):
    """
    Dictionary with all required metadata to construct a BIDS-Incremental,
    as well as extra metadata extracted from the test DICOM image.
    """
    # DICOM-derived values override any overlapping sample BIDS entities.
    return {**sampleBidsEntities, **dicomImageMetadata}
def get_elapsed_time(time_in_seconds):
    """
    Helper function to get elapsed time in human-readable format.

    Parameters
    ----------
    time_in_seconds : float
        runtime, in seconds

    Returns
    -------
    str
        formatted human-readable string describing the time
    """
    # Pick the largest unit whose threshold the runtime stays below.
    for limit, divisor, unit in ((60, 1, 'seconds'), (3600, 60, 'minutes')):
        if time_in_seconds < limit:
            return '%.1f %s' % (time_in_seconds / divisor, unit)
    return '%.1f hours' % (time_in_seconds / 3600)