def dict_minus(d, *keys):
    """Delete key(s) from dict if they exist, returning the resulting dict."""
    # Make a shallow copy so the caller's dict is not mutated.
    d = dict(d)
    for key in keys:
        try:
            del d[key]
        except KeyError:
            pass
    return d
def formatUs(time): """Format human readable time (input in us).""" if time < 1000: return f"{time:.2f} us" time = time / 1000 if time < 1000: return f"{time:.2f} ms" time = time / 1000 return f"{time:.2f} s"
def rectCenter(rect0):
    """Return the center of the rectangle as an (x, y) coordinate."""
    (xMin, yMin, xMax, yMax) = rect0
    return (xMin + xMax) / 2, (yMin + yMax) / 2
def NeededPaddingForAlignment(value, alignment=8):
    """Returns the padding necessary to align value with the given alignment."""
    if value % alignment:
        return alignment - (value % alignment)
    return 0
def as_pairs_seq(iterable):
    """
    Return the given Python iterable as a sequence of nested (item, next) pairs.
    No explicit looping!
    """
    itr = iter(iterable)
    # Use a sentinel object to indicate the end of iteration rather than
    # exception handling because that is more functional in style.
    # Since any object can be an item in the iterable, use the iterator
    # itself as the sentinel because it is a new object that cannot
    # be an item in the pre-existing iterable.
    head = next(itr, itr)
    # Return an empty tuple if the iterator is exhausted
    if head is itr:
        return ()
    # Otherwise, return a pair with this item as the head
    else:
        return (head, as_pairs_seq(itr))
def avgprecision(results): """ :param results: List of True/False values """ total_points = 0.0 correct = 0.0 for i, p in enumerate(results): position = i + 1 if p: correct += 1 points = correct / position total_points += points return total_points / len(results)
def marginal_coalitions(player, order): """ :param player: :type player: str :param order: :type order: list :return: :rtype (set, set) """ pos_player = order.index(player) return set(order[:pos_player]), set(order[:pos_player + 1])
def _parallelogram_alpha_beta(px, ax, bx, idetidx, idet):
    """Compute the parallelogram coordinates (alpha, beta) of a point, used to
    check whether the point lies inside the parallelogram. The point is
    assumed to already lie in the plane of the parallelogram.

    With p = x + alpha * (a - x) + beta * (b - x), and px = p - x, ax = a - x,
    bx = b - x, the component equations are

        px[0] = alpha * ax[0] + beta * bx[0]
        px[1] = alpha * ax[1] + beta * bx[1]
        px[2] = alpha * ax[2] + beta * bx[2]

    Only two of them are needed. idetidx selects which 2x2 subsystem to solve
    (idetidx == k drops component k), with sub-determinants

        detA0 = ax[1] * bx[2] - ax[2] * bx[1]
        detA1 = ax[0] * bx[2] - ax[2] * bx[0]
        detA2 = ax[0] * bx[1] - ax[1] * bx[0]

    and idet = 1 / detA for the selected subsystem. alpha and beta then follow
    from Cramer's rule, e.g. for idetidx == 0:

        alpha = (px[1] * bx[2] - px[2] * bx[1]) / detA0
        beta  = (px[2] * ax[1] - px[1] * ax[2]) / detA0
    """
    alpha, beta = -1.0, -1.0
    if idetidx == 0:
        # components 1, 2:  a1 a2 | b1 b2
        alpha = px[1] * bx[2] - px[2] * bx[1]
        beta = px[2] * ax[1] - px[1] * ax[2]
    elif idetidx == 1:
        # components 0, 2:  a0 a2 | b0 b2
        alpha = px[0] * bx[2] - px[2] * bx[0]
        beta = px[2] * ax[0] - px[0] * ax[2]
    else:
        # idetidx == 2, components 0, 1:  a0 a1 | b0 b1
        alpha = px[0] * bx[1] - px[1] * bx[0]
        beta = px[1] * ax[0] - px[0] * ax[1]
    return alpha * idet, beta * idet
def _ext_id(id):
    """
    Extends id with / symbol if necessary

    :param id: id to extend
    :return: extended id
    """
    if id[-1:] != "/":
        return id + "/"
    return id
def build_risk_curves(years): """Creating lines/curves for risk thresholds on risk assessment graph for critical, moderate, substantial and safe zones Notes: Safe zone is categorised as under moderate risk Args: years (int): No. of years for analysis Returns: critical_risk (list): moderate_risk (list): substantial_risk (list): TO DO: Allow flexibility of risk threshold definitions which are currently fixed """ crit_def_prob = 0.50 crit_def_years = 3 sub_def_prob = 0.25 sub_def_years = 5 mod_def_prob = 0.1 mod_def_years = 10 critical_risk =[] substantial_risk =[] moderate_risk = [] for y in range(years+1): critical_risk.append(y * crit_def_prob / crit_def_years) substantial_risk.append(y * sub_def_prob / sub_def_years) moderate_risk.append(y * mod_def_prob / mod_def_years) return critical_risk, substantial_risk, moderate_risk
def divisibility_by_7(number: int) -> bool:
    """Check divisibility by 7 using the "double the last digit" rule.

    :type number: int
    """
    num = list(map(int, str(number)))
    # A single-digit number has no remaining digits, so test it directly.
    if len(num) == 1:
        return number % 7 == 0
    num1 = int("".join(map(str, num[:-1])))
    num2 = num[-1]
    return (num1 - num2 * 2) % 7 == 0
def scanlist(testlist):
    """ Process a testlist file """
    tests = [t.strip() for t in testlist if not t.startswith('#')]
    return [t for t in tests if t]
def get_path_to_surface_density_dir(name: str, data_directory: str) -> str: """Get the path to the directory where the star formation data should be stored Args: name (str): Name of the galaxy data_directory (str): dr2 data directory Returns: str: Path to h1 dir """ return f"{data_directory}/surf/{name}"
def use_storage(storage_conf: str) -> bool:
    """Evaluates if the storage_conf is defined.

    The storage will be used if storage_conf is not None, not empty and not "null".

    :param storage_conf: Storage configuration file.
    :return: True if defined. False on the contrary.
    """
    return storage_conf not in (None, "", "null")
def get_mismatched_bundles(base, other): """ Compares the bundles of two instances and returns the differences as list of dictionary. """ result = list() other_bundles = other[2] for key, value in base[2].items(): if key in other_bundles: if not value == other_bundles[key]: result.append({"base": value, "other": other_bundles[key]}) else: result.append({"base": value, "other": {'symbolicName': ""}}) return result
def parse_requirements(requirements):
    """
    Returns the list of requirements found in the requirements file
    """
    with open(requirements, "r") as req_file:
        return [l.strip('\n') for l in req_file
                if l.strip('\n') and not l.startswith('#')]
def calculate_results(sample_data, analysis, mass, dilution_factor=10, correction_factor=10000): """Calculate percentage results given raw results, dilution factor, and analysis type. Args: sample_data (dict): A dictionary of sample data. analysis (str): An analysis to calculate results for the analysis's analytes. mass (float): The recorded sample mass. dilution_factor (float): The dilution factor for the sample. correction_factor (float): A factor used to appropriately scale values to percentage. Returns: (dict): An updated dictionary of sample data. """ # FIXME: # analytes = get_analytes(analysis) analytes = [] for analyte in analytes: try: raw_value = float(sample_data[analyte]) sample_data[analyte] = ((raw_value * dilution_factor) / mass) / correction_factor except ValueError: continue return sample_data
def get_standard_format(seg):
    """
    Receives a period of time in seconds and returns a string with the
    standard time format HH:MM:SS.
    """
    # hours
    h = int(seg // 3600)
    if h > 24:
        dias = int(seg // 86400)
        seg = seg % 86400
        h = int(seg // 3600)
        seg = seg % 3600
        # minutes
        m = int(seg // 60)
        seg = seg % 60
        # seconds
        s = int(seg)
        return "{:02d} Dias {:02d}:{:02d}:{:02d}".format(dias, h, m, s)
    else:
        seg = seg % 3600
        # minutes
        m = int(seg // 60)
        seg = seg % 60
        # seconds
        s = int(seg // 1)
        return "{:02d}:{:02d}:{:02d}".format(h, m, s)
def decorated_test_task(**kwargs): """ Test task, echos back all arguments that it receives. This one is registered using a decorator """ print(f"The decorated test task is being run with kwargs {kwargs} and will echo them back") return kwargs
def match(line, keyword):
    """If the first part of line (modulo blanks) matches keyword,
    returns the end of that line. Otherwise returns None."""
    line = line.lstrip()
    length = len(keyword)
    if line[:length] == keyword:
        return line[length:]
    else:
        return None
def h(a, b):
    """Return the squared Euclidean distance between 2 points"""
    return (b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2
def get_asset_dict( source_location, source_name, target_location="", target_name=None, copy_method="copy", ): """Helper function to generate asset for a particular file Args: source_location (str): path to directory containing source file source_name (str): filename of source file target_location (str, optional): sub-directory to which file will be written, relative to run directory root. Defaults to empty string (i.e. root of run directory). target_name (str, optional): filename to which file will be written. Defaults to None, in which case source_name is used. copy_method (str, optional): flag to determine whether file is copied ('copy') or hard-linked ('link'). Defaults to 'copy'. Returns: dict: an asset dictionary """ if target_name is None: target_name = source_name asset = { "source_location": source_location, "source_name": source_name, "target_location": target_location, "target_name": target_name, "copy_method": copy_method, } return asset
def unicode_subscript(num): """Converts an integer to the unicode subscript representation of that integer. Reference """ n = str(num) subscript_dict = { '0': u'\u2080', '1': u'\u2081', '2': u'\u2082', '3': u'\u2083', '4': u'\u2084', '5': u'\u2085', '6': u'\u2086', '7': u'\u2087', '8': u'\u2088', '9': u'\u2089'} uni = u"" for u in n: uni += subscript_dict[u] return uni
def key(data, key_name):
    """ Returns the value in the given key_name of a dict. """
    return data.get(key_name)
def check_tx_type(tx: dict, kind: str) -> bool:
    """
    :param tx: transaction dictionary
    :param kind: string containing tx type
    :return: boolean
    """
    if tx["events"][0]["kind"] == kind:
        return True
    return False
def reverse(in_list1: list) -> list:
    """
    Reverse a list

    :param in_list1: The input list
    :return: the reversed version
    """
    if len(in_list1) == 0:
        return []
    _list = reverse(in_list1[1:])
    return _list + [in_list1[0]]
def parse_phone_numbers(phone_number_string): """ Arguments: phone_number_string {String} -- Phone number or numbers, as input by the user at registration. Commas separate phone numbers. Returns: phone_numbers {List} -- One string element per user phone number. """ phone_numbers = [phone_number.strip() for phone_number in phone_number_string.split(',')] return phone_numbers
def merge_parallel_outputs(data):
    """
    Parallel outputs return an array of dicts, one from each parallel output.
    This method combines the dicts into a single dict.

    Args:
        data (list(dict)): List of dicts
    Returns:
        (dict) merged dict
    """
    merge = {}
    for items in data:
        merge.update(items)
    return merge
def retrieve_file(filename):
    """
    Opens a file and returns its contents as UTF-8 encoded bytes
    (any UTF-8 BOM is stripped).

    :param filename:
    :return:
    """
    with open(filename, 'rb') as f:
        original_contents = f.read()
        decoded_contents = original_contents.decode('utf-8-sig').encode('utf-8')
    return decoded_contents
def split_query(query):
    """
    Generate a list of all of the partial queries for a given complete query
    """
    return [query[:i] for i in range(3, len(query) + 1)]
def _load_method_arguments(name, argtypes, args): """Preload argument values to avoid freeing any intermediate data.""" if not argtypes: return args if len(args) != len(argtypes): raise ValueError(f"{name}: Arguments length does not match argtypes length") return [ arg if hasattr(argtype, "_type_") else argtype.from_param(arg) for (arg, argtype) in zip(args, argtypes) ]
def exponential_1(x,x0,A,tau, offset): """ exponential function with one exponent """ from numpy import exp func = A*exp(-(x-x0)/tau)+offset return func
def unescape(string: str) -> str: """ Remove escaping symbols RediSearch stores for literal tokens. A SiteConfiguration configures "literal tokens" that indexing and querying should special-case to support searching with punctuation. These are tokens like "active-active". """ return string.replace("\\", "")
def make_edges(nodes, directed=True): """ Create an edge tuple from two nodes either directed (first to second) or undirected (two edges, both ways). :param nodes: nodes to create edges for :type nodes: :py:list, py:tuple :param directed: create directed edge or not :type directed: bool """ edges = [tuple(nodes)] if not directed: edges.append(nodes[::-1]) return edges
def check_file_isvid(filename): """ checks if a file has a video extension, accepted files are: '.mp4', '.mpg', '.avi' :param filename: (str) name of the file :return: (bool) """ list_extensions = ['.mpg', '.MPG', '.mp4', '.MP4', '.AVI', '.avi'] if filename[-4:] in list_extensions: return True else: return False
def threshold(x):
    """Function to apply to a pixel value to perform a threshold operation.

    args: int
    returns: int equal to 0 or 255
    """
    threshold_1 = 0
    if x < threshold_1:
        return 0
    else:
        return 255
def search(graph, start, target, avoiding=None):
    """Depth-first search through graph for a path from start to target,
    avoiding visited nodes."""
    # Initialize the set of visited nodes to avoid in the search if not passed as argument
    avoiding = {start} if not avoiding else avoiding
    for child in graph[start]:
        if child in avoiding:
            continue
        # In subsequent recursive calls to the search function we add to the set of visited nodes
        elif child == target or search(graph, child, target, avoiding | {child}):
            return True
    # No path exists from start to target (via any of its children, avoiding the specified nodes)
    return False
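A quick usage sketch for the recursive DFS above, on a small hypothetical adjacency-list graph (every node must appear as a key):

graph = {'A': ['B', 'C'], 'B': ['D'], 'C': [], 'D': []}
print(search(graph, 'A', 'D'))  # True: A -> B -> D
print(search(graph, 'D', 'A'))  # False: D has no outgoing edges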
def list_roles(*args):
    """DEPRECATED: Use list"""
    return list(*args)
def shellsplit(text): """Very simple shell-like line splitting. :param text: Text to split. :return: List with parts of the line as strings. """ ret = list() inquotes = False current = "" for c in text: if c == "\"": inquotes = not inquotes elif c in ("\t", "\n", " ") and not inquotes: ret.append(current) current = "" else: current += c if current != "": ret.append(current) return ret
def parse_idna_test_table(inputstream): """Parse IdnaTest.txt and return a list of tuples.""" tests = [] for lineno, line in enumerate(inputstream): line = line.decode("utf8").strip() if "#" in line: line = line.split("#", 1)[0] if not line: continue tests.append((lineno + 1, tuple(field.strip() for field in line.split(u";")))) return tests
def strtobool(str_val): """Convert a string representation of truth to true (1) or false (0). True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if 'val' is anything else. """ str_val = str_val.lower() if str_val in ('y', 'yes', 't', 'true', 'on', '1'): return True elif str_val in ('n', 'no', 'f', 'false', 'off', '0'): return False else: raise ValueError("invalid truth value %r" % (str_val,))
def _update_curr_pos(curr_pos, cmd, start_of_path): """Calculate the position of the pen after cmd is applied.""" if cmd[0] in 'ml': curr_pos = [curr_pos[0] + float(cmd[1]), curr_pos[1] + float(cmd[2])] if cmd[0] == 'm': start_of_path = curr_pos elif cmd[0] in 'z': curr_pos = start_of_path elif cmd[0] in 'h': curr_pos = [curr_pos[0] + float(cmd[1]), curr_pos[1]] elif cmd[0] in 'v': curr_pos = [curr_pos[0], curr_pos[1] + float(cmd[1])] elif cmd[0] in 'ctsqa': curr_pos = [curr_pos[0] + float(cmd[-2]), curr_pos[1] + float(cmd[-1])] return curr_pos, start_of_path
def seq(fr,to,by): """An analogous function to 'seq' in R Parameters: 1. fr: from 2. to: to 3. by: by (interval) """ if fr<to: return range(fr,to+abs(by),abs(by)) elif fr>to: if by>0: aseq = range(fr,to-by,-1*by) else: aseq = range(fr,to+by,by) else: aseq = [fr] if aseq[-1]>to: return aseq[:-1] else: return aseq
def _equals(a, b):
    """Checks recursively if two dictionaries are equal"""
    if isinstance(a, dict):
        for key in a:
            if key not in b or not _equals(a[key], b[key]):
                return False
        return True
    else:
        if isinstance(a, bytes):
            a = a.decode()
        if isinstance(b, bytes):
            b = b.decode()
        are_equal = a == b
        return are_equal if isinstance(are_equal, bool) else are_equal.all()
def replace_keyword(src: str, rpls: list) -> str:
    """
    Replace keyword ::r:: in `src` with the provided text in rpls.
    """
    replace_count = src.count("::r::")
    if replace_count == 0:
        return src
    # Strings are immutable, so a plain assignment is enough here.
    replaced = src
    for text in rpls:
        replaced = replaced.replace("::r::", text, 1)
    return replaced
def _le_unpack(byte):
    """Converts little-endian byte string to integer."""
    return sum([b << (8 * i) for i, b in enumerate(byte)])
def transpuesta_matriz_vec(mat):
    """
    Function that computes the transpose of a complex matrix or vector.

    :param mat: list representing the complex matrix or vector.
    :return: list representing the transpose of the complex matrix or vector.
    """
    fila = len(mat)
    columnas = len(mat[0])
    trans = []
    for i in range(columnas):
        trans.append([])
        for j in range(fila):
            trans[i].append(mat[j][i])
    return trans
def _convert_block_indexers_to_array_indexers(block_indexers, chunks): """Convert a dict of dask block indexers to array indexers. Parameters ---------- block_indexers : dict Dictionary mapping dimension names to slices. The slices represent slices in dask block space. chunks : dict Dictionary mapping dimension names to tuples representing the chunk structure of the given dimension. Returns ------- dict """ array_indexers = {} for dim, block_indexer in block_indexers.items(): if block_indexer.start is None: start = 0 else: start = sum(chunks[dim][: block_indexer.start]) stop = sum(chunks[dim][: block_indexer.stop]) array_indexers[dim] = slice(start, stop) return array_indexers
def count_keys_less(equal, m):
    """ Supporting function for counting_sort. """
    less = [0] * m
    for j in range(1, m):
        less[j] = less[j - 1] + equal[j - 1]
    return less
def shortenFEN(fen): """Reduce FEN to shortest form (ex. '111p11Q' becomes '3p2Q')""" return fen.replace('11111111','8').replace('1111111','7') \ .replace('111111','6').replace('11111','5') \ .replace('1111','4').replace('111','3').replace('11','2')
def labels_to_generate(epitope_files, ppi_files): """ Takes 2 lists of files and extracts the chains for which to generate labels. """ all_files = epitope_files + ppi_files chains = [] for f in all_files: chain = f.split('.')[0].split('_')[-1] if not chain in chains: chains.append(chain) return chains
def aio_s3_key(aio_s3_key_path, aio_s3_key_file) -> str: """A valid S3 key composed of a key_path and a key_file The key component of 's3://{bucket}/{key}' that is composed of '{key_path}/{key_file}'; the key does not begin or end with any delimiters (e.g. '/') :return: str for the key component of 's3://{bucket}/{key}' """ return f"{aio_s3_key_path}/{aio_s3_key_file}"
def get_ids(items):
    """ id fields from sorted list-of-dicts """
    return tuple(x['id'] for x in items)
def get_ngrams(sequence, n):
    """
    Given a sequence, this function should return a list of n-grams,
    where each n-gram is a Python tuple.
    """
    ngrams = []
    # When unigram, manually add START, as the algorithm below skips it
    if n == 1:
        ngrams.append(('START',))
    # Loop through corpus
    endRange = len(sequence) + 1
    for index_word in range(0, endRange):
        # Range of tuples
        tuple_gram = ()
        for index_gram in range(index_word - n + 1, index_word + 1):
            word = None
            # figure out word
            if index_gram < 0:
                word = 'START'
            elif index_gram >= len(sequence):
                word = 'STOP'
            else:
                word = sequence[index_gram]
            # construct tuple
            if word:
                tuple_gram = tuple_gram + (word,)
        # append to list
        ngrams.append(tuple_gram)
    return ngrams
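For illustration, a small hypothetical call to get_ngrams showing the START/STOP padding (the expected output is reasoned from the code above, not taken from the source):

tokens = ["natural", "language"]
print(get_ngrams(tokens, 2))
# [('START', 'natural'), ('natural', 'language'), ('language', 'STOP')]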
def position_to_index(v, nelx, nely):
    """
    Convert a position vector to the index of the element containing it
    """
    return int(v[0]) + int(v[1]) * nelx
def float_or_null(v):
    """Return a value coerced to a float, unless it's a None."""
    if v is not None:
        v = float(v)
    return v
def Devices_field_data(dataArray, deviceIds, inputField): """ #Retrieve device stats per device """ #get length of retrieved data dataLength = len(dataArray) deviceIdsLength = len(deviceIds) #initalize plotData plotData = [] #loop through deviceIds and append plotData for j in range(0, deviceIdsLength): deviceData = {} deviceData["deviceId"] = deviceIds[j] deviceData["fieldData"] = [] plotData.append(deviceData) #added the selected field's data to plotData for i in range(0, dataLength): for j in range(0, deviceIdsLength): if dataArray[i]["deviceID"] == plotData[j]["deviceId"]: plotData[j]["fieldData"].append(dataArray[i][inputField]) #create and return the output json return plotData
def update_active_output_renditions_metric(ml_channel_id, ml_channel_name, ml_channelgroup_names):
    """Update the metrics of the "Active Output Renditions (avg)" dashboard widget"""
    results = []
    for groupname in ml_channelgroup_names:
        entry = ["MediaLive", "ActiveOutputs", "OutputGroupName", groupname,
                 "ChannelId", ml_channel_id, "Pipeline", "0",
                 {"label": ml_channel_name + "-0"}]
        results.append(entry)
        entry = ["MediaLive", "ActiveOutputs", "OutputGroupName", groupname,
                 "ChannelId", ml_channel_id, "Pipeline", "1",
                 {"yAxis": "right", "label": ml_channel_name + "-1"}]
        results.append(entry)
    return results
def remove_words(text: str, set_of_words):
    """
    Removes any words present in the text that are included in the set_of_words
    """
    words = text.strip().split()
    words = [word for word in words if word not in set_of_words]
    return ' '.join(words)
def basename(file_name):
    """
    Extract base name from file_name.
    basename("test.e") -> "test"
    """
    fileParts = file_name.split(".")
    base_name = ".".join(fileParts[:-1])
    return base_name
def m_range(l):
    """ Routine to get magnetic quantum numbers in Orca format """
    m = [0]
    for i in range(l):
        m += [(i + 1), -(i + 1)]
    return m
def removeDuplicates(li: list) -> list: """ Removes Duplicates from bookmark file Args: li(list): list of bookmark entries Returns: list: filtered bookmark entries """ visited = set() output = [] for a, b in li: if a not in visited: visited.add(a) output.append((a, b)) return output
def bubbleSort3(nums):
    """
    Advanced version: cache the index of the last swapped element and stop at
    that index in the next loop, because the elements behind it are already
    sorted.

    :type nums: List[int]
    :rtype: List[int]
    """
    res = list(nums)  # I don't want to change the input list
    flag = len(res)
    while flag > 0:
        k = flag
        flag = 0
        for j in range(1, k):
            if res[j - 1] > res[j]:
                res[j - 1], res[j] = res[j], res[j - 1]
                flag = j
    return res
def toHex(version):
    """Converts a semantic version string to a hex string"""
    split = version.split(".")
    fullversion = ""
    for i, v in enumerate(split, start=0):
        fullversion += v.rjust(3, "0")
    return hex(int(fullversion))
def convert_path(url_rule: str) -> str:
    """
    convert "/api/items/<int:id>/" to "/api/items/{id}/"
    """
    subs = []
    for sub in str(url_rule).split("/"):
        if "<" in sub:
            if ":" in sub:
                start = sub.index(":") + 1
            else:
                start = 1
            subs.append("{{{:s}}}".format(sub[start:-1]))
        else:
            subs.append(sub)
    return "/".join(subs)
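A brief usage sketch for convert_path with hypothetical Flask-style rules:

print(convert_path("/api/items/<int:id>/"))   # /api/items/{id}/
print(convert_path("/users/<name>/profile"))  # /users/{name}/profile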
def get_C(s, i):
    """
    Get the number of letters smaller than s[i]; equivalently, the index of
    the first occurrence of s[i] in sorted s.
    """
    c = s[i]
    s = list(sorted(s))
    return s.index(c)
def stateful_flags(rep_restart_wait=None, quorum_loss_wait=None, standby_replica_keep=None):
    """Calculate an integer representation of flag arguments for stateful services"""
    flag_sum = 0
    if rep_restart_wait is not None:
        flag_sum += 1
    if quorum_loss_wait is not None:
        flag_sum += 2
    if standby_replica_keep is not None:
        flag_sum += 4
    return flag_sum
def is_param_constraint_broken(params, const_params): """Check param value for breaking constraints: Skip to next if so. Parameters ---------- params : dict Dictionary of parameters str identifiers to their values (Not tensors). const_params : dict Returns ------- bool True if a constraint is broken, False if no constraint is broken. """ return ( ('precision' in params and params['precision'] <= 0) or ( 'df' in params and ( ('covariance_matrix' in params and params['df'] <= 2) or params['df'] <= 0 ) ) )
def need_edit_content(content: str):
    """ Check whether the content needs editing """
    # skip REDIRECT page
    if content.startswith("#REDIRECT"):
        return False
    # skip disambiguation page
    if content.find("{{Disambiguation}}") > -1:
        return False
    if content.find("{{disambiguation}}") > -1:
        return False
    return True
def hflip_augment(aug=None, is_training=True, **kwargs):
    """Horizontal flip augmentation."""
    del kwargs
    if aug is None:
        aug = []
    if is_training:
        return aug + [('hflip', {})]
    return aug
def vehicle_type_and_mav_sys_id(vehicle_id, vehicle_color): """Get the vehicle_type and mav_sys_id from the vehicle's id and color.""" # valid MAV_SYS_IDs 1 to 250 # the first 25 vehicles per team are iris # the second 25 vehicles per team are plane (delta_wing) vehicle_type = 'iris' if vehicle_id <= 25 else 'delta_wing' # BLUE uses 1 to 50 # GOLD uses 101 to 150 mav_sys_id = vehicle_id if vehicle_color == 'gold': mav_sys_id += 100 return vehicle_type, mav_sys_id
def theveninTheorem(Vth, Rth, Rl):
    """
    In practice, a particular element in a circuit is often variable (usually
    called the load) while other elements are fixed. Each time the variable
    element is changed, the entire circuit has to be analyzed all over again.

    Thevenin's theorem states that a linear two-terminal circuit can be
    replaced by an equivalent circuit consisting of a voltage Vth in series
    with a resistor Rth, where Vth is the open-circuit voltage (v_oc) at the
    terminals and Rth is the input or equivalent resistance at the terminals
    when the independent sources are turned off (R_in).

    Theory:
    Case 1 (no dependent sources): Turn off all independent sources. Rth is
    the input resistance between terminals A & B.
    Case 2 (dependent sources): This method uses superposition. Turn off all
    independent sources. Apply a voltage (v_o; to determine the current) or
    current (i_o; to determine the voltage) as necessary. It can be any value,
    even 1V or 1A; the result will be the same. Rth = v_o/i_o

    Note: having a dependent source means that you could end up with a
    negative Rth. This means that the circuit is actually supplying power.

    Variables:
    Vth = Thevenin Equivalent Voltage
    Rth = Thevenin Equivalent Resistance
    Rl = Load Resistance

    Output: Vl, Il
    """
    Il = Vth / (Rth + Rl)
    Vl = Rl * Il
    return Vl, Il
def endpoint_name(endpoint, interface): """Create a single key from an endpoint, interface pair Args: endpoint (str): The name of an endpoint interface (str): The interface on the given endpoint Returns: str: A single key combining endpoint and interface """ return "%s:%s" % (endpoint, interface)
def get_site_url(site): """Get a ``site`` URL :param str site: the site to get URL for :return: a valid site URL :raise ValueError: when site is empty, or isn't well formatted The ``site`` argument is checked: its scheme must be ``http`` or ``https``, or a :exc:`ValueError` is raised. If the ``site`` does not have a scheme, ``http`` is used. If it doesn't have a TLD, a :exc:`ValueError` is raised. """ site = site.strip() if site else '' if not site: raise ValueError('What site do you want to check?') if not site.startswith(('http://', 'https://')): if '://' in site: protocol = site.split('://')[0] + '://' raise ValueError('Try it again without the %s' % protocol) site = 'http://' + site domain = site.split('/')[2].split(':')[0] if '.' not in domain: raise ValueError('I need a fully qualified domain name (with a dot).') if domain.endswith(('.local', '.example', '.test', '.invalid', '.localhost')): raise ValueError("I can't check LAN-local or invalid domains.") return site
def mlp_check_dimensions(x, y, ws, bs): """ Return True if the dimensions in double_u and beta agree. :param x: a list of lists representing the x matrix. :param y: a list output values. :param ws: a list of weight matrices (one for each layer) :param bs: a list of biases (one for each layer) :return: True if the dimensions of x, y, ws and bs match """ ## W rows should equal X columns, b col should equal W col result = True if len(ws) != len(bs): return False if len(x[0]) != len(ws[0]): return False if len(x) != len(y): return False if len(y[0]) != len(bs[len(bs) - 1][0]): return False for layer in range(len(ws)): if len(ws[layer][0]) != len(bs[layer][0]): return False if layer == 0: pass else: prev_w = ws[layer - 1] if len(ws[layer]) != len(prev_w[0]): return False return result
def absVal(num):
    """
    Find the absolute value of a number.

    >>absVal(-5)
    5
    >>absVal(0)
    0
    """
    if num < 0:
        return -num
    else:
        return num
def fake_legal_name(vasp): """ Given a string representing a VASP's name, return a valid dictionary for the faked name identifiers """ return { "name_identifiers": [ { "legal_person_name": vasp, "legal_person_name_identifier_type": "LEGAL_PERSON_NAME_TYPE_CODE_LEGL", } ] }
def calcOptimalStopLoss(M: float, gain: float, loss: float, chanceCorrect: float) -> float:
    """Calculates the optimal stop loss amount.

    Parameters
    ----------
    M : float
        The largest OL (Open minus Low) or HO (High minus Open) in the history
        of the data.
    gain : float
        The average gain of the historical plays that ended in a gain.
    loss : float
        The average loss of the historical plays that ended in a loss.
    chanceCorrect : float
        The percent of historical plays that ended in a gain.

    Returns
    -------
    float
        Returns the stop loss at which to close the play.
    """
    return 0.5 * (M + (1.0 - chanceCorrect) * loss - chanceCorrect * gain)
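A worked example with made-up numbers, assuming dollar amounts for M, gain and loss and a fraction for chanceCorrect:

# 0.5 * (5.00 + (1 - 0.6) * 1.50 - 0.6 * 2.00) = 0.5 * 4.40 = 2.20
print(calcOptimalStopLoss(M=5.00, gain=2.00, loss=1.50, chanceCorrect=0.6))  # ~2.2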
def merge_dicts(dict1, dict2):
    """
    Merges the dictionaries, so that a bag of words can be created from the result.
    """
    if len(dict1) < len(dict2):
        dict1, dict2 = dict2, dict1
    for key, value in dict2.items():
        dict1[key] = dict1.get(key, 0) + value
    return dict1
def bfs(graph, start):
    """
    Breadth first search

    Args:
        graph (dict): graph
        start (node): some key of the graph

    Time: O(|V| + |E|)
    """
    seen = set()
    path = []
    queue = [start]
    while queue:
        current = queue.pop(0)
        if current not in seen:
            seen.add(current)
            path.append(current)
            queue.extend(graph[current])
    return path
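A usage sketch for bfs on a small hypothetical graph dict:

graph = {'A': ['B', 'C'], 'B': ['D'], 'C': ['D'], 'D': []}
print(bfs(graph, 'A'))  # ['A', 'B', 'C', 'D']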
def parse_variable_char(packed):
    """ Map a 6-bit packed char to ASCII """
    packed_char = packed
    if packed_char == 0:
        return ""
    if 1 <= packed_char <= 10:
        return chr(ord('0') - 1 + packed_char)
    elif 11 <= packed_char <= 36:
        return chr(ord('A') - 11 + packed_char)
    elif 37 <= packed_char <= 62:
        return chr(ord('a') - 37 + packed_char)
    else:
        return "_"
def flip_case(phrase, to_swap): """Flip [to_swap] case each time it appears in phrase. >>> flip_case('Aaaahhh', 'a') 'aAAAhhh' >>> flip_case('Aaaahhh', 'A') 'aAAAhhh' >>> flip_case('Aaaahhh', 'h') 'AaaaHHH' """ return ''.join(letter.lower() if letter.lower() == to_swap.lower() and letter.isupper() else (letter.upper() if letter.lower() == to_swap.lower() else letter) for letter in phrase)
def mcd(a, b):
    """Return the greatest common divisor of a and b"""
    if a * b == 0:
        return 1
    if a == b:
        return a
    elif a > b:
        return mcd(a - b, b)
    else:
        return mcd(b - a, a)
def selection_sort(seq): """Returns a tuple if got a tuple. Otherwise returns a list. >>> t = (3, 1, 1, 4, -1, 6, 2, 9, 8, 2) >>> selection_sort(t) (-1, 1, 1, 2, 2, 3, 4, 6, 8, 9) >>> l = [9, 9, 3, -1, 14, 67, 1] >>> selection_sort(l) [-1, 1, 3, 9, 9, 14, 67] """ alist = list(seq) for idx, i in enumerate(alist): minvalue_idx = idx minvalue = i for idx2, y in enumerate(alist[idx:]): if y < minvalue: minvalue_idx = idx + idx2 minvalue = y alist[idx] = alist[minvalue_idx] alist[minvalue_idx] = i if isinstance(seq, tuple): return tuple(alist) return alist
def isServerAlive(server_ip, active_server_list):
    """Return True if server_ip belongs to one of the servers in
    active_server_list, False otherwise.
    """
    # An alternative approach would probe port 22 directly, e.g.:
    #   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    #   try:
    #       sock.connect((server_ip, 22))
    #       return True
    #   except socket.error:
    #       return False
    if not server_ip:
        return False
    for server in active_server_list:
        if server.ip == server_ip:
            return True
    return False
def IsMissing(argument): """Check if an argument was omitted from a function call Missing arguments default to the VBMissingArgument class so we just check if the argument is an instance of this type and return true if this is the case. """ try: return argument._missing except AttributeError: return 0
def get_starting_points(graph):
    """
    Return starting points that have non-zero out-degree and don't cause the
    run to hang during execution.
    """
    if graph == "testGraph":
        return ["1", "2"]
    elif graph != "friendster":
        return ["17", "38", "47", "52", "53", "58", "59", "69", "94", "96"]
    else:
        # friendster takes a long time so use fewer starting points
        return ["101", "286", "16966", "37728", "56030", "155929"]
def check_item_relational(actual, expected):
    """Checks for acceptable lesser or greater values."""
    if expected[:1] == '>' and int(actual) >= int(expected[1:]):
        return True
    elif expected[:1] == '<' and int(actual) <= int(expected[1:]):
        return True
    elif actual == expected:
        return True
    else:
        return False
def bits_list(number):
    """Return list of bits in number

    Keyword arguments:
    number -- an integer >= 0
    """
    # https://wiki.python.org/moin/BitManipulation
    if number == 0:
        return [0]
    else:
        # binary literal string e.g. '0b101'; slice off the '0b' prefix
        # (str.lstrip('0b') strips characters, not a prefix)
        binary_literal = bin(number)
        bits_string = binary_literal[2:]
        # list comprehension
        bits = [int(bit_character) for bit_character in bits_string]
        return bits
def convert_chemformula(string: str) -> str: """ Convert a chemical formula string to a matplotlib parsable format (latex). Parameters ---------- string or Adsorbate: str String to process. Returns ------- str Processed string. """ result = getattr(string, 'formula', None) if result is None: result = "" number_processing = False for i in string: if i.isdigit(): if not number_processing: result += '_{' number_processing = True else: if number_processing: result += '}' number_processing = False result += i if number_processing: result += '}' return f'${result}$'
def time2num(year):
    """
    Convert an election year to its period label.

    :param year:
    :return:
    """
    # time1 = [1948, 1952, 1956, 1960, 1964]
    # time2 = [1968, 1972, 1976, 1980, 1984, ]
    # time3 = [1988, 1992, 1996, 2000, 2004]
    # time4 = [2008, 2012, 2016]
    time1 = [1948, 1952, 1956]  # Eisenhower and Truman: mixed
    time2 = [1960, 1964, 1968]  # Kennedy and Johnson: Democratic
    time3 = [1972, 1976, 1980]  # Carter and Ford, Nixon: mixed
    time4 = [1984, 1988, 1992]  # Reagan and Bush: Republican
    time5 = [1996, 2000, 2004]  # Clinton and Bush: mixed
    time6 = [2008, 2012, 2016]  # Obama: Democratic
    if year in time1:
        return 1
    elif year in time2:
        return 2
    elif year in time3:
        return 3
    elif year in time4:
        return 4
    elif year in time5:
        return 5
    elif year in time6:
        return 6
def add_plus(split_fields):
    """Insert delimiters to convert the list (or pandas Series, etc.) into a
    suitable format to be used.

    Accepted delimiters include a comma (,), a space ( ) or a plus (+).
    The default set by cimr is a plus (+).
    """
    return '+'.join(map(str, split_fields))
def isCharOrDigit(ch: str) -> bool:
    """Test if ch is a letter, digit, '-' or '_'.

    This is for the gramparser, whose words for the recogniser can contain
    these characters.
    """
    return ch.isalpha() or ch.isdigit() or ch in '-_'
def double_number_with_hint(number: int) -> int:
    """Multiply given number by 2

    :param number:
    :return:
    """
    print("Inside double_number_with_hint()")
    return number * 2
def naiveFib(n):
    """
    Naive implementation of the nth Fibonacci number generator.
    Time complexity - O(1.6^n)

    :param n: The nth term
    :return: The nth Fibonacci number
    """
    if n == 0:
        return 0
    elif n == 1:
        return 1
    else:
        return naiveFib(n - 1) + naiveFib(n - 2)
def is_hashable(object_):
    """
    Returns whether the object is hashable.

    Parameters
    ----------
    object_ : `Any`
        The object to check.

    Returns
    -------
    is_hashable : `bool`
    """
    try:
        hasher_function = getattr(type(object_), '__hash__')
    except AttributeError:
        return False
    try:
        hasher_function(object_)
    except (TypeError, NotImplementedError):
        return False
    return True
def is_metadata(message):
    """Returns true iff the Shadow data stream message contains metadata
    rather than a measurement sample
    """
    return message.startswith(b'<?xml')
def GetPatchDeploymentUriPath(project, patch_deployment):
    """Returns the URI path of an osconfig patch deployment."""
    return '/'.join(['projects', project, 'patchDeployments', patch_deployment])
def remove_typedefs(signature: str) -> str:
    """
    Strips typedef info from a function signature

    :param signature: function signature
    :return: string that can be used to construct function calls with the same
        variable names and ordering as in the function signature
    """
    # Remove the * prefix for pointers (pointers must always be removed before
    # values, otherwise we will inadvertently dereference values; the same
    # applies for const specifications).
    #
    # Always add whitespace after the type definition for cosmetic reasons.
    typedefs = [
        'const realtype *',
        'const double *',
        'const realtype ',
        'double *',
        'realtype *',
        'const int ',
        'int ',
        'SUNMatrixContent_Sparse ',
        'gsl::span<const int>'
    ]
    for typedef in typedefs:
        signature = signature.replace(typedef, '')
    return signature
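As a rough illustration (hypothetical signature, not from the source), remove_typedefs turns a C-style parameter list into bare argument names:

sig = "const realtype *x, double *y, int n"
print(remove_typedefs(sig))  # x, y, n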
def empty_results(num_classes, num_images): """Return empty results lists for boxes, masks, and keypoints. Box detections are collected into: all_boxes[cls][image] = N x 5 array with columns (x1, y1, x2, y2, score) Instance mask predictions are collected into: all_segms[cls][image] = [...] list of COCO RLE encoded masks that are in 1:1 correspondence with the boxes in all_boxes[cls][image] Keypoint predictions are collected into: all_keyps[cls][image] = [...] list of keypoints results, each encoded as a 3D array (#rois, 4, #keypoints) with the 4 rows corresponding to [x, y, logit, prob] (See: utils.keypoints.heatmaps_to_keypoints). Keypoints are recorded for person (cls = 1); they are in 1:1 correspondence with the boxes in all_boxes[cls][image]. """ # Note: do not be tempted to use [[] * N], which gives N references to the # *same* empty list. all_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)] all_segms = [[[] for _ in range(num_images)] for _ in range(num_classes)] all_keyps = [[[] for _ in range(num_images)] for _ in range(num_classes)] return all_boxes, all_segms, all_keyps