def fix_mocov2_state_dict(state_dict):
    """
    Ref: https://github.com/facebookresearch/CovidPrognosis/blob/master/cp_examples/sip_finetune/sip_finetune.py
    """
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.startswith("model.encoder_q."):
            k = k.replace("model.encoder_q.", "")
            new_state_dict[k] = v
    return new_state_dict
def make_seconds(days, hours, minutes, seconds):
    """
    >>> make_seconds(0,5,9,5)
    18545
    """
    return seconds + (60 * minutes) + (60 * 60 * hours) + (24 * 60 * 60 * days)
def RemoveIntervalsContained(intervals):
    """remove intervals that are fully contained in another

    [(10, 100), (20, 50), (70, 120), (130, 200), (10, 50), (140, 210), (150, 200)]
    results:
    [(10, 100), (70, 120), (130, 200), (140, 210)]
    """
    if not intervals:
        return []
    new_intervals = []
    intervals.sort()
    last_from, last_to = intervals[0]
    for this_from, this_to in intervals[1:]:
        # this is larger:
        if this_from <= last_from and this_to >= last_to:
            last_from, last_to = this_from, this_to
            continue
        # last is larger
        if last_from <= this_from and last_to >= this_to:
            continue
        # no complete overlap
        new_intervals.append((last_from, last_to))
        last_from, last_to = this_from, this_to
    new_intervals.append((last_from, last_to))
    return new_intervals
def _check_errors(results):
    """
    <SEMI-PRIVATE> Checks whether the results from the Azure API contain errors or not.
    _check_errors(results)

    results: [str]
        The results from the Azure translation API call

    Returns: bool; True if the API results contain errors.
    """
    errors = False
    for result in results:
        if 'translations' not in result:
            errors = True
            break
    return errors
def inverse_dict(my_dict):
    """
    The function gets a dictionary and reverses it: the keys become values
    and the values become keys.
    :param my_dict: the dictionary that needs to be reversed.
    :return: a VERY pretty dictionary.
    """
    result_dict = {}
    for key, value in my_dict.items():
        # when several keys share a value, the last key seen wins
        result_dict[value] = key
    return result_dict
def cpu_statistics(last, ref):
    """ Return the CPU loading for all the processors between last and ref measures.
    The average on all processors is inserted in front of the list. """
    cpu = []
    for unit in zip(last, ref):
        work = unit[0][0] - unit[1][0]
        idle = unit[0][1] - unit[1][1]
        total = work + idle
        cpu.append(100.0 * work / total if total else 0)
    return cpu
def show_run_error(exit_status, output):
    """An easy-to-read error message for assert"""
    return 'Failed with exit status %s\n' \
           '--------------\n' \
           '%s' % (exit_status, output)
def getDebuff(state, debuffType):
    """Get the lists of debuff modifiers for a specific debuff type"""
    return [
        b['props'][debuffType]
        for b in state['enemy']['debuff']
        if debuffType in b['props']
    ]
def block_3x3(i: int, j: int):
    """used to generate 3x3 block slices

    This can be useful to fill out A and G in (10.68)
    arr[block33(0,1)] == arr[0:3, 3:6]
    arr[block33(1,2)] == arr[3:6, 6:9]
    ...

    Args:
        i (int): row in (10.68)
        j (int): column in (10.68)

    Returns:
        [type]: [description]
    """
    return slice(i*3, (i+1)*3), slice(j*3, (j+1)*3)
def clean_triplets(triplets):
    """ Remove the triplet which contains <_> """
    tobedeleted = []
    for key in triplets:
        for each in triplets[key]:
            if each == '<_>':
                tobedeleted.append(key)
                break
    for key in tobedeleted:
        del triplets[key]
    return triplets
def valid_path(path: str) -> bool:
    """Check if the provided path is valid."""
    if len(path) < 5:
        raise ValueError('File path too short!')
    if not path.endswith('.xlsx'):
        raise ValueError('Expects a .xlsx file!')
    return True
def merge_dicts(*dicts, **kwargs):
    """
    Melts several dicts into one. Useful when messing with feed dicts.
    :param dicts: dictionaries
    :param check_conflicts: if True, raises error if several dicts have the same key.
        Otherwise uses the key from the latest dict in *dicts.
    :return: a dict that contains k-v pairs from all *dicts
    """
    merged_dict = {}
    for d in dicts:
        merged_dict.update(d)
    if kwargs.get('check_conflicts'):
        assert len(merged_dict) == sum(map(len, dicts)), 'dicts have duplicate keys'
    return merged_dict
def replace_slash(name):
    """Replaces slash with division slash symbol for CheckStyle Jenkins plugin"""
    DIVISION_SLASH = '\u2215'
    return (name or '').replace('/', DIVISION_SLASH)
def greatest_common_divisor(a: int, b: int) -> int:
    """
    Euclid's Lemma : d divides a and b, if and only if d divides a-b and b
    Euclid's Algorithm

    >>> greatest_common_divisor(7,5)
    1

    Note : In number theory, two integers a and b are said to be relatively prime,
    mutually prime, or co-prime if the only positive integer (factor) that divides
    both of them is 1 i.e., gcd(a,b) = 1.

    >>> greatest_common_divisor(121, 11)
    11
    """
    if a < b:
        a, b = b, a
    while a % b != 0:
        a, b = b, a % b
    return b
def _getPyFile(filename):
    """Return the file and '.py' filename from a filename which could
    end with .py, .pyc, or .pyo"""
    if filename[-1] in 'oc' and filename[-4:-1] == '.py':
        return filename[:-1]
    return filename
def _str_to_version(version_str):
    """Return the tuple (major, minor, patch) version extracted from the str."""
    version_ids = version_str.split(".")
    if len(version_ids) != 3 or "-" in version_str:
        raise ValueError(
            "Could not convert the {} to version. Format should be x.y.z".format(
                version_str))
    try:
        version_ids = tuple(int(v) for v in version_ids)
    except ValueError:
        raise ValueError(
            "Could not convert the {} to version. Format should be x.y.z".format(
                version_str))
    return version_ids
def format_args(args, kwargs):
    """Formats args and kwargs the way they look in a function call."""
    return ', '.join([repr(arg) for arg in args] +
                     ['%s=%r' % item for item in sorted(kwargs.items())])
def GetPersistTime(ar_filename):
    """Return time stored in ar_filename, or 0.0 if it does not exist."""
    try:
        with open(ar_filename) as f:
            return float(f.read())
    except (IOError, ValueError):
        return 0.0
def dsigmoid(y):
    """
    derivative of sigmoid
    in this function y is already sigmoided
    """
    return y * (1.0 - y)
def adverbize(number):
    """
    Transforms a number to its numeral adverb representation.

    Since this method should be mostly used in logging messages, only English is supported.

    Examples:
        ```python
        from flashback.formatting import adverbize

        adverbize(1)
        #=> "once"

        adverbize(3)
        #=> "thrice"

        adverbize(144)
        #=> "144 times"
        ```

    Params:
        number (int): the number to transform into a numeral adverb

    Returns:
        str: the numeral adverb
    """
    number = int(number)
    if number == 1:
        numeral = "once"
    elif number == 2:
        numeral = "twice"
    elif number == 3:
        numeral = "thrice"
    else:
        numeral = f"{number} times"
    return numeral
def _query_single_line(query):
    """Reformats a query string to remove newlines and extra spaces

    :param query: The query to log. This will work for anything that will result in a
        string after str() is applied to it. Be aware of this conversion. E.g.
        sqlalchemy's TextClause objects.
    """
    return " ".join(str(query).split())
def infer_dtype(value):
    """Infer the data type of the value passed.

    Args:
        value (unknown): Value.

    Raises:
        ValueError: When the type can't be inferred.
    """
    if isinstance(value, bool):
        return value
    # for dtype in [float, int, bool, str]:
    for dtype in [float, int, str]:
        try:
            return dtype(value)
        except ValueError:
            pass
    raise ValueError('Unable to infer type.')
def validate_input(x):
    """Validates that the input given is between 1 and 10

    >>> validate_input(5)
    True
    >>> validate_input(-2)
    False
    >>> validate_input(12)
    False
    """
    x = int(x)
    if 1 <= x <= 10:
        return True
    else:
        return False
def validate(identifier):
    """Validates a student id from the Pontifical Catholic University of Chile

    Args:
        identifier: student identifier (string or number)

    Returns:
        True if it is valid, False otherwise
    """
    if not identifier:
        return False
    identifier = str(identifier)
    student_number = identifier[:-1]
    given_digit = identifier[-1].upper()
    counter = 2
    total = 0
    for char in reversed(student_number):
        if not char.isdigit():
            return False
        total += int(char) * counter
        counter += 1
        counter = 2 if counter > 8 else counter
    digit = str(11 - total % 11) if (11 - total % 11 != 10) else 'J'
    digit = '0' if digit == '11' else digit
    return given_digit == digit
def snake_to_camel(snake_string: str) -> str:
    """Convert snake_case to camelCase"""
    components = snake_string.split('_')
    return components[0] + ''.join(x.title() for x in components[1:])
def _get_org(aff):
    """Auxiliary function to extract org information from affiliation
    for authorgroup.
    """
    try:
        org = aff['organization']
        if not isinstance(org, str):
            try:
                org = org['$']
            except TypeError:  # Multiple names given
                org = ', '.join([d['$'] for d in org if d])
    except KeyError:  # Author group w/o affiliation
        org = None
    return org
def normalize_rewards(rewards, reward_is_penalty=False):
    """Corrects rewards to be in the interval 0-1

    * If reward is actually a penalty it inverts

    Params:
    * rewards: list
        list with the rewards from a tls
    * reward_is_penalty: bool
        If True then the reward is actually a penalty; default False

    Returns:
    * list with the rewards normalized to the interval 0-1
    """
    # Switch signals
    if reward_is_penalty:
        _rewards = [-r for r in rewards]
    else:
        _rewards = rewards
    rmin = min(_rewards)
    rmax = max(_rewards)
    return [(rwr - rmin) / (rmax - rmin) for rwr in _rewards]
def header_maker(mode: str) -> str:
    """
    make header and return the user-agent string for the given mode
    :param mode:
    :return:
    """
    user_agents = {
        "FF": "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.17 Safari/537.36",
        "TIMELINE": "Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36",
        "MOBILE": "Opera/9.80 (Android 4.1.2; Linux; Opera Mobi/ADR-1305251841) Presto/2.11.355 Version/12.10"
    }
    return user_agents[mode]
def floor(num):
    """Simple numerical floor function.

    Args:
        num (float): input number

    Returns:
        int: next lowest integer if num is non-integer, else: num
    """
    if int(num) > num:
        # num is negative float
        # e.g. int(-2.5) = -2
        return int(num) - 1
    else:
        # num is integer, or num is positive float
        # e.g. int(2.0) = 2
        # e.g. int(2.5) = 2
        return int(num)
def average_word_length(txt):
    """
    Gather statistics about the text, primarily the average word length

    Parameters
    ----------
    txt : str

    Returns
    -------
    word_length : float
        Average word length in the text
    """
    # txt = re.subn(RE_REPEATS, '', txt)[0]
    nw = len(txt.split())
    nc = len(txt)
    avgw = nc / (nw + 1)
    return avgw
def split_org_repo(name):
    """Split a potential organization name prefix from a repo's full name

    Returns
    -------
    (None, reponame) or (orgname, reponame)
    """
    split = name.split('/', maxsplit=1)
    if len(split) < 2:
        return None, name
    else:
        return split[0], split[1]
def data_storage_account_settings(conf):
    # type: (dict) -> str
    """Retrieve input data storage account settings link
    :param dict conf: configuration object
    :rtype: str
    :return: storage account link
    """
    return conf['storage_account_settings']
def tag_function(entry):
    """
    Default tag function

    Given a PDB report entry, generate a list of tags to apply to the entry when created
    in the database. This function can be overridden through the PDB_TAG_FUNCTION
    settings variable
    :param entry:
    :return:
    """
    return [entry['diffrn_source.pdbx_synchrotron_beamline'].split()[-1], ]
def split_hdf_path(fname, subgroup=None):
    """Split an hdf path of the form path.hdf/group, where the group part is
    optional, into the path and the group parts. If subgroup is specified, then
    it will be appended to the group information.

    returns fname, group. The fname will be a string, and the group will be a
    string or None. Raises a ValueError if the fname is not recognized as a
    hdf file."""
    for suf in [".hdf", ".h5"]:
        name, _, group = fname.rpartition(suf)
        if not name:
            continue
        name += suf
        if not group:
            return name, subgroup
        elif group[0] == "/":
            group = group[1:]
            if subgroup:
                group += "/" + subgroup
            return name, group
    raise ValueError("Not an hdf path")
def reorder_tuple(input_tuple) -> tuple:
    """Sort tuples alphabetically"""
    if input_tuple[0][0] > input_tuple[1][0]:
        return tuple((input_tuple[1], input_tuple[0]))
    else:
        return tuple((input_tuple[0], input_tuple[1]))
def createSoftLink(src, dest):
    """
    Creates a new soft link.

    :type src: string
    :param src: Source file or directory.
    :type dest: string
    :param dest: Name of the soft link.
    """
    return ["ln -f -s %s %s" % (src, dest)]
def linearInterpolate(v1, v2, alpha):
    """
    Good interpolator when you have two values to interpolate between, but doesn't give
    fluid animation when more points are involved since it follows straight lines
    between the points.
    """
    return v1 + alpha * (v2 - v1)
def jumpAndBackpedal(isMyNumber):
    """
    :param isMyNumber: function that hides a secret number
    :return: secret number
    """
    # start guess
    guess = 0
    # if isMyNumber returns 0, guess is secretnumber
    if isMyNumber(guess) == 0:
        return guess
    # if isMyNumber does not return 0, guess is not yet correct
    else:
        # keep guessing until right guess
        foundNumber = False
        while not foundNumber:
            # if guess is too high, isMyNumber returns 1
            # so guess needs to be decreased
            if isMyNumber(guess) == 1:
                guess = guess - 1
            # else if guess is too low, isMyNumber returns -1
            # so guess needs to be increased
            elif isMyNumber(guess) == -1:
                guess = guess + 1
            # else if finally guess is secret number, isMyNumber returns 0
            else:
                # break loop with flag
                foundNumber = True
        return guess
def marriage_transform(m_s_string):
    """Perform some simple manipulations."""
    result = "Low"
    if m_s_string.startswith("Married"):
        result = "Medium"
    elif m_s_string.startswith("Widowed"):
        result = "High"
    return result
def jointKdlToList(q00):
    """ Return list converted from KDL JntArray"""
    if q00 is None:
        return None
    return [q00[i] for i in range(q00.rows())]
def token_begin_char_calibrate(token_value, text):
    """
    Calibrate the begin char position of each token in the sentence.
    :param token_value: text of the token
    :param text: Sentence text
    :return: Calibrated positions
    """
    res = {}
    ptr = 0
    for key in sorted(token_value.keys()):
        token = token_value[key]
        ptr_t = 0
        while ptr_t < len(token):
            if ptr >= len(text):
                print('Calibration failed!')
                return None
            if text[ptr] == token[ptr_t]:
                if ptr_t == 0:
                    res[key] = ptr
                ptr += 1
                ptr_t += 1
            else:
                ptr += 1
    assert len(token_value) == len(res)
    return res
def rotate(l, n):
    """ Rotate (shift) the list, moving values n places to the left/right """
    n = -n  # so that we step through the list in the correct direction
    return l[n:] + l[:n]
def write_line(index, member, stat_text, stat, emoji):
    """Write a line like charley does"""
    return f"**{index})** {member} \n{emoji} {stat_text}: {stat}"
def cget(mat, *i):
    """
    Returns the column(s) '*i' of a 2D list 'mat'

    mat: matrix or 2D list
    *i: columns to extract from matrix

    NOTE: If one column is given, the column is returned as a list.
    If multiple columns are given, a list of columns (also lists) is returned
    """
    if len(i) == 1:
        return [row[i[0]] for row in mat]
    else:
        return [[row[index] for row in mat] for index in i]
def calc_scanner_pos(scanner_height: int, time_step: int) -> int:
    """
    Calculates the position of a scanner within its range at a given time step.
    """
    cycle_midpoint = scanner_height - 1
    full_cycle = cycle_midpoint * 2
    cycle_position = time_step % full_cycle
    return (
        cycle_position
        if cycle_position <= cycle_midpoint
        else full_cycle - cycle_position)
def index_by(func, values):
    """
    Indexes values by func.

    Returns
        (dict): Keys produced by func, each pointing to one value.
    """
    return {func(value): value for value in values}
def is_promo_box(game):
    """Ignore the Deutscher Spielepreis Goodie Boxes and Brettspiel Adventskalender
    as expansions and treat them like base games"""
    # return game["id"] in (178656, 191779, 204573, 231506, 256951, 205611, 232298, 257590, 286086)
    # Change this to look for board game family 39378 (Box of Promos)
    return any(39378 == family["id"] for family in game["families"])
def get_acache(cons_nets):
    """
    Calculates the A-Cache of the given decomposition

    Parameters
    --------------
    cons_nets
        List of considered nets

    Returns
    --------------
    acache
        A-Cache
    """
    ret = {}
    for index, el in enumerate(cons_nets):
        for lab in el[0].lvis_labels:
            if lab not in ret:
                ret[lab] = []
            ret[lab].append(index)
    return ret
def file_names(files, directory, extension):
    """file_names(files, directory, extension) -> paths

    Returns the full path of the given File objects.

    Parameters
    ----------
    files : list of :py:class:`bob.db.base.File`
        The list of file object to retrieve the file names for.

    directory : str
        The base directory, where the files can be found.

    extension : str
        The file name extension to add to all files.

    Returns
    -------
    paths : list of :obj:`str`
        The paths extracted for the files, in the same order.
    """
    # return the paths of the files, do not remove duplicates
    return [f.make_path(directory, extension) for f in files]
def get_path_in_ext(path, ext_name):
    """Transform a game path relative to an extension root into a game path
    relative to the game root.

    Arguments:
    path: game path relative to the extension path
    ext_name: extension name
    """
    if ext_name:
        return "/extensions/{}/{}".format(ext_name, path)
    return path
def h_from_eps(gamma, eint):
    """ Given rho and internal energy, return h """
    return 1 + gamma * eint
def _get_dict_value(src, value, default):
    """
    Returns a default value for a dict key if it was not found, otherwise
    returns the dict item.
    """
    if value in src:
        return src[value]
    else:
        return default
def joinStrings(s, l, toks):
    """ Join string split over multiple lines """
    return ["".join(toks)]
def cleanup_absent_fields(obj):
    """
    Cleans up any field that is marked as state: absent. It needs to be removed
    from the object if it is present.
    :param obj:
    :return: Purged object
    """
    if type(obj) != dict:
        return obj
    cleanup_keys = []
    for k, v in obj.items():
        if type(v) == dict:
            if (('state' in v and v['state'] == 'absent') or
                    (v == "{'state': 'absent'}")):
                cleanup_keys.append(k)
            else:
                cleanup_absent_fields(v)
                if not v:
                    cleanup_keys.append(k)
        elif type(v) == list:
            new_list = []
            for elem in v:
                elem = cleanup_absent_fields(elem)
                if elem:
                    # keep the item, drop empty ones from the list
                    new_list.append(elem)
            if new_list:
                obj[k] = new_list
            else:
                cleanup_keys.append(k)
        elif isinstance(v, str):
            if v == "{'state': 'absent'}":
                cleanup_keys.append(k)
    for k in cleanup_keys:
        del obj[k]
    return obj
def guessChunk(shape_json, typesize):
    """ Guess an appropriate chunk layout for a dataset, given its shape and
    the size of each element in bytes. Will allocate chunks only as large
    as MAX_SIZE. Chunks are generally close to some power-of-2 fraction of
    each axis, slightly favoring bigger values for the last index.
    Undocumented and subject to change without warning.
    """
    if shape_json is None or shape_json["class"] == 'H5S_NULL':
        return None
    if shape_json["class"] == 'H5S_SCALAR':
        return (1,)  # just enough to store one item
    if "maxdims" in shape_json:
        shape = shape_json["maxdims"]
    else:
        shape = shape_json["dims"]
    if typesize == 'H5T_VARIABLE':
        typesize = 128  # just take a guess at the item size
    # For unlimited dimensions we have to guess. use 1024
    shape = tuple((x if x != 0 else 1024) for i, x in enumerate(shape))
    return shape
def roll(l):
    """rolls a list to the right
    e.g.: roll([0,1,1]) => [1,0,1]
    """
    tmp1, tmp2 = l[:-1], l[-1]
    l[1:] = tmp1
    l[0] = tmp2
    return l
def ec_file(main_object):
    """Return File entity in Demisto format for use in entry context

    Parameters
    ----------
    main_object : dict
        The main object from a report's contents.

    Returns
    -------
    dict
        File object populated by report contents.
    """
    name = main_object.get('filename')
    hashes = main_object.get('hashes', {})
    md5 = hashes.get('md5')
    sha1 = hashes.get('sha1')
    sha256 = hashes.get('sha256')
    ssdeep = hashes.get('ssdeep')
    ext = main_object.get('info', {}).get('ext')
    file_ec = {
        'File': {
            'Name': name,
            'MD5': md5,
            'SHA1': sha1,
            'SHA256': sha256,
            'SSDeep': ssdeep,
            'Extension': ext
        }
    }
    return file_ec
def bq_to_rows(rows):
    """Reformat BigQuery's output to regular pnguin LOD data

    Reformat BigQuery's output format so we can put it into a DataFrame

    Args:
        rows (dict): A nested list of key-value tuples that need to be converted
            into a list of dicts

    Returns:
        list: A list of dictionaries based on the input x
    """
    def _reformat(x):
        pairs = x.items()
        row = {}
        for pair in pairs:
            key, value = pair
            row[key] = value
        return row

    return [_reformat(x) for x in rows]
def get_index_of_csr_data(i, j, indptr, indices):
    """
    Get the value index of the i,j-element of a matrix in CSR format.

    Parameters
    ----------
    i : int
        row index which is asked to get the CSR-index for
    j : int
        column index which is asked to get the CSR-index for
    indptr : ndarray
        index-ptr-Array of the CSR-Matrix.
    indices : ndarray
        indices array of CSR-matrix (represents the nonzero column indices)

    Returns
    -------
    k : int
        index of the value array of the CSR-matrix, in which value [i,j] is stored.

    Notes
    -----
    This routine works only, if the tuple i,j is actually a real entry of the matrix.
    Otherwise the value k=0 will be returned and an Error Message will be provided.
    """
    # indices for row i are stored in indices[indptr[i]:indptr[i+1]]; thus the indptr
    # marks the start and end of the part of the indices and val vector where all
    # entries of a row are stored

    # set k to the start of data of row i
    k = indptr[i]
    # search for appearance of j in the nonzero column indices which are stored in
    # indices[k] till indices[k+indptr[i+1]]
    while j != indices[k]:
        # while column j not found search for j in next entry
        k += 1
        # Check if next search would be in next (wrong) row
        if k > indptr[i + 1]:
            print('ERROR! The index in the csr matrix is not preallocated!')
            k = 0
            break
    return k
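# Illustrative usage sketch (added; not part of the original source). It assumes
# scipy is available and uses the indptr/indices arrays that scipy.sparse.csr_matrix
# exposes; the matrix values here are arbitrary.
import numpy as np
from scipy.sparse import csr_matrix

_m = csr_matrix(np.array([[1, 0, 2],
                          [0, 3, 0],
                          [4, 0, 5]]))
_k = get_index_of_csr_data(2, 2, _m.indptr, _m.indices)
# _m.data[_k] == 5, i.e. _k points at the stored value of element (2, 2)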
def vanilla_grad_desc(para, grad_para, lr):
    """
    Update function for the vanilla gradient descent: w = w - learningRate * grad_w
    :param para: Parameter to be updated
    :param grad_para: Gradient at the parameter
    :param lr: learning rate
    :return:
    """
    return para - lr * grad_para
def max_common_prefix(a):
    """
    Given a list of strings (or other sliceable sequences), returns the longest
    common prefix

    :param a: list-like of strings
    :return: the smallest common prefix of all strings in a
    """
    if not a:
        return ''
    # Note: Try to optimize by using a min_max function to give me both in one pass.
    # The current version is still faster
    s1 = min(a)
    s2 = max(a)
    for i, c in enumerate(s1):
        if c != s2[i]:
            return s1[:i]
    return s1
def get_host_port(hostport_str):
    """ Convert string in format `host:port` to host and port. """
    host_port_args = hostport_str.split(':')  # assume argument 1 on form host:port
    rotctld_host = host_port_args[0]
    rotctld_port = 4533
    if len(host_port_args) > 1:
        rotctld_port = int(host_port_args[1])
    return rotctld_host, rotctld_port
def factorial(number, show=False):
    """
    -> Calculate the factorial of an integer number.
    :parameter number: Number to be calculated.
    :parameter show: (optional) Show the calculation process.
    :return: Factorial of number.
    """
    num = count = 1
    print('-' * 40)
    while count <= number:
        num *= count
        if show:
            print(f'{count}', end='')
            if count < number:
                print(end=' x ')
            elif count == number:
                print(end=' = ')
        count += 1
    return num
def abs_length_diff(trg, pred):
    """Computes absolute length difference between a target sequence and a
    predicted sequence

    Args:
        - trg (str): reference
        - pred (str): generated output

    Returns:
        - absolute length difference (int)
    """
    trg_length = len(trg.split(' '))
    pred_length = len(pred.split(' '))
    return abs(trg_length - pred_length)
def either_side(text, delimiter="and", default=[-1, -1]):
    """Take form 12 AND 15 to return [12, 15] for example"""
    if delimiter in text:
        pos = text.index(delimiter)
        if text[pos - 1].isnumeric() and text[pos + 1].isnumeric():
            return [int(text[pos - 1]), int(text[pos + 1])]
        else:
            return default
    else:
        return default
def is_palin(word):
    """."""
    if word[::-1] == word:
        return True
    else:
        return False
def bounded_exponential(x, bounds=[1/10, 10], base=2):
    """
    Bounded exponential function

    Computes an exponential function where when x is 0, the output is bounds[0],
    and when x is 1, the output is bounds[1]. The relative probability of
    outputting bounds[0] over bounds[1] is base.
    Useful for randomly sampling over large ranges of values with an exponential
    resolution.
    RH 2021

    Args:
        x (float or np.ndarray):
            Float or 1-D array of the x-axis
        bounds (list):
            List of two floats, the lower and upper bounds
        base (float):
            The relative probability of outputting bounds[0] over bounds[1]

    Returns:
        output (float or np.ndarray):
            The bounded exponential output
    """
    range_additive = bounds[1] - bounds[0]
    return (((base**x - 1) / (base - 1)) * range_additive) + bounds[0]
def lrange(*args, **kwargs):
    """
    >>> lrange(3)
    [0, 1, 2]
    >>> lrange(1, 3)
    [1, 2]
    >>> lrange(0, 3, 2)
    [0, 2]
    """
    return list(range(*args, **kwargs))
def relevant_event(event):
    """Check if an event is relevant and returns the inner event dict if it is"""
    if "event" in event.keys():
        e = event["event"]
        # only handle message type events
        if e["type"] == "message":
            return e
    return None
def checkPolyA(ignorePolyA):
    """Check if --ignorePolyA flag was used. If so, change polyA directory to no polyA"""
    if ignorePolyA:
        dirPolyA = 'noPolyA'
    else:
        # Default is to not add the flag
        dirPolyA = 'polyA'
    return dirPolyA
def bb_intersection_over_union(boxA, boxB):
    """
    This function does intersection over union between two bounding boxes
    :param boxA: box x1 represented as [min_x1, min_y1, max_x1, max_y1]
    :param boxB: box x2 represented as [min_x2, min_y2, max_x2, max_y2]
    :return: iou: intersection over union - a number between 0 and 1
    """
    # determine the (x, y)-coordinates of the intersection rectangle
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])
    if xA > xB or yA > yB:
        return 0
    else:
        # compute the area of intersection rectangle
        interArea = (xB - xA + 1) * (yB - yA + 1)
        # compute the area of both the prediction and ground-truth rectangles
        boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
        boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
        # compute the intersection over union by taking the intersection
        # area and dividing it by the sum of prediction + ground-truth
        # areas - the intersection area
        iou = interArea / float(boxAArea + boxBArea - interArea)
        # return the intersection over union value
        return iou
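# Illustrative check (added; not from the original source): two partially
# overlapping boxes in [min_x, min_y, max_x, max_y] form.
_boxA = [0, 0, 10, 10]
_boxB = [5, 5, 15, 15]
_iou = bb_intersection_over_union(_boxA, _boxB)
# intersection = 6 * 6 = 36, union = 121 + 121 - 36 = 206, so _iou ≈ 0.175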
def class_ref(cls: type) -> str:
    """
    >>> class_ref(int)
    'builtins.int'
    """
    return cls.__module__ + "." + cls.__name__
def isDiscordID(name):
    """
    Test if a given integer or string is a discord ID.
    Discord IDs are the same for servers, users, channels, ...
    """
    name = str(name)
    if len(name) in (17, 18) and name.isdigit():
        return True
    return False
def alpha_fixed(s: int) -> float:
    """
    Alpha function: determines extent to which node matrix is altered over time
    Exponential function with half life of 160 iterations
    """
    alpha_val = 0.5 * (2 ** (-s / 240))
    return alpha_val
def handler_minDate(a, b):
    """Minimum date"""
    if a < b:
        return a
    else:
        return b
def is_ipv4(ip_addr):
    """
    Check if ip_addr is a IPv4 address. If not it can be IPv6 or an invalid address
    :param ip_addr:
    :return: True iff ip_addr is an IPv4 address, False otherwise
    """
    no_of_bytes = len(ip_addr.split('.'))
    if no_of_bytes == 4:
        return True
    else:
        return False
def _broadcastable_shapes(shape_1, shape_2):
    """Check if two array shapes are compatible for broadcasting."""
    return all(
        (s1 == s2 or s1 == 1 or s2 == 1)
        for s1, s2 in zip(shape_1[::-1], shape_2[::-1]))
def _DivideAndCeil(dividend, divisor):
    """Returns ceil(dividend / divisor), taking care to avoid the pitfalls of
    floating point arithmetic that could otherwise yield the wrong result for
    large numbers.
    """
    quotient = dividend // divisor
    if (dividend % divisor) != 0:
        quotient += 1
    return quotient
def b(n, block):
    """N blocks"""
    return (n * block)
def build_diamond(validated_letter):
    """
    >:param str validated_letter: A capital letter, that will be used to generate
        the list of strings needed to print out the diamond.

    >**Returns:** A list of strings that contains the correct spacing for printing
        the diamond.

    build_diamond is used to generate the list of strings needed to print the diamond
    structure. It takes a single argument of a letter (in string format), and returns
    a list of strings. This list of strings can then be printed with newline
    characters (using join) to output the diamond structure.
    """
    a_ascii = ord('A')
    rows = ord(validated_letter) - a_ascii + 1
    diamond = []
    for row in list(range(rows)) + list(reversed(range(rows - 1))):
        if row == 0:
            diamond.append('{: <{w1}}{current_letter}'.format(
                '', w1=rows - 1, current_letter=chr(a_ascii + row)))
        else:
            diamond.append('{: <{w1}}{current_letter}{: <{w2}}{current_letter}'.format(
                '', '', w1=rows - row - 1, current_letter=chr(a_ascii + row),
                w2=row * 2 - 1))
    return diamond
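# Minimal usage sketch (added; not part of the original source): printing the
# diamond for 'C' with a newline join, as the docstring suggests.
if __name__ == "__main__":
    print('\n'.join(build_diamond('C')))
    # Output:
    #   A
    #  B B
    # C   C
    #  B B
    #   A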
def is_pid_cmdline_correct(pid, match):
    """Ensure that the cmdline for a pid seems sane

    Because pids are recycled, blindly killing by pid is something to avoid.
    This provides the ability to include a substring that is expected in the
    cmdline as a safety check.
    """
    try:
        with open('/proc/%d/cmdline' % pid) as f:
            cmdline = f.read()
            return match in cmdline
    except EnvironmentError:
        return False
def course_outline_initial_state(locator_to_show, course_structure):
    """
    Returns the desired initial state for the course outline view. If the 'show'
    request parameter was provided, then the view's initial state will be to have
    the desired item fully expanded and to scroll to see the new item.
    """
    def find_xblock_info(xblock_info, locator):
        """
        Finds the xblock info for the specified locator.
        """
        if xblock_info['id'] == locator:
            return xblock_info
        children = xblock_info['child_info']['children'] if xblock_info.get('child_info', None) else None
        if children:
            for child_xblock_info in children:
                result = find_xblock_info(child_xblock_info, locator)
                if result:
                    return result
        return None

    def collect_all_locators(locators, xblock_info):
        """
        Collect all the locators for an xblock and its children.
        """
        locators.append(xblock_info['id'])
        children = xblock_info['child_info']['children'] if xblock_info.get('child_info', None) else None
        if children:
            for child_xblock_info in children:
                collect_all_locators(locators, child_xblock_info)

    selected_xblock_info = find_xblock_info(course_structure, locator_to_show)
    if not selected_xblock_info:
        return None
    expanded_locators = []
    collect_all_locators(expanded_locators, selected_xblock_info)
    return {
        'locator_to_show': locator_to_show,
        'expanded_locators': expanded_locators
    }
def marathon_app_id_to_mesos_dns_subdomain(app_id: str):
    """Return app_id's subdomain as it would appear in a Mesos DNS A record.

    >>> marathon_app_id_to_mesos_dns_subdomain('/app-1')
    'app-1'
    >>> marathon_app_id_to_mesos_dns_subdomain('app-1')
    'app-1'
    >>> marathon_app_id_to_mesos_dns_subdomain('/group-1/app-1')
    'app-1-group-1'
    """
    return '-'.join(reversed(app_id.strip('/').split('/')))
def get_perc_99_len(input_length):
    """Get 99 percentile sequence length."""
    lengths = sorted(input_length)
    perc_99 = len(input_length) * 99 // 100
    perc_99_len = lengths[perc_99]
    return perc_99_len
def is_true(item):
    """
    Given a value, determine if it is one of
    ``[True, 'true', 'yes', 'y', 'on', '1', 1,]``
    (note: strings are converted to lowercase before comparison).

    Args:
        item: The item to convert to a boolean.

    Returns:
        bool: ``True`` if ``item`` equates to a true-ish value, ``False`` otherwise
    """
    tstrings = ['true', 'yes', 'y', 'on', '1']
    if isinstance(item, str) and item.lower() in tstrings:
        return True
    elif isinstance(item, bool) and item is True:
        return True
    elif isinstance(item, int) and item == 1:
        return True
    else:
        return False
def response_with_headers(headers):
    """
    Content-Type: text/html
    Set-Cookie: user=gua
    """
    header = 'HTTP/1.x 210 VERY OK\r\n'
    header += ''.join([
        '{}: {}\r\n'.format(k, v) for k, v in headers.items()
    ])
    return header
def _filter_x_only(value):
    """ Check for invalid Marker """
    if value != [-1, -1]:
        return True
    return False
def weekday_name(day_of_week):
    """Return name of weekday.

    >>> weekday_name(1)
    'Sunday'
    >>> weekday_name(7)
    'Saturday'

    For days not between 1 and 7, return None

    >>> weekday_name(9)
    >>> weekday_name(0)
    """
    DAYS = [
        "Sunday", "Monday", "Tuesday", "Wednesday",
        "Thursday", "Friday", "Saturday",
    ]
    if day_of_week < 1 or day_of_week > 7:
        return None
    return DAYS[day_of_week - 1]
def check_8_v2(oe_repos, srcoe_repos):
    """
    All repositories must have proper branch settings
    """
    print("All repositories must have proper branch settings")
    errors_found = 0
    for repos, prefix in [(oe_repos, "openeuler/"), (srcoe_repos, "src-openeuler/")]:
        for repo in repos:
            branches = repo.get("branches", [])
            if not branches:
                print("ERROR! {pre}{name} doesn't have branches"
                      .format(pre=prefix, name=repo["name"]))
                errors_found += 1
            else:
                master_found = 0
                for branch in branches:
                    if branch["type"] != "protected" and branch["type"] != "readonly":
                        print("ERROR! {pre}{name} branch {br} is not valid"
                              .format(pre=prefix, name=repo["name"], br=branch["name"]))
                        errors_found += 1
                    if branch["name"] == "master":
                        master_found += 1
                    elif branch.get("create_from", "") == "":
                        print("ERROR! {pre}{name} branch {br} has not valid parent branch"
                              .format(pre=prefix, name=repo["name"], br=branch["name"]))
                        errors_found += 1
                else:
                    if master_found != 1:
                        print("ERROR! {pre}{name}'s master branch is not properly set"
                              .format(pre=prefix, name=repo["name"]))
                        errors_found += 1
    if errors_found == 0:
        print("PASS WITHOUT ISSUES FOUND.")
    return errors_found
def factorial(n: int):
    """
    calculate factorial of n number
    :param n:
    :return:
    """
    assert n >= 0, "negative factorial not recognized"
    if n == 0:
        return 1
    return factorial(n - 1) * n
def get_cipher_block(cipher_text):  # 4 Blocks 16 bit each
    """
    Divide cipher 16-hex digits into a list of 4 blocks (4 hex digits each)
    :param cipher_text: Ciphered text (16-Hex Digits)
    :return: Cipher text divided into a list of 4 blocks (converted to int)
    """
    cipher_block = []
    for i in range(0, len(cipher_text), 4):
        cipher_block.append(int(cipher_text[i:i + 4], 16))
    return cipher_block
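# Small usage sketch (added for illustration; the ciphertext below is hypothetical):
# a 16-hex-digit string splits into four 16-bit integer blocks.
_example_cipher = "0123456789ABCDEF"
_blocks = get_cipher_block(_example_cipher)
# _blocks == [0x0123, 0x4567, 0x89AB, 0xCDEF] == [291, 17767, 35243, 52719]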
def talents_override(data):
    """determines if there are talent overrides in the original data"""
    return "${talents.mindbender}" in data or "${talents.void_torrent}" in data
def split_kernel_vars(var_couple, count):
    """
    Split into kernel variables and not.

    :param var_couple: var-value pair.
    :param count: number of variable to put into the kernel.
    :type count: int
    :return: all the variable in the kernel (i.e. count vars).
    :rtype: list
    """
    var_couple.sort(key=lambda x: -x[1])
    head = var_couple[:count]
    kernel = [n for n, i in head]
    return kernel
def round_to_fraction(val, res, decimals=None):
    """ round to closest resolution """
    if val is None:
        return 0.0
    if decimals is None and "." in str(res):
        decimals = len(str(res).split('.')[1])
    return round(round(val / res) * res, decimals)
def objective_limit(energy, limit):
    """
    The algorithm stops as soon as the current objective function value is less
    than or equal to limit.
    """
    if energy <= limit:
        return True
    else:
        return False
def ssf(x0, x1):
    """slope sum function"""
    if x1 > x0:
        return x1 - x0
    else:
        return 0
def get_objhash_from_object_desc(gitobjcontent):
    """returns object hash without control characters"""
    return gitobjcontent.split(" ")[1][:40]
def get_size(count, count_min, count_max, options):
    """
    Returns the font size for a word.

    @param   count      count of the word in the text
    @param   count_min  minimum word count in the text
    @param   count_max  maximum word count in the text
    @return             FONTSIZE_MIN..FONTSIZE_MAX
    """
    result = options["FONTSIZE_MAX"]
    if count_min != count_max:
        ratio = count / float(count_max - count_min)
        lo, hi = options["FONTSIZE_MIN"], options["FONTSIZE_MAX"]
        result = int(lo + (hi - lo) * min(1, ratio ** 0.2))
    return result
def extract_ranges(index_list, range_size_limit=32):
    """Extract consecutive ranges and singles from index_list.

    Args:
        index_list: List of monotone increasing non-negative integers.
        range_size_limit: Largest size range to return. If a larger consecutive
            range exists it will be returned as multiple ranges.

    Returns:
        ranges, singles where ranges is a list of [first, last] pairs of
        consecutive elements in index_list, and singles is all of the other
        elements, in original order.
    """
    if not index_list:
        return [], []
    first = index_list[0]
    last = first
    ranges = []
    singles = []
    for i in index_list[1:]:
        if i == last + 1 and (last - first) <= range_size_limit:
            last = i
        else:
            if last > first:
                ranges.append([first, last])
            else:
                singles.append(first)
            first = i
            last = i
    if last > first:
        ranges.append([first, last])
    else:
        singles.append(first)
    return ranges, singles
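# Illustrative usage (added; not from the original source): consecutive runs become
# [first, last] pairs, isolated indices end up in singles.
_ranges, _singles = extract_ranges([1, 2, 3, 7, 9, 10])
# _ranges  == [[1, 3], [9, 10]]
# _singles == [7]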
def trim_leading_lines(lines):
    """
    Trim leading blank lines.
    """
    lines = list(lines)
    while lines and not lines[0]:
        lines.pop(0)
    return lines