def get_class(d): """Get class name from the whole dictionary E.g., '{'index': 'data/Deploy/KLAC/KLAC0570/KLAC0570_12.jpg', 'prediction': ..., 'label': ...}' ==> (str) 'KLAC' """ return d['index'].split('/')[2]
def encode_utf8(u): """ Encode a UTF-8 string to a sequence of bytes. Args: u (str) : the string to encode Returns: bytes """ import sys if sys.version_info[0] == 2: u = u.encode('utf-8') return u
def generate_file_name(report_file): """Generates a report file name based on the file metadata.""" # If no filename is specified, use the file ID instead. file_name = report_file['fileName'] or report_file['id'] extension = '.csv' if report_file['format'] == 'CSV' else '.xml' return file_name + extension
def doubleslash(text): """Replaces each backslash with a double backslash. Args: text (str): location Returns: str: formatted location """ return text.replace('\\', '\\\\')
def get_args (indata, prefix=''): """ Slightly esoteric function to build a tuple to be used as argument to constructor calls. Python dicts are not ordered, so we have to look at the number in the parameter's name and insert items appropriately into an ordered list """ ident=prefix+'arg' # need to pregenerate list for using indices in loop args=len(indata)*[None] for i in indata: if i.find(ident)==-1: continue try: idx=int(i[len(ident)]) except ValueError: raise ValueError('Wrong indata data %s: "%s"!' % (ident, i)) if i.find('_distance')!=-1: # DistanceKernel args[idx]=eval(indata[i]+'()') else: try: args[idx]=eval(indata[i]) except TypeError: # no bool args[idx]=indata[i] # weed out superfluous Nones return [arg for arg in args if arg is not None]
def add_notebook_volume(notebook, volume): """ Add the provided podvolume (dict V1Volume) to the Notebook's PodSpec. notebook: Notebook CR dict volume: Podvolume dict """ podspec = notebook["spec"]["template"]["spec"] if "volumes" not in podspec: podspec["volumes"] = [] podspec["volumes"].append(volume) return notebook
def inject_function_id(gw_config, function_id, title="yappa gateway"): """ accepts gw config as dict, finds where to put function_id, returns new dict """ gw_config["info"].update(title=title) for path, methods in gw_config["paths"].items(): for method, description in methods.items(): yc_integration = description.get("x-yc-apigateway-integration") if yc_integration \ and yc_integration["type"] == "cloud_functions" \ and not yc_integration["function_id"]: yc_integration.update(function_id=function_id) return gw_config
def validate_comma_separated(ctx, param, value): """ Validate multiple string input values are comma-separated. Each of the value is put into a list, which is returned after validation. """ if value is None: return return value.split(',')
def is_number(s): """ Is s a number? True if 'tis so. False otherwise. """ try: float(s) return True except ValueError: return False
def _find_traces_for_automation(traces, item_id): """Find traces for an automation.""" return [trace for trace in traces if trace["item_id"] == item_id]
def clipstr(s, size, fill=" "): """Clips a string to a specific length, with an optional fill character.""" if len(s) > size: s = s[:size] if len(s) < size: s = fill * (size - len(s)) + s return s
def default_kernel_config(defn): """Creates a default kernel configuration suitable for general purpose inference. Parameters ---------- defn : hmm definition """ return [('beam', {}), ('hypers', { 'alpha_a': 4.0, 'alpha_b': 2.0, 'gamma_a': 3.0, 'gamma_b': 6.0 } )]
def standardize_template_names(t): """Standardize names of templates for matching purposes.""" return t.strip().lower().replace(" ", "_")
def standardize_tag(tag): """Convert tag to a uniform string.""" return tag.lower().replace(" ", "_")
def pre_order_traversal(tree, path=None): """Visit curr -> left -> right""" if path is None: path = [] if tree: path.append(tree.value) pre_order_traversal(tree.left, path) pre_order_traversal(tree.right, path) return path
def alpha_vals(a,n): """ This function generates the values of \alpha to be used for cross validation. """ itrs=[] for i in range(0,n): itrs.append(a*0.9**i) itrs.reverse() return itrs
def read_data_line(line): """Process a line from data file. Assumes the following format: WORD W ER D i.e. word tab separated with pronunciation where pronunciation has space separated phonemes Args: line: string representing the one line of the data file Returns: chars: list of characters phones: list of phones """ line = line.strip() word, pronunciation = line.split(" ") #line.split("\t") chars = list(word.strip()) phones = pronunciation.strip().split(" ") return chars, phones
def format_number(number): """ Formats a number to a more readable format; 10000 -> 10,000 """ if isinstance(number, int): return '{:,d}'.format(number) if number is None: return 'Unknown' return '{:3.2f}'.format(number)
def merge_sort(arr): """ Merge sort repeatedly divides the arr then recombines the parts in sorted order """ l = len(arr) if l > 1: mid = l//2 L = arr[:mid] R = arr[mid:] merge_sort(L) merge_sort(R) i = j = k = 0 while i < len(L) and j < len(R): if L[i] < R[j]: arr[k] = L[i] i += 1 else: arr[k] = R[j] j += 1 k += 1 while i < len(L): arr[k] = L[i] i += 1 k += 1 while j < len(R): arr[k] = R[j] j += 1 k += 1 return arr
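A minimal usage sketch (illustrative, not from the source): merge_sort sorts the list in place and also returns it.
print(merge_sort([5, 2, 4, 1, 3]))  # [1, 2, 3, 4, 5]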
def map_key(key) -> str: """Map an incoming media key to its canonical name.""" lookup = { "volume_level": "volume", "title": "title", "subtitle": "subtitle", "series_title": "series", "season": "season", "episode": "episode", "artist": "artist", "album_name": "album", "track": "track", "images": "image", "player_state": "state", "volume_muted": "mute", "status_text": "text", "icon_url": "icon", "duration": "duration", "app_id": "app_id", "position": "position", } return lookup[key]
def _sourcenames(short=False): """Return a list with the source names. :param short: True for shorter names, defaults to False :type short: bool, optional :return: Source names. :rtype: dict [list [str]] """ if short is False: sources = ["psicov", "ccmpred", "deepmetapsicov"] else: sources = ["psicov", "ccmpred", "dmp"] return sources
def time_sync(time1, time2): """ finds the row index to time sync the collected data and returns start and end index """ ref = time2[1] ind = 0 while abs(ref - time1[ind]) > 5: ind += 1 start = time1[ind] return ind, ind+len(time2) # return 3580, 1157
def parse_sexp(sexp_iter): """ Transforms a sequence of s-expression tokens (given in string form) into a corresponding tree of strings. The given sequence is interpreted as the elements of an enclosing list expression, i.e. with a prepended "(" token and an appended ")" token. Example: parse_sexp transforms the sequence ("(", "a", "(", "bc", "d", ")", "1", ")", "5") into the structure [["a" ["bc" "d"] "1"], "5"]. :param sexp_iter: An iterator iterating over the tokens contained in in a s-expression. :return: The tree-structure representation of sexp_iter, with s-expression lists being represented using Python lists. :raises ValueError when sexp_string is malformed. """ results = [] def __recursively_parse_sexp(): result = [] found_expression_end = False for token_ in sexp_iter: if token_ == ")": found_expression_end = True break elif token_ == "(": result.append(__recursively_parse_sexp()) else: result.append(token_) if not found_expression_end: raise ValueError("Unterminated symbolic expression") return result for token in sexp_iter: if token == "(": results.append(__recursively_parse_sexp()) else: results.append(token) return results
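An illustrative call (not from the source): parse_sexp must be given an iterator rather than a list, because the recursive helper consumes the same iterator as the outer loop.
tokens = iter(["(", "a", "(", "bc", "d", ")", "1", ")", "5"])
print(parse_sexp(tokens))  # [['a', ['bc', 'd'], '1'], '5']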
def line_with_jacoco_test_header(line): """Check if the given string represents JaCoCo unit test header.""" return line == "Code coverage report BEGIN"
def multQuatLists(q0, q1): """Multiply two quaternions that are represented as lists.""" x0, y0, z0, w0 = q0 x1, y1, z1, w1 = q1 return [ w0 * x1 + x0 * w1 + y0 * z1 - z0 * y1, w0 * y1 - x0 * z1 + y0 * w1 + z0 * x1, w0 * z1 + x0 * y1 - y0 * x1 + z0 * w1, w0 * w1 - x0 * x1 - y0 * y1 - z0 * z1, ]
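A quick sanity check (not from the source), using the [x, y, z, w] component order the function expects: multiplying by the identity quaternion leaves the operand unchanged.
identity = [0.0, 0.0, 0.0, 1.0]
print(multQuatLists(identity, [0.5, 0.5, 0.5, 0.5]))  # [0.5, 0.5, 0.5, 0.5]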
def getSquareDistance(p1, p2): """ Square distance between two points """ dx = p1[0] - p2[0] dy = p1[1] - p2[1] return dx * dx + dy * dy
def validation(size, training): """ Obtain the validation set corresponding to the given training set """ result = [] for i in range(0, size): if i not in training: result.append(i) return result
def mean_and_error(_data): """A helper for creating error bar""" import numpy as np _data = np.array(_data) _two_sigma = 2*np.std(_data) _mean = np.mean(_data) print(f"{_mean:.0f} +- {_two_sigma:.0f}") return _mean, _two_sigma
def isclose(float_a, float_b, rel_tol=1e-9, abs_tol=0.0): """ Once Python3.5 is applicable this function can be replaced by math.isclose """ return (abs(float_a - float_b) <= max(rel_tol * max(abs(float_a), abs(float_b)), abs_tol))
def Dic_Subset_Begin(indic, end_num): """ Subset a dictionary by retaining only the first "end_num" elements of each value. Note: 1. Tested only for the case that indic[key] is a 1D ndarray. """ outdic={} for key,value in indic.items(): try: newvalue=value[0:end_num] except TypeError: newvalue=value outdic[key]=newvalue return outdic
def inline_product(factors, seed): """Computes a product, using the __imul__ operator. Args: seed (T): The starting total. The unit value. factors (iterable[T]): Values to multiply (with *=) into the total. Returns: T: The result of multiplying all the factors into the unit value. """ for r in factors: seed *= r return seed
def _careful_add(a, b): """Return the sum `a + b`, else whichever is not `None`, else `None`.""" if a is None: return b if b is None: return a return a + b
def image_directive(image, width=None, align=None, alt=None): """Generate an RST Image directive.""" if width is None: width = '300' if align is None: align = 'center' if alt is None: alt = image return (".. image:: %s\n :width: %s\n :align: %s\n" " :alt: %s\n") % (image, width, align, alt)
def read_label_file(file_path): """ Function to read labels from text files. Args: file_path: File path to labels. Returns: dict mapping integer label id to label string """ with open(file_path, "r") as f: lines = f.readlines() ret = {} for line in lines: pair = line.strip().split(maxsplit=1) ret[int(pair[0])] = pair[1].strip() return ret
def partition(data): """ Partitions a list of data into three sections which are lower than, equal to, and greater than the pivot, which is selected to be the last element in the list. """ pivot = len(data) - 1 lower, equal, upper = [], [], [] for i in range(0, len(data), 1): if data[i] > data[pivot]: upper.append(data[i]) elif data[i] == data[pivot]: equal.append(data[i]) else: lower.append(data[i]) return lower, equal, upper
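A small example (illustrative only): with pivot value 2 (the last element), the input splits into the lower, equal and upper parts.
print(partition([3, 1, 4, 1, 5, 2]))  # ([1, 1], [2], [3, 4, 5])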
def createLineSegment(sequence, maxLineSegmentLength, sequenceStringIndex): """ Creates a segment of the pyramid on one line. Params: sequence - Full string representing the sequence. maxLineSegmentLength - Maximum length of one line segment sequenceStringIndex - A reference to an index for fetching sequence characters. Returns: currentLineSegment - A segment that will be part of the current line to print. """ currentLineSegment = "" for _ in range(maxLineSegmentLength): currentLineSegment += sequence[sequenceStringIndex[0]] sequenceStringIndex[0] += 1 return currentLineSegment
def _merge_dict(a, b): """ Given two dicts, merge them into a new dict as a shallow copy. Keys on dictionary b will overwrite keys on dictionary a. :param a: a dictionary, may be None :type a: None | dict :param b: a dictionary, may be None :type b: None | dict :return: the result of merging a with b :rtype: dict """ if a is None and b is None: return {} if a is None: return b.copy() if b is None: return a.copy() z = a.copy() z.update(b) return z
def get_num_from_curr(raw_val): """ Strips currency formatting (leading/trailing '+', '$', spaces and thousands commas) and returns the numeric value """ op_str = raw_val.strip(' +$').replace(',', '') return float(op_str)
def ntyped(var, types): """ Ensure that the "var" argument is among the types passed as the "types" argument, or is None. @param var: The argument to be typed. @param types: A tuple of types to check. @type types: tuple @returns: The var argument. >>> a = ntyped('abc', str) >>> type(a) <type 'str'> >>> b = ntyped("abc", int) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE Traceback (most recent call last): ... AssertionError: Value 'abc' of type <type 'str'> is not among the allowed types: NoneType, <type 'int'> >>> c = ntyped(None, int) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE """ assert var is None or isinstance(var, types), \ 'Value %r of type %r is not among the allowed types: NoneType, %r' % (var, type(var), types) return var
def flatten(l): """ inefficiently flattens a list l: an arbitrary list """ if not l: return list(l) if isinstance(l[0], (list, tuple)): return flatten(l[0]) + flatten(l[1:]) return [l[0]] + flatten(l[1:])
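A short usage sketch (not from the source) showing the recursive flattening of nested lists:
print(flatten([1, [2, [3]], 4]))  # [1, 2, 3, 4]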
def conv_output_length(input_size, conv_size, stride, pad): """ calculates the output size along a single axis for a conv operation """ if input_size is None: return None without_stride = input_size + 2 * pad - conv_size + 1 # equivalent to np.ceil(without_stride / stride) output_size = (without_stride + stride - 1) // stride return output_size
def normalized_total_time(p, max_time=3600000): # by default 1 h (in ms) """If time was longer than max_time, then return max_time, otherwise return time.""" if p["result.totalTimeSystem"] == "3600.0": v = 3600000 # convert to ms (error in logging) else: v = int(float(p["result.totalTimeSystem"])) return max_time if v > max_time else v
def get_columns(table_name): """ It gets the content from any file with data in it(auto generated) and returns in list """ lines = [] try: with open(f"{table_name}/{table_name}_data.txt",encoding='UTF-8') as file: for line in file: line = line.strip() try: lines.append(line.removesuffix("-P").strip()) except Exception: lines.append(line) except FileNotFoundError: print("column file missing") return lines
def get_valid_place(place): """ Return valid place. Strip spaces and check for empty value.""" place = place.strip() if not place: raise ValueError('empty string') return place
def reciprocal(attrs, inputs, proto_obj): """Returns the reciprocal of the argument, element-wise.""" return 'reciprocal', attrs, inputs
def schema(): """Get record schema.""" return { 'allOf': [{ 'type': 'object', 'properties': { 'title_statement': { 'type': 'object', 'properties': { 'title': {'type': 'string'} } }, 'genre': {'type': 'string'}, }, }, { '$ref': 'http://inveniosoftware.org/schemas/' 'oaiserver/internal-v1.1.0.json', }] }
def dict_from_list(keyfunc, l): """ Generate a dictionary from a list where the keys for each element are generated based off of keyfunc. """ result = dict() for item in l: result[keyfunc(item)] = item return result
def write_file(filename, content, mode="w"): """ Write content to a filename """ with open(filename, mode) as filey: filey.writelines(content) return filename
def make_content_url(url, category=None): """ Returns URL with appropriate path appended if Skype shared content URL, e.g. "https://api.asm.skype.com/v1/objects/0-weu-d11-../views/imgpsh_fullsize" for "https://api.asm.skype.com/v1/objects/0-weu-d11-..". @param category type of content, e.g. "avatar" for avatar image """ if not url or "api.asm.skype.com/" not in url: return url if "avatar" == category and not url.endswith("/views/avatar_fullsize"): url += "/views/avatar_fullsize" elif "audio" == category and not url.endswith("/views/audio"): url += "/views/audio" elif "video" == category and not url.endswith("/views/video"): url += "/views/video" elif "sticker" == category and not url.endswith("/views/thumbnail"): url += "/views/thumbnail" elif "file" == category and not url.endswith("/views/original"): url += "/views/original" elif "/views/" not in url: url += "/views/imgpsh_fullsize" return url
def verify_metadata_version(metadata, version=None): """ Utility function to verify that the metadata has the correct version number. If no version number is passed, it will just extract the version number and return it. :param metadata: the content of an export archive metadata.json file :param version: string version number that the metadata is expected to have """ try: metadata_version = metadata['export_version'] except KeyError: raise ValueError('could not find the export_version field in the metadata') if version is None: return metadata_version if metadata_version != version: raise ValueError('expected export file with version {} but found version {}' .format(version, metadata_version))
def _parse_gcs_path(path: str): """Parses the provided GCS path into bucket name and blob name Args: path (str): Path to the GCS object Returns: bucket_name, blob_name: Strings denoting the bucket name and blob name """ header, rest = path.strip().split('//') if header != 'gs:': raise ValueError('Invalid GCS object path: %s' % path) bucket_name, blob_name = rest.split('/', 1) return bucket_name, blob_name
def find_spaces(string_to_check): """Returns a list of string indexes for each string this finds. Args: string_to_check; string: The string to scan. Returns: A list of string indexes. """ spaces = list() for index, character in enumerate(string_to_check): if character == ' ': spaces.append(index) return spaces
def clean_float(num): """ Converts a string that uses comma as the decimal separator (and dot as the thousands separator) to a float """ ntmp = num.replace('.', '').replace(',', '.') return float(ntmp)
def teamid(runid): """ SF_UMass_IESL1 -> UMass_IESL """ return runid.split("_", 1)[-1][:-1]
def find_lca(node, n1, n2): """Finds LCA of 2 numbers by recursively searching in left and right subtree input args : current node, the numbers whose lca is to be found, isFound list telling if the particular element is found returns : lca if found else None Time complexity : O(n), Space complexity : O(1) """ if node == None: return None # if one element is found in current node, we return it # in normal case, both elements are found in different subtrees, called recursively, which is handeled later in this function # there may also be the case when one element is ancestor of another, which is handeled here, as this node is tha lca if node.data == n1: return node elif node.data == n2: return node # Recursive calls lca_left = find_lca(node.left, n1, n2) lca_right = find_lca(node.right, n1, n2) if ( lca_left and lca_right ): # if both are not none, that is 1 element is found in each subtree return node # that means, current node is the lca else: # both numbers in same sub tree return ( lca_left if lca_left != None else lca_right )
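A minimal sketch (the Node class here is a hypothetical stand-in for whatever tree node type the caller uses, assuming only the data/left/right attributes that find_lca reads):
class Node:
    def __init__(self, data):
        self.data, self.left, self.right = data, None, None
root = Node(3)
root.left, root.right = Node(5), Node(1)
print(find_lca(root, 5, 1).data)  # 3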
def evaluate(h,x): """evaluate an instance x with an hypothesis h, that is a function X->{Yes,No}""" for i,feature in enumerate(h): if feature=="0": return "No" if feature!="?" and feature!=x[i]: return "No" return "Yes"
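An illustrative call (not from the source): a '?' feature accepts any value, '0' rejects everything, and any other feature must match the instance.
print(evaluate(["sunny", "?"], ["sunny", "warm"]))  # Yes
print(evaluate(["0", "?"], ["sunny", "warm"]))      # No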
def _parse_display(display): """Parse an X11 display value""" try: host, dpynum = display.rsplit(':', 1) if host.startswith('[') and host.endswith(']'): host = host[1:-1] idx = dpynum.find('.') if idx >= 0: screen = int(dpynum[idx+1:]) dpynum = dpynum[:idx] else: screen = 0 except (ValueError, UnicodeEncodeError): raise ValueError('Invalid X11 display') from None return host, dpynum, screen
def pluralize(count, singular, plural): """Return singular or plural depending on count""" if count == 1: return singular return plural
def dsigmoid(y): """Derivative function of the function represented in :py:func:`sigmoid`. * If :math:`y = tanh(x)`, then :math:`Dy = 1 - y^2`, * if :math:`y = s(x)`, then :math:`Ds = y - y^2`. * There are infinite sigmoid functions. Just put here the derivative of the ``sigmoid`` function. """ return 1 - y**2
def sigmoid(x): """sigmoid A sigmoid-like function. """ return x / (1 + abs(x))
def is_empty_line(line): """Checks whether a line is empty. A line is empty if it has only whitespace or consists only of line-break characters. """ return line.strip() == ''
def number_format(num, places=0): """Format a number with grouped thousands and given decimal places""" places = max(0,places) tmp = "%.*f" % (places, num) point = tmp.find(".") integer = (point == -1) and tmp or tmp[:point] decimal = (point != -1) and tmp[point:] or "" count = 0 formatted = [] for i in range(len(integer), 0, -1): count += 1 formatted.append(integer[i - 1]) if count % 3 == 0 and i - 1: formatted.append(",") integer = "".join(formatted[::-1]) return integer+decimal
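A usage sketch (illustrative only), grouping thousands and keeping two decimal places:
print(number_format(1234567.891, 2))  # 1,234,567.89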
def clean_ponctuation(text, activated=True): """Remove non alphanumeric characters from text (except spaces)""" if activated: text = ''.join(c if c.isalnum() else " " for c in text) return text
def multifilter(filters, result): """ Applies multiple filters to `result`. Returns: list: result, reduced by each filter. """ if not filters: return result for f in filters: result = list(filter(f, result)) return result
def is_string(value: str): """Return true if given value is a string""" return isinstance(value, str)
def get_ordinal_indicator(number: int) -> str: """ Returns the ordinal indicator ('st', 'nd', 'rd' or 'th') for an integer. Args: number (int): An integer for which the ordinal indicator will be determined. Returns: str The integer's ordinal indicator. """ ordinal_dict = {1: 'st', 2: 'nd', 3: 'rd'} if number % 100 not in (11, 12, 13): number %= 10 if number in ordinal_dict: return ordinal_dict[number] return 'th'
def getGbLen(str): """get length""" return len(str.encode('gb2312'))
def convert_dico(dico): """Gets the average age for every genre""" def normalize(dic): """Converts one of the nested dictionaries produced by dico_average_age into an integer average age in years""" average_in_days = dic['nb_days'].days / dic['nb_people'] average_age = int(average_in_days / 365) return average_age new_dic = {genre: normalize(dic) for genre, dic in dico.items()} return new_dic
def flat_result_2_row(predictions): """ flat the mitosis prediction result into rows Args: predictions: a tuple of (slide_id, ROI, mitosis_num, mitosis_location_scores), where mitosis_location_scores is a list of tuples (r, c, score) Return: a list of tuples(slide_id, ROI, mitosis_num, r, c, score) """ assert predictions is not None result = [] slide_id, ROI, mitosis_num, mitosis_location_scores = predictions for r, c, score in mitosis_location_scores: result.append((slide_id, ROI, mitosis_num, r, c, score)) return result
def clip_and_integer_coordinates(quad, im_dimentions): """ quad(list[list]): [[x1, y1], [x2, y2], ....] im_dimentions(tuple): w,h """ w, h = im_dimentions clip_x = lambda p: max(0, min(p, w)) clip_y = lambda p: max(0, min(p, h)) quad = [(int(clip_x(quad[i][0])), int(clip_y(quad[i][1]))) for i in range(len(quad))] return quad
def build_condition_pairs(conditions): """ Return the list of parameter pairs in a form of strings Inputs ------ conditions : dict dictionary with arguments Outputs ------- condition_pairs : list of str parameter pairs in strings """ condition_pairs = [] for key in conditions.keys(): if type(conditions[key]) == str: condition_pairs.append(key + '="' + conditions[key] + '"') else: condition_pairs.append(key + '=' + str(conditions[key])) condition_pairs = ' and '.join(condition_pairs) return condition_pairs
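A short example (illustrative, assuming Python 3.7+ dict ordering): string values are quoted, other values are stringified, and the pairs are joined with 'and'.
print(build_condition_pairs({"name": "alice", "age": 30}))  # name="alice" and age=30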
def fs_to_string(fs: float) -> str: """ Convert sampling frequency into a string for filenames Parameters ---------- fs : float The sampling frequency Returns ------- str Sample frequency converted to string for the purposes of a filename Examples -------- >>> from resistics.common import fs_to_string >>> fs_to_string(512.0) '512_000000' """ return (f"{fs:.6f}").replace(".", "_")
def eta_p(ranking, Ep, Vp): """Return the (normalised) eta_p measure. Parameters ---------- ranking : list or array-like ranking Ep : list or array-like comparison matrix. Ensure that it satisfies Ep[x][y] == 1 - Ep[y][x] and 0 <= Ep[x][y] <= 1. Vp : list or array-like variance matrix with the shape of (n, n). Ensure that it satisfies Vp[x][y] == Vp[y][x] and 0 <= Vp[x][y]. Returns ------- eta_p_mesure : float the value of eta_p measure for the ranking under the preference >>> eta_p([0, 1, 2], [[1, 1, 1], [0, 1, 1], [0, 0, 1]], [[0, 0, 0], [0, 0, 0], [0, 0, 0]]) 1.0 >>> eta_p([2, 1, 0], [[1, 1, 1], [0, 1, 1], [0, 0, 1]], [[0, 0, 0], [0, 0, 0], [0, 0, 0]]) -1.0 >>> eta_p([2, 1, 3, 0], [[0.5, 0.2, 0.4, 0.3], [0.8, 0.5, 0.1, 0.4], [0.6, 0.9, 0.5, 0.4], [0.7, 0.6, 0.6, 0.5]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) 0.6666666666666667 """ N = len(ranking) ideal = 0 _eta = 0 for i in range(N): for j in range(i+1, N): x = ranking[i] y = ranking[j] ep = Ep[x][y] vp = Vp[x][y] delta_ij = 1 if i < j else -1 true_delta_ij = 1 if ep > 0.5 else -1 label = (2 * ep - 1) / (1 + vp) _eta += label * delta_ij ideal += label * true_delta_ij eta_p_measure = _eta / ideal return eta_p_measure
def dualzahl(integer): """Converts a "normal" base-10 number into a binary number (base 2)""" bases = [1] base = 2 i = 1 while bases[-1]*2 <= integer: bases.append(2**i) i = i+1 bases.reverse() ziffern = "" for i in bases: ziffern = ziffern + str(int(integer/i)) integer = integer%i return int(ziffern)
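A quick check (not from the source): the result is returned as an int whose decimal digits are the binary digits.
print(dualzahl(10))  # 1010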
def splitImageFrames(image_in): """Splits an image (or multiple images as a list) into its individual frames, and returns a list containing the images. """ # Determine whether the input is a list of images or a single image if isinstance(image_in, list): full_images = [] # Iterate through images, creating a sublist of frames for each image for image in image_in: split_image = [None]*image.n_frames # Iterate through frames and copy each frame independently for i in range(image.n_frames): image.seek(i) split_image[i] = image.copy() full_images.append(split_image) return full_images else: split_image = [None]*image_in.n_frames # Iterate through frames and copy each frame independently for i in range(image_in.n_frames): image_in.seek(i) split_image[i] = image_in.copy() return split_image
def validate_byr(birth_year: str) -> bool: """byr (Birth Year) - four digits; at least 1920 and at most 2002.""" return len(birth_year) == 4 and int(birth_year) >= 1920 and int(birth_year) <= 2002
def get_api_url(method): """ Returns API URL for the given method. :param method: Method name :type method: str :returns: API URL for the given method :rtype: str """ return 'https://slack.com/api/{}'.format(method)
def get_conv_fname(fname: str, dest_ext: str, output_dir=None) -> str: """Returns the filename and path of the file after conversion Arguments: fname -- the original filename dest_ext -- the extension of the destination file output_dir -- the output directory of the operation """ if output_dir is not None: fname_wo_path = fname.split('/')[-1][:-4] + '.' + dest_ext if output_dir[-1] != '/': output_dir = output_dir + '/' return output_dir + fname_wo_path else: return fname[:-4] + '.' + dest_ext
def vaporViscosity(T, vVP): """ vaporViscosity(T, vVP) vaporViscosity (micropoise) = A + B*T + C*T^2 Parameters T, temperature in Kelvin vVP, A=vVP[0], B=vVP[1], C=vVP[2] A, B, and C are regression coefficients Returns vapor viscosity in micropoise at T """ return vVP[0] + vVP[1]*T + vVP[2]*T**2
def convert_8_int_to_tuple(int_date): """ Converts an 8-digit integer date (e.g. 20161231) to a date tuple (Y,M,D). """ return (int(str(int_date)[0:4]), int(str(int_date)[4:6]), int(str(int_date)[6:8]))
def filter_json(in_list): """ filters and returns the list of unique sensors found""" found_list = [] for device in in_list: for key, val in device.items(): if key == "sensor": for k, v in val.items(): if k == "id": found_list.append(int(v)) myset = set(found_list) # stop duplicate values being added unique_list = list(myset) # print (sorted(unique_list)) return sorted(unique_list)
def bounded_binary_search(generator, length, target, lower_bound, upper_bound): """ efficient binary search for a <target> value within bounds [<lower_bound>, <upper_bound>] - converges to a locally optimal result within the bounds - instead of indexing an iterable, lazy evaluate a functor for performance :param generator: a generator or functor that yields a value of the search area given an index :param length: full length of the search area :param target: the value to search :param lower_bound: the lower bound up to which results are accepted :param upper_bound: the upper bound up to which results are accepted :return: a tuple (found, index, residual, num_iter) - found is True if the search converged within the bounds, index is the last probed index, residual is the absolute difference between generator(index) and the target, num_iter is the number of iterations performed """ start, mid = 0, -1 end = length - 1 residual = 0.0 found = False num_iter = 0 while start <= end and not found: num_iter += 1 mid = (start + end) // 2 val = generator(mid) if lower_bound <= val <= upper_bound: residual = abs(val - target) if abs(generator(mid - 1) - target) <= residual: end = mid - 1 continue # refinement possible in left direction elif abs(generator(mid + 1) - target) < residual: start = mid + 1 continue # refinement possible in right direction else: found = True # converged if not found: if target < val: end = mid - 1 else: start = mid + 1 return found, mid, residual, num_iter
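A usage sketch (illustrative only, with an assumed linear generator): the function returns a 4-tuple rather than the (found, index) pair of a plain binary search.
found, idx, residual, iters = bounded_binary_search(lambda i: i * 0.5, 100, 10.0, 0.0, 50.0)
print(found, idx, residual)  # True 20 0.0, since generator(20) == 10.0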
def preorder_traversal_recursive(root): """ Return the preorder traversal of nodes' values. - Worst Time complexity: O(n) - Worst Space complexity: O(n) :param root: root node of given binary tree :type root: TreeNode or None :return: preorder traversal of nodes' values :rtype: list[int] """ # basic case if root is None: return [] # preorder traversal: root + left + right left = preorder_traversal_recursive(root.left) right = preorder_traversal_recursive(root.right) return [root.val] + left + right
def api_card(text: str, color: str) -> str: """Create a HTML Card for API display""" html = f""" <div class='demo_card' style='background-color:{color}'> <span>{text}</span> </div> """ return html
def get_group(items, group_size, group_id): """Get the items from the passed in group based on group size.""" start = group_size * (group_id - 1) end = start + group_size if start >= len(items) or start < 0: raise ValueError("Invalid test-group argument") return items[start:end]
def modname(name): """Get a stylized version of module name""" return "[\x032{}\x0f]".format(name)
def get_continious(objects_l): """ Flattens a list of lists (of lists) into a single list, to keep track of images and qs per timestep """ fixed_objs = [] for obj in objects_l: if obj: for _obj in obj: try: if _obj.any(): fixed_objs.append(_obj) except AttributeError: fixed_objs.append(_obj) return fixed_objs
def _get_url(env, endpoint): """ This function returns the http url """ return '{}/{}'.format(env['SERVER_URL'], endpoint)
def derivative(f, x, eps=1e-6): """ Computes a numerical approximation of the first derivative of a function. f -- Function to find the first derivative of x -- The value to calculate the first derivative for eps -- The value of epsilon in the equation, default = 1e-6 returns approximation of the first derivative of the function """ return (f(x + eps/2) - f(x - eps/2))/eps
def make_memoryview(obj, offset=-1, size=-1): """Uses Python2 buffer syntax to make memoryview""" if offset < 0: return memoryview(obj) elif size < 0: return memoryview(obj)[offset:] else: return memoryview(obj)[offset:offset+size]
def _filter_nones(centers_list): """ Filters out `None` from input list Parameters ---------- centers_list : list List potentially containing `None` elements Returns ------- new_list : list List without any `None` elements """ return [c for c in centers_list if c is not None]
def bbox_for_points(coords): """ Return bbox for a sequence of points. """ bbox = min(coords), max(coords) return bbox
def transfer(i_list,target): """ Return all the consecutive element in i_list equal to target and copy it into the Out list1, all the remaining element will be put into the Out list2. :param i_list: The source list :param target: The target element :return: (Out_list1,Out_list2) """ i=0 _shallow1 = [] _shallow2 = [] done = False while i < len(i_list): if i_list[i] != target or done: _shallow2.append(i_list[i]) i+=1 continue else: _shallow1.append(i_list[i]) j=i+1 while j < len(i_list) and i_list[j] == target: _shallow1.append(i_list[j]) j=j+1 done = True i=j return _shallow1,_shallow2
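An illustrative call (not from the source): only the first consecutive run of the target value is moved to the first list; everything else, including later occurrences of the target, goes to the second list.
print(transfer([1, 2, 2, 3, 2], 2))  # ([2, 2], [1, 3, 2])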
def create_bcs(fields, **namespace): """ Return a dict of DirichletBCs. """ return dict((field, []) for field in fields)
def _filter_similarity(mols, distance, generator, query_fps, cutoff): """Filter molecules by a certain distance to the reference fingerprints. User must supply distance funtion, FP generator, query FPs and cutoff.""" return list(filter( lambda q: any(distance(generator(q), q_fp) >= float(cutoff) for q_fp in query_fps), mols))
def binary_search(arr, elem): """Return the index of the given element within a sorted array, or -1 if it is not present.""" low = 0 high = len(arr) - 1 mid = 0 while low <= high: mid = (high + low) // 2 # Check if elem is present at mid if arr[mid] < elem: low = mid + 1 # If elem is greater, ignore left half elif arr[mid] > elem: high = mid - 1 # If elem is smaller, ignore right half else: return mid # If we reach here, then the element was not present return -1
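A small usage check (illustrative only) against a sorted list:
print(binary_search([1, 3, 5, 7, 9], 7))  # 3
print(binary_search([1, 3, 5, 7, 9], 4))  # -1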
def sec2days(seconds): """Seconds to number of days""" return seconds / (24.0 * 3600)
def play_opposite_corner(computer_piece, board_state): """Play on the corner opposite of the user's last move""" # All the possible corner pairs in Tic-Tac-Toe corner_pairs = [[1, 9], [3, 7]] squares_played = [] last_square_played = int(board_state[-1][1]) # Get all the squares from the board_state so we can check availability for move in board_state: squares_played.append(int(move[1])) # Check if the last move played is on a corner for pair in corner_pairs: if last_square_played in pair: pair.remove(last_square_played) # Return the opposite corner if it is available if pair[0] not in squares_played: return "{}{}".format(computer_piece, pair[0]) else: return None return None
def efficientnet_params(model_name): """Map EfficientNet model name to parameter coefficients. Args: model_name (str): Model name to be queried. Returns: params_dict[model_name]: A (width,depth,res,dropout) tuple. """ params_dict = { # Coefficients: width,depth,res,dropout 'efficientnet-b0': (1.0, 1.0, 512, 0.2), 'efficientnet-b1': (1.0, 1.1, 640, 0.2), 'efficientnet-b2': (1.1, 1.2, 768, 0.3), 'efficientnet-b3': (1.2, 1.4, 896, 0.3), 'efficientnet-b4': (1.4, 1.8, 1024, 0.4), 'efficientnet-b5': (1.6, 2.2, 1280, 0.4), 'efficientnet-b6': (1.8, 2.6, 1280, 0.5), 'efficientnet-b7': (2.0, 3.1, 600, 0.5), 'efficientnet-b8': (2.2, 3.6, 672, 0.5), 'efficientnet-l2': (4.3, 5.3, 800, 0.5), } return params_dict[model_name]
def filter_manifest_definition(manifest_definition, name_filter): """ Filters the manifest to only include functions that partially match the specified filter. :param manifest_definition: Dictionary of the manifest :param name_filter: A function name specified in the manifest :return: Filtered manifest definition """ manifest_definition['functions'] = {key: value for (key, value) in manifest_definition['functions'].items() if name_filter in key.lower()} return manifest_definition