def _str_to_int(_str): """Convert the input str to an int if possible :param _str: input string :return: integer if text is a digit, else string """ return int(_str) if _str.isdigit() else _str
def set_sampling_params(im_per_scene_per_camera=1, intensity_transfer=False, target_aug_im_num=5000, excluded_camera_models=None, excluded_datasets=None, save_as_16_bits=True, remove_saturated_pixels=False, saturation_level=0.97, output_img_size=None, cropping=True, color_temp_balance=True, lambda_r=0.7, lambda_g=1.2, k=15): """ Sets sampling parameters. Args: im_per_scene_per_camera: number of sampled images per scene per camera model; the default is 1. intensity_transfer: transfer the intensity of target image into the source image. This is useful for methods that are not relying on the log-chroma space as an input; the default is False. target_aug_im_num: target number of images for augmentation; the default is 5,000 images. excluded_camera_models: a list of excluded camera models from the source set (i.e., camera models listed here will not have any image content in the augmented set). The default value is None. excluded_datasets: Similar to excluded_camera_models, but here you can use dataset names to determine the excluded images. Options are: Cube+, Gehler-Shi, NUS, Cube+_challenge, and Cube+_challenge_2. The default value is None. save_as_16_bits: boolean flag to save augmented images in 16-bit format. remove_saturated_pixels: mask out saturated pixels of the augmented images; the default is False. saturation_level: if remove_saturated_pixels is True, the saturation_level determines the threshold of saturation; default is 0.97. output_img_size: size of output images; the default is [256, 384]. cropping: boolean flag to apply a random cropping in the augmented images; default is True. color_temp_balance: boolean flag to apply color temperature balancing as described in the paper. The default value is True. lambda_r: scale factor for the random shift applied to the red chroma channel in sampling (see the paper for more info.); default is 0.7. lambda_g: scale factor for the random shift applied to the green chroma channel in sampling (see the paper for more info.); default is 1.2. k: number of nearest neighbors; default is 15. Returns: params: a dict of sampling parameters. """ if excluded_camera_models is None: excluded_camera_models = [] if excluded_datasets is None: excluded_datasets = [] if output_img_size is None: output_img_size = [256, 384] params = {'images_per_scene_per_camera': im_per_scene_per_camera, 'intensity_transfer': intensity_transfer, 'total_number': target_aug_im_num, 'excluded_camera_models': excluded_camera_models, 'excluded_datasets': excluded_datasets, 'save_as_16_bits': save_as_16_bits, 'remove_saturated_pixels': remove_saturated_pixels, 'saturation_level': saturation_level, 'cropping': cropping, 'output_image_size': output_img_size, 'color_temp_balancing': color_temp_balance, 'lambda_r': lambda_r, 'lambda_g': lambda_g, 'k': k} return params
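A brief usage sketch may help; the argument values below are illustrative only and are not defaults recommended by the underlying augmentation paper:

# Hypothetical call: exclude one dataset and keep the default output size.
params = set_sampling_params(im_per_scene_per_camera=2,
                             excluded_datasets=['NUS'],
                             target_aug_im_num=1000)
assert params['images_per_scene_per_camera'] == 2
assert params['excluded_datasets'] == ['NUS']
assert params['output_image_size'] == [256, 384]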
def calculate_rating(rating): """Calculates overall rating from rating array.""" #Uses a simple averaging formula. A refinement could be to replace this with #a weighted formula. For instance giving greater weight for more popular options. cumulative = 0 weight = 0 for i in range(1,6): cumulative += rating[i] * i weight += rating[i] if weight > 0 and cumulative > 0: return cumulative / weight return 0
def invert(x): """ "Invert" a dictionary of dictionaries passed in i.e. swap inner & outer keys e.g. {"a":{"x":1,"y":2},"b":{"x":3,"y":4}} becomes {"x":{"a":1,"b":3},"y":{"a":2,"b":4}} """ # dict for output inv={} # iterate over the keys of first dictionary from input for k in list(list(x.values())[0].keys()): # index into each dictionary from input to get values for this key inv[k] = dict([(x,y[k]) for x,y in x.items()]) return inv
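A quick check of the documented behaviour (note the implicit assumption that every inner dict shares the same keys):

x = {"a": {"x": 1, "y": 2}, "b": {"x": 3, "y": 4}}
assert invert(x) == {"x": {"a": 1, "b": 3}, "y": {"a": 2, "b": 4}}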
def get_clean_layer_name(name): """Remove any unwanted characters from the layer name. Args: name (string): the layer name. Returns: string: the filtered layer name. """ delimit_chars = ":_/" for char in delimit_chars: name = name.split(char)[0] return name
def do_full_save(experiment_result): """This is a simple check to see if the final OOF ROC-AUC score is above 0.75. If it is, we return True; otherwise, we return False. As input, your do_full_save functions should expect an Experiment's result dictionary. This is actually the dictionary that gets saved as the Experiment's "description" file, so for more information on what's in there, look at any description file or see :attr:`hyperparameter_hunter.recorders.DescriptionRecorder.result` (the object passed to `do_full_save`)""" return experiment_result["final_evaluations"]["oof"]["roc_auc_score"] > 0.75
def maybe_quote(s): """ Enclose the string argument in single quotes if it looks like it needs it. Spaces and quotes will trigger; single quotes in the argument are escaped. This is only used to compose the --print output so need only satisfy shlex. """ NEED_QUOTE = u" \t\"\\'" clean = True for c in s: if c in NEED_QUOTE: clean = False break if clean: return s # Something needs quoting or escaping. QUOTE = u"'" ESC = u"\\" arg = [QUOTE] for c in s: if c == QUOTE: arg.append(QUOTE) arg.append(ESC) arg.append(QUOTE) elif c == ESC: arg.append(ESC) arg.append(c) arg.append(QUOTE) return ''.join(arg)
def islower(char): """ Indicates whether the char is a lowercase ASCII letter """ if len(char) == 1: if ord(char) >= 0x61 and ord(char) <= 0x7A: return True return False
def is_slice(idx): """ Check if `idx` is slice. """ #return isinstance(idx,slice) return type(idx)==slice
def get_param(dict, key, default): """ Return value from dictionary if key present, otherwise return default value""" val = dict[key] if key in dict else default return val
def pos_in_interval(pos, intervalstart, intervalend): """Check if position is in interval. Return boolean""" pos = int(pos) intervalstart = int(intervalstart) intervalend = int(intervalend) return pos >= intervalstart and pos <= intervalend
def undo(data): """Remove all `DotDict` instances from `data`. This function will recursively replace all `DotDict` instances with their plain Python equivalent. """ if not isinstance(data, (list, tuple, dict)): return data # Recursively convert all elements in lists and dicts. if isinstance(data, (list, tuple)): return [undo(_) for _ in data] else: return dict({k: undo(v) for k, v in data.items()})
def get_canonical_container_name(container): """Return the canonical container name, which should be of the form dusty_<service_name>_1. Containers are returned from the Python client with many names based on the containers to which they are linked, but simply taking the shortest name should be sufficient to get us the shortest one.""" return sorted(container['Names'], key=lambda name: len(name))[0][1:]
def _concept_to_lf(concept): """ Parse concept. Since fixed recursion we don't have to worry about precedence """ op = "" if "or" in concept: op = "or" elif "and" in concept: op = "and" if op: op_index = concept.index(op) left = concept[:op_index] right = concept[op_index + 1:] return ( op, _concept_to_lf(left), _concept_to_lf(right) ) if "not" in concept: assert len(concept) == 2, f"unable to parse {concept}" return ("not", (concept[1], )) assert len(concept) == 1, f"unable to parse {concept}" return (concept[0], )
def replace_urls_from_entinies(html, urls): """ :return: the html with the corresponding links from the entities """ for url in urls: link = '<a href="%s">%s</a>' % (url['url'], url['display_url']) html = html.replace(url['url'], link) return html
def recursive_update(default, custom): """A recursive version of Python's dict.update()""" if not isinstance(default, dict) or not isinstance(custom, dict): raise TypeError('Params of recursive_update() must be dictionaries.') for key in custom: if isinstance(custom[key], dict) and isinstance( default.get(key), dict): default[key] = recursive_update(default[key], custom[key]) else: default[key] = custom[key] return default
def _collector_url_from_hostport(secure, host, port, use_thrift): """ Create an appropriate collector URL given the parameters. `secure` should be a bool. """ if secure: protocol = 'https://' else: protocol = 'http://' if use_thrift: return ''.join([protocol, host, ':', str(port), '/_rpc/v1/reports/binary']) else: return ''.join([protocol, host, ':', str(port), '/api/v2/reports'])
def rgbToGray(r, g, b): """ Converts RGB to GrayScale using luminosity method :param r: red value (from 0.0 to 1.0) :param g: green value (from 0.0 to 1.0) :param b: blue value (from 0.0 to 1.0) :return GreyScale value (from 0.0 to 1.0) """ g = 0.21*r + 0.72*g + 0.07*b return g
def contain_filter(file, filters=None): """ Check if a file contains one or many of the substrings specified in filters :param file: :param filters: :return bool: """ if filters is None: return True for filter in filters: if len(file.split(filter)) >= 2: return True return False
def rstrip(s): """rstrip(s) -> string Return a copy of the string s with trailing whitespace removed. """ return s.rstrip()
def string_prepend(prefix: str, string: str): """Prepends each line in `string` with `prefix`.""" sub = "\n" + prefix return prefix + string.replace("\n", sub)
def shake_shake_eval(xa, xb): """Shake-shake regularization in testing mode. Args: xa: Input, branch A. xb: Input, branch B. Returns: Mix of input branches. """ # Blend between inputs A and B 50%-50%. return (xa + xb) * 0.5
def massage_decl(decl): """ Tart-up a C function declaration: remove storage qualifiers, smush onto one line, escape asterisks. """ for storage in 'extern static inline'.split(): decl = decl.replace(storage + ' ', '') fixed_lines = ' '.join(line.strip() for line in decl.splitlines()) return fixed_lines.replace('*', '\\*')
def collections_from_dict(dic): """ construct a Jekyll yaml collection (a dictionary) from a menu dict example input: {'ReadingNotes': ['Book_NeuralNetworksAndDeepLearning', 'test']} example output: {'ReadingNotes_Book_NeuralNetworksAndDeepLearning': {'output': True, 'permalink': '/ReadingNotes/Book_NeuralNetworksAndDeepLearning/:path'}, 'ReadingNotes_test': {'output': True, 'permalink': '/ReadingNotes/test/:path'}} """ collection = {} for k, v in dic.items(): menu_name_en = k for collection_directory in v: menu_iterm = 'collection_' + menu_name_en + '_' + collection_directory permalink = '/{0}/{1}/:path'.format(menu_name_en, collection_directory) collection[menu_iterm] = {'output': True, 'permalink': permalink} return collection
def gt_with_none(a, b): """Implementation of greater than return False if a or b are NoneType values""" if a is None or b is None: return False else: return a > b
def CtoKtoC_Conversion(inputTemp, outputTempType, isReturn): """This method converts to Celsius if given a Kelvin and vice versa. inputTemp = The input temperature value. outputTempType = Type of output temperature required. isReturn = Whether output is required in return.""" if outputTempType == 1: outputTemp = inputTemp + 273 else: outputTemp = inputTemp - 273 if isReturn: return outputTemp else: print(outputTemp, outputTempType) exit()
def create_reverse_dns(*resource_name_parts) -> str: """ Returns a name for the resource following the reverse domain name notation """ # See https://en.wikipedia.org/wiki/Reverse_domain_name_notation return ".".join(["io.simcore.storage", *map(str, resource_name_parts)])
def convert_c_to_f(temperature_c): """Convert Celsius to Fahrenheit.""" temperature_f = temperature_c * (9/5) +32 return temperature_f
def sumNumbers(root): """ :type root: TreeNode :rtype: int """ if root is None: return 0 parent, leaves = {}, [] stack = [root] while stack: node = stack.pop() if node.left is None and node.right is None: leaves.append(node) continue if node.left is not None: parent[node.left] = node stack.append(node.left) if node.right is not None: parent[node.right] = node stack.append(node.right) to_return, carry, i = 0, 0, 0 while leaves: to_add = sum([node.val for node in leaves]) + carry carry, digit = divmod(to_add, 10) to_return += digit*(10**i) leaves = [parent[node] for node in leaves if node in parent] i += 1 to_return += carry*(10**i) return to_return
def str_space_to_int_list(string): """ Converts a space-separated string into a list of integers. The string consists of the succession of the elements of the list separated by spaces. :param string: the string to convert. :return: the corresponding list of integers. """ string_list = string.split(" ") int_list = [] for element in string_list: if element != '': int_list.append(int(element)) return int_list
def valueForKeyList(d, keys, default=None): """Returns the value at the end of the list of keys. >>> d = {'a': 1, 'c': {'e': 5, 'd': 4}, 'b': 2, 'f': {'g': {'h': 8}}} >>> valueForKeyList(d, ('q',), "foo") 'foo' >>> valueForKeyList(d, (), "foo") {'a': 1, 'c': {'e': 5, 'd': 4}, 'b': 2, 'f': {'g': {'h': 8}}} """ for key in keys: if not key in d: return default d = d[key] return d
def check_hash(hash: bytes, leading_zeros: int)-> bool: """Check that the provided hash is prefixed with at least `leading_zeros` zero bits""" if leading_zeros >= 32 * 8: raise Exception(f"Requirement of {leading_zeros} leading zero bits is impossible; max is {32 * 8}") # convert bits to bytes, flooring leading_zero_bytes = leading_zeros // 8 # the bits that leak into the first non-zero byte remaining_bits = leading_zeros - leading_zero_bytes * 8 # take 0b11111111 and shift `remaining_bits` leading 0s into it # if the byte at index `leading_zero_bytes` exceeds this, there are insufficient leading 0s max_first_nonzero_byte = (2 ** 8 - 1) >> remaining_bits for i in range(leading_zero_bytes): if hash[i] != 0: return False if hash[leading_zero_bytes] > max_first_nonzero_byte: return False return True
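An illustrative check of the bit-level boundary (the 32-byte value below is fabricated for demonstration):

h = bytes([0x00, 0x7F]) + bytes(30)   # one zero byte, then 0b01111111
assert check_hash(h, 8) is True       # 8 leading zero bits
assert check_hash(h, 9) is True       # 0x7F contributes one more zero bit
assert check_hash(h, 10) is False     # but not two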
def format_context_as_squad(fiche_id, context): """ For fiches which have no question, add them without qas. """ res = { "title": fiche_id, "paragraphs": [ { "context": context, } ], } return res
def _calculate_num_learner_steps( num_observations: int, min_observations: int, observations_per_step: float, ) -> int: """Calculates the number of learner steps to do at step=num_observations.""" n = num_observations - min_observations if n < 0: # Do not do any learner steps until you have seen min_observations. return 0 if observations_per_step > 1: # One batch every 1/obs_per_step observations, otherwise zero. return int(n % int(observations_per_step) == 0) else: # Always return 1/obs_per_step batches every observation. return int(1 / observations_per_step)
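A few worked values, assuming the docstring's semantics (observations_per_step above 1 means one batch every that many observations; below 1 it means several batches per observation):

assert _calculate_num_learner_steps(100, 200, 4.0) == 0   # still below min_observations
assert _calculate_num_learner_steps(204, 200, 4.0) == 1   # (204 - 200) % 4 == 0
assert _calculate_num_learner_steps(205, 200, 4.0) == 0
assert _calculate_num_learner_steps(300, 200, 0.5) == 2   # 1 / 0.5 batches per observation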
def get_by_name(yaml, ifname): """Return the BondEthernet by name, if it exists. Return None,None otherwise.""" try: if ifname in yaml["bondethernets"]: return ifname, yaml["bondethernets"][ifname] except KeyError: pass return None, None
def serialize_ambito_publico(ambito_publico): """ # $ref: '#/components/schemas/ambitoPublico' """ if ambito_publico: return ambito_publico.codigo if ambito_publico.codigo else "EJECUTIVO" return "EJECUTIVO"
def redraw_in_scale(shape, scale): """ Takes a shape and redraws it at a bigger scale. The positions are offset but the colors are preserved. For simplicity the algorithm first rescales Ys then Xs. Each rescale uses anchor to calculate the position of new/old cells. The further away we are from the anchor the bigger the offset needs to be since we already added (scale-1) number of extra cells up to that point. The algorithm is new_pos = pos + (scale-1) * (pos - anchor) + 0:scale >>> redraw_in_scale([(0, 0, 5), (0, 1, 9)], 2) [(0, 0, 5), (0, 1, 5), (1, 0, 5), (1, 1, 5), (0, 2, 9), (0, 3, 9), (1, 2, 9), (1, 3, 9)] """ temp_new_shape = [] # For simplicity first rescale Ys anchor_y, _, _ = min(shape, key=lambda c: c[0]) # anchor for Y - used for progressive scaling for cell in shape: y, x, colour = cell for s in range(scale): new_y = y + (scale - 1) * (y - anchor_y) + s # rescale algorithm temp_new_shape.append((new_y, x, colour)) new_shape = [] # Then rescale Xs _, anchor_x, _ = min(temp_new_shape, key=lambda c: c[1]) # anchor for X - used for progressive scaling for cell in temp_new_shape: y, x, colour = cell for s in range(scale): new_x = x + (scale - 1) * (x - anchor_x) + s # rescale algorithm new_shape.append((y, new_x, colour)) return new_shape
def myshortcode(context, var): """ This is as example of a user-defined shortcode. """ return var.upper()
def get_items_from_triggers(triggers): """Given a list of Item rule triggers, extract the names of the Items and return them in a list. Arguments: - triggers: the list of rule trigger strings. Returns: A list of item_names. """ return [t.split(" ")[1] for t in triggers if t.startswith("Item")]
def task_accuracy_metrics(reward_list): """ Accuracy as percentage of examples that received rewards """ accuracy = sum(reward_list)*100/float(len(reward_list)) print("Total Reward: %s, Accuracy: %s %%"%(sum(reward_list),accuracy)) return accuracy
def flat_file_add_to_output_dict(output_dict, location_in_json, data_dict): """Add key value pairs to the output_dict""" for key, value in data_dict.items(): add_command = location_in_json + "[\"" + key + "\"] = " + value try: exec(add_command) except (NameError, SyntaxError): add_command = location_in_json + "[\"" + key + "\"] = \"" + value + "\"" exec(add_command) return output_dict
def is_iterable(x): """ Returns if given :math:`x` variable is iterable. Parameters ---------- x : object Variable to check the iterability. Returns ------- bool :math:`x` variable iterability. Examples -------- >>> is_iterable([1, 2, 3]) True >>> is_iterable(1) False """ try: for _ in x: break return True except TypeError: return False
def stirling_second(n: int, k: int): """ Computes Stirling number of the second kind Parameters ---------- n: int Numeric. If non-integer passed in, will attempt to cast as integer k: int Numeric. If non-integer passed in, will attempt to cast as integer Returns ------- int Stirling number of the second kind """ try: k, n = int(k), int(n) except (ValueError, TypeError): raise TypeError("`k` and `n` must both be integers") assert 0 <= k <= n, "`k` must be in the range of [0, `n`]" if n == 0 or n == k: return 1 if k == 0: return 0 s = [1, *[0] * (k - 1)] for _ in range(1, n): last_row = [*s] for i in range(1, k): s[i] = (i + 1) * last_row[i] + last_row[i - 1] return s[-1]
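A couple of sanity checks against well-known values of S(n, k):

assert stirling_second(4, 2) == 7    # partitions of {1,2,3,4} into 2 non-empty subsets
assert stirling_second(5, 3) == 25
assert stirling_second(6, 1) == 1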
def _lexographic_lt0(a1, a2): """ Compare two 1D numpy arrays lexicographically Parameters ---------- a1: ndarray 1D numpy array a2: ndarray 1D numpy array Returns ------- comparison: True if a1 < a2, False otherwise """ for e1, e2 in zip(a1, a2): if e1 < e2: return True elif e1 > e2: return False return len(a1) < len(a2)
def calculate_management_strategy(fef, mtbfa, mtbfgp): """ Function to calculate the minimum required management strategy for the entire program or a test phase. :param float fef: the average fix effectiveness factor over the period to calculate the management strategy. :param float mtbfa: the average MTBF over the first test phase. :param float mtbfgp: the growth potential MTBF. :return: _avg_ms :rtype: float """ try: _avg_ms = (1.0 - (mtbfa / mtbfgp)) / fef except ZeroDivisionError: _avg_ms = 1.0 return _avg_ms
def pk(y_true, y_pred, k): """ Function to calculate precision at k :param y_true: list of values, actual classes :param y_pred: list of values, predicted classes :param k: the value for k :return: precision at given value k """ # we are only interested in top-k predictions y_pred = y_pred[:k] # convert predictions to set pred_set = set(y_pred) # convert actual values to set true_set = set(y_true) # find common values common_values = pred_set.intersection(true_set) # return length of common values over k return len(common_values) / len(y_pred[:k])
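An illustrative call (note that order within the lists is ignored once they are converted to sets, and k is assumed to be positive so the division is safe):

# 2 of the top-3 predictions ([2, 5, 1]) appear in the ground truth.
assert abs(pk([1, 2, 3], [2, 5, 1, 9], k=3) - 2 / 3) < 1e-9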
def build_quarter_string(operational_year, operational_quarter): """ Helper function to build quarter name for JIRA. :param String operational_year: :param String operational_quarter: :return: Formatted Quarter name :rtype: String """ return 'Y%s-Q%s' % (operational_year, operational_quarter)
def filter_evidence_fn( example, y_input): # pylint: disable=unused-argument """Filter out claims/evidence that have zero length. Args: example: The encoded example y_input: Unused, contains the label, included for API compat Returns: True to preserve example, False to filter it out """ # Bert encodes text in evidence_text_word_ids. # Word embedding model uses evidence_text. if 'evidence_text_word_ids' in example: evidence_length = len(example['evidence_text_word_ids']) else: evidence_length = len(example['evidence_text']) # Explicit length check required. # Implicit length check causes TensorFlow to fail during tracing. if evidence_length != 0: return True else: return False
def _split_version_id(full_version_id): """Return server and version. Args: full_version_id: Value in the format that is set in the 'CURRENT_VERSION_ID' environment var. I.e. 'server:server_version.minor_version'. Returns: (server, server_version) tuple, or (None, server_version) if this is the default server. """ server_and_version = full_version_id.split('.')[0] result = server_and_version.split(':') if len(result) == 2: return (result[0], result[1]) else: return (None, result[0])
def is_good(res): """ is_good Check res is not None and res.attrib['stat'] == "ok" for XML object """ return False\ if res is None\ else (not res == "" and res.attrib['stat'] == "ok")
def twos_comp(val, bits): """Compute the 2's complement of int value val.""" if (val & (1 << (bits - 1))) != 0: # if sign bit is set val = val - (1 << bits) # compute negative value return val # return positive value as is
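For example, with an 8-bit width:

assert twos_comp(0xFF, 8) == -1    # sign bit set, so reinterpret as negative
assert twos_comp(0x7F, 8) == 127   # sign bit clear, value unchanged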
def sort_sons(items): """Sort sons from node.sons.items()""" return sorted(items, key=lambda it: it[1].data['rank'])
def intensity2color(scale): """Interpolate from pale grey to deep red-orange. Boundaries: min, 0.0: #cccccc = (204, 204, 204) max, 1.0: #ff2000 = (255, 32, 0) """ assert 0.0 <= scale <= 1.0 baseline = 204 max_rgb = (255, 32, 0) new_rgb = tuple(baseline + int(round(scale * (component - baseline))) for component in max_rgb) return new_rgb
def build_suggest_result(prefix, wd_search_results): """ Build suggest result set. Parameters: prefix (str): suggest prefix entry. wd_search_results (obj): Search result from Wikidata. Returns: suggest_result_data (obj): Result for the suggest request. """ suggest_result_data = {} suggest_result_data["result"] = [] if "wikitext" in prefix.lower() or prefix.lower() in "wikitext": result_item = {} result_item["id"] = "wikitext" result_item["name"] = "Wikitext" result_item["description"] = "Text associated with the file, in wiki markup" # Criteria to get "notable" will be determined later suggest_result_data["result"].append(result_item) if wd_search_results is not None: for result in wd_search_results: result_item = {} result_item["id"] = result["id"] result_item["name"] = result["label"] result_item["description"] = result["description"] # Criteria to get notables will be determined later suggest_result_data["result"].append(result_item) return suggest_result_data
def duns_screener(duns): """ Takes a duns number and returns a modified string to comply with DUNS+4 format common DUNS errors: * leading zero removed: len == 8 --> add leading zero back + '0000' trailing * 9-digits --> duns + '0000' * else: 'error' """ if len(duns) == 9: duns = duns + '0000' return duns elif len(duns) == 8: duns = '0' + duns + '0000' return duns else: return 'error'
def name_converter(name: str) -> str: """ Returns the correct dataset name for datasets begining with numbers. :param name: The name of the dataset to convert :return: The converted dataset name if required, else passed in name is returned. """ return { "jid_editorial_images_2018": "2018 JID Editorial Images" }.get(name, name)
def mutation_frequency(H, D): """ # ======================================================================== MUTATION FREQUENCY PURPOSE ------- Calculates the mutation frequency. INPUT ----- [INT] [H] The number of haplotypes. [2D ARRAY] [D] A distance matrix of haplotypes pair-wise genetic distances (fraction of nt differences). RETURN ------ [FLOAT] The mutation frequency. # ======================================================================== """ sumd = 0 for i in range(0, H): sumd += D[0][i] Mfe = float(sumd) / float(H) return Mfe
def check(file_path: str) -> bool: """ True if the given file path is a baseline file. """ return file_path.endswith('.baseline')
def isWinner(data,c1,c2): """ This function takes the preference ranking data as an input parameter. It computes the head to head winner for the two candidates that are passed into it as parameters. If the first candidate passed as a parameter wins against the second candidate, then the function will return true, else, the function will return false. Parameters: data - The preference ranking data as a list of lists. Return Value: True - if first candidate wins False - if first candidate does not win """ lenC1 = 0 lenC2 = 0 countC1 = 0 countC2 = 0 for c in data: for i in range(len(c)): if c[i] == c1: lenC1 = i elif c[i] == c2: lenC2 = i if lenC2 >= lenC1: countC1 = countC1 + 1 elif lenC2 <= lenC1: countC2 = countC2 + 1 if countC1 >= countC2: return True return False
def fits(bounds_inside, bounds_around): """Returns True if bounds_inside fits entirely within bounds_around.""" x1_min, y1_min, x1_max, y1_max = bounds_inside x2_min, y2_min, x2_max, y2_max = bounds_around return (x1_min >= x2_min and x1_max <= x2_max and y1_min >= y2_min and y1_max <= y2_max)
def is_hash160(s): """ Returns True if the considered string is a valid RIPEMD160 hash. """ if not s or not isinstance(s, str): return False if not len(s) == 40: return False for c in s: if (c < '0' or c > '9') and (c < 'A' or c > 'F') and (c < 'a' or c > 'f'): return False return True
def used_interfaces(list_of_address): """Returns a set of interfaces that the addresses are configured on""" interfaces = set() for address in list_of_address: interfaces.update([address.get_interface()]) return interfaces
def key_safe_data_access(data, key): """Safe key based data access. Traditional bracket access in Python (foo['bar']) will throw a KeyError (or IndexError if in a list) when encountering a non-existent key. foo.get(key, None) is solves this problem for objects, but doesn't work with lists. Thus this function serves to do safe access with a unified syntax for both lists and dictionaries. Args: data: The data object to search for the key within. key: The key to use for access. Returns: data[key] if key is present or None otherwise. """ try: return data[key] except (KeyError, IndexError): return None
def drop_placeholders(peaks): """Removes all placeholder peaks from an iterable of peaks Parameters ---------- peaks : Iterable of FittedPeak Returns ------- list """ return [peak for peak in peaks if peak.mz > 1 and peak.intensity > 1]
def calc_matrix_size(ver): """\ Returns the matrix size according to the provided `version`. Note: This function does not check if `version` is actually a valid (Micro) QR Code version. Invalid versions like ``41`` may return a size as well. :param int ver: (Micro) QR Code version constant. :rtype: int """ return ver * 4 + 17 if ver > 0 else (ver + 4) * 2 + 9
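Worked examples (treating a negative constant as a Micro QR version is an assumption about the caller's convention, e.g. -3 for M1):

assert calc_matrix_size(1) == 21     # QR version 1 -> 21x21 modules
assert calc_matrix_size(40) == 177   # QR version 40 -> 177x177 modules
assert calc_matrix_size(-3) == 11    # assumed Micro QR M1 -> 11x11 modules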
def cli_parser_var(var: str): """Parse runflow `--var` value.""" key, value = var.split("=") return key.strip(), value.strip()
def starts_with(value: str, arg: str) -> bool: """ Simple filter for checking if a string value starts with another string. Usage: ```django {% if request.url | starts_with:"/events" %} ... {% endif %} ``` """ return value.startswith(arg)
def arrayNetworkLength(networkLength, clusterNodesLength): """ Define how many cell switches (DCell) and PODs (FatTree) will be created on each cluster node or worker """ arrayNetworkLength = [networkLength // clusterNodesLength] * clusterNodesLength restNetworkLength = networkLength % clusterNodesLength for i in range(restNetworkLength): arrayNetworkLength[i] = arrayNetworkLength[i] + 1 return arrayNetworkLength
def chunks(l, n): """Yield successive n-sized chunks from l.""" ll=[] for i in range(0, len(l), n): ll.append(l[i:i+n]) return ll
def dump_flags(flag_bits, flags_dict): """Dump the bits in flag_bits using the flags_dict""" flags = [] for name, mask in flags_dict: if (flag_bits & mask) != 0: flags.append(name) if not flags: flags = ["0"] return "|".join(flags)
def badge(value, bg_color=None, show_empty=False): """ Display the specified number as a badge. Args: value: The value to be displayed within the badge bg_color: Background color CSS name show_empty: If true, display the badge even if value is None or zero """ return { 'value': value, 'bg_color': bg_color or 'secondary', 'show_empty': show_empty, }
def _extract_pipeline_of_pvalueish(pvalueish): """Extracts the pipeline that the given pvalueish belongs to.""" if isinstance(pvalueish, tuple): pvalue = pvalueish[0] elif isinstance(pvalueish, dict): pvalue = next(iter(pvalueish.values())) else: pvalue = pvalueish if hasattr(pvalue, 'pipeline'): return pvalue.pipeline return None
def str_or_null(res): """Return res as a string, unless res is None, in which case it returns the empty string. """ if res is None: return '' return str(res)
def center(width, n): """ Computes free space on the figure on both sides. :param width: :param n: number of algorithms :return: """ max_unit = 1 free_space = width - n * max_unit free_space = max(0, free_space / max_unit) free_left = free_space / 2 free_right = free_space / 2 return free_left, free_right
def extract_tokens(ngrams): """Extract and count tokens from JSTOR ngram data""" article_words = [] text = ngrams.rstrip() text_list = text.split('\n') for item in text_list: word_count = item.split('\t') if len(word_count[0]) < 3: # data cleanup: eliminate ocr noise and most roman numerals continue word = word_count[0] + ' ' count = int(word_count[1]) word_string = word * count word_string = word_string.rstrip() # remove extra space at the end word_list = word_string.split(' ') for word in word_list: article_words.append(word) token_string = ' '.join(article_words) return token_string
def most_common(hist): """Makes a list of word-freq pairs in descending order of frequency. hist: map from word to frequency returns: list of (frequency, word) pairs """ t = [] for key, value in hist.items(): t.append((value, key)) t.sort() t.reverse() return t
def de_unit_key(key: str) -> str: """Remove the unit from a key.""" if key.endswith("f"): return key[:-1] if key.endswith("in"): return key[:-2] if key.endswith("mph"): return key[:-3] return key
def find(predicate, seq): """A helper to return the first element found in the sequence that meets the predicate. For example: :: comment = find(lambda comment: comment.author.name == 'SilverElf', mod.comments.flatten()) would find the first :class:`.Comment` whose author's name is 'SilverElf' and return it. If no entry is found, then ``None`` is returned. This is different from `filter`_ due to the fact it stops the moment it finds a valid entry. .. _filter: https://docs.python.org/3.6/library/functions.html#filter Parameters ----------- predicate A function that returns a boolean-like result. seq : iterable The iterable to search through. """ for element in seq: if predicate(element): return element return None
def RGBtoHSL( rgb ): """ return a triple tuple containing HSL values given a triple tuple of rgb data ( blue, green, red ) format """ # R' = R/255 (G' = G/255, B' = B/255) Rp = rgb[2]/255 Gp = rgb[1]/255 Bp = rgb[0]/255 Cmax = max(Rp,Gp,Bp) Cmin = min(Rp,Gp,Bp) Delta = Cmax - Cmin if Delta == 0: Hue = 0 elif Cmax == Rp: Hue = 60*(((Gp-Bp)/Delta)%6) elif Cmax == Gp: Hue = 60*((Bp-Rp)/Delta + 2) else: Hue = 60*((Rp-Gp)/Delta + 4) Lit = (Cmax+Cmin)/2 if Delta == 0: Sat = 0 else: Sat = Delta/(1-abs(2*Lit-1)) #print("H:",Hue,"S:",Sat,"L:",Lit) return (Hue,Sat,Lit)
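A quick sanity check, keeping the (blue, green, red) input order the docstring specifies:

h, s, l = RGBtoHSL((0, 0, 255))          # pure red
assert (round(h), round(s, 2), round(l, 2)) == (0, 1.0, 0.5)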
def bulk_lookup(license_dict, pkg_list): """Lookup package licenses""" pkg_licenses = {} for pkg in pkg_list: # Failsafe in case the bom file contains incorrect entries if not pkg.get("name") or not pkg.get("version"): continue pkg_key = pkg["name"] + "@" + pkg["version"] if pkg.get("vendor"): pkg_key = pkg.get("vendor") + ":" + pkg["name"] + "@" + pkg["version"] for lic in pkg.get("licenses"): if lic == "X11": lic = "MIT" elif "MIT" in lic: lic = "MIT" curr_list = pkg_licenses.get(pkg_key, []) match_lic = license_dict.get(lic) if match_lic: curr_list.append(match_lic) pkg_licenses[pkg_key] = curr_list return pkg_licenses
def lr_poly(base_lr, i_iter, max_iters, power): """ Poly_LR scheduler """ return base_lr * ((1 - float(i_iter) / max_iters) ** power)
def AP(target, results): """ Description: Return AP(Average Precision) with target and results Parameters: target: list of K retrieved items (type: list, len: K) [Example] [tag1, tag2, ..., tagK] results: list of N retrieved items (type: list, shape: (N, ?)) [Example] [[tagA, tagB, ..., tagG], ..., [tagX, tagY, ..., tagZ]] """ # initiate variables for average precision n = 1 # the number of result hit = 0 # the number of hit ap = 0 # average precision = 1/hit * sum(precision) len_target = len(target) for res in results: (small_set, big_set) = (target, res) if len_target < len(res) else (res, target) for item in small_set: if item in big_set: # hit hit += 1 ap += hit / n break n += 1 return ap / hit if hit > 0 else 0
def list2str(lists): """ list to str """ return str(list(lists)).replace('\'', '').replace('\\n', '\n').replace(', ', '\n')[1:-1]
def get_letterbox_image_embedding(img_h, img_w, target_letterbox_dim): """ ---------- Author: Damon Gwinn (gwinndr) ---------- - Computes embedding information for a letterbox input format - Information is the size of the embedded image and where the embedded image is in the letterbox ---------- """ ratio = img_w / img_h if(img_w >= img_h): embed_w = target_letterbox_dim embed_h = round(embed_w / ratio) else: embed_h = target_letterbox_dim embed_w = round(embed_h * ratio) start_x = (target_letterbox_dim - embed_w) // 2 start_y = (target_letterbox_dim - embed_h) // 2 return embed_h, embed_w, start_y, start_x
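For instance, letterboxing a 640x480 (w x h) image into a 416-pixel square keeps the aspect ratio and pads the top and bottom:

embed_h, embed_w, start_y, start_x = get_letterbox_image_embedding(480, 640, 416)
assert (embed_h, embed_w, start_y, start_x) == (312, 416, 52, 0)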
def count_periods(start, end, period_length): """ :param start: unix time, excluded :param end: unix time, included :param period_length: length of the period :return: """ return (int(end)-int(start)) // period_length
def serialize_int(x: int) -> bytes: """ Efficiently convert a python integer to a serializable byte-string. """ byte_size = (int.bit_length(x) + 8) // 8 return int.to_bytes(x, length=byte_size, byteorder='big')
def get_emails(data): """Extracts only emails from below structure and returns them as list data = [ { ...: ..., 'Attributes': [ { 'Name': 'email' 'Value': '...' }, ... ], }, { ... } ] Args: data (List): List of objects representing user in Cognito Returns: List: List of emails """ emails = [] for item in data: email = [i['Value'] for i in item['Attributes'] if i['Name'] == 'email'][0] emails.append(email) return emails
def decodeString(s: str) -> str: """Return decoded string, given encoded string 's'.""" def decodeScope(i: int = 0, repeat: int = 1): """Decode the encoding scope beginning at index 'i', given the enclosing number of 'repeat's. Returns a tuple of the next index to process along with the decoded string for this scope.""" decoded = [] while i < len(s): if s[i].isdigit(): # Found 'k[encoded_string]'. # ^ # Parse numerical value for the repeat of the inner scope. nextRepeat = 0 while s[i] != '[': nextRepeat *= 10 nextRepeat += int(s[i]) i += 1 # Found 'k[encoded_string]', parsed 'k' as 'nextRepeat'. # ^ # Inner scope begins at index 'i + 1'. i, innerDecoded = decodeScope(i + 1, nextRepeat) decoded += innerDecoded elif s[i] == ']': i += 1 break else: decoded.append(s[i]) i += 1 return i, (decoded * repeat) _, decoded = decodeScope() return ''.join(decoded)
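Two standard examples of this nested run-length decoding:

assert decodeString("3[a]2[bc]") == "aaabcbc"
assert decodeString("3[a2[c]]") == "accaccacc"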
def parse_both_2(image_results): """ parses the tags and repos from a image_results with the format: { 'image': [{ 'pluginImage': { 'ibmContainerRegistry': 'internalRepo/name' 'publicRegistry': 'repo/name' }, 'driverImage': { 'ibmContainerRegistry': 'internalRepo/name' 'publicRegistry': 'repo/name' }, 'pluginBuild': 'X.X.X', 'driverBuild': 'X.X.X', 'pullPolicy': 'Always' }], 'pluginImage': [{EXACT SAME CONTENTS AS ABOVE}], 'driverImage': [{EXACT SAME CONTENTS AS ABOVE}] } Current known apps with this format: ibm-object-storage-plugin """ tags = [] repos = [] image_info = image_results['image'][0] for k, v in image_info.items(): if "Build" in k: tags.append(v) elif "Image" in k: repos.append(v['publicRegistry']) return tags, repos
def find_parent(lst, i, dist): """Finds the parent node of the given node in a pre-order traversal list. Args: lst: a list that contains a pre-order traversal of a free-tree i: the index of the actual node dist: the distance of the actual node Returns: int: the index of the node's parent (-1 if it has no parent) """ while i >= 0: if lst[i] < dist: break i -= 1 return i
def EVLAGetSessionCode( fileDict ): """ Get the project session code from a fileDict returned by PipeUtil.ParseASDM. * fileDict = dictionary returned by ParseASDM """ # Get session from archive file name session = 'XX' #VLBA pattern = re.compile(r'EVLA_[A-Za-z]+[0-9]+([A-Za-z]+)') #VLBA match = re.match( pattern, fileDict['logical_file'] ) #VLBA if match: #VLBA session = match.group(1) return session
def extract_datainput_name(stanza_name): """ stanza_name: string like aws_s3://my_s3_data_input """ sep = "://" try: idx = stanza_name.index(sep) except ValueError: return stanza_name return stanza_name[idx + len(sep):]
def parse_indices(in_: str) -> list: """Parse indices from comma-separated and ranged index string.""" comps = in_.split(',') indices = set() for comp in comps: if '-' in comp: low, high = comp.split('-') indices.update(range(int(low), int(high) + 1)) else: indices.add(int(comp)) return sorted(indices)
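For example, ranges are inclusive and duplicates collapse:

assert parse_indices("1,3-5,4") == [1, 3, 4, 5]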
def plot_landmarks(axis, landmarks, **kwargs): """Plot markers at ``landmark`` locations in ``axis``.""" color = kwargs.pop('color', 'k') lbl = kwargs.pop('label', '') marker = kwargs.pop('marker','^') for x, y in landmarks: axis.plot([x], [y], marker=marker, color=color, label=lbl, **kwargs) return axis
def lineStartingWith(string, lines): """ Searches through the specified list of strings and returns the first line starting with the specified search string, or None if not found """ for line in lines: if line.startswith(string): return line else: return None
def reduce_value(value, default=''): """ :return: a single value from lists, tuples or sets with one item; otherwise, the value itself if not empty or the default if it is. """ if hasattr(value, '__len__'): vlen = len(value) if vlen == 0: return default elif vlen == 1: if isinstance(value, set): return value.pop() elif isinstance(value, (list, tuple)): return value[0] return default if value is None else value
def count_a_in_b_unique(a, b): """Count unique items. Args: a (List): list of lists. b (List): list of lists. Returns: count (int): number of elements of a in b. """ count = 0 for el in a: if el in b: count += 1 return count
def get_url(position, location): """Generate url from position and location""" # the placeholder URL carries the q={} and l={} slots that are filled below placeholder = 'https://nz.indeed.com/jobs?q={}&l={}' # Format correctly to reuse the url for multiple jobs. url = placeholder.format(position, location) return url
def line_from_two_points(x1, y1, x2, y2): """ Helper function to return the equation of a line passing through any two points. :Parameters: x1: float X coordinate of first point y1: float Y coordinate of first point x2: float X coordinate of second point y2: float Y coordinate of second point :Returns: (slope, intercept) or (None, xposition) if the slope is infinite. """ xdiff = (x2-x1) if abs(xdiff) > 0.0: ydiff = (y2-y1) slope = ydiff / xdiff intercept = y1 - slope * x1 return (slope, intercept) else: return (None, x1)
def factorial(num): """ Calculate the factorial of a positive integer num Assumption: num is greater than 0""" if num == 1: return num else: return num * factorial(num - 1)