content
stringlengths
42
6.51k
def noise_pos(feature, thres):
    """Return indices of "noise" positions.

    A position is noise when its maximum count across all samples is
    below ``thres``.

    Args:
        feature: sequence of (label, counts) pairs, where counts is a
            per-position sequence of numbers; all counts have equal length.
        thres: threshold; positions whose maximum count < thres are noise.

    Returns:
        list[int]: indices of noisy positions.
    """
    n_pos = []
    for i in range(len(feature[0][1])):
        # Highest count observed for position i over every sample.
        # (The original also accumulated a running total here, but it
        # was never used, so it has been removed.)
        max_num = max(sample[1][i] for sample in feature)
        if max_num < thres:
            n_pos.append(i)
    return n_pos
def cipherpass(passwd):
    """Cipher *passwd* by doubling each character's code point.

    Each character c is replaced by chr(2 * ord(c)); the transformed
    characters are joined into the returned ciphered string.
    """
    return ''.join(chr(2 * ord(ch)) for ch in passwd)
def format_vcf_genotype(vcf_allele1_char, vcf_allele2_char, ploidy):
    """Build the VCF genotype string for a pair of allele characters.

    Haploid calls (ploidy == 1) with matching alleles collapse to a single
    allele; otherwise the two alleles are emitted in ascending order,
    separated by '/'.

    Args:
        vcf_allele1_char (string): 0, 1, 2, etc.
        vcf_allele2_char (string): 0, 1, 2, etc.
        ploidy (int): expected ploidy (must be < 3).

    Returns:
        string: genotype such as "0/1", or a single allele when haploid.
    """
    assert ploidy < 3
    # Single-allele form only for haploid calls with identical alleles.
    if ploidy == 1 and vcf_allele1_char == vcf_allele2_char:
        return str(vcf_allele1_char)
    first, second = vcf_allele1_char, vcf_allele2_char
    if second < first:
        first, second = second, first
    return str(first) + "/" + str(second)
def get_fully_qualified_name(instance: object) -> str:
    """Return the dotted, module-qualified class name of *instance*.

    For example an instance of py_trees.composites.Sequence becomes
    'py_trees.composites.Sequence'; builtins come back bare ('int').

    Args:
        instance (:obj:`object`): an instance of any class

    Returns:
        :obj:`str`: the fully qualified name
    """
    cls = instance.__class__
    module = cls.__module__
    # Discover the builtins module name via a known builtin type instead
    # of hard-coding the string.
    builtin_module = str.__class__.__module__
    if module is None or module == builtin_module:
        return cls.__name__
    return "{}.{}".format(module, cls.__name__)
def add_comma_separation(input: int) -> str:
    """Render an integer with comma thousands separators."""
    return format(input, ',.0f')
def centripetal_force(m, v, r):
    """Compute the centripetal force m * v**2 / r.

    The centripetal force is the net force keeping a mass ``m`` moving at
    speed ``v`` on a circular path of radius ``r``.

    Parameters
    ----------
    m : float
    v : float
    r : float

    Returns
    -------
    float
    """
    return (m * v * v) / r
def setup_caves(cave_numbers):
    """Create the starting list of caves: one empty list per entry in
    *cave_numbers*."""
    return [[] for _ in cave_numbers]
def build_config_keys_map(spec):
    """Build the config keys map from a component spec.

    Return
    ------
    Dict where each item:

        <config_key>: {"group": <grouping>, "type": <http|message_router|data_router>}

    where grouping is one of "streams_publishes", "streams_subscribes",
    "services_calls" (the latter carries no "type").
    """
    result = {}
    # http subscribers carry no config key, hence the presence check.
    for sub in spec["streams"]["subscribes"]:
        if "config_key" in sub:
            result[sub["config_key"]] = {"group": "streams_subscribes",
                                         "type": sub["type"]}
    for pub in spec["streams"]["publishes"]:
        result[pub["config_key"]] = {"group": "streams_publishes",
                                     "type": pub["type"]}
    for call in spec["services"]["calls"]:
        result[call["config_key"]] = {"group": "services_calls"}
    return result
def _clean_string(s): """ Removing special characters that might not be able to render HTML """ return s.replace('"', '').replace('\'', '-')
def formatRegion(r):
    """Format a lat/lon region specifier as a filename-safe string.

    A string passes through unchanged; a 4-element sequence is rendered as
    'region-<a>-<b>by<c>-<d>' with '-' signs replaced by 'm'.
    """
    if isinstance(r, str):
        return r
    # Minus signs become 'm' so the result is safe in file names.
    parts = tuple(str(v).replace('-', 'm') for v in r)
    return 'region-%s-%sby%s-%s' % parts
def isCompleteRotate(tup):
    """Return whether the given parm tuple represents a full set of Euler
    rotates.

    A complete rotate tuple must have at least 3 components, none of which
    is hidden or locked. For locked HDAs, every component must additionally
    share a common referencing parm tuple named "r".

    NOTE(review): this operates on a Houdini (hou) parm-tuple API —
    behavior of isHidden/isLocked/parmsReferencingThis is assumed from the
    call sites; confirm against the hou documentation.
    """
    bool = True
    if len(tup) < 3:
        bool = False
    for p in tup:
        # Any hidden or locked component disqualifies the tuple.
        if p.isHidden() or p.isLocked():
            bool = False
    if tup.node().isLockedHDA():
        # take the first component as sample and get all referenced parms of name 'r'
        # then iterate over remaining components and check for common parm tuple
        # NOTE(review): refs starts out as a generator, so the repeated
        # `in refs` membership tests below consume it during the first
        # outer iteration — later tests may see an exhausted iterator.
        # Also the inner loop rebinds `p`, shadowing the outer loop
        # variable. Both look unintended — confirm before relying on this.
        refs = (ref.tuple() for ref in tup[0].parmsReferencingThis() if ref.tuple().name() == "r")
        for p in tup:
            _tmprefs = ()
            for p in p.parmsReferencingThis():
                if p.tuple() in refs:
                    _tmprefs += (p.tuple(),)
            # Narrow refs to the tuples common with this component.
            refs = _tmprefs
        if not refs:
            bool = False
    return bool
def handlerInit(parent, n):
    """Initialize the handler.

    No real initialization is needed, but a state dict must be returned;
    both arguments are accepted for interface compatibility and ignored.
    """
    return dict(users={})
def step_nfa(N, q, c):
    """Single NFA step: states reached from q on symbol c via N's Delta.

    In : N (consistent NFA), q (state in N), c (symbol in N's sigma or "").
    Out: set of states reached; EClosure is NOT performed. An undefined
    move yields the empty set.
    """
    assert (c in (N["Sigma"] | {""})), "c given to step_nfa not in Sigma."
    assert (q in N["Q"]), "q given to step_nfa not in Q."
    # Rather than totalizing the NFA (expensive), treat missing entries
    # as transitions to the empty set.
    return N["Delta"].get((q, c), set())
def realizar_conteo(arr):
    """Count combinations of (computed class, real class) pairs.

    Returns a nested dict: result[first][second] -> occurrence count,
    where first/second are items[0]/items[1] of each entry in *arr*.
    """
    res = {}
    for itm in arr:
        inner = res.setdefault(itm[0], {})
        inner[itm[1]] = inner.get(itm[1], 0) + 1
    return res
def mk_uni_port_num(intf_id, onu_id):
    """Create a unique virtual UNI port number from PON and ONU IDs.

    :param intf_id: (int) PON interface ID
    :param onu_id: (int) ONU ID (0..max)
    :return: (int) UNI port number (intf_id in bits 11+, onu_id in bits 4-10)
    """
    return (intf_id << 11) | (onu_id << 4)
def validate_medical_image_specific(in_dict):
    """Validate a {"patient": int, "file_name": str} request dictionary.

    Checks that the medical record number is an int and the file name is a
    string.

    Parameters
    ----------
    in_dict : dict
        Patient medical record number and file name.

    Returns
    -------
    bool or str
        True on success; otherwise a descriptive error string (note that
        strings are truthy — callers should compare against True).
    """
    for key in list(in_dict.keys()):
        if key == "patient":
            if type(in_dict[key]) == int:
                continue
            return "A valid patient id was not provided, try again"
        if key == "file_name":
            if type(in_dict[key]) == str:
                continue
            return "A valid filename was not provided, try again"
        # Any other key is rejected outright.
        return "The input dictionary has unusable information, try again"
    return True
def _relative_degree(z, p): """ Return relative degree of transfer function from zeros and poles. This is simply len(p) - len(z), which must be nonnegative. A ValueError is raised if len(p) < len(z). """ degree = len(p) - len(z) if degree < 0: raise ValueError("Improper transfer function. " "Must have at least as many poles as zeros.") return degree
def get_program_argument(problem_details):
    """Map a problem payload onto per-entity message strings.

    In here you can make the mapping and translation of the different
    parameter values and attributes for the Incident Software of your
    desire.

    Args:
        problem_details: dict with problem fields plus either
            'rankedImpacts' (ProblemID) or 'rankedEvents' (ProblemFeed).

    Returns:
        list[str]: one formatted message per ranked entity.
    """
    nr = problem_details['displayName']
    status = problem_details['status']
    severity = problem_details['severityLevel']
    impact = problem_details['impactLevel']
    tags = problem_details['tagsOfAffectedEntities']
    msg = "Problem [{0}]: Status={1}, Severity={2}, ImpactLevel={3}, Tags={4}".format(
        nr, status, severity, impact, tags)
    # Get the elements. Key from ProblemID differs from ProblemFeed
    # (rankedImpacts/rankedEvents).
    if 'rankedImpacts' in problem_details:
        elements = problem_details['rankedImpacts']
    else:
        elements = problem_details['rankedEvents']
    # For each ranked impact (entity), one message for the Incident Software.
    arguments_list = []
    for element in elements:
        e_name = element['entityName']
        e_severity = element['severityLevel']
        e_impact = element['impactLevel']
        e_eventType = element['eventType']
        # Fix: impactLevel/severity values were previously swapped in the
        # format arguments (severityLevel printed as impactLevel and
        # vice versa).
        element_msg = msg + " Entity details: Entity={0}, impactLevel={1}, severity={2}, eventType={3}".format(
            e_name, e_impact, e_severity, e_eventType)
        arguments_list.append(element_msg)
    return arguments_list
def ipv4_filter(value, index=0, pattern=None):
    """
    IPv4 address filter:
    - check if string length is >7 (e.g. not just 4 digits and 3 dots)
    - check that it parses as four numeric octets (robustness fix: the
      original raised IndexError/ValueError on malformed strings)
    - check if not in list of bogon IP address ranges
    return True if OK, False otherwise.

    index and pattern are accepted for interface compatibility and unused.
    """
    ip = value
    # check if string length is >7 (e.g. not just 4 digits and 3 dots)
    if len(ip) < 8:
        return False
    ip_bytes = ip.split('.')
    # Robustness fix: reject anything that is not four numeric octets.
    if len(ip_bytes) != 4 or not all(part.isdigit() for part in ip_bytes):
        return False
    # BOGON IP ADDRESS RANGES:
    # source: http://www.team-cymru.org/Services/Bogons/bogon-dd.html
    # extract 1st and 2nd decimal number from IP as int:
    byte1 = int(ip_bytes[0])
    byte2 = int(ip_bytes[1])
    # 0.0.0.0 255.0.0.0
    if ip.startswith('0.'):
        return False
    # actually we might want to see the following bogon IPs if malware uses them
    # => this should be an option
    # 10.0.0.0 255.0.0.0
    if ip.startswith('10.'):
        return False
    # 100.64.0.0 255.192.0.0
    if ip.startswith('100.') and (byte2 & 192 == 64):
        return False
    # 127.0.0.0 255.0.0.0
    if ip.startswith('127.'):
        return False
    # 169.254.0.0 255.255.0.0
    if ip.startswith('169.254.'):
        return False
    # 172.16.0.0 255.240.0.0
    if ip.startswith('172.') and (byte2 & 240 == 16):
        return False
    # 192.0.0.0 255.255.255.0
    if ip.startswith('192.0.0.'):
        return False
    # 192.0.2.0 255.255.255.0
    if ip.startswith('192.0.2.'):
        return False
    # 192.168.0.0 255.255.0.0
    if ip.startswith('192.168.'):
        return False
    # 198.18.0.0 255.254.0.0
    if ip.startswith('198.') and (byte2 & 254 == 18):
        return False
    # 198.51.100.0 255.255.255.0
    if ip.startswith('198.51.100.'):
        return False
    # 203.0.113.0 255.255.255.0
    if ip.startswith('203.0.113.'):
        return False
    # 224.0.0.0 240.0.0.0
    if byte1 & 240 == 224:
        return False
    # 240.0.0.0 240.0.0.0
    if byte1 & 240 == 240:
        return False
    # also reject IPs ending with .0 or .255
    if ip.endswith('.0') or ip.endswith('.255'):
        return False
    # otherwise it's a valid IP address
    return True
def dist_reward(old, act):
    """Reward in [-1, 1] for the snake's movement relative to the food.

    1 means moving directly toward the target, -1 directly away; changes
    larger than 10 in either direction yield 0.

    :param old: old distance between snake head and food
    :param act: new distance between snake head and food
    :type old: float
    :type act: float
    :rtype: float
    """
    delta = act - old
    # Outside the +/-10 window the change is ignored.
    return -delta / 10 if -10 <= delta <= 10 else 0
def validate_eye_color(eye_color: str) -> bool:
    """Validate an eye color against the allowed three-letter codes."""
    valid_colors = {"amb", "blu", "brn", "gry", "grn", "hzl", "oth"}
    return eye_color in valid_colors
def insert_gamelog(predict_gamelog, training_gamelog):
    """Concatenate the training gamelog followed by the prediction gamelog
    into a new list (inputs are not modified)."""
    return list(training_gamelog) + list(predict_gamelog)
def strip_langs(url, lang):
    """Strip the first path segment equal to *lang* from *url*, if any."""
    segments = url.split('/')
    if lang not in segments:
        return url
    segments.remove(lang)  # removes only the first occurrence
    return '/'.join(segments)
def create_ip_list_from_operator_log(json_response_operator_log):
    """Collect IPs of devices deleted at IMC.

    Only entries whose resultStr is "Success" contribute; the IP is the
    text between the last pair of parentheses in the description.
    """
    deleted_ips = list()
    for entry in json_response_operator_log:
        print(entry)  # kept: original trace output for every entry
        if entry["resultStr"] != "Success":
            continue
        description = entry["description"]
        start = description.rindex("(") + 1
        end = description.rindex(")")
        deleted_ips.append(description[start:end])
    return deleted_ips
def _iterate(discovery_iter): """Iterate discovered endpoints.""" hostports = [] for (_, hostport) in discovery_iter: if not hostport: continue host, port = hostport.split(':') hostports.append((host, int(port))) return hostports
def prime_factorization(n):
    """Return the prime factorization of `n`.

    from https://scientific-python-101.readthedocs.io/python/exercises/prime_factorization.html

    Parameters
    ----------
    n : int
        The number for which the prime factorization should be computed.

    Returns
    -------
    dict[int, int]
        Mapping of prime factor -> multiplicity.
        (Fix: the original used float division `n /= i`, which turned the
        final residual prime into a float key such as 5.0.)
    """
    prime_factors = {}
    i = 2
    while i ** 2 <= n:
        if n % i:
            i += 1
        else:
            n //= i  # floor division keeps n (and the final key) an int
            prime_factors[i] = prime_factors.get(i, 0) + 1
    if n > 1:
        # Whatever remains is itself prime.
        prime_factors[n] = prime_factors.get(n, 0) + 1
    return prime_factors
def convert_case(text):
    """Convert *text* to lower case."""
    lowered = text.lower()
    return lowered
def application_error(e):
    """Return a custom 500 error as a (body, status) pair."""
    body = 'Application error: {}'.format(e)
    return body, 500
def split_to_bins(A, S=4):
    """Split A into consecutive bins of size S (the final bin may be shorter)."""
    n_bins = (len(A) + S - 1) // S  # ceil(len(A) / S)
    return [A[start:start + S] for start in range(0, n_bins * S, S)]
def format_headers(unformatted_dict):
    """Format header names for display.

    Params:
        unformatted_dict (dict): dict whose 'ModelBase' entry lists raw
            field names.

    Returns:
        list: field names title-cased with underscores turned into spaces.
    """
    return [field.replace('_', ' ').title()
            for field in unformatted_dict['ModelBase']]
def Dot(x, y):
    """Inner product of two iterables (truncates to the shorter one).

    Parameters
    ----------
    x : iterator of varElement or number
    y : iterator of varElement or number

    Returns
    -------
    inner product of x and y
    """
    total = 0
    for a, b in zip(x, y):
        total += a * b
    return total
def common_prefix(n: int) -> str:
    """
    For a given number of subgraphs, return a common prefix that yields
    around 16 subgraphs.

    >>> [common_prefix(n) for n in (0, 1, 31, 32, 33, 512+15, 512+16, 512+17)]
    ['', '', '', '', '1', 'f', '01', '11']
    """
    hex_digits = '0123456789abcdef'
    base = len(hex_digits)
    # Threshold is doubled to lower the probability that no subgraphs
    # match the prefix.
    if n <= 2 * base:
        return ''
    return hex_digits[n % base] + common_prefix(n // base)
def features_to_protein_fasta_file(feature_list, file_name):
    """Write a protein FASTA file from features with amino acid sequences.

    Parameters
    ----------
    feature_list : list
        Feature dicts (keys vary based on available data).
    file_name : str
        Path to output FASTA file.

    Returns
    -------
    int
        Number of features with amino acid sequences written.

    Raises
    ------
    ValueError
        If a feature's annotation is neither "PATRIC" nor "RefSeq".
    """
    written = 0
    with open(file_name, 'w') as handle:
        for feature in feature_list:
            # Features without a protein sequence are skipped entirely.
            if 'aa_sequence' not in feature:
                continue
            annotation = feature['annotation']
            if annotation == 'PATRIC':
                id_key = 'patric_id'
            elif annotation == 'RefSeq':
                id_key = 'protein_id'
            else:
                raise ValueError('Annotation must be either "PATRIC" or "RefSeq"')
            handle.write('>{0}\n{1}\n'.format(feature[id_key], feature['aa_sequence']))
            written += 1
    return written
def only_python_traceback(info, level=9, **kwargs):
    """Show only the Python traceback.

    The simulated traceback is returned for level > 0, otherwise the real
    one; extra keyword arguments are accepted and ignored.
    """
    key = "simulated_python_traceback" if level > 0 else "python_traceback"
    return info[key]
def bottom_val(type):
    """Return the bottom value for a known type name; exit otherwise.

    All known categories (transmission, memory, processing) currently
    share a bottom value of 0.
    """
    bottoms = {"transmission": 0, "memory": 0, "processing": 0}
    if type in bottoms:
        return bottoms[type]
    print("type not found. Exiting.")
    exit()
def defaults(items, expected, *default_values):
    """Pad *items* up to *expected* length with trailing default values.

    Defaults are right-aligned: with k items present, the last
    expected - k entries of default_values are appended. (Fix: the
    original slice had an off-by-one and could pad one element too many.)

    Raises:
        Exception: if items plus defaults cannot reach the expected length.
    """
    if len(items) == expected:
        return items
    if len(items) + len(default_values) < expected:
        raise Exception("Too few items, even with defaults.")
    items = list(items)
    # len(items) - expected is negative: take exactly that many trailing
    # defaults.
    items.extend(default_values[len(items) - expected:])
    return items
def is_dict_equal(d1, d2, keys=None, ignore_none_values=True):
    """Compare two dictionaries for equality.

    :param d1: the first dictionary
    :param d2: the second dictionary
    :param keys: keys to limit the comparison to (optional)
    :param ignore_none_values: whether None-valued entries are ignored
    :return: True if the (filtered) dictionaries are equal, else False
    """
    def _filtered(d):
        # Keep entries restricted to *keys* (when given) and drop
        # None values when ignore_none_values is set.
        return {k: v for k, v in d.items()
                if (keys is None or k in keys)
                and (v is not None or not ignore_none_values)}

    if keys or ignore_none_values:
        return _filtered(d1) == _filtered(d2)
    return d1 == d2
def HHMMSS_to_seconds(string):
    """Convert a colon-separated 'HH:MM:SS' time string to seconds since
    midnight."""
    hours, minutes, seconds = (int(part) for part in string.split(':'))
    return hours * 3600 + minutes * 60 + seconds
def fmt_quil_str(raw_str):
    """Format a raw Quil program string.

    Args:
        raw_str (str): Quil program typed in by user.

    Returns:
        str: The program with leading/trailing whitespace trimmed per line.
    """
    lines = str(raw_str).split('\n')
    return '\n'.join(line.strip() for line in lines)
def list_of_int_from_list_of_str(list_str):
    """Convert ['0001', '0202', ...] to [1, 202, ...]."""
    return list(map(int, list_str))
def is_prime(num):
    """Trial-division primality test. Assumes num > 3.

    Even numbers are rejected immediately; only odd divisors up to
    sqrt(num) are tried.
    """
    if num % 2 == 0:
        return False
    limit = int(num ** 0.5) + 1
    return all(num % divisor for divisor in range(3, limit, 2))
def number_of_words(input):
    """Return the number of whitespace-separated words in a string."""
    words = input.split()
    return len(words)
def partial_kendall(s1,s2,p=0.5): """ Kendall distance with penalty p between two partial rank lists (lists with ties). This could be sped up with more efficient computation of the size of the sets R1, R2, and D. I normalize the distance by dividing by k*(k-1)/2. """ cardD = 0 cardR1 = 0 cardR2 = 0 k = len(s1) for i in range(len(s1)): for j in range(len(s2)): if s1[i] < s1[j] and s2[i] > s2[j]: cardD += 1 elif s1[i] == s1[j] and s2[i] != s2[j]: cardR1 += 1 elif s2[i] == s2[j] and s1[i] != s1[j]: cardR2 += 1 return (2/(k*(k-1)))*(cardD + p*(cardR1 + cardR2))
def is_s3_path(path: str) -> bool:
    """Return True if the path points to an S3 bucket, False otherwise."""
    return path[:5] == "s3://"
def loose_tight_to_list(img_infos, pipeline_dict):
    """Build the training sample list from label information.

    Args:
        img_infos (dict): filename -> label info dict
        pipeline_dict (dict): pipeline information (entry 0 carries the
            "sensitive" flag controlling case preservation)

    Returns:
        list(dict): one sample per text entry, each with filename and
        ann {text, bbox, label}
    """
    samples = list()
    for filename, info in img_infos.items():
        try:
            texts = info['content_ann']['texts']
            if not isinstance(texts, (list, tuple)):
                texts = [texts]
            # Entries whose first text is empty are skipped.
            if not texts[0]:
                continue
            ann = info["content_ann"]
            if 'bboxes' in ann and len(ann['bboxes'][0]) == 8:
                # Use the 8-coordinate polygon if present.
                bboxes = ann['bboxes'][0]
            else:
                # Fall back to the full image extent as the bbox.
                height = info["height"]
                width = info["width"]
                bboxes = [0, 0, width - 1, 0, width - 1, height - 1, 0, height - 1]
        except KeyError as _:
            print(info)
            continue
        for text in texts:
            samples.append({
                'filename': filename,
                'ann': {
                    'text': text if pipeline_dict[0]["sensitive"] else text.lower(),
                    'bbox': bboxes,
                    'label': ann['labels'][0] if 'labels' in ann else -1,
                },
            })
    return samples
def _parse_lsp_head(line: bytes): """Extract the content length from an input line.""" if line.startswith(b"Content-Length: "): _, value = line.split(b"Content-Length: ") value = value.strip() try: return int(value) except ValueError as e: raise ValueError("Invalid Content-Length header: {}".format(value)) from e return None
def filter_by_id(json_blob, lookup):
    """Copy objects with valid source_name properties into a new list.

    Non-relationship objects qualify once per external reference from a
    valid MITRE source with a non-empty external_id (objects lacking the
    expected keys are skipped best-effort). Relationship objects qualify
    when both endpoints are present in *lookup*.
    """
    valid_sources = ['mitre-attack', 'mitre-pre-attack', 'mitre-mobile-attack']
    output = []
    for obj in json_blob:
        if obj['type'] == 'relationship':
            attrs = obj['attributes']
            if attrs['source_ref'] in lookup and attrs['target_ref'] in lookup:
                output.append(obj)
            continue
        try:
            for ext_ref in obj['attributes']['external_references']:
                if ext_ref['source_name'] in valid_sources and ext_ref['external_id']:
                    output.append(obj)
        except KeyError:
            # Best-effort: objects missing expected keys are ignored.
            pass
    return output
def is_import(x: str) -> bool:
    """Whether the line contains an import statement.

    Note the precedence: the ' .' exclusion (relative imports) applies
    only to 'from ' lines, never to 'import ' lines.
    """
    if x.startswith('import '):
        return True
    return x.startswith('from ') and ' .' not in x
def find_best_odds(books):
    """Find the highest odd per outcome column across bookmakers.

    e.g. [[5, 7.7, 1.2], [4, 6.25, 1.6]] produces
    ([5, 7.7, 1.6], [0, 0, 1]) — best odds and the bookmaker index of each.

    Raises:
        ValueError: if fewer than two bookmakers' odds are given.
    """
    if len(books) < 2:
        raise ValueError("Must contain at least two bookers odds.")
    n_outcomes = len(books[0])
    best = [0] * n_outcomes
    book_id = [0] * n_outcomes
    for idx, bookie in enumerate(books):
        for outcome, odd in enumerate(bookie):
            if odd > best[outcome]:
                best[outcome] = odd
                book_id[outcome] = idx
    return (best, book_id)
def gcd_recursion(sign_a, sign_b, r, s, t):
    """Internal recursive step of the extended Euclidean algorithm.

    Remarks:
        See the Q# source for the "_gcd" function at
        https://github.com/Microsoft/QuantumLibraries/blob/master/Canon/src/Math/Functions.qs
    """
    r_1, r_2 = r
    s_1, s_2 = s
    t_1, t_2 = t
    if r_2 == 0:
        # Base case: s_1, t_1 are the Bezout coefficients; re-apply the
        # original signs of the inputs.
        return (s_1 * sign_a, t_1 * sign_b)
    q = r_1 // r_2
    return gcd_recursion(sign_a, sign_b,
                         (r_2, r_1 - q * r_2),
                         (s_2, s_1 - q * s_2),
                         (t_2, t_1 - q * t_2))
def get_map(tokens):
    """Map each unique token to the list of positions where it occurs.

    Returns a dict keyed by token; values are lists of indices into
    *tokens* in ascending order.
    """
    token_map = {}
    for position, token in enumerate(tokens):
        token_map.setdefault(token, []).append(position)
    return token_map
def _collect_layer_metadata(config, layer_config): """Collect metadata for adding it to a layer.""" meta = config.get("common_items", dict()).copy() meta.update(layer_config) if "title" not in meta and "title_pattern" in meta: meta["title"] = meta["title_pattern"] return meta
def word_probabilities(counts, total_spams, total_non_spams, k=0.5):
    """Turn word counts into triplets (w, P(w|spam), P(w|~spam)) with
    add-k (Laplace) smoothing."""
    triplets = []
    for word, (spam, non_spam) in counts.items():
        p_spam = (spam + k) / (total_spams + 2 * k)
        p_ham = (non_spam + k) / (total_non_spams + 2 * k)
        triplets.append((word, p_spam, p_ham))
    return triplets
def argsort(array):
    """Pure-python argsort: indices that would sort *array* along its
    first dimension."""
    indices = list(range(len(array)))
    indices.sort(key=lambda i: array[i])
    return indices
def find_factors(num):
    """Find factors of num, in increasing order.

    Uses divisor pairing up to sqrt(num): O(sqrt n) instead of the
    original O(n) trial loop, with identical results.

    >>> find_factors(10)
    [1, 2, 5, 10]
    >>> find_factors(11)
    [1, 11]
    >>> find_factors(111)
    [1, 3, 37, 111]
    >>> find_factors(321421)
    [1, 293, 1097, 321421]
    """
    # Match the original behavior for num < 2 (it returned [1]).
    if num < 2:
        return [1]
    factors = set()
    d = 1
    while d * d <= num:
        if num % d == 0:
            factors.add(d)
            factors.add(num // d)  # the paired divisor
        d += 1
    return sorted(factors)
# other useful info
# https://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python
def evaluateModel(model, data):
    """Evaluate a model expression against the given datapoints.

    string model: the model to be evaluated ('variable_' prefixes are
        stripped before evaluation).
    dict or recarray data: the variables of the model.

    Remark: need to restrict the global values used for evaluation.
    SECURITY NOTE: this uses eval(); *model* must come from a trusted
    source — never pass untrusted input here.

    return float
    """
    expression = model.replace('variable_', '')
    return eval(expression, globals(), data)
    # return DataFrame.from_records(data).eval(model.replace('variable_', ''))
def scale_dimensions(dic, size=2):
    """Scale every value of *dic* in place (for mac retina displays) and
    return the same dict; values are truncated to int."""
    for key in dic:
        dic[key] = int(dic[key] * size)
    return dic
def dict_setitem(data, item, value):
    """Implement `dict_setitem`: return a copy of *data* with
    data[item] = value; the original dict is left untouched."""
    updated = dict(data)
    updated[item] = value
    return updated
def es_vocal(letra):
    """Decide whether *letra* is a single vowel (either case).

    >>> es_vocal('A')
    True
    >>> es_vocal('ae')
    False
    >>> es_vocal('Z')
    False
    >>> es_vocal('o')
    True

    :param letra: string to test
    :return: bool
    """
    vowels = 'AEIOUaeiou'
    return len(letra) == 1 and letra in vowels
def is_int(arange):
    """Check whether both endpoints of a 2-element range are ints.

    Args:
        arange ([int/float, int/float]): range to test

    Returns:
        bool (note: isinstance means bool endpoints also count as int)
    """
    lo, hi = arange[0], arange[1]
    return isinstance(lo, int) and isinstance(hi, int)
def short(text):
    """Changeset hash.

    Return the short form of a changeset hash: the first 12 hexadecimal
    digits (or the whole string if shorter).
    """
    return text[0:12]
def parse_substring(allele, pred, max_len=None):
    """Take the longest prefix (up to max_len chars) whose characters all
    satisfy *pred*; return (prefix, remainder)."""
    limit = len(allele) if max_len is None else min(max_len, len(allele))
    pos = 0
    while pos < limit and pred(allele[pos]):
        pos += 1
    return allele[:pos], allele[pos:]
def sort_by_username(user):
    """Sort key for students: the 'username' entry."""
    username = user['username']
    return username
def parse_input(input):
    """Separate an interface spec into its name and number.

    e.g. 'Gi1/0/1' becomes ('Gi', '1/0/1') and 'GigabitEthernet1/0/1'
    becomes ('GigabitEthernet', '1/0/1'). Without digits, the number part
    is empty.
    """
    for index, char in enumerate(input):
        if char.isdigit():
            # Split at the first digit.
            return input[:index].strip(' '), input[index:].strip(' ')
    return input.strip(' '), ''
def _check_do_transform(df, reference_im, affine_obj): """Check whether or not a transformation should be performed.""" try: crs = getattr(df, 'crs') except AttributeError: return False # if it doesn't have a CRS attribute if not crs: return False # return False for do_transform if crs is falsey elif crs and (reference_im is not None or affine_obj is not None): # if the input has a CRS and another obj was provided for xforming return True
def verifyParsing(policyNums, policies):
    """Generate a parsing report for debugging.

    Reports the number of sections, the found/parsed counts per section,
    and any policy numbers that were found but not parsed.
    """
    sections = len(policyNums)
    report = f"Found {sections} firewall policy section(s).\n"
    for idx in range(sections):
        report += f"Found {len(policyNums[idx])} and parsed {len(policies[idx])} policies in section {idx}.\n"
    # for x in range(num):
    #     st += f"Policy numbers found in section { x } (in order):\n"
    #     st += ",".join([item[0] for item in policyNums[x]]) + "\n"
    for idx in range(sections):
        missing = set(item[0] for item in policyNums[idx]) - set(item[0] for item in policies[idx])
        if missing:
            report += f"Policies not parsed in section { idx }: "
            report += ",".join(list(missing)) + "\n"
    return report
def replaceCase(caseTypes, str, wordToReplace, replaceWithWord):
    """Substitute a word with different casings.

    @param caseTypes: List of chars: c(orrect case) u(pper case) l(ower case).
    @param str: String to change.
    @param wordToReplace: Word in str to replace.
    @param replaceWithWord: Word to replace wordToReplace with.
    @return: Copy of input string with words replaced.
    """
    result = str
    for mode in caseTypes.lower():
        if mode == "c":
            result = result.replace(wordToReplace, replaceWithWord)
        elif mode == "u":
            result = result.replace(wordToReplace.upper(), replaceWithWord.upper())
        elif mode == "l":
            result = result.replace(wordToReplace.lower(), replaceWithWord.lower())
        else:
            # Unknown mode characters fail loudly (same assert as before).
            assert mode in "ulc"
    return result
def discrete_log(a, b, mod):
    """Return the smallest x > 0 with pow(a, x, mod) == b, or None.

    Baby-step/giant-step variant. Note: works even if a and mod are not
    coprime — the final pow() check guards that case.
    """
    n = int(mod ** 0.5) + 1
    # Baby steps: for each value b * a^j % mod record the largest j <= n.
    baby = {}
    power = 1
    for j in range(1, n + 1):
        power = power * a % mod
        if power == b:
            return j
        baby[b * power % mod] = j
    # Giant steps: find (i, j) with a^(n*i) % mod == b * a^j % mod.
    giant = power  # a^n % mod
    cur = power
    for i in range(2, n + 2):
        cur = cur * giant % mod
        if cur in baby:
            j = baby[cur]
            x = n * i - j
            # Verify; when a and mod share factors a hit can be spurious.
            return x if pow(a, x, mod) == b else None
def _length_checker(length, content): """Helper function to check if a string is shorter than expected length of not. :param int length: Maximum length of an expected string. :param str content: A string to be validated. :return str: A string of error message if validation fails, or an empty string if validation succeeds. """ if len(content) > length: return 'Invalid label: {0} has {1} characters. The maximum is {2}.\n'.format(content, len(content), length) else: return ''
def get_ice_thickness(fjord):
    """Approximate grounding-line ice thickness (m) for a given fjord.

    Unknown fjords fall back to 1500 m with a printed warning.
    """
    # Note: these values are approximated manually by inspecting the ice
    # thickness layer in BedMachine v3 ~10 km in from the terminus and
    # choosing a value at the upper end of gridded values.
    thickness_by_fjord = {"JI": 1500, "KB": 800}
    if fjord in thickness_by_fjord:
        return thickness_by_fjord[fjord]
    print("The current fjord does not have an ice thickness entry - using a default value!")
    return 1500
def make_key(*criteria):
    """Make a string key out of many criteria.

    None entries are dropped; ':' inside a criterion is replaced by '#'
    so the joining ':' stays unambiguous.
    """
    parts = [str(criterion).replace(":", "#")
             for criterion in criteria if criterion is not None]
    return ":".join(parts)
def bubbleSort(x):
    """Bubble-sort *x* in place while counting instrumentation events.

    Counter semantics (unchanged from the original): each outer and each
    inner loop entry costs one assignment; every comparison costs one
    condition; a swap costs two assignments.

    Returns:
        tuple: (sorted array, assignment count, condition count)
    """
    assignments = 0
    conditions = 0
    length = len(x)
    for outer in range(length):
        assignments += 1
        # The last `outer` elements are already in place.
        for inner in range(length - outer - 1):
            assignments += 1
            conditions += 1
            if x[inner] > x[inner + 1]:
                x[inner], x[inner + 1] = x[inner + 1], x[inner]
                assignments += 2
    return x, assignments, conditions
def get_nsamples_image(filename):
    """Get the number of samples encoded in an image filename.

    The count is the last '_'-separated chunk with its '.png' suffix
    removed.

    Args:
        filename ([str]): filename of the expected image

    Returns:
        [int]: number of samples
    """
    last_chunk = filename.rsplit('_', 1)[-1]
    return int(last_chunk.replace('.png', ''))
def count(items: list) -> list:
    """Count occurrences of each unique item, highest count first.

    Args:
        items (list): objects to tally (rows that are lists are converted
            to tuples so they can be hashed).

    Returns:
        list: [item, occurrences] pairs sorted by descending count.
    """
    if isinstance(items[0], list):
        # Lists are unhashable; convert each row to a tuple first.
        items = [tuple(row) for row in items]
    tallies = [[item, items.count(item)] for item in set(items)]
    tallies.sort(key=lambda pair: pair[1], reverse=True)
    return tallies
def searchsorted(arr, N, x):
    """Binary search in sorted *arr* of length N.

    Returns the index of an occurrence of x when present, otherwise the
    insertion point that keeps arr sorted. (Fix: on an exact match the
    original returned the unmodified left bound L, which could point left
    of the matched element.)
    """
    L = 0
    R = N - 1
    while L <= R:
        m = (L + R) // 2
        if arr[m] < x:
            L = m + 1
        elif arr[m] > x:
            R = m - 1
        else:
            return m  # exact match
    return L  # insertion point
def successful_task(x, y):
    """Add x and y.

    NOTE(review): the arguments are currently ignored and a fixed payload
    is returned — confirm whether this placeholder is intentional.
    """
    payload = {"sample": "json"}
    return payload
def gauss(pair, x):
    """Chi-squared contribution of a nuisance parameter.

    @param pair: (central value, uncertainty)
    @param x: the nuisance parameter value
    @returns ((central - x) / uncertainty) ** 2
    """
    central, sigma = pair[0], pair[1]
    pull = (central - x) / sigma
    return pull ** 2
def as_variable(identifier: str) -> str:
    """
    Translate the identifier of a mapry composite to a variable name in C++.

    :param identifier: mapry identifier of a composite
    :return: translated to a C++ variable name

    >>> as_variable('Some_URL_class')
    'some_url_class'
    """
    return str.lower(identifier)
def sub_to_ind(n, i, j):
    """Map coordinates (i, j), i != j, of an n x n symmetric array to the
    consecutive index of the flattened upper triangle.

    This is slimmed down: it won't reject i > n, j > n, or negatives;
    only the intermediate offset is asserted non-negative, which could
    otherwise be problematic with wrapped indices.

    Parameters
    ----------
    n : int
        Dimension of the square array.
    i, j : int
        Coordinates.

    Returns
    -------
    int
    """
    if i == j:
        raise Exception("Indices cannot be the same.")
    # Normalize so row < col; the result is symmetric in (i, j).
    row, col = (i, j) if i < j else (j, i)
    offset = 0
    for l in range(1, row + 2):
        offset += n - l
    assert offset >= 0
    return offset - n + col
def invert_label_keyed_string_dict(dictionary):
    """Invert a {flag: [labels]} dict into {label: "flags joined by space"}.

    Bazel does not currently support a dictionary attr where keys are
    strings and values are a list of labels.

    Example:
        {"-y": [":abc", ":def"], "-x": [":abc"]}
        --> {":abc": "-y -x", ":def": "-y"}
    """
    inverted = dict()
    for flag, labels in dictionary.items():
        for label in labels:
            if label in inverted:
                # Later flags are appended, preserving iteration order.
                inverted[label] = inverted[label] + " {}".format(flag)
            else:
                inverted[label] = flag
    return inverted
def time_string(seconds):
    """Render a number of seconds as hours, minutes, and seconds.

    Zero components are omitted entirely (so 0 yields an empty string).
    """
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    parts = []
    if hours > 0:
        parts.append("%dh " % hours)
    if minutes > 0:
        parts.append("%dm " % minutes)
    if seconds > 0:
        parts.append("%ds" % seconds)
    return "".join(parts)
def sort_array(source_array):
    """Solution to the Codewars "Sort the odd" kata:
    https://www.codewars.com/kata/578aa45ee9fd15ff4600090d/train/python

    Return the array with odd numbers sorted ascending while every even
    number stays at its original position.
    """
    # Sorted odd values, consumed left-to-right as odd slots are refilled.
    ordered_odds = iter(sorted(value for value in source_array if value % 2 == 1))
    return [value if value % 2 == 0 else next(ordered_odds) for value in source_array]
def review_features(review):
    """Feature engineering stub for product reviews.

    TODO: build a real feature dictionary (sentiment analysis, product id,
    helpfulness, etc.). Currently returns a fixed placeholder and ignores
    the review argument.
    """
    return {"sample": 5}
def esc2(string):
    """Return *string* with every double quote prefixed by a backslash."""
    return string.replace('"', '\\"')
def to_rating(value):
    """Convert the given value to a valid numerical skill rating.

    Args:
        value (str, int, or float): The value to convert.

    Returns:
        float: The converted value.

    Raises:
        ValueError: If ``value`` cannot be converted to a float, or if the
            converted value is less than zero.
    """
    # Exact type check (not isinstance) so bool — a subclass of int — is
    # still rejected, matching the original behavior.
    if type(value) not in (int, float, str):
        raise ValueError("Cannot convert %s value '%s' to a rating. Only str and numerical types are allowed." % (type(value), value))
    try:
        rating = float(value)
    except ValueError as e:
        # Chain the cause properly instead of stuffing it into the args tuple.
        raise ValueError("Failed to convert '%s' to a numerical rating" % value) from e
    if rating < 0:
        raise ValueError("Invalid rating: '%s'. Ratings must be larger than or equal to zero." % value)
    return rating
def clean_up_filename(file_name):
    """Return the file title with surrounding whitespace removed and each
    interior space turned into an underscore."""
    stripped = file_name.strip()
    return "_".join(stripped.split(" "))
def add_subtracted_nan_ranges(nan_ranges, ranges):
    """Shift signal/noise spike ranges to account for masked-out (NaN) regions.

    For each NaN region, every range whose (current) lower bound lies above
    the region's start is shifted up by the region's width. Mutates *ranges*
    in place and also returns it.
    """
    for nan_lower, nan_upper in nan_ranges:
        shift = nan_upper - nan_lower
        for idx, (low, high) in enumerate(ranges):
            if low > nan_lower:
                ranges[idx] = [low + shift, high + shift]
    return ranges
def sum_sub_image(sub_image):
    """Given a 3x3 matrix, return the floor of the mean of its elements.

    NOTE(review): despite the name and the original docstring ("find the sum
    of all its elements"), the result is divided by 9 with floor division, so
    this is the integer mean, not the sum — confirm intent with callers.
    """
    total = sum(sub_image[row][col] for row in range(3) for col in range(3))
    return total // 9
def clusters_to_assignments(clusters):
    """Turn a list of clusters into a list of cluster assignments.

    Parameters
    ----------
    clusters : list
        List of clusters. Every cluster is a list of nodes (dtype=int).
        The nodes across all clusters are expected to be exactly 0..N-1.

    Returns
    -------
    assignments : list
        List of length N (total number of nodes); the ith element is the
        index of the cluster to which node i belongs.

    Notes
    -----
    Fix: the original simply appended cluster indices in iteration order, so
    assignments[i] was only node i's cluster when clusters happened to list
    nodes in sorted order. Now the result is indexed by node id, matching the
    documented contract.
    """
    total_nodes = sum(len(cluster) for cluster in clusters)
    assignments = [None] * total_nodes
    for cluster_idx, cluster in enumerate(clusters):
        for node in cluster:
            assignments[node] = cluster_idx
    return assignments
def create_url(user_id, tweet_type):
    """Build the Twitter API v2 timeline URL for a user.

    :param user_id: numeric user id to interpolate into the URL
    :param tweet_type: "mention" for the mentions timeline; anything else
        selects the normal tweets timeline
    :return: fully formed endpoint URL
    """
    if tweet_type == "mention":
        endpoint = "mentions"
    else:
        endpoint = "tweets"
    return "https://api.twitter.com/2/users/{}/{}".format(user_id, endpoint)
def is_formula(obj):
    """Return True if *obj* is a Formula, i.e. it carries the
    ``_formula_flag`` attribute used as the marker."""
    try:
        obj._formula_flag
    except AttributeError:
        return False
    return True
def __read_sequence_ids(data):
    """Read SequenceIDs.txt (file included in OrthoFinder output) and parse
    it into a dict.

    Fix: split with maxsplit=1 so sequence names that themselves contain
    ': ' no longer raise "too many values to unpack".

    :param data: list of lines in SequenceIDs.txt
    :return: dict with key: OrthoFinder ID and value: the proper name
    """
    output = {}
    for line in data:
        # Skip blank/whitespace-only lines.
        if line.strip() != '':
            key, value = line.split(': ', 1)
            output[key] = value
    return output
def level_up_reward(level, new_level):
    """Total coins rewarded when upgrading from *level* to *new_level*.

    Each level gained awards 5 coins plus one bonus coin per 15 levels
    already reached (5 + current_level // 15).
    """
    return sum(5 + step // 15 for step in range(level, new_level))
def _strip_jscomments(string):
    """Strip JS comment decoration from *string*.

    Given the contents that follow the "@tags:" annotation in a JS file,
    return text that can be converted to YAML: every line loses its leading
    tabs/spaces and then any leading '*' or '/' characters (covering both
    block-comment "*" prefixes and "//" line comments).

    e.g.
        [ "tag1",         # double quoted
        * 'tag2'          # single quoted
        *                 # line with only a comment
        * , tag3          # no quotes
        * tag4,           # trailing comma
        * ]
    """
    cleaned = [raw.lstrip("\t ").lstrip("*/") for raw in string.splitlines()]
    return "\n".join(cleaned)
def Pf(f, P_x0, f_c, Q, P_detector):
    """Model for the position power spectral density :math:`P_x(f)` built
    from the cantilever parameters plus the detector noise floor.

    f
        Frequency, the independent variable.
    P_x0
        The zero-frequency power spectral density of position fluctuations.
    f_c
        The cantilever resonance frequency.
    Q
        The quality factor of the cantilever.
    P_detector
        The detector noise floor.
    """
    # Damped-oscillator response denominator; operation order kept identical
    # to the original to preserve floating-point results exactly.
    denominator = (f**2 - f_c**2)**2 + f**2 * f_c**2 / Q**2
    return P_x0 * f_c**4 / denominator + P_detector
def get_comp_optional_depends_text(conditions_list, config_file):
    """ format optional Config.in string, like
    if (((cond1 || cond2) && cond3) || (cond4 || cond5))
    source $AOS_SDK_PATH/core/cli/Config.in
    endif
    condition_list is [[cond1, cond2, and cond3], [cond4, cond5]]
    config_file is filename of Config.in
    """
    line = "if ("
    # Accumulates the full condition expression; each inner list becomes one
    # parenthesized group, groups are joined with " || ".
    conds_line = ""
    for conds in conditions_list:
        conds_line += "("
        # Builds the current group's expression; every appended term carries a
        # trailing " || " / " && " (4 chars) that [:-4] strips when consumed.
        cond_line = ""
        for cond in conds:
            if cond == "and":
                # "and" marker: wrap everything accumulated so far in parens
                # and switch the pending connector to " && ".
                cond_line = "(" + cond_line[:-4] + ") && "
            else:
                cond_line += "%s || " % cond
        # Drop the dangling 4-char connector before closing the group.
        conds_line += cond_line[:-4]
        conds_line += ") || "
    # Drop the trailing " || " left after the final group.
    conds_line = conds_line[:-4]
    line += conds_line
    line += ")\n" + 'source "$AOS_SDK_PATH/%s"\n' % config_file + "endif\n"
    # Returns both the full Config.in snippet and the bare condition string.
    return line, conds_line
def sanitize_field(field_text):
    """Replace characters that might mess up table layout (carriage return,
    newline, tab) with single spaces."""
    sanitized = field_text
    for forbidden in ('\r', '\n', '\t'):
        sanitized = sanitized.replace(forbidden, " ")
    return sanitized
def _get_filter_from_job(job):
    """Create a job filter from |job|: a one-element list of its string form,
    or None when |job| is falsy."""
    if not job:
        return None
    return [str(job)]
def cubeEach(input_matrix):
    """Return the sum of the cubes of the elements of *input_matrix*
    (a 1-D array)."""
    return sum(element ** 3 for element in input_matrix)
def count_answers(text_input):
    """aoc day6 part 1: count the number of questions for which anyone
    answered 'yes', summed over all groups of the puzzle input.

    Each group is a string of answered letters; a letter counts once per
    group it appears in.
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    return sum(1 for group in text_input for letter in alphabet if letter in group)