def human_readable_bytes(value, digits=2, delim="", postfix=""):
    """Return a human-readable bytes value as a string.

    Args:
        value (int): the bytes value.
        digits (int): how many decimal digits to use.
        delim (str): string to add between value and unit.
        postfix (str): string to add at the end.

    Returns:
        str: the human-readable version of the bytes.
    """
    chosen_unit = "B"
    for unit in ("KiB", "MiB", "GiB", "TiB"):
        if value > 1000:
            value /= 1024
            chosen_unit = unit
        else:
            break
    return f"{value:.{digits}f}" + delim + chosen_unit + postfix

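# Usage sketch (illustrative, not part of the original collection). Note the
# loop switches units once the value exceeds 1000 even though it divides by 1024:
assert human_readable_bytes(1536, delim=" ") == "1.50 KiB"
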
def fromdB(x):
    """Translate from dB values to linear values."""
    return 10.0 ** (0.1 * x)

def shard(items, num_shards):
    """Split items into num_shards groups."""
    sharded = []
    num_per_shard = len(items) // num_shards
    start = 0
    for _ in range(num_shards):
        sharded.append(items[start:start + num_per_shard])
        start += num_per_shard
    remainder = len(items) % num_shards
    start = len(items) - remainder
    for i in range(remainder):
        sharded[i].append(items[start + i])
    assert sum(len(fs) for fs in sharded) == len(items)
    return sharded

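# Usage sketch (illustrative, not part of the original collection): leftover
# items are appended to the earliest shards, one each.
assert shard(list(range(10)), 3) == [[0, 1, 2, 9], [3, 4, 5], [6, 7, 8]]
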
def count(matches):
    """Count occurrences of taxa in a map.

    Parameters
    ----------
    matches : dict of str or dict
        Query-to-taxon(a) map.

    Returns
    -------
    dict
        Taxon-to-count map.
    """
    res = {}
    for taxa in matches.values():
        try:
            # unique match (scalar)
            res[taxa] = res.get(taxa, 0) + 1
        except TypeError:
            # multiple matches (dict of subject : count), to be normalized by
            # total match count
            k = 1 / sum(taxa.values())
            for taxon, n in taxa.items():
                res[taxon] = res.get(taxon, 0) + n * k
    return res

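# Usage sketch (illustrative, not part of the original collection): an
# ambiguous match is split proportionally across its candidate taxa.
assert count({"q1": "a", "q2": {"a": 1, "b": 1}}) == {"a": 1.5, "b": 0.5}
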
def _checkerror(fulloutput):
    """Check the full output for known strings and plausible fixes to the error.

    Future: add items to `edict` where the key is a unique string contained in
    the offending output, and the data is the recommended solution to resolve
    the problem.
    """
    edict = {
        'multiply': 'NOTE: you might(?) need to clean the `tmp/` folder!',
        'already defined': 'NOTE: you probably (might?) need to clean the `tmp/` folder!',
        'unresolved externals': ('NOTE: consider recompiling the linked libraries to '
                                 'have the correct name mangling for cl.exe: '
                                 'ifort: /names:lowercase /assume:underscore '),
        "KeyError: 'void'": ('There may be an issue with public/private function '
                             'definitions or a missing variable definition in the last '
                             'function listed above. For the first error consider using '
                             'the parameter `functiondict` or checking to ensure all '
                             'module functions are public... For the second error, check '
                             'that all of the parameters in the subroutine are defined'),
        "No such file or directory": ('There may be a space in the path to one of the '
                                      'source code or library folders'),
        "LINK : fatal error LNK1104: cannot open file": ('The pyd is currently in use, '
                                                         'restart any kernels using it!'),
    }
    # iterate through the keys in the error dictionary and see if the key is
    # in the full output
    extramessage = ''
    for error_key in edict:
        if error_key in fulloutput:
            extramessage = edict[error_key]
    return extramessage

def nth_even(n):
    """Return the nth even number."""
    return (n * 2) - 2

def depth_to_qid_col_name(depth):
    """Derive the proper name of the qid column for depth-based assignment."""
    return "sub_" * depth + "qid"

def spec_v1(app):
    """A spec loader for jupyter_lsp."""
    return {
        "robotframework_ls": dict(
            version=1,
            argv=["robotframework_ls"],
            languages=["robotframework", "plain"],
            mime_types=["text/x-robotframework"],
            urls=dict(
                home="https://github.com/robocorp/robotframework-lsp",
                issues="https://github.com/robocorp/robotframework-lsp/issues",
            ),
        )
    }

def watch_for_pystol_timeouts(stop):
    """Watch for actions with timeouts.

    This method will listen for custom objects that time out.
    """
    # NOTE: stub implementation; returns on the first iteration.
    while True:
        return True

def idx2xy(idx):
    """Convert an index to an x-y coordinate of a Cell."""
    return [idx % 9 + 1, idx // 9 + 1]

def fibonacci_iterative(n):
    """Iterative implementation of the fibonacci function.

    Time: O(n)
    """
    last, curr = 0, 1
    for _ in range(n):
        last, curr = curr, last + curr
    return last

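# Usage sketch (illustrative, not part of the original collection):
assert fibonacci_iterative(10) == 55  # 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55
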
def is_compliant(path: str) -> bool:
    """Whether the "{...}" are closed."""
    unclosed_count = 0
    for c in path:
        if c == "{":
            unclosed_count += 1
        elif c == "}":
            unclosed_count -= 1
        # count("}") > count("{")
        if unclosed_count < 0:
            return False
    return unclosed_count == 0

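# Usage sketch (illustrative, not part of the original collection):
assert is_compliant("/users/{id}/posts")
assert not is_compliant("/users/{id")   # unclosed brace
assert not is_compliant("/users/id}{")  # closes before it opens
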
def apk(actual, predicted, k=7):
    """Compute the average precision at k.

    This function computes the average precision at k between two lists of
    items.

    Parameters
    ----------
    actual : list
        A list of elements that are to be predicted (order doesn't matter)
    predicted : list
        A list of predicted elements (order does matter)
    k : int, optional
        The maximum number of predicted elements

    Returns
    -------
    score : double
        The average precision at k over the input lists
    """
    if len(predicted) > k:
        predicted = predicted[:k]

    score = 0.0
    num_hits = 0.0

    for i, p in enumerate(predicted):
        if p in actual and p not in predicted[:i]:
            num_hits += 1.0
            score += num_hits / (i + 1.0)

    if not actual:
        return 0.0

    return score / min(len(actual), k)

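# Usage sketch (illustrative, not part of the original collection): the hit at
# rank 1 contributes 1/1 and the hit at rank 3 contributes 2/3, averaged over
# min(len(actual), k) = 3.
assert abs(apk([1, 2, 3], [1, 4, 2], k=3) - (1.0 + 2.0 / 3.0) / 3.0) < 1e-12
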
def chomp(s):
    """Return string without terminal newline if present."""
    return s[:-1] if s.endswith('\n') else s

def extract_github_owner_and_repo(github_page):
    """Extract only owner and repo name from GitHub page.

    e.g. https://www.github.com/psf/requests -> psf/requests

    Args:
        github_page - a reference, e.g. a URL, to a GitHub repo

    Returns:
        str: owner and repo joined by a '/'
    """
    if github_page == "":
        return ""

    # split on github.com
    split_github_page = github_page.split("github.com")

    # take portion of URL after github.com and split on slashes
    github_url_elements = split_github_page[1].split("/")

    # rejoin by slash owner and repo name
    github_owner_and_repo = ("/").join(github_url_elements[1:3])

    # strip off new line characters
    github_owner_and_repo = github_owner_and_repo.strip("\n")

    return github_owner_and_repo

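# Usage sketch (illustrative, not part of the original collection):
assert extract_github_owner_and_repo(
    "https://www.github.com/psf/requests") == "psf/requests"
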
def check_auth(username, password):
    """This function is called to check if a username password combination is
    valid."""
    return username == 'admin' and password == 'secret'

def jacobian(adfuns, advars):
    """Calculate the Jacobian matrix.

    Parameters
    ----------
    adfuns : array
        An array of AD objects (best when they are DEPENDENT AD variables).
    advars : array
        An array of AD objects (best when they are INDEPENDENT AD variables).

    Returns
    -------
    jac : 2d-array
        Each row is the gradient of each ``adfun`` with respect to each
        ``advar``, all in the order specified for both.

    Example
    -------
    ::

        >>> x, y, z = adnumber([1.0, 2.0, 3.0])
        >>> u, v, w = x + y + z, x*y/z, (z - x)**y
        >>> jacobian([u, v, w], [x, y, z])
        [[ 1.0     ,  1.0     ,  1.0     ],
         [ 0.666666,  0.333333, -0.222222],
         [-4.0     ,  2.772589,  4.0     ]]

    """
    # Test the dependent variables to see if an array is given
    try:
        adfuns[0]
    except (TypeError, AttributeError):  # if only one dependent given
        adfuns = [adfuns]

    # Test the independent variables to see if an array is given
    try:
        advars[0]
    except (TypeError, AttributeError):
        advars = [advars]

    # Now, loop through each dependent variable, iterating over the
    # independent variables, collecting each derivative, if it exists
    jac = []
    for adfun in adfuns:
        if hasattr(adfun, 'gradient'):
            jac.append(adfun.gradient(advars))
        else:
            jac.append([0.0] * len(advars))
    return jac

def split_list(l, N):
    """Split list l into N sublists of equal size.

    Assumes len(l) is divisible by N; trailing items are dropped otherwise.
    """
    step = int(len(l) / N)
    div_points = range(0, len(l) + 1, step)
    return [l[div_points[i]:div_points[i + 1]] for i in range(N)]

def _extract_group(encr_text, fst_group_pos, snd_group_pos, min_group_len):
    """Extract the largest group of characters that may match at each position.

    ARGUMENT NOTES:
        min_group_len -- The min length of the group

    RETURN NOTES:
        If the group has no minimum size, None. Otherwise, the following
        tuple: (fst_group_pos, snd_group_pos, group_str)

    USAGE:
        >>> _extract_group('CSASTPKVSIQUTGQUCSASTPIUAQJB', 0, 16, 3)
        (0, 16, 'CSASTP')
    """
    old_fst_group_pos, old_snd_group_pos = fst_group_pos, snd_group_pos
    group = ""
    while encr_text[fst_group_pos] == encr_text[snd_group_pos]:
        group += encr_text[fst_group_pos]
        fst_group_pos += 1
        snd_group_pos += 1
        if fst_group_pos >= len(encr_text) or snd_group_pos >= len(encr_text):
            break
    if not group or len(group) < min_group_len:
        return None
    return (old_fst_group_pos, old_snd_group_pos, group)

def _join_ljust(words, width=9):
    """Join list of str to fixed width, left-justified."""
    return ' '.join(s.ljust(width) for s in words).strip()

def force_string(val=None):
    """Force a string representation of an object.

    Args:
        val: object to parse into a string

    Returns:
        str: String representation
    """
    if val is None:
        return ''
    if isinstance(val, list):
        newval = [str(x) for x in val]
        return ';'.join(newval)
    if isinstance(val, str):
        return val
    return str(val)

def none_to_str(item: None) -> str:
    """Convert None to the string 'None'.

    Args:
        item (None): the value to convert.

    Returns:
        str: the string 'None'.
    """
    return 'None'

def is_nt(path_in: str):
    """Check if the path is a Windows path (i.e. has a drive letter such as
    'C:')."""
    return path_in[1] == ":"

def merge_bytes(payload_list: list) -> int:
    """Build an 8-byte value from a byte list.

    :param payload_list: Byte list (max size is 8)
    :return: the bytes combined into a single integer
    """
    while len(payload_list) < 8:
        payload_list.append(0x00)
    result = payload_list[0]
    for i in range(1, 8):
        result = (result << 8) | payload_list[i]
    return result

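# Usage sketch (illustrative, not part of the original collection): shorter
# lists are zero-padded on the right, so the given bytes land in the most
# significant positions.
assert merge_bytes([0x12, 0x34]) == 0x1234000000000000
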
def transpose(in_data, keys, field):
    """Turn a list of dicts into a dict of lists.

    Parameters
    ----------
    in_data : list
        A list of dicts which contain at least one dict. All of the inner
        dicts must have at least the keys in `keys`
    keys : list
        The list of keys to extract
    field : str
        The field in the outer dict to use

    Returns
    -------
    transpose : dict
        The transpose of the data
    """
    out = {k: [None] * len(in_data) for k in keys}
    for j, ev in enumerate(in_data):
        dd = ev[field]
        for k in keys:
            out[k][j] = dd[k]
    return out

def crc_chain(in_text, in_crc=0x00):
    """Compute the CRC for the given 8-bit text with a given CRC8.

    Keyword arguments:
    in_text -- (unsigned 8-bit integer) input text for which CRC is to be
               generated
    in_crc  -- (unsigned 8-bit integer) initial CRC8 value

    Return:
    (unsigned 8-bit integer) CRC8 value for the given 8-bit text
    """
    crc = in_crc
    crc ^= in_text
    for _ in range(8):
        # shift left, applying polynomial 0x31 when the top bit is set
        crc = (((crc << 1) & 0xFF) ^ 0x31) if crc & 0x80 else ((crc << 1) & 0xFF)
    return crc

def _make_filter_string(filters):
    """Create a filter string used to modify a SignalFx query.

    :param filters: a list of (filter_name, value) tuples
    :returns: a SignalFlow filter string --
        'filter("filter_1", "value_1") and filter("filter_2", "value_2")'
    """
    if not filters:
        return "None"
    fstring = ""
    for name, value in filters:
        fstring += f'filter("{name}", "{value}") and '
    return fstring[:-5]

def bytes2human(n):
    """Convert a byte count to a human-readable string.

    e.g. 2199023255552 bytes --> '2.0T'

    :param n: memory size to prettify
    :return: human-readable size string
    """
    symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    prefix = {}
    for i, s in enumerate(symbols):
        prefix[s] = 1 << (i + 1) * 10
    for s in reversed(symbols):
        if n >= prefix[s]:
            value = float(n) / prefix[s]
            return '%.1f%s' % (value, s)
    return "%sB" % n

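# Usage sketch (illustrative, not part of the original collection): values
# below 1024 fall through to the plain-bytes branch.
assert bytes2human(2199023255552) == '2.0T'
assert bytes2human(512) == '512B'
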
def delete_duplicates(dicts):
    """Delete duplicate dictionaries in a list."""
    return [dict(t) for t in {tuple(d.items()) for d in dicts}]

def rename_category_for_flattening(category, category_parent=""):
    """Tidy name of passed category by removing extraneous characters such as
    '_' and '-'.

    :param category: string to be renamed (namely, a category of crime)
    :param category_parent: optional string to insert at the beginning of the
        string (in addition to other edits)
    :return: new string name for category passed
    """
    if category_parent == "":
        return (category.lower().replace(" ", "_").replace("/", "")
                .replace("(", "").replace(")", "").replace(",", "")
                .replace(";", "_").replace("-", ""))
    return category_parent + "_" + (category.lower().replace(" ", "_")
                                    .replace("/", "").replace("(", "")
                                    .replace(")", "").replace(",", "")
                                    .replace(";", "").replace("-", ""))

def prod2Gaussian(mu1, var1, mu2, var2):
    """Product of two Gaussian distributions with different mean and variance
    values."""
    var12 = 1.0 / (1.0 / var1 + 1.0 / var2)
    mu12 = var12 * (mu1 / var1 + mu2 / var2)
    C12 = (mu2 - mu1) * (mu2 - mu1) / (var1 + var2)
    return mu12, var12, C12

def parse_prefix(identifier):
    """Parse an identifier such as a|c|le|d|li|re|or|AT4G00480.1 and return a
    tuple of the prefix strings (separated at '|') and the suffix (AGI
    identifier).
    """
    pf, id = (), identifier
    if "|" in identifier:
        pf, id = tuple(identifier.split("|")[:-1]), identifier.split("|")[-1]
    return pf, id

def jacobi(numerator: int, denominator: int) -> int:
    """Compute the Jacobi symbol."""
    if denominator <= 0 or denominator & 1 == 0:
        raise ValueError('Jacobi parameters are out of function domain')
    j_symbol: int = 1
    numerator %= denominator
    while numerator:
        while numerator & 1 == 0:
            numerator = numerator // 2
            if denominator % 8 in (3, 5):
                j_symbol *= -1
        numerator, denominator = denominator, numerator
        if numerator % 4 == denominator % 4 == 3:
            j_symbol *= -1
        numerator %= denominator
    if denominator == 1:
        return j_symbol
    return 0

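# Usage sketch (illustrative, not part of the original collection): 3 is a
# quadratic non-residue mod 7, so the symbol is -1; 2 is a residue mod 7.
assert jacobi(3, 7) == -1
assert jacobi(2, 7) == 1
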
def add_grch37(genomes, ftp_link):
    """gencode's GRCh37 is filed with a unique system."""
    # no mouse in genomes yet
    latest_human = f"GRCh{max(int(g[-2:]) for g in genomes)}"
    latest_annot = genomes[latest_human]["annotations"][0]
    release = [r for r in latest_annot.split("/") if "release_" in r][0][-2:]
    genomes["GRCh37"] = {
        "annotations": [
            f"{ftp_link}/Gencode_human/release_{release}/GRCh37_mapping/"
            f"gencode.v{release}lift37.annotation.gtf.gz"
        ],
        "taxonomy_id": 9606,
        "species": "Homo sapiens",
        "text_search": "human",
    }
    return genomes

def count_no_alphabets(POS):
    """Calculate the number of variables used in a POS expression."""
    i = 0
    no_var = 0
    # As the expression is standard, the total number of variables equals the
    # number of alphabetic characters before the first '.' character.
    while POS[i] != '.':
        # check if the character is alphabetic
        if POS[i].isalpha():
            no_var += 1
        i += 1
    return no_var

def slice_string_for_completion(string):
    """Split a string into significant parts for use in completion.

    Example: "University of Paris 13" =>
        "University of Paris 13", "of Paris 13", "Paris 13", "13"

    This is useful to enable autocompletion starting from any part of a name.
    If we just use the name directly in the ES completion type, it will only
    return options that match on the first characters of the whole string,
    which is not always suitable.
    """
    parts = [part for part in string.split(" ") if part != ""]
    return [" ".join(parts[index:]) for index, _ in enumerate(parts)]

def crearEncabezado(elNombreCabezado, listaLanzamientos):
    """Helper function that returns a list of header names.

    @param elNombreCabezado: header name (str)
    @param listaLanzamientos: list of launches

    Returns a list of headers.
    """
    listaEncabezado = []
    nombreEncabezado = elNombreCabezado
    for i in range(len(listaLanzamientos) + 1):
        nombreEncabezado = nombreEncabezado + " " + str(i)
        listaEncabezado.append(nombreEncabezado)
        nombreEncabezado = elNombreCabezado
    return listaEncabezado

def recall(actual, predicted, k=10):
    """Fraction of the actual items that appear in the top-k predictions."""
    m = len(actual)
    if m == 0:
        return -1
    actual = set(actual)
    hit = 0.
    for p in predicted[:k]:
        if p in actual:
            hit += 1
    return hit / m

def compute_figure_score(target, result):
    """Compute the score corresponding to the found result, knowing that
    target was supposed to be found.
    """
    if target == result:
        return 10
    elif abs(target - result) == 1:
        return 8
    elif abs(target - result) == 2:
        return 7
    elif abs(target - result) == 3:
        return 6
    elif abs(target - result) == 4:
        return 5
    elif 5 <= abs(target - result) <= 6:
        return 4
    elif 7 <= abs(target - result) <= 8:
        return 3
    elif 9 <= abs(target - result) <= 10:
        return 2
    elif abs(target - result) <= 100:
        return 1
    else:
        return 0

def construct_left_right_pattern(points, gcd):
    """Build two dictionaries used for finding the next interval when
    executing the left or right moving operation.

    Args:
        points (list of distinct points in the dataset):
        gcd (the greatest common divisor in the program):

    Returns:
        two dictionary objects
    """
    points = list(points)
    points.sort()
    left_end_point = points[0]
    right_end_point = points[0] + gcd
    pattern = set()
    # the pattern's range is [x, x+gcd]; all other points will fall into this
    # range
    for item in points:
        tmp = item
        while True:
            if left_end_point <= tmp <= right_end_point:
                pattern.add(tmp)
                break
            if tmp > right_end_point:
                tmp -= gcd
            else:
                tmp += gcd
    pattern = list(pattern)
    pattern.sort()

    intervals = []
    for i in range(len(pattern) - 1):
        intervals.append(pattern[i + 1] - pattern[i])

    right_interval_search_dict = dict()
    pattern = []
    for i in range(len(intervals)):
        pattern.append(str(intervals[i]))
        if i == len(intervals) - 1:
            right_interval_search_dict["#".join(pattern)] = intervals[0]
        else:
            right_interval_search_dict["#".join(pattern)] = intervals[i + 1]

    left_interval_search_dict = dict()
    pattern = []
    for i in range(len(intervals) - 1, -1, -1):
        pattern = [str(intervals[i])] + pattern
        if i == 0:
            left_interval_search_dict["#".join(pattern)] = intervals[len(intervals) - 1]
        else:
            left_interval_search_dict["#".join(pattern)] = intervals[i - 1]

    if len(left_interval_search_dict) == 0:
        left_interval_search_dict["1.0"] = 1
    if len(right_interval_search_dict) == 0:
        right_interval_search_dict["1.0"] = 1

    return left_interval_search_dict, right_interval_search_dict

def wbi(b2, b4):
    """Water Body Index (Domenech and Mallet, 2014).

    .. math:: WBI = (b2 - b4) / (b2 + b4)

    :param b2: Blue.
    :type b2: numpy.ndarray or float
    :param b4: Red.
    :type b4: numpy.ndarray or float

    :returns WBI: Index value

    .. Tip::
        Domenech, E., Mallet, C. 2014. Change Detection in High resolution
        land use/land cover geodatabases (at object level). EuroSDR official
        publication, 64.
    """
    WBI = (b2 - b4) / (b2 + b4)
    return WBI

def rayleigh_range(w0, k):
    """Compute the Rayleigh range, which is the distance along the propagation
    direction of a beam from the waist to the place where the area of the
    cross section is doubled.

    Args:
        w0: waist radius of beam
        k: wave number in the direction of propagation of beam

    Returns:
        rayleigh range
    """
    # z_R = pi * w0**2 / lambda = k * w0**2 / 2
    return k * w0**2 / 2

def script_guard(line):
    """Detect the idiomatic trick C{if __name__ == "__main__":}"""
    return (len(line) == 5
            and line[1][1] == '__name__'  # this is the most selective
            and line[0][1] == 'if'
            and line[2][1] == '=='
            and line[4][1] == ':'
            and line[3][1][1:-1] == '__main__')

def ceil(n: float) -> int:
    """Calculate the smallest integer >= n.

    :param n: any number
    :return: the smallest integer >= n
    """
    if n.is_integer() or n < 0:
        # int() truncates toward zero, which is already the ceiling for
        # negative non-integers
        return int(n)
    return int(n) + 1

def _if_unmodified_since_passes(last_modified, if_unmodified_since):
    """Test the If-Unmodified-Since comparison as defined in section 3.4 of
    RFC 7232.
    """
    return last_modified and last_modified <= if_unmodified_since

def mean_percentage_error(y_true, y_pred):
    """Calculate the mean percentage error (MPE).

    :param y_true: list of real numbers, true values
    :param y_pred: list of real numbers, predicted values
    :return: mean percentage error
    """
    # initialize error at 0
    error = 0
    # loop over all samples in true and predicted list
    for yt, yp in zip(y_true, y_pred):
        # calculate percentage error and add to error
        error += (yt - yp) / yt
    # return mean percentage error
    return error / len(y_true)

def write_save(file, message):
    """Append text to a new or existing file."""
    with open(file, "a", encoding="utf8") as output:
        output.write(message + '\n')
    return 1

def _uidXform(line):
    """Parse a line of the response to a UIDL command.

    The line from the UIDL response consists of a 1-based message number
    followed by a unique id.

    @type line: L{bytes}
    @param line: A non-initial line from the multi-line response to a UIDL
        command.

    @rtype: 2-L{tuple} of (0) L{int}, (1) L{bytes}
    @return: The 0-based index of the message and the unique identifier
        for the message.
    """
    index, uid = line.split(None, 1)
    return int(index) - 1, uid

def lowercaseAbout(about):
    """Lowercase about values, leaving URLs alone.

    @param about: A C{unicode} about value.
    @return: An appropriately lowercased about value.
    """
    if not (about.startswith('http://') or about.startswith('https://')):
        return about.lower()
    return about

def load_mlp_generateSamples(samples, pos_index, hist_index, config):
    """Load the pre-generated samples in the mlp_call or mlp_call_grad routine
    by index.

    Parameters
    ----------
    samples: dict
        Dict of the samples of the current MLP realization.
    pos_index: tuple
        Index which determines at which position in the MLP algorithm the
        sample is drawn. It is of fixed length 6 of the form
        (level, M, n, sum, l, i) where level is in {0, 1, ..., M-1}, with e.g.
        level = 0 corresponding to all samples which are drawn in the top
        recursive level of the MLP algorithm; where M, n, l, i correspond to
        the sum index in the MLP algorithm; and sum is in {1, 2}, where
        sum = 1 corresponds to the summand where the terminal condition g is
        involved and sum = 2 corresponds to the summand where the nonlinearity
        f is involved.
    hist_index: tuple
        Index which determines at which recursive path in the MLP algorithm
        the sample is drawn. It is of variable length and is constructed as
        follows: Assume that we want to compute V_{5,5}; then in the first
        iteration our index is given by (5). In this iteration we call
        1x V_{5,4} for each i in {1,...,5},
        2x V_{5,3} for each i in {1,...,5**2},
        2x V_{5,2} for each i in {1,...,5**3},
        2x V_{5,1} for each i in {1,...,5**4},
        1x V_{5,0} for each i in {1,...,5**5}
        in the summand involving f. For example, if we call V_{5,3}, then the
        new history index for this call is given by (5, i, 3) if we call it in
        the left-hand term of the difference. If we would call V_{5,3} as the
        right-hand term of the difference, then the index would be (5, i, -3).
        The other indices are constructed by the same procedure.
    config: array
        An array of boolean values which determines which samples are needed
        for the respective sampling method. This array is defined as
        self.sampleNeeded in each sampling method in the module sampler.

    Returns
    -------
    The samples in the samples dict at the corresponding index.
    """
    # sample kinds, in order: dW, Ikpw, Iwik, Imr, Itilde, Ihat, Xi
    dW = {}
    for kind in range(1, 8):
        if config[kind]:
            dW[(kind,)] = samples[pos_index + (kind,) + hist_index]
    return dW

def chain(source, *transforms):
    """Chain a series of generators in intuitive order.

    http://tartley.com/?p=1471
    """
    args = source
    for transform in transforms:
        args = transform(args)
    return args

def to_camel_case(text):
    """Convert string with underscores to camel case.

    .. note:: Source `StackOverflow <https://stackoverflow.com/a/19053800>`_

    :param str text: The string to convert
    :rtype: str
    """
    components = text.split("_")
    return components[0].lower() + "".join(x.title() for x in components[1:])

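# Usage sketch (illustrative, not part of the original collection):
assert to_camel_case("my_variable_name") == "myVariableName"
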
def build_position_tex(position):
    """Take a position in the form of JSON schema and produce tex output for
    that env.
    """
    tex_env = "positionenv"
    end = ''
    comments = []
    skills = ''
    if "end" in position:
        end = position["end"]
    if "comments" in position:
        # If we find comments then assume a positionenv, i.e. build the
        # comments into a list
        tex_env = "positionlist"
        comments = position["comments"]
    if "skills" in position:
        skills = "; ".join(position["skills"])
    position_data = {
        "tex_env": tex_env,
        "title": position["title"],
        "start": position["start"],
        "end": end,
        "skills": skills,
    }
    output = "\\begin{{{tex_env}}}{{{title}}}{{{start}}}{{{end}}}{{{skills}}}"
    output = output.format(**position_data)
    for comment in comments:
        output += ("\n \\item " + comment)
    output += ("\n\\end{{{tex_env}}}\n\n".format(**position_data))
    return output

def get_namespace_from_node(node):
    """Get the namespace from the given node.

    Args:
        node (str): name of the node

    Returns:
        namespace (str)
    """
    parts = node.rsplit("|", 1)[-1].rsplit(":", 1)
    return parts[0] if len(parts) > 1 else u":"

def _join_tag_version(tag, version):
    """Join a tag (not an image tag) and version by prepending the version to
    the tag with a '-' character.
    """
    return '-'.join((version, tag))

def smaller_starting_year(year, starting_year):
    """Clamp year to starting_year.

    If the year is older than the defined "starting_year", i.e. the year from
    when we start counting, set it to the "starting_year". This is just
    internal handling in the code and won't change the data.
    """
    if year < starting_year:
        year = starting_year
    return year

def s3_url(bucket: str, key: str) -> str:
    """Turn a bucket + key into an S3 url."""
    return '/'.join(["s3:/", bucket, key])

def int_max_value(bits, signed=True):
    """Return the maximum int value of a signed or unsigned integer based on
    the number of bits used.

    Arguments:
    bits   -- How many bits, e.g., 16
    signed -- True if a signed int

    Returns:
    max_value -- The maximum int value based on given parameters
    """
    if signed:
        max_value = pow(2, bits - 1) - 1
    else:
        max_value = pow(2, bits) - 1
    return max_value

def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):
    """Find control points of the Bezier curve passing through (*c1x*, *c1y*),
    (*mmx*, *mmy*), and (*c2x*, *c2y*), at parametric values 0, 0.5, and 1.
    """
    cmx = .5 * (4 * mmx - (c1x + c2x))
    cmy = .5 * (4 * mmy - (c1y + c2y))
    return [(c1x, c1y), (cmx, cmy), (c2x, c2y)]

def sourceJoin(nick, ident, host):
    """sourceJoin(nick, ident, host) -> str

    Join a source previously split by sourceSplit back together, inserting
    the ! and @ appropriately.
    """
    return "%s!%s@%s" % (nick, ident, host)

def hash(text):
    """Calculate the hash of a string, using the MD5 (Message Digest
    version 5) algorithm.

    Return value: ASCII-encoded hexadecimal number of 32 digits.
    """
    import hashlib
    m = hashlib.md5()
    # md5() operates on bytes; accept str by encoding it first
    m.update(text.encode() if isinstance(text, str) else text)
    return m.hexdigest()

def double_two(nums):
    """Uses built-in max() method and a duplicated list."""
    if len(nums) < 2:
        raise ValueError('Must have at least two values')
    my_max = max(nums)
    copy = list(nums)
    copy.remove(my_max)
    return (my_max, max(copy))

def convert_spark_datatype_to_presto(spark_datatype: str) -> str:
    """Map from https://spark.apache.org/docs/latest/sql-ref-datatypes.html
    to https://docs.aws.amazon.com/athena/latest/ug/create-table.html
    """
    spark_datatype_lower = spark_datatype.lower()
    # e.g. varchar(100), decimal(10, 4), char(5)
    if (spark_datatype_lower.startswith("varchar")
            or spark_datatype_lower.startswith("decimal")
            or spark_datatype_lower.startswith("char")):
        return spark_datatype_lower.upper()
    return {
        "boolean": "BOOLEAN",
        "float": "FLOAT",
        "double": "DOUBLE",
        "string": "STRING",
        "date": "DATE",
        "timestamp": "TIMESTAMP",
        "long": "BIGINT",
        "integer": "INT",
        "binary": "BINARY",
        "byte": "TINYINT",
        "short": "SMALLINT",
    }.get(spark_datatype_lower, "VARCHAR(65535)")

def least_common(column):
    """
    >>> least_common(["0", "0", "1"])
    '1'
    >>> least_common(["1", "1", "0", "0", "1"])
    '0'
    """
    occurs = {}
    for bit in column:
        occurs[bit] = occurs.get(bit, 0) + 1
    return min(occurs, key=lambda v: occurs[v])

def fmt_f2(val, missing, _trace, _tzinfo):
    """Simple 2-place formatter."""
    if val is None:
        return missing
    return "%.2f" % (val, )

def rank_as_string(list1, alphanum_index):
    """Convert a ranked list of items into a string of characters, based on a
    given dictionary that maps each ranked item to a unique character.

    Parameters
    ----------
    list1 : list
        A list of rankings to be converted to characters
    alphanum_index : dict
        A dictionary containing the items in list1 as keys and unique
        characters as the values.
    """
    return ''.join([alphanum_index[l] for l in list1])

def calculation(a, operator, b):
    """Take a number, an operator string, and another number, and return the
    result of the corresponding arithmetic operation.

    Example:
        >>> calculation(2, '*', 2)
        4

    Return: int or float or None.
    """
    # No error handling in this function, to keep it fast.
    if operator == '+':
        return a + b
    elif operator == '-':
        return a - b
    elif operator == '*':
        return a * b
    elif operator == '/':
        return a / b
    else:  # not strictly necessary
        return None

def flatten(mapping, parent_name=""):
    """Flattens a mapping tree so that all leaf nodes appear as tuples in a
    list containing a path and a value, like:

    >>> flatten({'a': 5, 'b': {'c': 6, 'd': 7}})
    [('a', 5), ('b.c', 6), ('b.d', 7)]
    """
    if not mapping:
        return []
    result_list = []
    for key, value in mapping.items():
        path = parent_name + key
        # recurse into nested mappings
        if isinstance(value, dict) or hasattr(value, 'items'):
            result_list.extend(flatten(value, path + '.'))
        else:
            result_list.append((path, value))
    return result_list

def import_object(name):
    """Imports an object by name.

    import_object('x.y.z') is equivalent to 'from x.y import z'.
    """
    parts = name.split('.')
    obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)
    return getattr(obj, parts[-1])

def validate_equalsize(a, b):
    """Check if two arrays have the same length."""
    if len(a) != len(b):
        raise ValueError("Arrays must have the same length.")
    return None

def is_digit_unk(texts_lst):
    """Flag tokens that are digits or the 'unk' token.

    texts_lst = ["my", " SS", "N", " is", " 123", "456"]
    return: [0, 0, 0, 0, 1, 1]
    """
    is_private = []
    for tok in texts_lst:
        if tok.strip().isdigit():
            is_private.append(1)
        elif tok.strip() in ['unk']:
            is_private.append(1)
        else:
            is_private.append(0)
    return is_private

def get_cell_content(browser, author):
    """Get novel cells.

    return [cell, cell, cell]
    """
    content = list()
    try:
        cells = browser.find_all(class_='t t2')
    except:
        return "[block]\n"
    for cell in cells:
        if cell.find(class_='r_two').b.string != author:
            continue
        for cell_content in cell.find(
                class_=['tpc_content do_not_catch', 'tpc_content']).strings:
            content.append(cell_content.strip())
    return "\n".join(content)

def ratio_label(s):
    """A label for a ratio (1.2x, 8.8x) which is encoded from the server as an
    integer 10x the floating point ratio - e.g. '12', '88'
    """
    return str(float(s) / 10.0)

def escape_html(t):
    """HTML-escape the text in `t`."""
    return (t
            .replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
            .replace("'", "&#39;").replace('"', "&quot;"))

def dxDC_calc(Vdc, Vdc_ref, Ki_DC):
    """Calculate the derivative of xDC."""
    dxDC = Ki_DC * (Vdc_ref - Vdc)
    return dxDC

def midpoint_rule(f, M=100000):
    """Integrate f(x) over [0,1] using M intervals."""
    from numpy import sum, linspace
    dx = 1.0 / M                          # interval length
    x = linspace(dx / 2, 1 - dx / 2, M)   # integration points
    return dx * sum(f(x))

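# Usage sketch (illustrative, not part of the original collection): the
# integral of x**2 over [0, 1] is 1/3, and the midpoint rule converges at
# O(dx**2), so the default M is far more than enough for this tolerance.
assert abs(midpoint_rule(lambda x: x ** 2) - 1.0 / 3.0) < 1e-9
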
def not_found(_error):
    """The 404 page is built by bracket so we have to link to the dist file it
    is built in. This is quite vulnerable as a result.

    See static_site/src/pages.py:NotFoundPage for where this is defined.
    """
    return 'not found', 404

def topological_sort(graph):
    """Order the whole graph topologically.

    @param graph: graph to be ordered
    """
    count = {}
    for node in graph:
        count[node] = 0
    for node in graph:
        for successor in graph[node]:
            count[successor] += 1

    ready = [node for node in graph if count[node] == 0]

    result = []
    while ready:
        node = ready.pop(-1)
        result.append(node)
        for successor in graph[node]:
            count[successor] -= 1
            if count[successor] == 0:
                ready.append(successor)
    return result

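# Usage sketch (illustrative, not part of the original collection): every node
# appears after all of its predecessors. The exact order shown assumes the
# stack-based ready list and Python 3.7+ dict insertion order.
assert topological_sort(
    {'a': ['b', 'c'], 'b': ['d'], 'c': ['d'], 'd': []}) == ['a', 'c', 'b', 'd']
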
def try_get_local_path(target):
    """Try to get the local path for a tracked object."""
    try:
        return target.local
    except AttributeError:
        return target

def is_emoji(string, n_emoji, emoji_pattern):
    """Return True if the string consists of exactly n_emoji emojis and no
    non-emoji characters (ignoring spaces).

    :param string: string to check for emojis
    :param n_emoji: expected number of emojis
    :param emoji_pattern: compiled emoji regex pattern
    """
    try:
        string = string.replace(' ', '')
        regex_res = emoji_pattern.findall(string)
        return (len(regex_res) == n_emoji
                and not len(emoji_pattern.sub('', string)) > 0)
    except (TypeError, AttributeError):
        return False

def note(name, source=None, contents=None, **kwargs):
    """
    Add content to a document generated using `highstate_doc.render`.

    This state does not perform any tasks on the host. It is only used by
    highstate_doc lowstate processors to include extra documents.

    .. code-block:: yaml

        {{sls}} example note:
            highstate_doc.note:
                - name: example note
                - require_in:
                    - pkg: somepackage
                - contents: |
                    example `highstate_doc.note`
                    ------------------
                    This state does not do anything to the system! It is only used by a `processor`
                    you can use `requisites` and `order` to move your docs around the rendered file.
                    .. this message appears above the `pkg: somepackage` state.
                - source: salt://{{tpldir}}/also_include_a_file.md

        {{sls}} extra help:
            highstate_doc.note:
                - name: example
                - order: 0
                - source: salt://{{tpldir}}/HELP.md
    """
    comment = ""
    if source:
        comment += "include file: {0}\n".format(source)
    if contents and len(contents) < 200:
        comment += contents
    return {"name": name, "result": True, "comment": comment, "changes": {}}

def word_show(vol, guess, store):
    """
    param vol: str, the word from def random_word
    param guess: str, the letter user guessed
    param store: str, the string showing correct letters user guessed
    return: str, answer
    """
    answer = ''
    if guess == '':
        for i in vol:
            answer += '-'
    else:
        for j in range(len(vol)):
            if guess == vol[j]:
                answer += guess
            else:
                answer += store[j]
    return answer

def format_error_string(stacktrace_str):
    """Return a formatted exception."""
    return '["e", "{}"]'.format(stacktrace_str.replace('"', '""'))

def format_time(time_in_seconds):
    """Format seconds into human-readable format.

    Parameters
    ----------
    time_in_seconds : float

    Returns
    -------
    str
        Formatted time.
    """
    mins, seconds = divmod(int(time_in_seconds), 60)
    hours, minutes = divmod(mins, 60)
    if hours > 0:
        return '{:0>2d}:{:0>2d}:{:0>2d}'.format(hours, minutes, seconds)
    return '{:0>2d}:{:0>2d}'.format(minutes, seconds)

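# Usage sketch (illustrative, not part of the original collection): the hours
# field is only shown when nonzero.
assert format_time(3725) == '01:02:05'
assert format_time(65) == '01:05'
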
def filter_outliers(pts_i, pts_j, inliers):
    """Filter outliers."""
    pts_out_i = []
    pts_out_j = []
    for n, status in enumerate(inliers):
        if status:
            pts_out_i.append(pts_i[n])
            pts_out_j.append(pts_j[n])
    return (pts_out_i, pts_out_j)

def mean(num_list):
    """Compute the mean of a list.

    Parameters
    ----------
    num_list: list
        List to calculate mean of

    Returns
    -------
    mean: float
        Mean of list of numbers
    """
    list_mean = sum(num_list) / len(num_list)
    return list_mean

def binarysearch(vec, val):
    """The returned ind satisfies vec[ind-1] <= val < vec[ind]."""
    nitem = len(vec)
    if nitem == 0:
        return 0
    Li = 0
    Ri = nitem
    Mi = nitem // 2
    while True:
        if vec[Mi] > val:
            # left search
            if Mi == (Li + Mi) // 2:
                return Mi
            Ri = Mi
            Mi = (Li + Mi) // 2
        elif vec[Mi] < val:
            # right search
            if Mi == (Ri + Mi) // 2:
                return Mi + 1
            Li = Mi
            Mi = (Ri + Mi) // 2
        else:
            return Mi + 1

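# Usage sketch (illustrative, not part of the original collection): 4 belongs
# at index 2 in [1, 3, 5, 7], since vec[1] <= 4 < vec[2].
assert binarysearch([1, 3, 5, 7], 4) == 2
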
def representsInt(s):
    """Check if s is an integer."""
    try:
        int(s)
        return True
    except ValueError:
        return False

def remove_head(alist):
    """Removes the first item in a list and returns the resulting list."""
    if alist != []:
        return alist[1:]
    return alist

def check_postal_code(postal_code):
    """
    :param postal_code: Postal code from Client.
    :return: the valid postal code; raises an exception for an invalid one.
    """
    if len(str(postal_code)) == 4:
        return postal_code
    raise TypeError("Postal code should be of length 4")

def std_float(number, num_decimals=2):
    """Print a number to string with n digits after the decimal point
    (default = 2).
    """
    return "{0:.{1:}f}".format(float(number), num_decimals)

def _hill_system_sort(ion_elements):
    """Reorder elements to be consistently ordered.

    Per https://en.wikipedia.org/wiki/Chemical_formula#Hill_system
    """
    if ion_elements['C'] != 0:
        return ['C', 'H',
                *sorted(key for key in ion_elements.keys()
                        if key not in ('C', 'H'))]
    return sorted(key for key in ion_elements.keys())

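# Usage sketch (illustrative, not part of the original collection): with
# carbon present, C then H lead and the rest follow alphabetically.
assert _hill_system_sort({'C': 2, 'H': 6, 'O': 1}) == ['C', 'H', 'O']
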
def split_col_row(ref):
    """Split the letter and number components of a cell reference.

    Examples:
    >>> split_col_row('A1')
    ('A', 1)
    >>> split_col_row('B100')
    ('B', 100)
    >>> split_col_row('CC12')
    ('CC', 12)
    """
    head = ref.rstrip("0123456789")
    tail = ref[len(head):]
    return head, int(tail)

def nmiles_to_km(N):
    """Convert nautical miles to km."""
    N = N * 1.852
    return N

def sequence_index_dict(seq, must_be_unique=True):
    """Build a dictionary mapping each element in seq to its index in the
    sequence.

    Parameters
    ----------
    seq : iterable of object
    must_be_unique : bool, optional

    Returns
    -------
    dict of object, int

    Examples
    --------
    >>> libtbx.utils.sequence_index_dict(['a', 'b'])
    {'a': 0, 'b': 1}
    """
    result = {}
    for i, elem in enumerate(seq):
        if must_be_unique:
            assert elem not in result
        result[elem] = i
    return result

def write_steadyst_conv_msg(n, ConvCrit):
    """Return the convergence status message for writing to file."""
    PrintMsg = (f"\nSTATUS: CONVERGED SOLUTION OBTAINED AT"
                f"\nITERATIONS={n},\nMAX. ERROR= {ConvCrit}")
    print(PrintMsg)
    print()
    return PrintMsg

def findpower2(num):
    """Find the nearest number that is a power of 2, i.e. the smallest power
    of 2 greater than or equal to num."""
    if num & (num - 1) == 0:
        return num
    bin_num = bin(num)
    origin_bin_num = str(bin_num)[2:]
    # a '1' followed by len(origin_bin_num) zeros, read as binary, is the
    # next power of 2
    near_power2 = pow(10, len(origin_bin_num))
    near_power2 = "0b" + str(near_power2)
    near_power2 = int(near_power2, base=2)
    return near_power2

def _getSubWikiHeaders(wikiHeaders, subPageId, mapping=None):
    """Assist in getting wiki headers of subwikipages."""
    subPageId = str(subPageId)
    for i in wikiHeaders:
        # This is for the first match. If it isn't the actual parent, it will
        # turn the first match into a parent node which will not have a
        # parentId
        if i['id'] == subPageId:
            if mapping is None:
                i.pop("parentId", None)
                mapping = [i]
            else:
                mapping.append(i)
        elif i.get('parentId') == subPageId:
            mapping = _getSubWikiHeaders(wikiHeaders, subPageId=i['id'],
                                         mapping=mapping)
    return mapping

def GetSettingNames(sensorName):
    """Return list of per-sensor dictionary keys of selected settings and
    results."""
    return ['enabled', 'k_brightness', 'noise_floor']

def string(node):
    """Print out string representation of linked list.

    :param node: value of head node, start of list
    :return: string: string of linked list, comma delimited
    """
    if node is not None and node.next_node is not None:
        # first or n-1 element in the linked list
        return str(node.value) + ", " + string(node.next_node)
    elif node is not None:
        # last element in list
        return str(node.value)
    return ''