content
stringlengths
42
6.51k
def keyval_list_to_dict(l):
    """Convert a list of ``key=value`` strings into a dictionary.

    Surrounding single or double quotes are stripped from both keys and
    values.  Entries without an ``=`` are ignored.

    :param l: iterable of ``key=value`` strings
    :return: dict mapping stripped keys to stripped values
    """
    def stripped(s):
        return s.strip('"').strip("'")

    d = {}
    for entry in l:
        # Split on the FIRST '=' only, so values may themselves contain '='.
        # (The original used split("=", 2), which silently dropped such
        # entries because the 3-part result failed the len == 2 check.)
        key, sep, value = entry.partition("=")
        if sep:
            d[stripped(key)] = stripped(value)
    return d
def example_distribution_config(ref):
    """Build a minimal CloudFront-style distribution config for use in tests.

    :param ref: value used as the ``CallerReference``
    :return: dict resembling a distribution configuration
    """
    origin = {
        "Id": "origin1",
        "DomainName": "asdf.s3.us-east-1.amazonaws.com",
        "S3OriginConfig": {"OriginAccessIdentity": ""},
    }
    default_behavior = {
        "TargetOriginId": "origin1",
        "ViewerProtocolPolicy": "allow-all",
        "MinTTL": 10,
        "ForwardedValues": {"QueryString": False, "Cookies": {"Forward": "none"}},
    }
    return {
        "CallerReference": ref,
        "Origins": {"Quantity": 1, "Items": [origin]},
        "DefaultCacheBehavior": default_behavior,
        "Comment": "an optional comment that's not actually optional",
        "Enabled": False,
    }
def str_xor(lhs, rhs):
    """Calculate the bitwise XOR of two equal-length binary strings.

    :param lhs: string of '0'/'1' characters
    :param rhs: string of '0'/'1' characters
    :return: string with '0' where the inputs match, '1' where they differ
    :raises RuntimeError: if the inputs differ in length
    """
    if len(lhs) != len(rhs):
        # The original message contained %s placeholders but never
        # interpolated the operands into them; do so here.
        raise RuntimeError(
            "XOR strings '%s' and '%s' are not of same length" % (lhs, rhs))
    return "".join("0" if a == b else "1" for a, b in zip(lhs, rhs))
def get_string_or_none(text=None):
    """
    Only return stripped content of text if text is not None and not empty.

    Parameters
    ----------
    text: str
        string to parse

    Returns
    -------
    (str, None): stripped content of text, or None when nothing remains
    """
    if text is None:
        return None
    # Convert and strip once (the original did the str(...).strip() work
    # twice: once for the emptiness test and again for the return).
    stripped = str(text).strip()
    return stripped if stripped else None
def decodeSurrogatePair(hi, lo):
    """Combine a UTF-16 surrogate pair into its Unicode scalar value.

    :param hi: high surrogate character (U+D800..U+DBFF)
    :param lo: low surrogate character (U+DC00..U+DFFF)
    :return: int code point >= 0x10000
    """
    high_bits = ord(hi) - 0xD800
    low_bits = ord(lo) - 0xDC00
    # * 0x400 is a shift by 10 bits.
    return 0x10000 + (high_bits << 10) + low_bits
def view_url_to_swagger(url):
    """
    Converts a view URL with `<>` arguments into Swagger-style `{}` arguments.
    """
    translation = str.maketrans({'<': '{', '>': '}'})
    return url.translate(translation)
def KK_RC26(w, Rs, R_values, t_values):
    """Kramers-Kronig Function: -RC- with 26 parallel RC elements.

    Kristian B. Knudsen ([email protected] / [email protected])

    :param w: angular frequency (scalar or numpy array)
    :param Rs: series resistance
    :param R_values: sequence of at least 26 resistances
    :param t_values: sequence of at least 26 time constants
    :return: complex impedance Rs + sum_k R_k / (1 + j*w*t_k)
    """
    # The original spelled out the 26 identical terms by hand; sum them in
    # a loop instead.
    return Rs + sum(
        R_values[k] / (1 + w * 1j * t_values[k]) for k in range(26))
def _NamesNotIn(names, mapping): """Returns a list of the values in |names| that are not in |mapping|.""" return [name for name in names if name not in mapping]
def level_to_session(level):
    """
    Converts study level to a year of study.

    Intended for use with the level descriptions that come out of the HE
    In Year Cohort web report, but applicable to other instances.

    Parameters
    ----------
    level : str
        The text version of a level. Should begin with the word 'level'.

    Returns
    -------
    int
        The year of study that the level (typically) corresponds to.
    """
    # The original pre-assigned a dead placeholder ("X") that was always
    # overwritten; branch and return directly instead.
    if level[:5].upper() == "LEVEL":
        # Year = final digit of the level minus 3 (e.g. "Level 4" -> 1).
        return int(level[-1]) - 3
    return 1
def count_stations(list_of_stations):
    """Technical function to calculate the number of active meteostations.
    It is used to write data in the logfile.

    Arguments:
    list_of_stations -- list of stations processed in this session

    Returns a (unique_count, total_count) pair based on 'station_mac'.
    """
    macs = [station['station_mac'] for station in list_of_stations]
    return len(set(macs)), len(macs)
def obs_to_string(observations):
    """
    Convert observations (or states) to byte strings for transmission to
    server.

    Parameters
    ----------
    observations: list of np.arrays
        [obs_1, ..., obs_n] which corresponds to the original observations
        (or states); each np.array obs_i has shape (batch_size) + obs_shape_i

    Returns
    -------
    str_obs: list of byte strings
        [str_1, ..., str_n] encoding each flattened observation
    """
    # ndarray.tostring() is a deprecated alias of tobytes(); the produced
    # bytes are identical.
    return [obs.reshape(-1).tobytes() for obs in observations]
def extract_configuration_pair(line):
    """
    Extract a configuration pair by splitting on spaces and taking the first
    couple of values.

    :param line: a line inside the configuration file
    :type line: str
    :return: a (key, value) pair
    :rtype: tuple
    """
    # (The original docstring wrongly declared :rtype: bool.)
    # maxsplit=2 keeps the split cheap; only the first two tokens are used.
    split = line.split(maxsplit=2)
    return split[0], split[1]
def rank_results(results_dict, required_fields, rank_order):
    """Collect people from the required fields in rank order, deduplicated.

    Fields in *rank_order* that are not required, or absent from
    *results_dict*, are skipped; earlier fields take precedence.
    """
    ranked = []
    for field in rank_order:
        if field not in required_fields:
            continue
        for person in results_dict.get(field, []):
            if person not in ranked:
                ranked.append(person)
    return ranked
def findmarkedvariables(str1, startmarker, endmarker, ignorelist=None):
    """Return all (position, variable) pairs in str1 marked with the markers.

    :param str1: string to scan
    :param startmarker: substring that introduces a variable
    :param endmarker: closing substring; None means "ends at the first
        non-identifier character"; an int means a fixed-length variable
        (usually 1)
    :param ignorelist: optional collection of variable names to skip
    :return: list of (start_position, variable_name) tuples
    """
    # A mutable default argument ([]) is shared across calls; use None as
    # the sentinel instead.
    if ignorelist is None:
        ignorelist = []
    variables = []
    currentpos = 0
    while currentpos >= 0:
        variable = None
        currentpos = str1.find(startmarker, currentpos)
        if currentpos >= 0:
            startmatch = currentpos
            currentpos += len(startmarker)
            if endmarker is None:
                # No end marker: any non-alphanumeric/underscore character
                # ends the variable; it must be non-empty.
                endmatch = currentpos
                for n in range(currentpos, len(str1)):
                    if not (str1[n].isalnum() or str1[n] == '_'):
                        endmatch = n
                        break
                if currentpos == endmatch:
                    endmatch = len(str1)
                if currentpos < endmatch:
                    variable = str1[currentpos:endmatch]
                currentpos = endmatch
            elif type(endmarker) == int:
                # An int endmarker means a fixed-length variable string
                # (usually endmarker == 1).
                endmatch = currentpos + endmarker
                if endmatch > len(str1):
                    break
                variable = str1[currentpos:endmatch]
                currentpos = endmatch
            else:
                endmatch = str1.find(endmarker, currentpos)
                if endmatch == -1:
                    break
                # Search backwards in case there's an intervening
                # startmarker (if not it's OK)...
                start2 = str1.rfind(startmarker, currentpos, endmatch)
                if start2 != -1:
                    startmatch2 = start2
                    start2 += len(startmarker)
                    if start2 != currentpos:
                        currentpos = start2
                        startmatch = startmatch2
                variable = str1[currentpos:endmatch]
                currentpos = endmatch + len(endmarker)
        if variable is not None and variable not in ignorelist:
            # Only accept identifier-like names (letters, digits, '_', '.').
            if not variable or variable.replace("_", "").replace(".", "").isalnum():
                variables.append((startmatch, variable))
    return variables
def clean_parenthesized_string(string):
    """Clip *string* to the closing paren matching its first opening paren.

    Parameters
    ----------
    string: String
        A string that contains a parenthesized statement in its entirety,
        along with extra content to be removed. The target parenthesized
        statement may contain additional parentheses

    Returns
    -------
    clean_string: String
        A substring of `string`, extending from the beginning of `string`,
        through the closing paren that matches the first opening paren found

    Raises
    ------
    ValueError
        If the opening paren is never closed"""
    depth = 0
    for position, character in enumerate(string):
        if character == "(":
            depth += 1
        elif character == ")":
            if depth <= 1:
                return string[: position + 1]
            depth -= 1
    raise ValueError(f'Need closing paren:"""\n{string}\n"""\nRemaining close_paren: {depth}')
def path_decoder(url):
    """Grab the last non-empty component of a url as the path."""
    parts = url.split('/')
    # A trailing slash yields an empty final part; fall back one component.
    return parts[-1] or parts[-2]
def inject_class(value, class_name):
    """Given raw html, attach the provided class to the first <svg> element.

    This method assumes a class does not already exist on the element.

    :param value: raw (possibly multi-line) svg/html markup
    :param class_name: CSS class to add to the opening <svg> tag
    :return: the markup with the class attribute injected
    """
    lines = value.split("\n")
    for index, line in enumerate(lines):
        if "<svg" in line:
            # Replace the tag in place, so a tag that is indented or offset
            # within the line is handled too. (The original assumed the
            # line began exactly with "<svg " and, when no svg tag was
            # present at all, silently mangled the first line.)
            lines[index] = line.replace(
                "<svg", "<svg class=\"" + class_name + "\"", 1)
            break
    return "\n".join(lines)
def normalize(string):
    """Normalize numbers in URL query.

    Returns an int for a run of decimal digits, a float when the text
    parses as one, otherwise the original string.
    """
    if string.isnumeric():
        # str.isnumeric() is also true for characters like '½' or '²'
        # that int() cannot parse, so guard the conversion rather than
        # letting ValueError escape as the original did.
        try:
            return int(string)
        except ValueError:
            pass
    try:
        return float(string)
    except ValueError:
        return string
def flatten(sequence):
    """Flatten events in sequence elements to a single list of events."""
    flat = []
    for element in sequence:
        flat.extend(element)
    return flat
def get_bleeding_limbs(life):
    """Returns list of bleeding limbs."""
    body = life['body']
    return [limb for limb in body if body[limb]['bleeding']]
def dectohex2(dec_str):
    """Alternative implementation using BIF: decimal string -> hex string."""
    return hex(int(dec_str))
def king(r, rc, rt, sigma_0, alpha=2):
    """King surface-density profile.

    See http://iopscience.iop.org/1538-3881/139/6/2097/fulltext/

    Parameters
    ----------
    r: float
        radius
    rc: float
        core radius
    rt: float
        truncation radius
    sigma_0: float
        central density
    alpha: float, optional
        profile exponent (default 2)
    """
    def z(x):
        return 1 / (1 + (x / rc) ** 2) ** (1. / alpha)

    z_rt = z(rt)
    normalisation = (1 - z_rt) ** -alpha
    profile = (z(r) - z_rt) ** alpha
    return sigma_0 * normalisation * profile
def right_bisect(sorted_list, element):
    """Find an element in a sorted list (from the right).

    Returns the index of the first entry strictly greater than *element*,
    capped at len(sorted_list) - 1 (and -1 for an empty list).
    """
    lo, hi = 0, len(sorted_list) - 1
    while lo < hi:
        mid = (lo + hi) // 2
        if sorted_list[mid] <= element:
            lo = mid + 1
        else:
            hi = mid
    return hi
def get_polaris_version(self):
    """Retrieve deployment version from Polaris.

    Returns:
        str: Polaris deployment version, or the sentinel string
        "Failed to retrieve Polaris Version" when the query fails.
    """
    query_name = "core_polaris_version"
    try:
        return self._query(query_name, None)
    except Exception:
        # Deliberate best-effort: callers receive a sentinel string rather
        # than an exception. (The original wrapped this in an outer
        # try/except that only re-raised — a no-op — and bound the inner
        # exception to an unused variable.)
        return "Failed to retrieve Polaris Version"
def convert_list_to_string(list_of_lists):
    """
    Convert a list of lists of strings into a list of strings.

    This is a placeholder to use for firing patterns until I recode and
    rerun sims to use strings rather than lists of strings. This is because
    lists as data elements don't play nice with pandas. You can't do
    df['column'] == ['a','b'] as it thinks you're equality checking the
    entire Series rather than element by element as something like
    df['column'] == 'a, b' would do.
    """
    converted = []
    for item in list_of_lists:
        if isinstance(item, list):
            converted.append(', '.join(item))
        else:
            # E.g. NaN placeholders pass through untouched.
            converted.append(item)
    return converted
def list_math_substraction_number(a, b):
    """!
    @brief Calculates subtraction between list and number.
    @details Each element from list 'a' is subtracted by number 'b'.

    @param[in] a (list): List of elements that supports mathematical subtraction.
    @param[in] b (number): Value that supports mathematical subtraction
               (the original doc wrongly declared this parameter a list).

    @return (list) Results of subtraction between list and number.
    """
    # Iterate elements directly instead of indexing via range(len(a)).
    return [element - b for element in a]
def build_output_type_set(output_type_list, config):
    """Builds set of output types.

    Args:
      output_type_list: list, possible output segmentation coord types.
      config: dict, user passed parameters keyed "output_<type>".

    Returns:
      Set of requested output segmentation coord types.
    """
    return {
        output_type
        for output_type in output_type_list
        if config["output_{}".format(output_type)]
    }
def to_dict(dictish):
    """
    Coerce something that closely resembles a dictionary into a proper dict.

    Supports Python-2-style objects exposing iterkeys() as well as mapping
    objects exposing keys(); anything else raises ValueError.
    """
    if hasattr(dictish, "iterkeys"):
        key_iter = dictish.iterkeys()
    elif hasattr(dictish, "keys"):
        key_iter = dictish.keys()
    else:
        raise ValueError(dictish)
    return {key: dictish[key] for key in key_iter}
def create_look_up_table(vocab_list):
    """Create tables for encoding (word -> index) and decoding (index -> word)
    texts."""
    vocab2int = dict((word, index) for index, word in enumerate(vocab_list))
    # The vocab list itself already maps index -> word.
    return vocab2int, vocab_list
def flat_clfs(params) -> dict:
    """Flatten the nested classifier groups into one dict for easier access."""
    merged = {}
    for group in params["classifiers"].values():
        merged = {**merged, **group}
    return merged
def _to_gj_multipoint(data): """ Dump a EsriJSON-like MultiPoint object to GeoJSON-dict. Input parameters and return value are the MULTIPOINT equivalent to :func:`_to_gj_point`. :returns: `dict` """ return {'type': 'Multipoint', 'coordinates': [pt for pt in data['points']]}
def selection(input_list):
    """
    Sort the given list in place using selection sort and return it with the
    values ascending.

    Returns None (after printing a message) when the elements are not
    mutually comparable.
    """
    length = len(input_list)
    try:
        for i in range(length):
            smallest = i
            # Find the smallest remaining element.
            for j in range(i + 1, length):
                if input_list[j] < input_list[smallest]:
                    smallest = j
            if smallest != i:
                input_list[i], input_list[smallest] = \
                    input_list[smallest], input_list[i]
    except TypeError:
        print('All data must be same type i.e. int, str, etc...')
        return None
    # The original forgot to return the sorted list on success.
    return input_list
def _prepend_argument(argument: str, to_prepend: str): """ Prepend as string to an argument :param argument: :param to_prepend: :return: """ return '--{0}-{1}'.format(to_prepend, argument[2:]) if argument[:2] == '--' \ else '-{0}-{1}'.format(to_prepend, argument[1:])
def C3(cls, *mro_lists):
    """Implementation of the Python's C3 Algorithm.

    Merges the given parent linearizations into a single MRO for ``cls``.

    :param cls: the class being linearized; becomes the head of the result
    :param mro_lists: the MROs of the bases (and typically the list of
        bases itself), in declaration order
    :return: tuple holding the full MRO for ``cls``
    :raises TypeError: when no consistent ordering exists

    Notes:
        * The order of items in an MRO should be preserved in all of its
          future subclasses
    """
    import itertools
    # Make a copy so we don't change existing content
    mro_lists = [list(mro_list[:]) for mro_list in mro_lists]
    # Set up the new MRO with the class itself
    mro = [cls]
    # The real algorithm goes here
    while True:
        # Reset for the next round of tests
        candidate_found = False
        for mro_list in mro_lists:
            if not len(mro_list):
                # Any empty lists are of no use to the algorithm
                continue
            # Get the first item as a potential candidate for the MRO
            candidate = mro_list[0]
            if candidate_found:
                # Candidates promoted to the MRO are no longer of use
                if candidate in mro:
                    mro_list.pop(0)
                # Don't bother checking any more candidates if one was found
                continue
            # See if it's in any position other than first in any of the
            # other lists
            if candidate in itertools.chain(*(x[1:] for x in mro_lists)):
                # Isn't a valid candidate yet and we need to move on to the
                # first class in the next list
                continue
            else:
                # The candidate is valid and should be promoted to the MRO
                mro.append(candidate)
                mro_list.pop(0)
                candidate_found = True
        if not sum(len(mro_list) for mro_list in mro_lists):
            # There are no MROs to cycle through, so we're all done
            break
        if not candidate_found:
            # No valid candidate was available, so we have to bail out
            raise TypeError("Inconsistent MRO")
    return tuple(mro)
def isnumeric(inp):
    """Check whether a value parses as a number via float().

    Accepts anything float() accepts (including "nan"/"inf"). Returns False
    for unparseable strings and — unlike the original, which let TypeError
    escape — for non-string, non-numeric types such as None.
    """
    try:
        float(inp)
        return True
    except (TypeError, ValueError):
        return False
def arch_to_src_arch(arch):
    """
    Map a user-facing architecture name to its kernel source directory.
    These conversions are based on Linux' main makefile.
    """
    aliases = {
        'i386': 'x86',
        'x86_64': 'x86',
        'sparc32': 'sparc',
        'sparc64': 'sparc',
        'sh64': 'sh',
        'tilepro': 'tile',
        'tilegx': 'tile',
    }
    return aliases.get(arch, arch)
def match_file(patterns, file):
    """
    Matches the file to the patterns.

    *patterns* (:class:`~collections.abc.Iterable` of
    :class:`~pathspec.pattern.Pattern`) contains the patterns to use.

    *file* (:class:`str`) is the normalized file path to be matched against
    *patterns*.

    Returns :data:`True` if *file* matched; otherwise, :data:`False`.
    """
    matched = False
    for pattern in patterns:
        # Null-pattern entries (include is None) never affect the result;
        # later matching patterns override earlier ones.
        if pattern.include is None:
            continue
        if file in pattern.match((file,)):
            matched = pattern.include
    return matched
def is_object(obj):
    """
    Decide whether a json value looks like a domain object: a dict having
    both an id-like and a name-like key (case-insensitive substring match).

    *Remember, we're assuming the category from the endpoint used. If a
    json object contains keys covering both fields it will be chosen; if
    multiple keys contain the term "name" (e.g. "chebi_name" vs
    "iupac_names"), downstream code takes the shortest match.
    """
    if not isinstance(obj, dict):
        return False
    lowered = [key.lower() for key in obj.keys()]
    has_id = any('id' in key for key in lowered)
    has_name = any('name' in key for key in lowered)
    return has_id and has_name
def remove_comma(in_str):
    """Replace commas in the given value with spaces, collapsing the doubled
    spaces that ", " sequences leave behind.

    :param in_str: any value; it is converted with str() first
    :return: the cleaned string
    """
    # The original chained .replace(" ", " ") — a no-op; the intent was
    # evidently to collapse the double space produced by replacing the
    # comma in ", " sequences.
    return str(in_str).replace(",", " ").replace("  ", " ")
def get_annotation_of_violation(annotations, violation):
    """
    Returns the annotation this violation violates.

    :param annotations: List of annotations.
    :param violation: The violation that violates the searched annotation.
    :return: The violated annotation, or None when no annotation matches.
    """
    matches = (a for a in annotations if violation in a.violations)
    return next(matches, None)
def define_regcontrol_object(regcontrol_name, action_type, regcontrol_info_series, general_property_list=None):
    """Create an OpenDSS command string defining a regcontrol object at a
    given bus. A transformer should already exist at this bus; regulator
    control will be placed on this transformer.

    Parameters
    ----------
    regcontrol_name : name of the RegControl element
    action_type : OpenDSS verb, e.g. "New" (adds the transformer clause)
    regcontrol_info_series : mapping holding the regcontrol properties
    general_property_list : property names to copy (defaults applied below;
        refer OpenDSS manual for more information on these parameters)

    Returns
    -------
    str : the assembled OpenDSS command
    """
    if general_property_list is None:
        general_property_list = ['winding', 'ptratio', 'band', 'vreg', 'delay']
    parts = [f"{action_type} RegControl.{regcontrol_name}"]
    if action_type == "New":
        parts.append(f" transformer={regcontrol_info_series['transformer']}")
    for property_name in general_property_list:
        parts.append(f" {property_name}={regcontrol_info_series[property_name]}")
    parts.append(" enabled=True")
    return "".join(parts)
def convert_file_timestamp(file_time: float, utc=False):
    """Convert a file timestamp (seconds since the epoch) to readable text.

    Examples:
        >>> convert_file_timestamp(1635892055.433207)
        '2021-11-02 15:27:35.433207'
        >>> convert_file_timestamp(1635892055.433207, utc=True)
        '2021-11-02 22:27:35.433207+00:00'

    References:
        https://stackoverflow.com/questions/39359245/from-stat-st-mtime-to-datetime
        https://stackoverflow.com/questions/1111317/how-do-i-print-a-datetime-in-the-local-timezone

    Args:
        file_time (float): Reference a file timestamp.
        utc (bool, optional): Set utc=True to get the returned timestamp in
            UTC. Defaults to False (local time, naive datetime).

    Returns:
        str: Returns a string version of the timestamp.
    """
    from datetime import datetime, timezone
    tz = timezone.utc if utc else None
    return str(datetime.fromtimestamp(file_time, tz=tz))
def get_surrounding_text(text, sub_start, sub_end=None, distance=25):
    """
    Looks for the substrings 'sub_start' and 'sub_end' in 'text' and returns
    a new substring with up to 'distance' characters to the left of the
    'sub_start' position and to the right of the 'sub_end' position; if one
    of them is not found the partition of the string runs to the start or
    the end of 'text'.

    :param text: a string. The string to look for.
    :param sub_start: a string. The start to get the left and the right
        parts of the new substring.
    :param sub_end: an optional string. The start to get the right part of
        the new substring.
    :param distance: an optional integer. The length of the left and right
        parts of the new string from sub_start and sub_end if they are
        found. If the substrings are not found, the distance will be to the
        start of the text and the end of the text.
    :return: a string. A new substring with the characters around the
        'sub_start' and 'sub_end' substrings.
    """
    surrounding_text = ""
    separators = ['.', '\n', ',']
    # Case-insensitive location of the start substring (-1 when absent).
    i_start = text.lower().find(sub_start.lower())
    i_left = max(0, i_start - distance)
    # NOTE(review): find() locates the FIRST separator inside the left
    # window; combined with max() below the cut lands at the separator
    # nearest the window start, not nearest sub_start — rfind() may have
    # been intended. Confirm before changing.
    i_separators = [text[i_left:i_start].find(separator) for separator in separators]
    i_separators = [i + i_left + 1 for i in i_separators if i >= 0]
    if i_separators:
        i_left = max(i_left, *i_separators)
    if sub_end:
        i_end = text.lower().find(sub_end.lower())
    else:
        i_end = -1
        # With no explicit end substring, sub_start doubles as the end.
        sub_end = sub_start
    if i_end < 0:
        # End substring missing (or not requested): cut right after
        # sub_start and let the right window extend to the end of the text.
        i_end = i_start + len(sub_start)
        i_right = len(text)
    else:
        i_end = i_end + len(sub_end)
        i_right = min(len(text), i_end + distance)
    # Trim the right window at the first separator after the end position.
    i_separators = [text[i_end:i_right].find(separator) for separator in separators]
    i_separators = [i + i_end for i in i_separators if i >= 0]
    if i_separators:
        i_right = min(i_right, *i_separators)
    surrounding_text = text[i_left:i_right].strip()
    return surrounding_text
def to_user_facing_code(code):
    """Returns a user-facing code given a raw code (e.g., abcdefghij)."""
    groups = (code[:3], code[3:6], code[6:])
    return "-".join(groups)
def strip_nones(row):
    """
    Remove all items with None for a value, because why include it if
    there's no value?
    """
    return {key: value for key, value in row.items() if value is not None}
def concatenate_replacements(text, replacements):
    """Applies a rewrite to some text and returns a span to be replaced.

    Args:
      text: Text to be rewritten.
      replacements: An iterable of (new_text, start of replacement, end)
        tuples, expected in ascending, non-overlapping span order.

    Returns:
      A new (joined_text, first_start, last_end) replacement covering the
      union of the input spans, with untouched text between spans preserved.

    Raises:
      ValueError: when a span is inverted (start > end) or two spans
        overlap.
    """
    joined = []
    first_start = None
    last_end = None
    for rewritten, start, end in replacements:
        if start > end:
            raise ValueError(
                'Rewrites have invalid spans: start=%r > end=%r' % (start, end))
        if first_start is None:
            first_start = start
        if last_end is not None:
            # Carry over the untouched text between the previous span and
            # this one. (On overlap this slice is empty/backwards; the
            # check just below then raises.)
            joined.append(text[last_end:start])
        if last_end is not None and last_end > start:
            raise ValueError(
                'Rewrites overlap: end > next start: '
                '{last_end} > {start}. '
                '(Was about to write: text[{start}:{end}] (== {old!r}) <- {new!r})'
                .format(
                    start=start, end=end, last_end=last_end,
                    old=text[start:end], new=rewritten))
        joined.append(rewritten)
        last_end = end
    # `or 0` maps the no-replacements case (both still None) to an empty
    # span at position 0.
    return ''.join(joined), first_start or 0, last_end or 0
def get_collection_id(collection):
    """Return the id attribute if the object is a Collection, otherwise the
    given value itself.

    The check is by class name (rather than isinstance) so the Collection
    type does not need to be imported here.
    """
    if type(collection).__name__ == "Collection":
        return collection.id
    return collection
def add_config(cmd, config):
    """
    Add config values to a snakemake command, either right after an existing
    --config/-C argument or appended (with a new --config) at the end.

    cmd (list of strings): passed snakemake commands.
    config (list of strings): additional values to add.
    """
    flags_present = [flag for flag in ("--config", "-C") if flag in cmd]
    if not flags_present:
        # add config to end if arg not already used
        return cmd + ["--config"] + config
    # insert right after the furthest first-occurrence of a config flag
    idx = max(cmd.index(flag) for flag in flags_present)
    return cmd[:idx + 1] + config + cmd[idx + 1:]
def bool_to_returncode(success):
    """Return 0 if |success|. Otherwise return 1."""
    print('Success.' if success else 'Failed.')
    return 0 if success else 1
def solution(n):
    """Returns the difference between the sum of the squares of the first n
    natural numbers and the square of the sum.

    >>> solution(10)
    2640
    >>> solution(15)
    13160
    >>> solution(20)
    41230
    >>> solution(50)
    1582700
    """
    # The original bound a local named `sum`, shadowing the builtin.
    numbers = range(1, n + 1)
    square_of_sum = sum(numbers) ** 2
    sum_of_squares = sum(i * i for i in numbers)
    return square_of_sum - sum_of_squares
def get_docker_host_mount_location(cluster_name: str) -> str:
    """Return host path that Docker mounts attach to."""
    return f"/tmp/cloudtik_tmp_mount/{cluster_name}"
def caseinsensitive_sort(stringList):
    """Case-insensitive string comparison sort; doesn't do locale-specific
    compare though that would be a nice addition.

    Ties between strings equal ignoring case are broken by the original
    string, matching the decorate-sort-undecorate of the original.

    usage: stringList = caseinsensitive_sort(stringList)"""
    return sorted(stringList, key=lambda s: (s.lower(), s))
def hex_to_rgb(h):
    """Return (r, g, b) as ints 0-255, parsed from a hex colour like
    '#rrggbb' (the leading '#' is optional)."""
    digits = h.lstrip('#')
    return tuple(int(digits[i:i + 2], 16) for i in (0, 2, 4))
def au_to_m(au):
    """
    Converts the input distance (or velocity) from astronomical units to
    meters.
    """
    meters_per_au = 1.495978707 * 10**11
    return au * meters_per_au
def isFloat(val):
    """Returns true if the passed value is a float value, otherwise false.

    NOTE: Returns true only if the value is exactly of float type. See
    isNum() for a less restrictive test.

    **Parameters:**

    * val - value to test

    **Returns:**
    True if the passed value is a float, otherwise false."""
    return float is type(val)
def _merge_on_last_modified(l1, l2): """ Takes two iterables l1, l2 of objects with a `last_modified` attribute. Assumes the iterables are sorted (desc. order) on this attribute, and merges them into a single sorted list """ il1 = iter(l1) il2 = iter(l2) out = [] try: next_l1 = next(il1) except StopIteration: next_l1 = None try: next_l2 = next(il2) except StopIteration: next_l2 = None # Merge while both lists have elements while next_l1 is not None and next_l2 is not None: if next_l1.last_modified > next_l2.last_modified: out.append(next_l1) try: next_l1 = next(il1) except StopIteration: next_l1 = None else: out.append(next_l2) try: next_l2 = next(il2) except StopIteration: next_l2 = None if next_l1 is not None: out.append(next_l1) for elt in il1: out.append(elt) if next_l2 is not None: out.append(next_l2) for elt in il2: out.append(elt) return out
def udfize_lambda_string(expression: str):
    """Given an expression that uses 'input' as a parameter, return a lambda
    as a string."""
    return f"lambda input: ({expression})"
def isruddynumeric(val):
    """
    Checks if a value is numeric even if it is a floating point number.

    Parameters:
        val(str)

    Returns:
        bool: True when float() can parse the value; False for unparseable
        strings and — unlike the original, which raised TypeError — for
        non-string, non-numeric types such as None.
    """
    try:
        float(val)
        return True
    except (TypeError, ValueError):
        return False
def _mounts_to_in_dict(mounts): """Convert docker-style mounts (external_dir):{docker_dir} into dictionary of external to docker. """ out = {} for m in mounts: external, docker = m.split(":") out[external] = docker return out
def check_not_finished_board(board: list):
    """
    Check if skyscraper board is not finished, i.e., '?' present on the game
    board. Return True if finished, False otherwise.

    >>> check_not_finished_board(['***21**', '4?????*', '4?????*', '*?????5', '*?????*', '*?????*', '*2*1***'])
    False
    >>> check_not_finished_board(['***21**', '412453*', '423145*', '*543215', '*35214*', '*41532*', '*2*1***'])
    True
    >>> check_not_finished_board(['***21**', '412453*', '423145*', '*5?3215', '*35214*', '*41532*', '*2*1***'])
    False
    """
    # Skip the hint rows (first and last); a finished inner row is all
    # digits once the '*' borders are stripped, so int() succeeds.
    for row in board[1:-1]:
        core = row.strip('*')
        try:
            int(core)
        except ValueError:
            return False
    return True
def tobool(value):
    """Convert value to boolean, or Not a Number when the value is None."""
    if value is None:
        # float("nan") is exactly what numpy.nan is (a plain Python float),
        # so the heavy numpy import the original did here is unnecessary.
        return float("nan")
    return bool(value)
def capitalize_title(title):
    """Convert the first letter of each word in the title to uppercase if
    needed.

    :param title: str - title string that needs title casing.
    :return: str - title string in title case (first letters capitalized).
    """
    return str.title(title)
def bech32_hrp_expand(hrp):
    """Expand the HRP into values for checksum computation: each character's
    high bits, a zero separator, then each character's low 5 bits."""
    high = [ord(ch) >> 5 for ch in hrp]
    low = [ord(ch) & 31 for ch in hrp]
    return high + [0] + low
def proba_fail(origin, next_ids, graph):
    """Return the probability that the origin should be predicted before all
    the next_ids (supposing independency)."""
    probability = 1
    for successor in next_ids:
        if successor in graph[origin]:
            probability *= graph[origin][successor]
        else:
            # No forward edge: use the complement of the reverse edge.
            probability *= 1 - graph[successor][origin]
    return probability
def ts_and_fract_to_float(ts_int, ts_fract):
    """
    Combine an integral timestamp part with a thousandths fraction into a
    float.

    :param ts_int: integral (seconds) part of the timestamp
    :param ts_fract: fractional part expressed in thousandths
    :return: float equal to ts_int + ts_fract / 1000
    """
    # (The original docstring documented a nonexistent `ts` parameter.)
    return ts_int + (ts_fract / 1000.0)
def menus():
    """
    :return: [(action, menu_name)]
    """
    cancel_entry = ('cancel_label', 'Cancel the label')
    return [cancel_entry]
def htrc_get_titles(metadata, vol_id):
    """
    Gets titles of the volume given the metadata from a json file and
    volume id. Prints a notice and re-raises KeyError when lookups fail.
    """
    try:
        md = metadata[vol_id]
        first_entry = md[list(md.keys())[0]]
        return first_entry['titles']
    except KeyError:
        print('Volume ID not found:', vol_id)
        raise
def flag_name(status):
    """
    Determine the name for a flag file of the status indicated.

    :param str status: Name of status for which to create flag file name.
    :return str: Name of flag file corresponding to given status.
    """
    return ".".join([status, "flag"])
def top_row(matrix):
    """
    Return the first (top) row of a matrix as an immutable tuple.
    """
    first_row = matrix[0]
    return tuple(first_row)
def automatic_nch(f_min, f_max, spacing):
    """How many channels are available in the spectrum

    :param f_min Lowest frequenecy [Hz]
    :param f_max Highest frequency [Hz]
    :param spacing Channel width [Hz]
    :return Number of uniform channels

    >>> automatic_nch(191.325e12, 196.125e12, 50e9)
    96
    >>> automatic_nch(193.475e12, 193.525e12, 50e9)
    1
    """
    bandwidth = f_max - f_min
    return int(bandwidth // spacing)
def extract_txt(head):
    """ dummy formatting function """
    info = {
        'data': {
            'head': head,
            # retain tail for backwards compatibility with client
            'tail': [],
        },
    }
    return '', info
def generate_z_array(string):
    """
    Compute the Z-array of *string*.

    z_array[i] is the length of the longest substring starting at i that
    matches a prefix of the string (z_array[0] is left at 0 by convention).

    :param string: input text
    :return: list of ints with the same length as the input
    """
    n = len(string)
    z_array = [0] * n
    left_index, right_index = 0, 0  # [L, R]: bounds of the rightmost Z-box
    for i in range(1, n):
        if i > right_index:
            # Outside the current Z-box: match against the prefix naively.
            left_index, right_index = i, i
            while right_index < n and string[right_index-left_index] == string[right_index]:
                right_index += 1
            z_array[i] = right_index - left_index
            right_index -= 1
        else:
            # Inside the Z-box: k is the mirrored position in the prefix.
            k = i - left_index
            if z_array[k] < right_index - i + 1:
                # The mirrored value stays strictly within the box; reuse it.
                z_array[i] = z_array[k]
            else:
                # Otherwise extend the match past the box's right edge.
                left_index = i
                while right_index < n and string[right_index - left_index] == string[right_index]:
                    right_index += 1
                z_array[i] = right_index - left_index
                right_index -= 1
    return z_array
def _primary_artist(artist): """ Utitlity function that tries to only get the main artist of a song. Example: "Tyler featuring Xyz" would just return "tyler" """ artist = artist.casefold() artist = artist.split('featuring')[0].strip() artist = artist.split('/')[0].strip() artist = artist.split(' x ')[0].strip() return artist
def skip_while(index, max_index, skipping_condition):
    """Increments |index| until |skipping_condition|(|index|) is False.

    Returns:
      A pair of an integer indicating a line number after skipped, and a
      boolean value which is True if found a line which skipping_condition
      is False for (i.e. |max_index| was not reached).
    """
    found = True
    while skipping_condition(index):
        index += 1
        if index >= max_index:
            found = False
            break
    return index, found
def parse_cstimer_text(text: str) -> list:
    """
    Parses csTimer export text.

    Each solve line (after the 4-line header) carries its time as the
    second whitespace-separated token.

    :param text: raw csTimer text
    :return: list of parsed times
    """
    times = []
    for line in text.split("\n")[4:]:
        tokens = line.split()
        # Skip blank/short lines (e.g. the empty string produced by a
        # trailing newline) instead of raising IndexError as the original
        # one-liner did.
        if len(tokens) > 1:
            times.append(parse_time(tokens[1]))
    return times
def knot_hash(string):
    """Calculate the knot hash of *string* as a 32-character hex digest."""
    # Length sequence: input bytes plus the standard suffix.
    lengths = [ord(ch) for ch in string] + [17, 31, 73, 47, 23]
    rope = list(range(256))
    size = len(rope)
    position = 0
    skip = 0
    for _ in range(64):
        for length in lengths:
            # Reverse the circular segment [position, position + length).
            segment = [rope[(position + i) % size] for i in range(length)]
            for i, value in enumerate(reversed(segment)):
                rope[(position + i) % size] = value
            position += (length + skip) % size
            skip += 1
    # Dense hash: XOR each block of 16 values, render as two hex digits.
    digest = ""
    for block in range(16):
        xor = 0
        for offset in range(16):
            xor ^= rope[block * 16 + offset]
        digest += format(xor, '02x')
    return digest
def calculate_value(mask, value):
    """
    Calculates value with mask applied.

    :param mask: mask string of '0', '1' and 'X' characters (36 wide)
    :param value: int value
    :return: int value with '0'/'1' mask positions forced, 'X' left alone
    """
    # Work on the 36-bit binary representation as a list of characters.
    bits = list('{0:036b}'.format(value))
    for position, mask_bit in enumerate(mask):
        if mask_bit != 'X':
            bits[position] = mask_bit
    return int("".join(bits), 2)
def div_round_up(n, d):
    """Return ceil(n / d) for positive integers.

    :param n: numerator
    :param d: denominator (non-zero)
    :return: int ceiling of the quotient
    """
    # Integer floor division avoids the float rounding errors that the
    # original int((n + d - 1) / d) suffered for large operands (true
    # division produces a float with only ~53 bits of precision).
    return int((n + d - 1) // d)
def generate_slices(example, slice_functions, **kwargs):
    """Returns (slice_key, example) tuples based on provided slice functions.

    Args:
      example: An input example.
      slice_functions: An iterable of functions each of which takes as input
        an example (and zero or more kwargs) and returns a list of slice
        keys.
      **kwargs: Keyword arguments to pass to each of the slice_functions.

    Returns:
      A list containing a (slice_key, example) tuple for each slice_key
      that the slice_functions return.

    Raises:
      ValueError: when any slice function raises.
    """
    slice_keys = set()
    for slice_function in slice_functions:
        try:
            slice_keys.update(slice_function(example, **kwargs))
        except Exception as e:
            raise ValueError('One of the slice_functions %s raised an exception: %s.'
                             % (slice_function.__name__, repr(e)))
    return [(slice_key, example) for slice_key in slice_keys]
def get_text(prompt="", msg=None, input=input):
    """Prompt user to enter multiple lines of text.

    Reading stops at EOF (^D) or at a line consisting of a single '.';
    the collected lines are joined with newlines. The `input` callable
    is injectable for testing.
    """
    print((msg or "Enter text.") + " End with ^D or a '.' as first character.")
    collected = []
    reading = True
    while reading:
        try:
            line = input(prompt)
        except EOFError:
            reading = False
        else:
            if line == ".":  # dot on a line by itself also ends
                reading = False
            else:
                collected.append(line)
    return "\n".join(collected)
def create_slices(start, stop, step=None, length=1):
    """ Generate slices of time indexes

    Parameters
    ----------
    start : int
        Index where first slice should start.
    stop : int
        Index where last slice should maximally end.
    length : int
        Number of time sample included in a given slice.
    step: int | None
        Number of time samples separating two slices.
        If step = None, step = length.

    Returns
    -------
    slices : list
        List of slice objects.
    """
    # When no step is given, slices tile the range without overlap.
    if step is None:
        step = length
    starts = range(start, stop - length + 1, step)
    return [slice(begin, begin + length, 1) for begin in starts]
def remove_prefix(str, prefix):
    """Removes a prefix from a string.

    Args:
        str (str): Any string to be separated from a prefix.
        prefix (str): Part to be stripped off the front.

    Returns:
        str: String with the prefix removed. If the string doesn't start
            with the specified prefix, this is a no-op.
    """
    # NOTE(review): parameter name `str` shadows the builtin; kept for
    # backward compatibility with keyword callers.
    if not str.startswith(prefix):
        return str
    return str[len(prefix):]
def navbar_toggle(clicks, is_open):
    """Handle the collapsible function of the navbar

    Parameters
    ----------
    clicks: int
        How many times the toggler has been clicked
    is_open: bool
        State of the collapsible unit

    Returns
    ----------
    is_open: bool
        Opposite of the input state if any clicks were made,
        otherwise the state unchanged
    """
    # A falsy click count (0/None) means the toggler was never used.
    return not is_open if clicks else is_open
def _jupyter_server_extension_paths(): """Register julynter server extension""" return [{ "module": "julynter" }]
def non_handling(sentence, mainword, **kwargs): #Done with testing
    """Delete any word that starts with 'non' or delete any word that comes
    immediately after the standalone word 'non'. Prevents the term search from
    making mistakes on words like noncalcified, nontuberculous, noninfectious,
    etc.

    Returns a (changed, sentence) pair; when changed, the sentence is
    rebuilt with a leading and trailing space.

    Bug fix: the old code removed items from `sentlist` while iterating
    it, which skipped the element after each removal and left some
    'non'-words in place when several occurred in one sentence.
    """
    if 'non' not in sentence:
        return False, sentence
    words = sentence.split()
    if ' non ' in sentence:  # i.e., standalone word ' non '
        # Drop 'non' and the word immediately following it.
        # NOTE: only the first occurrence is handled (prior behavior kept).
        idx = words.index('non')
        return True, ' ' + ' '.join(words[:idx] + words[idx + 2:]) + ' '
    # 'non' is prefixing other words: drop every word containing 'non'.
    kept = [w for w in words if 'non' not in w]
    return True, ' ' + ' '.join(kept) + ' '
def index(List, i):
    """Return the element of `List` at position `i`, coercing `i` to int."""
    position = int(i)
    return List[position]
def is_dataframe_like(df):
    """Heuristically decide whether `df` looks like a pandas DataFrame.

    Requires frame-like methods on the type, frame-like attributes on the
    instance, and the absence of Series-only attributes on the type.
    """
    cls = type(df)
    frame_methods = ("groupby", "head", "merge", "mean")
    frame_attrs = ("dtypes", "columns")
    series_only = ("name", "dtype")
    has_methods = all(hasattr(cls, m) for m in frame_methods)
    has_attrs = all(hasattr(df, a) for a in frame_attrs)
    looks_like_series = any(hasattr(cls, a) for a in series_only)
    return has_methods and has_attrs and not looks_like_series
def base36encode(number: int) -> str:
    """Convert from Base10 to Base36.

    Adapted from https://en.wikipedia.org/wiki/Base36#Python_implementation

    Bug fix: the previous version returned '' for 0 because the
    division loop never ran; 0 now correctly encodes as '0'.
    """
    chars = "0123456789abcdefghijklmnopqrstuvwxyz"
    if number == 0:
        return "0"
    sign = "-" if number < 0 else ""
    number = abs(number)
    digits = []
    while number > 0:
        number, remainder = divmod(number, 36)
        digits.append(chars[remainder])
    # Digits come out least-significant first; reverse before joining.
    return sign + "".join(reversed(digits))
def combine_dicts(dictionary, extras):
    """ Similar to {**dictionary, **extras} in Python 3

    Args:
        dictionary (dict): A dictionary
        extras (dict): Another dictionary

    Returns:
        dict: A new dictionary with both key and value pairs; on key
        clashes the value from `extras` wins. Neither input is mutated.
    """
    merged = {}
    merged.update(dictionary)
    merged.update(extras)
    return merged
def mean(target_value_list):
    """Calculate the arithmetic mean of the values.

    Raises ZeroDivisionError for an empty list (unchanged behavior).
    """
    total = sum(target_value_list)
    count = len(target_value_list)
    return total / count
def escapeToXml(text, isattrib = 0):
    """ Escape text to proper XML form, per section 2.3 in the XML specification.

    @type text: C{str}
    @param text: Text to escape
    @type isattrib: C{bool}
    @param isattrib: Triggers escaping of characters necessary for use as
                     attribute values
    """
    # '&' must be escaped first so the later entities are not double-escaped.
    replacements = [("&", "&amp;"), ("<", "&lt;"), (">", "&gt;")]
    if isattrib == 1:
        replacements += [("'", "&apos;"), ("\"", "&quot;")]
    for raw, entity in replacements:
        text = text.replace(raw, entity)
    return text
def set_size(width, fraction=1, subplots=(1, 1)):
    """Set figure dimensions to avoid scaling in LaTeX.

    Parameters
    ----------
    width: float or string
        Document width in points, or string of predefined document type
        ("thesis" or "beamer")
    fraction: float, optional
        Fraction of the width which you wish the figure to occupy
    subplots: array-like, optional
        The number of rows and columns of subplots.

    Returns
    -------
    fig_dim: tuple
        Dimensions of figure in inches
    """
    # Known document presets (widths in pt); anything else is taken as pt.
    presets = {"thesis": 426.79135, "beamer": 307.28987}
    width_pt = presets.get(width, width)

    inches_per_pt = 1 / 72.27  # LaTeX pt -> inch
    # Golden ratio gives an aesthetically pleasing height.
    # https://disq.us/p/2940ij3
    golden_ratio = (5 ** 0.5 - 1) / 2

    fig_width_in = width_pt * fraction * inches_per_pt
    fig_height_in = fig_width_in * golden_ratio * (subplots[0] / subplots[1])
    return (fig_width_in, fig_height_in)
def cdr(pair):
    """Return the SECOND element of a Church-encoded pair.

    `pair` is expected to be a closure that applies a two-argument
    function to its stored elements (the usual functional cons cell).

    Fixes: the old docstring incorrectly said "first element" (that is
    `car`); the temporary-array plumbing is also unnecessary — passing a
    selector that returns its second argument is equivalent.
    """
    return pair(lambda first, second: second)
def gen_output(matrix):
    """
    Flatten a 4x4 matrix into a 16-byte array represented as integers.

    Each value is routed through bytes(), which validates it lies in
    range(256); iterating the bytes object yields plain ints.
    """
    flat = [cell for row in matrix for cell in row]
    return list(bytes(flat))
def _check(isdsAppliance, serverID, action): """ Check if serverID one of these acceptable values: directoryserver directoryadminserver directorywat directoryintegrator directoryintegratorscimtarget scimservice Note: only directoryserver supports "startconfig" action """ if serverID == 'directoryserver': if action == 'startconfig': return True return True elif serverID == 'directoryadminserver': if action == 'startconfig': return False return True elif serverID == 'directorywat': if action == 'startconfig': return False return True elif serverID == 'directoryintegrator': if action == 'startconfig': return False return True elif serverID == 'directoryintegratorscimtarget': if action == 'startconfig': return False return True elif serverID == 'scimservice': if action == 'startconfig': return False return True else: return False
def tokenize(str_:str, sep=[' ', '\n'], by=["([{", ")]}", "$`'\""], start=0, strip='', keep_empty=True):
    """
    Split the string `str_` by elements in `sep`, but keep enclosed objects not split.

    Args:
        str_: The string to tokenize.
        sep: Separator string(s); a bare string is treated as a single separator.
        by: Bracket specification of up to three strings:
            [opening chars, closing chars, symmetric quote chars].
            With 2 items the quote set is empty; with 1 item only
            symmetric quotes are used.
        start: Index at which scanning begins.
        strip: Characters stripped from each emitted token.
        keep_empty: When False, tokens that are empty after stripping are dropped.

    Returns:
        List of token strings. Scanning stops early at an unmatched
        closing bracket.

    NOTE(review): `sep` and `by` are mutable default arguments; they are
    not mutated here, but this is fragile — consider tuples.

    Example:
    ----------
    >>> tokenize("function(something inside), something else. ")
    ["function(something inside),", "something", "else.", ""]
    """
    if isinstance(sep, str): sep = [sep]
    # Unpack the bracket spec into opening/closing/symmetric-quote sets.
    if len(by) == 3: left, right, both = by
    elif len(by) == 2: left, right = by; both = ""
    elif len(by) == 1: left = ""; right = ""; both = by[0]
    else: raise TypeError("Invalid argument `by` for function `tokenize`. ")
    # depth[c] counts currently-open brackets/quotes keyed by their
    # closing character; depth['all'] is the total nesting level.
    depth = {'all': 0}
    tokens = []
    p = start  # start index of the token currently being scanned
    for i in range(start, len(str_)):
        s = str_[i]
        both_done = False
        if s in right:
            # Closing bracket with no matching opener: stop tokenizing here.
            if depth.get(s, 0) == 0: break
            assert depth[s] > 0 and depth['all'] > 0
            depth[s] -= 1
            depth['all'] -= 1
        elif s in both and str_[i-1] != '\\':
            # Symmetric quote (not backslash-escaped) may close an open one.
            # NOTE(review): at i == 0 the escape check inspects str_[-1]
            # (the LAST character of the string) — confirm this is intended.
            depth.setdefault(s, 0)
            if depth[s] > 0:
                depth[s] -= 1
                depth['all'] -= 1
                both_done = True
        if depth['all'] == 0:
            # Outside any bracket/quote: emit a token if a separator starts here.
            for x in sep:
                if str_[i:i + len(x)] == x:
                    t = str_[p:i].strip(strip)
                    if keep_empty or t != '':
                        tokens.append(t)
                    p = i + len(x)
        if s in left:
            # Opening bracket: track it under its matching closer.
            r = right[left.index(s)]
            depth.setdefault(r, 0)
            depth[r] += 1
            depth['all'] += 1
        elif both_done:
            pass  # quote was closed above; do not immediately reopen it
        elif s in both and str_[i-1] != '\\':
            # Unescaped symmetric quote opens a quoted region.
            depth.setdefault(s, 0)
            if depth[s] == 0:
                depth[s] += 1
                depth['all'] += 1
    # Emit the trailing token after the last separator (or break point).
    t = str_[p:].strip(strip)
    if keep_empty or t != '':
        tokens.append(t)
    return tokens
def bundle_or_dylib_filter(module):
    """
    Return True only if the module's `filetype` attribute marks a Mach-O
    bundle or dylib; False otherwise (including modules with no
    `filetype` attribute at all).
    """
    filetype = getattr(module, 'filetype', None)
    return filetype in ('bundle', 'dylib')
def merge(L1: list, L2: list) -> list:
    """Merge sorted lists L1 and L2 into a new list and return that new list.

    >>> merge([1, 3, 4, 6], [1, 2, 5, 7])
    [1, 1, 2, 3, 4, 5, 6, 7]
    """
    newL = []
    i1 = 0
    i2 = 0
    # Repeatedly copy the smaller current element into newL; once one
    # list is exhausted, copy the remainder of the other.
    # (Bug fix: the previous condition `i1 != len(L1) or (...)` drained
    # all of L1 first and then raised IndexError evaluating L1[i1] —
    # even the doctest above failed. Take from L1 only when L2 is
    # exhausted, or L1's head is <= L2's head.)
    while not (i1 == len(L1) and i2 == len(L2)):
        if i2 == len(L2) or (i1 != len(L1) and L1[i1] <= L2[i2]):
            newL.append(L1[i1])
            i1 += 1
        else:
            newL.append(L2[i2])
            i2 += 1
    return newL
def polyreduce(a, root):
    """
    Given that x = r is a root of the n'th degree polynomial
    p(x) = (x-r)q(x), divide p(x) by the linear factor (x-r) using
    synthetic division (the same scheme as Horner evaluation) and return
    the (n-1)'th degree quotient
        q(x) = c[0] + c[1]x + ... + c[n-1]x^{n-1}.

    `a` holds p's coefficients lowest degree first and is not modified.
    """
    partials = []
    acc = 0
    # Horner's scheme from the highest-degree coefficient down; each
    # partial value is a coefficient of the quotient (plus the remainder).
    for coef in reversed(a):
        acc = acc * root + coef
        partials.append(acc)
    partials.reverse()
    # partials[0] is the remainder (0 for an exact root); drop it.
    return partials[1:]
def transform_objects(objects):
    """Transform a mapping of instance id -> object dict into a list.

    Each output dict is a copy of the source dict with an added
    `instance_id` key; objects whose 'destroyed' field is None also get
    their destruction coordinates blanked out.
    """
    transformed = []
    for instance_id, obj in objects.items():
        record = dict(obj, instance_id=instance_id)
        if record['destroyed'] is None:
            # Never destroyed: no meaningful destruction location.
            record['destroyed_x'] = None
            record['destroyed_y'] = None
        transformed.append(record)
    return transformed