def calc_fs_usage(fs_info): """Return the used filesystem space in bytes. Reserved blocks are treated as used blocks because the primary goal of this usage metric is to indicate whether the container has to be resized. """ if not fs_info: return 0 blk_cnt = int(fs_info['block count']) free_blk_cnt = int(fs_info['free blocks']) blk_size = int(fs_info['block size']) return (blk_cnt - free_blk_cnt) * blk_size
def get_plant_health(count_dict): """Use the positive/negative ratios to pick the plant image path to be added into the HTML."""
    # The larger the dominant ratio, the stronger the happy/sad image chosen.
    pos_ratio = count_dict['pRatio']
    neg_ratio = count_dict['nRatio']
    if pos_ratio > neg_ratio:
        if pos_ratio < 0.3:
            num = 1
        elif pos_ratio < 0.5:
            num = 2
        elif pos_ratio < 0.8:
            num = 3
        else:
            num = 4
        return f"/static/images/happyplant{num}.PNG"
    else:
        if neg_ratio < 0.3:
            num = 1
        elif neg_ratio < 0.5:
            num = 2
        elif neg_ratio < 0.8:
            num = 3
        else:
            num = 4
        return f"/static/images/sadplant{num}.PNG"
def calc_crc(data): """Calculate frame CRC.""" crc = 0x2d for b in data: crc += b return crc & 0xff
def _get_json_entry(json_object, attribute_name, caller):
    """
    :param json_object: dict[str, Any]
    :param attribute_name: str, attribute name
    :param caller: str, name of the caller function
    :return: the value stored under attribute_name
    """
    if attribute_name in json_object:
        return json_object[attribute_name]
    raise Exception('Cannot find attribute {} in {}.'.format(attribute_name, caller))
def is_dir(f):
    """ Utility function to test if something is a directory. """
    return f['mimeType'] == 'application/vnd.google-apps.folder'
def get_missing_param_response(data=None): """ Returns error response for missing parameters :param str data: message :return: response :rtype: object """ return {"status": 400, "message": "Missing query parameter.", "data": data}
def normalize_element(elem): """ Return a surface form of the given question element. the output should be properly able to precede a predicate (or blank otherwise) """ return elem.replace("_", " ") \ if (elem != "_")\ else ""
def epc_calc_dual_phase(reg_dict): """ Returns True if dual phase mode is enabled, False otherwise Parameters ---------- reg_dict : dict The dictionary that contains all the register information Returns ---------- bool True if dual phase enabled, false otherwise """ dual_phase = (reg_dict["mod_sel"][2] == 0) and ( reg_dict["num_dcs"][2] == 1) and (reg_dict["pixel_mode"][2] == 1) and (reg_dict["dual_int_mode"][2] == 1) return dual_phase
def get_success_message(action, task_uri): """ message for different types of raid actions """ msg = "Successfully submitted {0} volume task.".format(action) status_message = {"msg": msg} if task_uri is not None: task_id = task_uri.split("/")[-1] status_message.update({"task_uri": task_uri, "task_id": task_id}) return status_message
def getTotalCombinationNumRecursion(n, total=0):
    """
    The old calculation was wrong; we need the total number of possible
    combinations, which is Xn * ... * X2 * X1 = total.
    This example actually shows the math involved.
    """
    if total == 0:
        total = n
    if n <= 1:
        return total
    n -= 1
    total *= n
    return getTotalCombinationNumRecursion(n, total)
def natsort_key(val, key, string_func, bytes_func, num_func): """ Key to sort strings and numbers naturally. It works by splitting the string into components of strings and numbers, and then converting the numbers into actual ints or floats. Parameters ---------- val : str | unicode | bytes | int | float | iterable key : callable | None A key to apply to the *val* before any other operations are performed. string_func : callable If *val* (or the output of *key* if given) is of type *str*, this function will be applied to it. The function must return a tuple. bytes_func : callable If *val* (or the output of *key* if given) is of type *bytes*, this function will be applied to it. The function must return a tuple. num_func : callable If *val* (or the output of *key* if given) is not of type *bytes*, *str*, nor is iterable, this function will be applied to it. The function must return a tuple. Returns ------- out : tuple The string split into its string and numeric components. It *always* starts with a string, and then alternates between numbers and strings (unless it was applied recursively, in which case it will return tuples of tuples, but the lowest-level tuples will then *always* start with a string etc.). See Also -------- parse_string_factory parse_bytes_factory parse_number_factory """ # Apply key if needed if key is not None: val = key(val) # Assume the input are strings, which is the most common case try: return string_func(val) except (TypeError, AttributeError): # If bytes type, use the bytes_func if type(val) in (bytes,): return bytes_func(val) # Otherwise, assume it is an iterable that must be parsed recursively. # Do not apply the key recursively. try: return tuple( natsort_key(x, None, string_func, bytes_func, num_func) for x in val ) # If that failed, it must be a number. except TypeError: return num_func(val)
def common_prefix(strings): """ Find the longest string that is a prefix of all the strings. """ if not strings: return '' prefix = strings[0] for s in strings: if len(s) < len(prefix): prefix = prefix[:len(s)] if not prefix: return '' for i in range(len(prefix)): if prefix[i] != s[i]: prefix = prefix[:i] break return prefix
def vadd(vector1, vector2):
    """ add vectors """
    return (vector1[0] + vector2[0], vector1[1] + vector2[1])
def _find_latest_versions(experiments): """Find the latest versions of the experiments""" leaf_experiments = {} for experiment in experiments: name = experiment["name"] version = experiment["version"] if name in leaf_experiments: leaf_experiments[name] = max(leaf_experiments[name], version) else: leaf_experiments[name] = version return leaf_experiments
def fix_url(url):
    """Add http:// prefix to url if it doesn't have one."""
    if "://" not in url:
        if url.startswith("//"):
            url = "http:" + url
        else:
            url = "http://" + url
    return url
def getKeys(header): """ A function to get LASTTOKEN from the url header Attributes ---------- header : dictionary a dictionary of HTTP headers Returns ---------- cookie : Set-Cookie header as a dictionary """ cookie = {} keySections = header['Set-Cookie'].split(", ") for x in keySections: cookie[x.split('=')[0]] = x.split('=')[1] return cookie
def get_mod_func(callback): """Convert a fully-qualified module.function name to (module, function) - stolen from Django""" try: dot = callback.rindex('.') except ValueError: return (callback, '') return (callback[:dot], callback[dot+1:])
def convert_to_bool(string):
    """
    Converts a string to bool.

    :param string: value to convert
    :type string: str or bool
    :return: True or False
    """
    if isinstance(string, bool):
        return string
    return string in ['true', 'True', '1']
def getFilteredDivs(divs): """routine to filter elements corresponding to a comment/reply""" filtered = list() for div in divs: if len(div.get("class")) == 1: filtered.append(div) return filtered
def func_right_elegible_first(k_idx, k_right, s_right, cap_right_closed, closed_ZG_right):
    """Computes an approximation for the expected value of $Z_{right}^{eF}$ (right eligible first).

    Computes an approximation for the expected value of the o.f. value reached by the
    eligible-first algorithm for the right subproblem using expression (53b) in Corollary 26.

    Args:
        k_idx: Int. Knapsack capacity.
        k_right: float. The expected value of the slack for the right subproblem.
        s_right: Expected value of the splitting item for the right subproblem.
        cap_right_closed: Int. Expected capacity of the right subproblem.
        closed_ZG_right: float. The expected value of Z^G (o.f. value of the greedy
            algorithm) for the right subproblem.

    Returns:
        closed_EF_right: float. An approximation for the eligible-first solution on the
            right subproblem.
    """
    mu = k_idx + 1  # Quantity of Items
    lamb = mu / 2  # Number of Items in each subproblem
    k_0 = k_right  # Updating the split and slack values for the right problem
    s_0 = s_right
    kp = cap_right_closed
    closed_EF_right = k_0 * (kp - 2 * s_0 - 1) * (1 - (1 - k_0 / kp) ** (lamb - s_0)) / 4
    closed_EF_right = closed_EF_right - kp * (1 - k_0 / kp) * (
        1 - (1 + (kp - s_0 - 1) * k_0 / kp) * (1 - k_0 / kp) ** (lamb - s_0 + 1)) / (4 * k_0)
    closed_EF_right = closed_EF_right + closed_ZG_right  # Computing the eligible-first solution for the right subproblem
    return closed_EF_right
def r_split(_, text, char): """ Strips string to left of and including specified characters.""" return text.split(char)[-1]
def _decicelsius_to_kelvins(temperatures_decicelsius):
    """Converts temperatures from decidegrees Celsius to Kelvins.

    :param temperatures_decicelsius: numpy array of temperatures in decidegrees Celsius.
    :return: temperatures_kelvins: numpy array of temperatures in Kelvins, with the same
        shape as the input.
    """
    return temperatures_decicelsius * 0.1 + 273.15
def get_brief_description(description): """Get brief description from paragraphs of command description.""" if description: return description[0] else: return 'No description available.'
def complex_impedance(z, XR): """ Returns the complex impedance from z (in %) and the X/R ratio. """ z = float(abs(z)) XR = float(abs(XR)) real = (z ** 2 / (1 + XR ** 2)) ** 0.5 try: imag = (z ** 2 / (1 + 1 / XR ** 2)) ** 0.5 except ZeroDivisionError: imag = 0.0 return complex(real, imag)
def is_strictly_increasing(timestamps): """Check if the timestamps are strictly increasing. "Strictly increasing" means that the passed in values are increasing as given and they are 100ps apart from one to the next. Args: timestamps (list): An ascendingly sorted list of timestamps (in picoseconds). Returns: True if the timestamps fit the "strictly increasing" definition above, False otherwise. """ if timestamps is None or len(timestamps) == 0: return True return all(x < y and x + 100 == y for x, y in zip(timestamps, timestamps[1:]))
def find_lemmas(player_name, golf_dic_list): """ find all the lemmas associated with the given ibm names return a list of name. for the golf_dic all lemmas are located at index 0 """ lemma = [] for golf_p_list in golf_dic_list: for k in golf_p_list: if player_name.strip().lower() == k.strip().lower(): lemma = lemma + [golf_p_list[0]] break return lemma
def convertToBashStyle(strCommand):
    """
    Strip Windows escape characters from the command line; in the end they won't
    be used in a shell. The resulting command is bash/zsh-like.

    Args:
        strCommand (str): command generated by mkvtoolnix-gui

    Returns:
        str: cli command converted to bash style
    """
    strTmp = strCommand
    if strTmp.find(r'^"^(^"') >= 0:
        # This is for cmd in Windows
        strTmp = (
            strTmp.replace("'", r"'\''")
            .replace("^", "")
            .replace("/", "\\")
            .replace('"', "'")
        )
    return str(strTmp)
def get_affix(text): """ This method gets the affix information :param str text: Input text. """ return " ".join( [word[-4:] if len(word) >= 4 else word for word in text.split()])
def date_format_to_human(value): """ Convert Python format date to human. Example: >>> date_format_to_human('%Y-%m-%d') YYYY-MM-DD :param value: Date format example: %Y-%m """ maps = { '%y': 'YY', '%Y': 'YYYY', '%m': 'MM', '%d': 'DD', } for k, v in maps.items(): value = value.replace(k, v) return value
def convert_headers_str_to_dict(headers_str: str) -> dict: """Convert headers in str format to dict.""" headers_gen = (header.split(': ') for header in headers_str.split('\r\n')) headers_dict = {header[0]: header[1] for header in headers_gen} return headers_dict
def getcardinals(minv, maxv, stepv): """ Get lats and longs to mark on map :param minv: :type minv: float :param maxv: :type maxv: float :param stepv: :type stepv: int :return: :rtype: list """ cardinals = [val for val in range(minv, maxv) if val % stepv == 0] if len(cardinals) > 10: return [ cardinal[1] for cardinal in enumerate(cardinals) if cardinal[0] % 2 > 0 ] return cardinals
def order_lro_results(doc_id_order, combined): """Order results in the order the user passed them in. For long running operations, we need to explicitly pass in the document ids since the initial request will no longer be available. :param doc_id_order: A list of document IDs from the original request. :param combined: A combined list of the results | errors :return: In order list of results | errors (if any) """ mapping = [(item.id, item) for item in combined] ordered_response = [ i[1] for i in sorted(mapping, key=lambda m: doc_id_order.index(m[0])) ] return ordered_response
def parse_list(raw): """Takes a string representation of a comma separated list and returns a list of elements with trimmed whitespace. Args: raw (str): Comma separated list as a string. Returns: List[str]: List of trimmed strings. """ return [word.strip() for word in raw.split(",")]
def _elide_string_middle(text: str, max_length: int) -> str: """Replace the middle of the text with ellipses to shorten text to the desired length. Args: text: Text to shorten. max_length: Maximum allowable length of the string. Returns: The elided text, e.g. "Some really long tex ... the end." """ if len(text) <= max_length: return text half_len = (max_length - 5) // 2 # Length of text on either side. return '{} ... {}'.format(text[:half_len], text[-half_len:])
def file_size(value, fmt="{value:.1f} {suffix}", si=False): """ Takes a raw number of bytes and returns a humanized filesize. """ if si: base = 1000 suffixes = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") else: base = 1024 suffixes = ("B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB") max_suffix_index = len(suffixes) - 1 for i, suffix in enumerate(suffixes): unit = base ** (i + 1) if value < unit or i == max_suffix_index: return fmt.format(value=(base * value / unit), suffix=suffix)
def bounding_box(points): """ Return the coordinates for the box that contains all POINTS. """ min_x = min([point[0] for point in points]) min_y = min([point[1] for point in points]) max_x = max([point[0] for point in points]) max_y = max([point[1] for point in points]) return [(min_x,min_y),(max_x,max_y)]
def jenkins_node_names(name, count): """Returns the names for `count` production jenkins node prefixed by `name`.""" return ["%s-%s" % (name, i) for i in range(1, count+1)]
def build_hierarchy(objects): """Returns parent child relationships, skipping.""" objects_set = set(objects) par_lookup = {} def test_parent(parent): while (parent is not None) and (parent not in objects_set): parent = parent.parent return parent for obj in objects: par_lookup.setdefault(test_parent(obj.parent), []).append((obj, [])) for parent, children in par_lookup.items(): for obj, subchildren in children: subchildren[:] = par_lookup.get(obj, []) return par_lookup.get(None, [])
def max_ordered_from_domain(domain): """ Testdata: ("3", "2c", "2b", "2a", "1") or ("ORIGIN", "not ok", "LIMIT_VALUE", "ok") yield domain[-1] -> "1" or domain[-1] -> "ok" """ return domain[-1] if domain else "NULL"
def format_dictionary_keys(dictionary, formatter): """ Returns a new dictionaries whose keys have been passed through `formatter`, which should be a function that formats strings. """ new = {} for key, value in dictionary.items(): assert isinstance(key, str) new[formatter(key)] = value return new
def _exactly_one_specified(*values): """Make sure exactly one of ``values`` is truthy. Args: values (Tuple[Any, ...]): Some values to be checked. Returns: bool: Indicating if exactly one of ``values`` was truthy. """ count = sum(1 for value in values if value) return count == 1
def get_chunks(act_viewport, pred_viewport, frame_nos, max_frame, fps): """ For chunks of fps number of frames for actual as well as predicted viewports """ act_tiles,pred_tiles,chunk_frames = [],[],[] chunk_size = fps number_of_chunks = int(len(act_viewport) / chunk_size) for i in range(number_of_chunks): act_tiles.append(act_viewport[i*chunk_size : (i+1)*chunk_size]) pred_tiles.append(pred_viewport[i*chunk_size : (i+1)*chunk_size]) chunk_frames.append(frame_nos[i*chunk_size : (i+1)*chunk_size]) act_tiles.append(act_viewport[number_of_chunks*chunk_size :]) pred_tiles.append(pred_viewport[number_of_chunks*chunk_size :]) chunk_frames.append(frame_nos[number_of_chunks*chunk_size :]) return act_tiles, pred_tiles, chunk_frames
def get_donations(values):
    """
    Accumulate the best possible donation sum at any point in the array.

    This is simply max(best(i-3), best(i-2)) + donation(i) with
        best(0) = values[0]
        best(1) = values[1]
        best(2) = values[0] + values[2]
    """
    best_values = {0: values[0], 1: values[1], 2: values[0] + values[2]}
    for idx in range(3, len(values)):
        current = values[idx] + max(best_values[idx - 3], best_values[idx - 2])
        best_values[idx] = current
    return best_values[len(values) - 1]
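# A quick sanity check of the recurrence above, as a hedged usage sketch; the input
# lists are illustrative values, not taken from the original source.
assert get_donations([1, 2, 3, 4, 5]) == 9   # pick indices 0, 2, 4
assert get_donations([5, 1, 1, 5]) == 10     # pick indices 0, 3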
def join_if_not_empty(sep: str, str1: str, str2: str) -> str: """ >>> join_if_not_empty('SEP', '', '') '' >>> join_if_not_empty('SEP', 'a', '') 'a' >>> join_if_not_empty('SEP', 'a', ' ') 'aSEP ' >>> join_if_not_empty('SEP', 'a', 'b') 'aSEPb' """ if str1 != '' and str2 != '': result = sep.join((str1, str2)) else: result = max(str1, str2) return result
def quad_gradient_to_BACT(b1_gradient, l): """Convert the b1_gradient (T/m) attribute for a quad into SLAC BACT kG units""" return -b1_gradient*10.0*l
def get_conn_info(core_connectivity_info_list, match): """ Get core connectivity info objects from the list. Matching any the `match` argument. :param core_connectivity_info_list: the connectivity info object list :param match: the value to match against either the Core Connectivity Info `id`, `host`, `port`, or `metadata` values :return: the list of zero or more matching connectivity info objects """ conn_info = list() if not match: return conn_info for cil in core_connectivity_info_list: for ci in cil.connectivityInfoList: if match == ci.id or match == ci.host or match == ci.port or \ match == ci.metadata: conn_info.append(ci) return conn_info
def is_label_definition(line):
    """Returns if the line is a LABEL node."""
    return line.startswith("LABEL ")
def sample_builder(samples): """ Given a dictionary with value: count pairs, build a list. """ data = [] for key in samples: data.extend([key] * samples[key]) data.sort() return data
def validate_yes_no(value): """Validate yes-no question""" if isinstance(value, bool): return value if value is None or value == "<ask>": return value check_value = value.strip().lower() if check_value in ("", "y", "yes", "t", "true", "1"): return True if check_value in ("n", "no", "f", "false", "0"): return False return None
def get_x_color(x_color_tuple): """ Extracts 3 individual integers from a tuple and returns them. Parameter: x_color_tuple: a tuple of red, green and blue values for x coordinates Programs that use this function: - Vertical Striper 1 through 16, but get_x_color is called directly by the following functions. - stripe_vertically - stripe_vertically_alternate - stripe_vertically_alternate_2 - stripe_vertically_reverse - stripe_vertically_reverse_alt - stripe_vertically_reverse_alt_2 """ return int(x_color_tuple[0]), int(x_color_tuple[1]), \ int(x_color_tuple[2])
def first_elem(s): """ Extracts first element of pandas Series s, or returns s if not a series. """ try: return s.iloc[0] except AttributeError: return s
def gcd_recur(a, b):
    """
    Euclid's trick to calculate gcd: perfect for applying recursion.

    a, b: positive integers
    returns: a positive integer, the greatest common divisor of a & b.
    """
    if b == 0:
        return a
    return gcd_recur(b, a % b)
def union_values(dictionary): """Given a dictionary with values that are Collections, return their union. Arguments: dictionary (dict): dictionary whose values are all collections. Return: (set): the union of all collections in the dictionary's values. """ sets = [set(p) for p in dictionary.values()] return sorted(set.union(*sets)) if sets else set()
def _code_in_list(code, codelist): """Tells if `code` is contained in `codelist` Examples: - 401 is not contained in ['3xx', '404', '5xx'] - 404 is contained in ['3xx', '404', '5xx'] - 503 is contained in ['3xx', '404', '5xx'] """ # status codes to exclude exact_codes = [code for code in codelist if 'x' not in code] if str(code) in exact_codes: return True # classes of status code to exclude class_codes = [code[0] for code in codelist if 'x' in code] if str(code)[0] in class_codes: return True return False
def coord_to_string(coordinates): """Transform coordinates into a string for API""" if type(coordinates) == tuple: string = ','.join(list(map(str, coordinates))) elif type(coordinates) == dict: string = ','.join(list(map(str, coordinates.values()))) elif (type(coordinates) == list) and (len(coordinates) == 2): string = ','.join(list(map(str, coordinates))) elif type(coordinates) == str: string = coordinates else: print('invalid coordinate type') string = None return string
def set_attributes_to_descend(meta, traverse_limit): """Set which attributes should have values inferred from ancestral taxa.""" desc_attrs = set() desc_attr_limits = {} for key, value in meta.items(): if "traverse" in value and value["traverse"]: if "traverse_direction" not in value or value["traverse_direction"] in ( "down", "both", ): desc_attrs.add(key) if "traverse_limit" in value: desc_attr_limits.update({key: value["traverse_limit"]}) else: desc_attr_limits.update({key: traverse_limit}) return desc_attrs, desc_attr_limits
def coerce_types(T1, T2): """Coerce types T1 and T2 to a common type. Coercion is performed according to this table, where "N/A" means that a TypeError exception is raised. +----------+-----------+-----------+-----------+----------+ | | int | Fraction | Decimal | float | +----------+-----------+-----------+-----------+----------+ | int | int | Fraction | Decimal | float | | Fraction | Fraction | Fraction | N/A | float | | Decimal | Decimal | N/A | Decimal | float | | float | float | float | float | float | +----------+-----------+-----------+-----------+----------+ Subclasses trump their parent class; two subclasses of the same base class will be coerced to the second of the two. """ # Get the common/fast cases out of the way first. if T1 is T2: return T1 if T1 is int: return T2 if T2 is int: return T1 # Subclasses trump their parent class. if issubclass(T2, T1): return T2 if issubclass(T1, T2): return T1 # Floats trump everything else. if issubclass(T2, float): return T2 if issubclass(T1, float): return T1 # Subclasses of the same base class give priority to the second. if T1.__base__ is T2.__base__: return T2 # Otherwise, just give up. raise TypeError('cannot coerce types %r and %r' % (T1, T2))
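# Hedged usage sketch of coerce_types above; the type pairs are chosen for
# illustration and are not taken from the original source.
from fractions import Fraction

assert coerce_types(int, float) is float        # int defers to the other type
assert coerce_types(Fraction, int) is Fraction  # int defers to the other type
assert coerce_types(bool, int) is bool          # a subclass trumps its parent class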
def odml_tuple_export(odml_tuples): """ Converts odml style tuples to a parsable string representation. Every tuple is represented by brackets '()'. The individual elements of a tuple are separated by a semicolon ';'. The individual tuples are separated by a comma ','. An odml 3-tuple list of 2 tuples would be serialized to: "[(11;12;13),(21;22;23)]". :param odml_tuples: List of odml style tuples. :return: string """ str_tuples = "" for val in odml_tuples: str_val = ";".join(val) if str_tuples: str_tuples = "%s,(%s)" % (str_tuples, str_val) else: str_tuples = "(%s)" % str_val return "[%s]" % str_tuples
def fizz_buzz(i, prediction): """ Unpack the predictions """ return [str(i), "fizz", "buzz", "fizzbuzz"][prediction]
def rivers_with_station(stations):
    """Receives a list of station objects and returns a set of names of rivers
    that have at least one station on them."""
    rivers = set()
    for station in stations:
        rivers.add(station.river)
    return rivers
def get_mac_addr_from_dbus_path(path): """Return the mac addres from a dev_XX_XX_XX_XX_XX_XX dbus path""" return path.split("/")[-1].replace("dev_", '').replace("_", ":")
def remove_all(value, node):
    """
    Remove all elements in a linked list that match a value.

    :param value: value to look for in the list
    :param node: head node, start of the list
    :return: node: head of the resulting linked list
    """
    if node is None:
        return None
    # Clean the rest of the list first, then decide whether to keep this node.
    node.next_node = remove_all(value, node.next_node)
    return node.next_node if node.value == value else node
def verification_storage_location(l1_block_id: str, level_received_from: int, chain_id: str) -> str: """ Format the path for the storage of a verification object Args: l1_block_id: the id of the L1 block which this verification verifies level_received_from: the level from which this verification was received chain_id: the internal id of the chain who sent this verification Returns: path: the formatted path """ return f"BLOCK/{l1_block_id}-l{level_received_from}-{chain_id}"
def pg2dtypes(pgtype): """Returns equivalent dtype for input `pgtype`.""" mapping = { 'date': 'datetime64[ns]', 'number': 'float64', 'string': 'object', 'boolean': 'bool', 'geometry': 'object', } return mapping.get(str(pgtype), 'object')
def clean(s): """ Adds escape characters for a string. """ s = s.replace(r'{', r'\{') s = s.replace(r'}', r'\}') s = s.replace(r'_', r'\_') s = s.replace(r'%', r'\%') return s
def _fix_uppercase(kwargs): """ Properly handle uppercase arguments which are normalized by click. """ if 'c' in kwargs: kwargs['C'] = kwargs.pop('c') return kwargs
def count_label(SEG_label): """ Parameters ---------- SEG_label : list label list. Returns ------- count_list : array """ N, S, V, F, Q = 0, 0, 0, 0, 0 for i in range(len(SEG_label)): count_N = SEG_label[i].count('N') count_S = SEG_label[i].count('S') count_V = SEG_label[i].count('V') count_F = SEG_label[i].count('F') count_Q = SEG_label[i].count('Q') N += count_N S += count_S V += count_V F += count_F Q += count_Q count_list = [N, S, V, F, Q] return count_list
def _iscomment(line):
    """
    Determine if a line is a comment line.

    A valid (non-comment) line contains at least three words, with the first
    two being integers. Note that Python 2 and 3 deal with strings differently.
    """
    if line.isspace():
        return True
    if len(line.split()) >= 3:
        try:  # python 3 str
            if line.split()[0].isdecimal() and line.split()[1].isdecimal():
                return False
        except AttributeError:  # python 2 str
            tokens = line.decode().split()
            if tokens[0].isdecimal() and tokens[1].isdecimal():
                return False
        return True
    return True
def get_exit_info(jobreport_dictionary): """ Return the exit code (exitCode) and exit message (exitMsg). E.g. (0, 'OK'). :param jobreport_dictionary: :return: exit_code, exit_message """ return jobreport_dictionary['exitCode'], jobreport_dictionary['exitMsg']
def get_ms_name(name): """Split module_name.signal_name to module_name , signal_name """ tokens = name.split('.') if len(tokens) == 0: raise SystemExit("This to be catched in validate.py") module = tokens[0] signal = None if len(tokens) == 2: signal = tokens[1] return module, signal
def removeElement_1(nums, val): """ Brute force solution Don't preserve order """ # count the frequency of the val val_freq = 0 for num in nums: if num == val: val_freq += 1 # print(val_freq) new_len = len(nums) - val_freq # remove the element from the list i = 0 j = len(nums) - 1 while val_freq > 0 and i < new_len: if nums[i] == val: print('index:', i) while j > 0 and nums[j] == val: j -= 1 print('j:', j) # swap elements temp = nums[i] nums[i] = nums[j] nums[j] = temp val_freq -= 1 j -= 1 i += 1 return new_len
def get_source_points(tcdcn_cfNet, x, y_kpt_norm, num_model_kpts, use_tcdcn=False): """ This method gets the source keypoints for training the model. Either a model's output or the true keypoints or a combination of two is taken as the source. paramers: num_model_kpts: the number of kpts to be used from cfNet model per example default is all kpts returns: The keypoint locations upon which the model should train to denoise the output. It's a matrix of shape (#batch, #kpts) """ # source_points is of shape (#batch, #kpts) if use_tcdcn: model_points = tcdcn_cfNet.model_prediction(x, dropout=0) num_batch, num_kpts = y_kpt_norm.shape source_points = model_points else: # in this case the true keypoint positions is taken as the source source_points = y_kpt_norm return source_points
def message_identifier(msg):
    """
    Extract an identifier for message editing. Useful with
    :meth:`amanobot.Bot.editMessageText` and similar methods.
    Returned value is guaranteed to be a tuple.

    ``msg`` is expected to be ``chat`` or ``chosen_inline_result``.
    """
    if 'chat' in msg and 'message_id' in msg:
        return msg['chat']['id'], msg['message_id']
    if 'inline_message_id' in msg:
        return msg['inline_message_id'],
    raise ValueError()
def rename_channel(channel): """ Function to rename Bh1 -> BHN, BH2 -> BHE for consistency Args: channel (str): Channel code Returns (str): Renamed channel """ if channel == 'BH1': return 'BHN' elif channel == 'BH2': return 'BHE' else: return channel
def _aln_identity(a, b): """Compute identity of alignment between two sequences. Args: a, b: two aligned sequences Returns: float representing the fraction of the alignment that is identical between a and b """ assert len(a) == len(b) identical_count = sum(1 for i in range(len(a)) if a[i] == b[i]) return identical_count / float(len(a))
def qconj(q):
    """
    Return the conjugate of quaternion q as a 4-tuple.

    The quaternion q is a sequence of the form [w, x, y, z]; the conjugate
    negates the vector part, giving (w, -x, -y, -z).
    """
    return tuple(-e if i > 0 else e for i, e in enumerate(q))
def _concat_element(base, index, value): """Implementation of perl .= on an array element""" try: base[index] += value except TypeError: if value is None: if base[index] is None: base[index] = '' else: base[index] = str(base[index]) else: if base[index] is None: base[index] = str(value) else: base[index] = str(base[index]) + str(value) return base[index]
def pfxlen2mask_int(pfxlen):
    """
    Converts the given prefix length to an IP mask value.

    :type  pfxlen: int
    :param pfxlen: A prefix length.
    :rtype:  long
    :return: The mask, as a long value.
    """
    # Truncate to 32 bits so e.g. a /24 yields 0xFFFFFF00 rather than 0xFFFFFFFF00.
    return (0xFFFFFFFF << (32 - int(pfxlen))) & 0xFFFFFFFF
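# Hedged usage sketch of pfxlen2mask_int above; the prefix lengths are illustrative only.
assert pfxlen2mask_int(24) == 0xFFFFFF00  # 255.255.255.0
assert pfxlen2mask_int(16) == 0xFFFF0000  # 255.255.0.0
assert pfxlen2mask_int(32) == 0xFFFFFFFF  # 255.255.255.255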
def gzipped_extension(filename): """ Return true if file seems to be gzipped based on extension """ return filename[-3:] == '.gz'
def studentGrades(gradeList): """ >>> grades = [ ... ['Student', 'Quiz 1', 'Quiz 2', 'Quiz 3'], ... ['John', 100, 90, 80], ... ['McVay', 88, 99, 111], ... ['Rita', 45, 56, 67], ... ['Ketan', 59, 61, 67], ... ['Saranya', 73, 79, 83], ... ['Min', 89, 97, 101]] >>> studentGrades(grades) [90, 99, 56, 62, 78, 95] >>> grades = [ ... ['Student', 'Quiz 1', 'Quiz 2'], ... ['John', 100, 90], ... ['McVay', 88, 99], ... ['Min', 89, 97]] >>> studentGrades(grades) [95, 93, 93] >>> studentGrades(55) 'error' """ # --- YOU CODE STARTS HERE # decide whether the data type is right if type(gradeList) == list: # create the list for average grade ave_grade = [] # see all the data for x in range(1,len(gradeList)): # set the first value for variables total_grade = 0 ave = 0 for y in range(1,len(gradeList[x])): # count the total grade total_grade = total_grade + gradeList[x][y] # count the average grade ave = int(total_grade /(len(gradeList[x])-1)) ave_grade.append(ave) # return the average grade in list return ave_grade # if the data type is wrong, return error return "error"
def represent_as_memory_index(argument): """ Format the argument so it appears as a memory index. See :func:`~.is_memory_index` for details on what a memory index is. Args: argument (str): The argument to represent as a memory index. Returns: str: The formatted argument. """ return "[{argument}]".format(argument=argument)
def max_by_not_zero(func, collection): """Return the element of a collection for which func returns the highest value, greater than 0. Return None if there is no such value. >>> max_by_not_zero(len, ["abc", "d", "ef"]) 'abc' >>> max_by_not_zero(lambda x: x, [0, 0, 0, 0]) is None True >>> max_by_not_zero(None, []) is None True """ if not collection: return None def annotate(element): return (func(element), element) highest = max(list(map(annotate, collection)),key=lambda pair:pair[0]) if highest and highest[0] > 0: return highest[1] else: return None
def create_board(board_str):
    """create board representation of board string

    returns a list of strings representing board characters of 'x', 'o' and
    space ' '. An empty space is prepended so that board indexing starts at 1.

    >>> create_board(" xxo  o ")
    [" ", " ", "x", "x", "o", " ", " ", "o", " "]
    """
    board = [" "]
    board.extend(list(board_str))
    return board
def bin_to_nibbles(s): """convert string s to nibbles (half-bytes) >>> bin_to_nibbles("") [] >>> bin_to_nibbles("h") [6, 8] >>> bin_to_nibbles("he") [6, 8, 6, 5] >>> bin_to_nibbles("hello") [6, 8, 6, 5, 6, 12, 6, 12, 6, 15] """ res = [] for x in s: res += divmod(ord(x), 16) return res
def isbn_10_check_digit(nine_digits): """Function to get the check digit for a 10-digit ISBN""" if len(nine_digits) != 9: return None try: int(nine_digits) except Exception: return None remainder = int(sum((i + 2) * int(x) for i, x in enumerate(reversed(nine_digits))) % 11) if remainder == 0: tenth_digit = 0 else: tenth_digit = 11 - remainder if tenth_digit == 10: tenth_digit = 'X' return str(tenth_digit)
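# Hedged usage sketch for isbn_10_check_digit, using the well-known ISBN 0-306-40615-2;
# the inputs are illustrative and not taken from the original source.
assert isbn_10_check_digit("030640615") == "2"
assert isbn_10_check_digit("12345") is None      # wrong length
assert isbn_10_check_digit("03064061x") is None  # not all digits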
def reprFloat(f): """reprFloat(float) -> string Return the string representation of an IDL float type (float, double, long double), with enough precision to completely reconstruct the bit pattern.""" # *** Deal with long double s = "%.17g" % f if s.find(".") == -1 and s.find("e") == -1: s = s + ".0" return s
def check_fields_of_view_format(fields_of_view): """Confirm that the input fields of view is valid. Parameters ---------- fields_of_view : list of int List of integer fields of view. Returns ------- str or list of int Correctly formatted fields_of_view variable. """ if fields_of_view != "all": if isinstance(fields_of_view, list): if all(isinstance(x, int) for x in fields_of_view): return fields_of_view else: try: return list(map(int, fields_of_view)) except ValueError: raise TypeError( f"Variables of type int expected, however some of the input fields of view are not integers." ) else: raise TypeError( f"Variable of type list expected, however type {type(fields_of_view)} was passed." ) else: return fields_of_view
def fib(index):
    """recursive fibonacci sequence function"""
    if index <= 1:
        return index
    return fib(index - 1) + fib(index - 2)
def get_size(shard):
    """Get the size of a shard.

    Args:
        shard: A file-like object representing the shard.

    Returns:
        The shard size in bytes.
    """
    # Seek to the end of the file; tell() then reports the total size.
    shard.seek(0, 2)
    return shard.tell()
def loadMushroomDataSet(path): """ return the dataset (list) of mushroom from path file :param path: path of mushroom dataset :return: mushroom dataset """ mushroomDataset = None try : mushroomDataset = [line.split() for line in open(path).readlines()] except Exception as e: print(e) finally: return mushroomDataset
def fancier_uniquer(seq, f, p): """ Keeps "best" item of each f-defined equivalence class, with picking function p choosing appropriate (index, item) for each equivalence class from the list of all (index, item) pairs in that class """ bunches = {} for index, item in enumerate(seq): marker = f(item) bunches.setdefault(marker, []).append((index, item)) auxlist = [p(candidates) for candidates in bunches.values()] auxlist.sort() return [item for index, item in auxlist]
def splitElements(osmElements): """Splits returned OSM elements into list of ways and list of nodes Args: osmElements: list of nodes and ways returned from OSM query Returns: tuple containing (list of ways, list of nodes) """ nodes = [] ways = [] garbage = [] for element in osmElements: if element['type'] == 'node': nodes.append(element) elif element['type'] == 'way': ways.append(element) else: garbage.append(element) # if DEBUG_MODE == True: # print('splitElements returned \n' + # str(len(ways)) + " ways, " + str(len(nodes)) + " nodes, and " # + str(len(garbage)) + " bad elements") return((ways, nodes))
def neighbors(i, j, m, n): """ Returns the neighboring indices for a given index in a matrix of m x n shape """ # Sets default delta indices # Adjust delta indices for given index on edge of matrix inbrs = [-1, 0, 1] if i == 0: inbrs = [0, 1] if i == m-1: inbrs = [-1, 0] jnbrs = [-1, 0, 1] if j == 0: jnbrs = [0, 1] if j == n-1: jnbrs = [-1, 0] nbrs = [] # Applies deltas and yields neighboring indices for delta_i in inbrs: for delta_j in jnbrs: # Ignore 0,0 neighbor (the agent itself) if delta_i == delta_j == 0: continue else: nbrs.append((i+delta_i, j+delta_j)) return nbrs
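# Hedged usage sketch for neighbors above, on an illustrative 3x3 grid
# (values not taken from the original source).
assert neighbors(0, 0, 3, 3) == [(0, 1), (1, 0), (1, 1)]  # corner cell: three neighbours
assert len(neighbors(1, 1, 3, 3)) == 8                    # interior cell: all eight neighbours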
def seconds_to_human(number_of_seconds, out_seconds=True, out_minutes=True, out_hours=True, out_weeks=True, out_days=True, out_unit_auto_adjust=False): """ Converts number of seconds to string representation. :param out_seconds: False, if seconds part in output is to be trucated :param number_of_seconds: number of seconds. :param out_unit_auto_adjust: if True, funcion automatically decides what parts of the date-time diff passed as an argument will become part of the output string. For example, if number_of_seconds is bigger than days, there is no sense to show seconds part. :return: string representation of time delta """ human_strings = [] if out_unit_auto_adjust: if number_of_seconds > 600: # don't show seconds if time > 10 min out_seconds = False if number_of_seconds > 86400: # don't show minutes if time > 10 hours out_minutes = False if number_of_seconds > 864000: # don't show hours if time > 10 days out_hours = False if number_of_seconds > 6048000: out_days = False weeks = 0 days = 0 hours = 0 if out_weeks and number_of_seconds > 604800: # weeks weeks = int(number_of_seconds / 604800) number_of_seconds = number_of_seconds - (weeks * 604800) elem_str = str(int(weeks)) + ' week' if weeks > 1: elem_str += 's' human_strings.append(elem_str) if out_days and number_of_seconds > 86400: # days days = int(number_of_seconds / 86400) number_of_seconds = number_of_seconds - (days * 86400) elem_str = str(int(days)) + ' day' if days > 1: elem_str += 's' human_strings.append(elem_str) if out_hours and number_of_seconds > 3600: hours = int(number_of_seconds / 3600) number_of_seconds = number_of_seconds - (hours * 3600) elem_str = str(int(hours)) + ' hour' if hours > 1: elem_str += 's' human_strings.append(elem_str) if out_minutes and number_of_seconds > 60: minutes = int(number_of_seconds / 60) number_of_seconds = number_of_seconds - (minutes * 60) elem_str = str(int(minutes)) + ' minute' if minutes > 1: elem_str += 's' human_strings.append(elem_str) if out_seconds and number_of_seconds >= 1: elem_str = str(int(number_of_seconds)) + ' second' if number_of_seconds > 1: elem_str += 's' human_strings.append(elem_str) return ' '.join(human_strings)
def return_core_dense_key(core_idx, dense=False): """Return core dense keys in the right format.""" if dense is False: return (core_idx, 0) else: return (core_idx, dense)
def result_type_match(pred, target, result_type): """ Verifies nature of prediction and target given a result_type. """ if result_type == 'T' and pred == target: return True if result_type == 'F' and pred != target: return True if result_type == 'TP' and pred == target and target == 1: return True if result_type == 'FP' and pred != target and target == 0: return True if result_type == 'TN' and pred == target and target == 0: return True if result_type == 'FN' and pred != target and target == 1: return True return False
def dic2axisheader(dic): """ Convert axisheader dictionary to list """ al = [0] * 12 al[0] = dic["nucleus"] al[1] = dic["spectral_shift"] al[2] = dic["npoints"] al[3] = dic["size"] al[4] = dic["bsize"] al[5] = dic["spectrometer_freq"] al[6] = dic["spectral_width"] al[7] = dic["xmtr_freq"] al[8] = dic["zero_order"] al[9] = dic["first_order"] al[10] = dic["first_pt_scale"] al[11] = dic["extended"] return al
def GetPrimeNumbers(limiter=2):
    """
    This function returns all prime numbers up to and including limiter.
    The limiter must be between 2 and 100.

    :param limiter: integer between 2 and 100
    :return: list of prime numbers
    """
    primeNumList = []
    if 2 <= limiter <= 100:
        for x in range(2, limiter + 1):
            # x is prime if no value from 2..sqrt(x) divides it.
            if all(x % d for d in range(2, int(x ** 0.5) + 1)):
                primeNumList.append(x)
    return primeNumList
def find_index_of_sequence(data, sequence, startindex=0): """find the index in a sequence""" index = startindex for token in sequence: index = data.find(token, index) if index == -1: return -1 return index + len(sequence[-1])
def clean_battlefield(battlefield: str) -> str: """ Clean the battlefield and return only survived letters :param battlefield: :return: """ result = battlefield.split('[') result = [string for string in result if string != ''] result = list(reversed(result)) temp = list() while result: for i, r in enumerate(result): if r.count('#') <= 1: if i + 1 < len(result) and (r.count('#') == 0 and result[i + 1].count('#') < 2): temp.append(''.join(char for char in r if char .isalpha())) del result[i] break elif i + 1 < len(result) and (r.count('#') == 1 and result[i + 1].count('#') == 0): temp.append(''.join(char for char in r if char .isalpha())) del result[i] break elif i + 1 < len(result): del result[i] break else: temp.append(''.join(char for char in r if char .isalpha())) del result[i] break del result[i] break answer = ''.join(char for char in reversed(temp)) return answer