def improve_temperature_measurement(temp_raw, dig_t):
    """Refine the temperature measurement.

    Adapts the raw temperature measurement according to a formula
    specified in the Bosch data sheet.

    Args:
        temp_raw (int): raw temperature reading
        dig_t (list): blocks of data pertaining to temperature

    Returns:
        tuple: refined temperature measurement and reference point

    Reference:
        Bosch data sheet, Appendix A, "BME280_compensate_T_double"
    """
    var1 = (((temp_raw >> 3) - (dig_t[0] << 1)) * dig_t[1]) >> 11
    var2 = ((temp_raw >> 4) - dig_t[0]) * ((temp_raw >> 4) - dig_t[0])
    var3 = ((var2 >> 12) * dig_t[2]) >> 14
    t_fine = var1 + var3
    temperature = float(((t_fine * 5) + 128) >> 8)
    return temperature, t_fine
def _format_pval(p_value):
    """Helper function for formatting p-value."""
    if p_value < 0.0001:
        return 'p < 0.0001'
    else:
        return 'p = {:5.4f}'.format(p_value)
def rk4_backward(f, x, h, **kwargs):
    """Implements a backwards classic Runge-Kutta integration RK4.

    Parameters
    ----------
    f : callable
        function to reverse integrate, must take x as the first argument
        and arbitrary kwargs after
    x : numpy array, or float
        state needed by function
    h : float
        step size

    Returns
    -------
    state : numpy array, or float
        Reverse integrated state
    """
    k1 = f(x, **kwargs)
    k2 = f(x - 0.5 * h * k1, **kwargs)
    k3 = f(x - 0.5 * h * k2, **kwargs)
    k4 = f(x - h * k3, **kwargs)

    return x - (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
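# Usage sketch for rk4_backward (the test ODE and values below are
# illustrative assumptions, not from the source): reverse one step of
# dx/dt = -x from x(0.1) = exp(-0.1) back toward x(0) = 1.
x_later = 0.9048374180359595  # exp(-0.1)
x_earlier = rk4_backward(lambda x: -x, x_later, 0.1)
print(x_earlier)  # ~1.0, up to the RK4 local truncation error O(h^5)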
def combine(intervals):
    """combine overlapping and adjacent intervals.

    >>> combine([(10,20), (30,40)])
    [(10, 20), (30, 40)]
    >>> combine([(10,20), (20,40)])
    [(10, 40)]
    >>> combine([(10,20), (15,40)])
    [(10, 40)]
    """
    if not intervals:
        return []

    new_intervals = []
    intervals.sort()
    first_from, last_to = intervals[0]
    for this_from, this_to in intervals[1:]:
        if this_from > last_to:
            new_intervals.append((first_from, last_to))
            first_from, last_to = this_from, this_to
            continue
        if last_to < this_to:
            last_to = this_to
    new_intervals.append((first_from, last_to))
    return new_intervals
def handle_to_osrelpath(handle, is_windows=False):
    """Return OS specific relpath from handle."""
    directories = handle.split("/")
    if is_windows:
        return "\\".join(directories)
    return "/".join(directories)
def _filetype(filepath):
    """returns the file extension of a file"""
    if '.' in filepath:
        return filepath.lower()[filepath.rindex('.') + 1:]
def sort_string(string: str) -> str:
    """
    >>> sort_string("kjldsk")
    'djkkls'
    """
    return "".join(sorted(string))
def create_entail_axioms(relations_to_pairs, relation='synonym'):
    """
    For every linguistic relationship, check if 'relation' is present.
    If it is present, then create an entry named:
    Axiom ax_relation_token1_token2 : forall x, _token1 x -> _token2 x.
    """
    rel_pairs = relations_to_pairs[relation]
    axioms = []
    if not rel_pairs:
        return axioms
    for t1, t2 in rel_pairs:
        axiom = 'Axiom ax_{0}_{1}_{2} : forall x, _{1} x -> _{2} x.'\
            .format(relation, t1, t2)
        axioms.append(axiom)
    return axioms
def for_approach_rate(approach_rate):
    """
    For an approach rate, get how long before the circle appears and
    when it reaches full opacity.

    :param approach_rate: float ar
    :return: preempt in ms, fade-in time in ms
    """
    if approach_rate < 5:
        preempt = 1200 + 600 * (5 - approach_rate) / 5
        fade_in = 800 + 400 * (5 - approach_rate) / 5
    elif approach_rate > 5:
        preempt = 1200 - 750 * (approach_rate - 5) / 5
        fade_in = 800 - 500 * (approach_rate - 5) / 5
    else:
        preempt = 1200
        fade_in = 800
    return preempt, fade_in
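# Quick check of for_approach_rate (values follow from the formulas above):
# at AR 9 a circle appears 600 ms before its hit time and fades in over 400 ms.
print(for_approach_rate(9))  # (600.0, 400.0)
print(for_approach_rate(5))  # (1200, 800), the pivot point of both branches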
def extract_vertices(vertices):
    """
    Extract two opposite vertices from a list of 4 (assumption: rectangle)
    """
    min_x, max_x = float("inf"), float("-inf")
    min_y, max_y = float("inf"), float("-inf")
    for v in vertices:
        # Default to the current extreme for the *same* axis, so a missing
        # key never wins a comparison (the original mixed x and y defaults).
        if v.get('x', min_x) < min_x:
            min_x = v.get('x')
        if v.get('x', max_x) > max_x:
            max_x = v.get('x')
        if v.get('y', min_y) < min_y:
            min_y = v.get('y')
        if v.get('y', max_y) > max_y:
            max_y = v.get('y')
    v1 = next(v for v in vertices if v.get('x') == min_x and v.get('y') == min_y)
    v2 = next(v for v in vertices if v.get('x') == max_x and v.get('y') == max_y)
    return v1, v2
def gzipped(content):
    """ test if content is gzipped by magic num. """
    if content is not None and len(content) > 10 \
            and ord(content[0:1]) == 31 and ord(content[1:2]) == 139 \
            and ord(content[2:3]) == 8:
        return True
    return False
def validate_hs_code(code):
    """ validate HS Code is the right length """
    if code and (len(code) not in (6, 8, 10) or not str(code).isdigit()):
        return False
    return True
def gen_q_list(q_bands):
    """ convert q_bands into a list of q_indices """
    return list(range(-q_bands, q_bands + 1))
def extract_values_from_json(obj, key) -> list:
    """Pull all values of a specified key from nested JSON, recursively.

    :param obj: json object
    :param key: key to extract values from
    :return: list of values for the given key
    """
    arr = []

    def extract(obj, arr, key):
        """Recursively search for values of key in JSON tree."""
        if isinstance(obj, dict):
            for k, v in obj.items():
                if isinstance(v, (dict, list)):
                    # Also descend into list values, which the original skipped.
                    extract(v, arr, key)
                elif k == key:
                    arr.append(v)
        elif isinstance(obj, list):
            for item in obj:
                extract(item, arr, key)
        return arr

    return extract(obj, arr, key)
def check_input_format(input_data):
    """
    check if the input data is numbers > 0.

    :param input_data: str, the input number of row runs to be achieved.
    :return: str, the data in legal format.
    """
    while not input_data.isdigit() or int(input_data) <= 0:
        input_data = input("Illegal format, please enter numbers > 0: ")
    return input_data
def _arg_scope_func_key(op):
    """Returns a key that can be used to index arg_scope dictionary."""
    return getattr(op, '_key_op', str(op))
def options(amount=None):
    """Provides values for options which can be ORed together.

    If no amount is provided, returns a generator of ever growing numerical
    values starting from 1. If amount is provided, returns an amount-sized
    list of numerical values.
    """
    def generator():
        exp = 0
        cache = None
        while 1:
            if cache:
                cache = cache * 2
            else:
                cache = 2 ** exp
            yield cache
            exp += 1

    if amount is None:
        return generator()
    return [v for _, v in zip(range(amount), generator())]
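# Usage sketch for options: the yielded values are the usual power-of-two
# bit flags, so they can be ORed together without colliding. The flag names
# here are illustrative only.
READ, WRITE, EXECUTE = options(3)  # [1, 2, 4]
mode = READ | EXECUTE              # 5
print(bool(mode & WRITE))          # False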
def mapToDict(dictShape, x):
    """
    Make a dict from a list of keys and a single shared value.

    Parameters
    ----------
    dictShape : list of any
        The labels of the returned dict.
    x : any
        The value assigned to every key.

    Returns
    -------
    dict
        Each key in dictShape maps to the same object x.
    """
    res = dict([])
    for key in dictShape:
        res[key] = x
    return res
def bytes_to_readable_str(num_bytes, include_b=False):
    """Generate a human-readable string representing number of bytes.

    The units B, kB, MB and GB are used.

    Args:
        num_bytes: (`int` or None) Number of bytes.
        include_b: (`bool`) Include the letter B at the end of the unit.

    Returns:
        (`str`) A string representing the number of bytes in a human-readable
        way, including a unit at the end.
    """
    if num_bytes is None:
        return str(num_bytes)
    if num_bytes < 1024:
        result = "%d" % num_bytes
    elif num_bytes < 1048576:
        result = "%.2fk" % (num_bytes / 1024.0)
    elif num_bytes < 1073741824:
        result = "%.2fM" % (num_bytes / 1048576.0)
    else:
        result = "%.2fG" % (num_bytes / 1073741824.0)

    if include_b:
        result += "B"
    return result
def mock_get_benchmark_config(benchmark):
    """Mocked version of common.benchmark_config.get_config."""
    if benchmark == 'benchmark1':
        return {
            'unsupported_fuzzers': ['fuzzer2'],
        }
    if benchmark == 'benchmark2':
        return {
            'unsupported_fuzzers': ['fuzzer2', 'fuzzer3'],
        }
    return {}
def get_index_of_max(iterable):
    """Return the first index of one of the maximum items in the iterable."""
    max_i = -1
    max_v = float('-inf')
    for i, value in enumerate(iterable):
        if value > max_v:
            max_v = value
            max_i = i
    return max_i
def number_to_event_name(number):
    """
    Convert an integer to an NCANDA event name (Arm 1 + full-year visits only)
    """
    if number == 0:
        return "baseline"
    elif number >= 1:
        return "followup_%dy" % number
    else:
        raise KeyError("Number %d is not eligible for conversion" % number)
def change_pose_f1(bodys, is_gt=False):
    """
    format: ---> a total list [[jtype1, X1, Y1, Z1], [jtype2, X2, Y2, Z2], ...]
    """
    include_together = []
    if len(bodys) > 0:
        for body in bodys:
            for i in range(15):
                joint = []
                if not is_gt:
                    X = body[i][0]
                    Y = body[i][1]
                    Z = body[i][2]
                else:
                    X = body[i][4]
                    Y = body[i][5]
                    Z = body[i][6]
                joint.append(i)
                joint.append(X)
                joint.append(Y)
                joint.append(Z)
                include_together.append(joint)
    return include_together
def uk_to_mjy(t_uK, nu_GHz, th_arcmin):
    """Convert brightness temperature [uK] to flux density [mJy].

    See equation at https://science.nrao.edu/facilities/vla/proposing/TBconv:

        T = 1.36 * ( lambda[cm]^2 / theta[arcsec]^2 ) * S[mJy/beam]

    Parameters
    ----------
    t_uK: brightness temperature in uK
    nu_GHz: frequency in GHz
    th_arcmin: FWHM in arcmin

    Returns
    -------
    s_mJy (float): flux density in mJy
    """
    l_cm = 3e1 / nu_GHz  # wavelength [cm]
    t_K = t_uK / 1e6
    th_arcsec = th_arcmin * 60.
    s_mJy = t_K / 1.36 / (l_cm / th_arcsec)**2
    return s_mJy
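# Worked example for uk_to_mjy (numbers chosen for round values, not from the
# source): a 1e6 uK (1 K) source at 30 GHz (lambda = 1 cm) in a 1 arcmin
# (60 arcsec) beam gives S = 1 / 1.36 / (1/60)^2 ~= 2647 mJy.
print(uk_to_mjy(1e6, 30.0, 1.0))  # ~2647.06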
def encode_pair(left: str, right: str, rep: int, s: str) -> str:
    """Encodes a left/right pair using temporary characters."""
    # The original chain began with a no-op .replace("", ""), dropped here.
    return (
        s.replace(left, "\ufffe" * rep)
        .replace(right, "\uffff" * rep)
    )
def not_blank(key, obj, blanks):
    """Test value is not blank."""
    return key in obj and obj[key] and obj[key] not in blanks
def inorder_traverse_re(root):
    """Inorder traversal (recursive)."""
    values = []

    def traverse(node):
        if not node:
            return
        traverse(node.left)
        values.append(node.val)
        traverse(node.right)

    traverse(root)
    return values
def __create_code_block(message):
    """Create a code block"""
    return f"```\n{message}```"
def unparse_host_port(host, port=None):
    """
    Undo parse_host_port().
    """
    if ':' in host and not host.startswith('['):
        host = '[%s]' % host
    if port:
        return '%s:%s' % (host, port)
    else:
        return host
def count_unequal_bits(a, b):
    """Counts the number of bits that differ between a and b.

    Args:
        a, b: Non-negative integers.

    Returns:
        Number of bits different between a and b.

    Raises:
        ValueError: If either of the arguments is negative.
    """
    if a < 0 or b < 0:
        raise ValueError('Input arguments must be >= 0.')

    c = a ^ b
    count = 0
    # Kernighan's trick: c & (c - 1) clears the lowest set bit, so the
    # loop runs once per set bit of the XOR.
    while c != 0:
        c = c & (c - 1)
        count += 1
    # An easier (but slower) way is to inspect one bit per iteration:
    #     if c & 1 == 1:
    #         count += 1
    #     c = c >> 1
    return count
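# Quick check of count_unequal_bits: 0b1010 and 0b0110 differ in the two
# middle bits (their XOR is 0b1100), so the Hamming distance is 2.
print(count_unequal_bits(0b1010, 0b0110))  # 2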
def merge_lists(old_list, new_list):
    """Merge two dictionaries."""
    for mac in new_list:
        old_list[mac] = new_list[mac]
    return old_list
def _XSWAP(value):
    """Swap the two bytes within each 16-bit half of @value."""
    return ((value & 0xFF00FF00) >> 8) | ((value & 0x00FF00FF) << 8)
def merge_column_sets(columns: set, exclusions: set) -> set:
    """
    Take the column set and subtract the exclusions.

    :param columns: set of columns on the instance
    :param exclusions: set of columns we want to ignore
    :return: set of finalised columns for publication
    :rtype: set
    """
    return columns.difference(exclusions)
def remove_special_characters(returned_string):
    """
    Clean up the list provided by the 'search_string' function.

    Removes special characters (e.g. '*' and ',') from elements in the list,
    removes line breaks from the middle of words, and deletes excess elements
    from the end of the list.
    """
    # strip special characters from elements
    special_character_removal = [a.strip("*,") for a in returned_string]
    # remove line breaks
    special_character_removal = [b.replace("\\r\\n", '') for b in special_character_removal]
    special_character_removal = [c.split(",")[0] for c in special_character_removal]
    # to remove excess elements from the list:
    delete_extra_elements = special_character_removal.index('Last element you want to keep')
    # add one to the index so deletion starts at the element after the last one to keep
    delete_extra_elements += 1
    # delete list elements from after the keyword to the end of the list
    del special_character_removal[delete_extra_elements:]
    return special_character_removal
def is_permutation(str1, str2):
    """Check if given strings are permutations of each other

    :param str1: String one
    :param str2: String two
    :returns: True/False accordingly
    """
    # Very first check is to see if they both are equal so we can return True quickly
    if str1 == str2:
        return True

    # We can also check if they are of the same length and return False if not.
    if len(str1) != len(str2):
        return False

    # Can also use defaultdict with default value = 0
    char_map = {}
    for c in str1:
        if char_map.get(c) is not None:
            char_map[c] += 1
        else:
            char_map[c] = 1

    for c in str2:
        res = char_map.get(c, 0)
        if res == 0:
            return False
        elif res == 1:
            del char_map[c]
        else:
            char_map[c] -= 1

    return len(char_map) == 0
def make_pretty_name(method):
    """Makes a pretty name for a function/method."""
    meth_pieces = [method.__name__]
    # If it's a bound method, attempt to tack on the class name
    if hasattr(method, '__self__') and method.__self__ is not None:
        try:
            meth_pieces.insert(0, method.__self__.__class__.__name__)
        except AttributeError:
            pass
    return ".".join(meth_pieces)
def assert_single_element(iterable):
    """Get the single element of `iterable`, or raise an error.

    :raise: :class:`StopIteration` if there is no element.
    :raise: :class:`ValueError` if there is more than one element.
    """
    it = iter(iterable)
    first_item = next(it)

    try:
        next(it)
    except StopIteration:
        return first_item

    raise ValueError("iterable {!r} has more than one element.".format(iterable))
def build_descriptor(comp, splitby, mask, normalization, split, algo="smrt", level="short"):
    """ Generate a shorthand descriptor for the group a result belongs to. """

    # From actual file, or from path to result, boil down comparator to its abbreviation
    comp_map = {
        'hcp_niftismooth_conn_parby-glasser_sim.df': ('hcpg', "HCP [rest] (glasser parcels)"),
        'hcpniftismoothconnparbyglassersim': ('hcpg', "HCP [rest] (glasser parcels)"),
        'hcp_niftismooth_conn_sim.df': ('hcpw', "HCP [rest] (wellids)"),
        'hcpniftismoothconnsim': ('hcpw', "HCP [rest] (wellids)"),
        'indi-glasser-conn_sim.df': ('nkig', "NKI [rest] (glasser parcels)"),
        'indiglasserconnsim': ('nkig', "NKI [rest] (glasser parcels)"),
        'indi-connectivity_sim.df': ('nkiw', "NKI [rest] (wellids)"),
        'indiconnsim': ('nkiw', "NKI [rest] (wellids)"),
        'fear_glasser_sim.df': ('f__g', "HCP [task: fear] (glasser parcels)"),
        'fearglassersim': ('f__g', "HCP [task: fear] (glasser parcels)"),
        'fear_conn_sim.df': ('f__w', "HCP [task: fear] (wellids)"),
        'fearconnsim': ('f__w', "HCP [task: fear] (wellids)"),
        'neutral_glasser_sim.df': ('n__g', "HCP [task: neutral] (glasser parcels)"),
        'neutralglassersim': ('n__g', "HCP [task: neutral] (glasser parcels)"),
        'neutral_conn_sim.df': ('n__w', "HCP [task: neutral] (wellids)"),
        'neutralconnsim': ('n__w', "HCP [task: neutral] (wellids)"),
        'fear-neutral_glasser_sim.df': ('fn_g', "HCP [task: fear-neutral] (glasser parcels)"),
        'fearneutralglassersim': ('fn_g', "HCP [task: fear-neutral] (glasser parcels)"),
        'fear-neutral_conn_sim.df': ('fn_w', "HCP [task: fear-neutral] (wellids)"),
        'fearneutralconnsim': ('fn_w', "HCP [task: fear-neutral] (wellids)"),
        'glasserwellidsproximity': ('px_w', "Proximity (wellids)"),
        'glasserparcelsproximity': ('px_g', "Proximity (glasser parcels)"),
        'glasserwellidslogproximity': ('pxlw', "log Proximity (wellids)"),
        'glasserparcelslogproximity': ('pxlg', "log Proximity (glasser parcels)"),
    }

    # Make short string for split seed and normalization
    split = int(split)
    if 200 <= split < 300:
        xv = "2"
        xvlong = "halves"
    elif 400 <= split < 500:
        xv = "4"
        xvlong = "quarters"
    elif split == 0:
        xv = "0"
        xvlong = "whole"
    else:
        xv = "_"
        xvlong = "undefined"

    norm = "s" if normalization == "srs" else "_"

    # Build and return the descriptor
    if level == "short":
        return "{}{}{:0>2}{}{}{}".format(
            comp_map[comp][0],
            splitby[0],
            0 if mask == "none" else int(mask),
            algo[0],
            norm,
            xv,
        )
    elif level == "long":
        return "{}, {}-normed, {} (by {})".format(
            comp_map[comp][1], normalization, xvlong, algo
        )
    else:
        return "Undefined"
def get_changed_properties(current, new, unchangeable=None):
    """Get the changed properties.

    :param current: Current properties
    :type current: dict
    :param new: New properties to be set
    :type new: dict
    :param unchangeable: Set of unchangeable properties, defaults to None
    :type unchangeable: set, optional
    :raises ValueError: If an attempt is made to change an unchangeable property
    :return: Changed properties
    :rtype: dict
    """
    unchangeable = unchangeable or set()

    changed_properties = {}
    for name, new_value in new.items():
        # convert bool types
        if isinstance(new_value, bool):
            new_value = "on" if new_value else "off"
        else:
            new_value = str(new_value)

        # check if new value differs from current one and if the value is changeable
        current_value = current.get(name, None)
        if current_value is None:
            # the dataset does not yet exist -> add new value
            changed_properties[name] = new_value
        elif new_value != current_value:
            if name in unchangeable:
                dataset_type = current["type"]
                raise ValueError(
                    "The value of {} property '{}' cannot be changed after creation."
                    .format(dataset_type, name))
            if name.startswith("feature@"):
                if (current_value in ["active", "enabled"]
                        and new_value == "disabled"):
                    changed_properties[name] = new_value
            else:
                changed_properties[name] = new_value
    return changed_properties
def get_default_readout(n_atom_basis):
    """Default setting for readout layers. Predicts only the energy of the system.

    Args:
        n_atom_basis (int): number of atomic basis. Necessary to match the
            dimensions of the linear layer.

    Returns:
        DEFAULT_READOUT (dict)
    """
    default_readout = {
        'energy': [
            {'name': 'linear',
             'param': {'in_features': n_atom_basis,
                       'out_features': int(n_atom_basis / 2)}},
            {'name': 'shifted_softplus', 'param': {}},
            {'name': 'linear',
             'param': {'in_features': int(n_atom_basis / 2),
                       'out_features': 1}}
        ]
    }

    return default_readout
def get_backbone(ca_list, c_list, n_list):
    """
    output: mainchain atoms

    ca_list: CA atoms of all amino acids
    c_list: C atoms of all amino acids
    n_list: N atoms of all amino acids
    """
    mainchain = []
    for i in range(len(ca_list)):
        mainchain.append(n_list[i])
        mainchain.append(ca_list[i])
        mainchain.append(c_list[i])
    return mainchain
def _serialize_query_value(value):
    """
    Serialize a query value.

    :param value:
    :return:
    """
    if isinstance(value, bool):
        return "true" if value else "false"
    return "'%s'" % str(value).replace("\\", "\\\\").replace("'", "\\'")
def calc_adda_mm_per_px(px_per_unit, x_px=1024, unit_fact=1.0e-9):
    """calc_adda_mm_per_px(px_per_unit, x_px=1024, unit_fact=1.0e-9)

    Compute the mm per pixel for the maximum pixel size (4096) of a
    Soft Imaging Systems ADDA-II slow scan interface. This is entered
    into the channel calibration in the Analysis.ini file and is
    corrected for subsampling (e.g. to 1024 pixels) for a given image
    within the AnalySIS software.

    Parameters
    ----------
    px_per_unit: number
        The number of pixels per unit (e.g. nm) that are measured from a
        calibration standard using something like the KMAG Imaging-C module.
    x_px: number (1024)
        The image width set for the Channel in the analySIS software.
        The default is set to 1024.
    unit_fact: number (1.0e-9)
        The multiplier for the scale in SI units. The default is 1.0e-9 for nm.

    Returns
    -------
    px_per_mm: number
        The pixels/mm used by the analySIS inverse magnification calibration:
        px_per_mm = slope * (1/magnification)
        These values are entered into arrays, one for the X-AXIS (width) and
        one for the Y-AXIS (height) for the channel in the Analysis.ini file.

    N.B. Correct for lens hysteresis when measuring standards...

    Example for the Hitachi 4100:

    > from jmToolsPy3 import calc_adda_mm_per_px
    > print("800X: X= ", calc_adda_mm_per_px(150.92), " Y= ", calc_adda_mm_per_px(148.77))
    """
    adda_max = 4096  # Max pix size for ADDA-II
    scale_fact = adda_max / x_px
    un_per_px = px_per_unit / scale_fact
    mm_per_px = un_per_px * 1000. * unit_fact
    px_per_mm = 1.0 / mm_per_px
    return round(px_per_mm, 3)
def loc_tech_is_in(backend_model, loc_tech, model_set):
    """
    Check if set exists and if loc_tech is in the set

    Parameters
    ----------
    loc_tech : string
    model_set : string
    """
    return hasattr(backend_model, model_set) and loc_tech in getattr(
        backend_model, model_set
    )
def get_processable_layers(layers):
    """
    Returns processable layers from a list of layers, along with their types.

    We check that by searching for specific keywords in a layer's name, since
    we can't be sure about the layer's type. If you wish to extend the project
    and add compatibility for more layers, you should update this function
    as well.
    """
    processable_layers = []

    for layer in layers:
        layer_name = layer.name.lower()

        if "conv" in layer_name:
            processable_layers.append([layer_name, "CONVOLUTIONAL"])
        elif "primary" in layer_name and "caps" in layer_name:
            processable_layers.append([layer_name, "PRIMARY_CAPS"])
        elif "caps" in layer_name:
            processable_layers.append([layer_name, "DENSE_CAPS"])
        elif "mask" in layer_name:
            processable_layers.append([layer_name, "MASK"])

    return processable_layers
def _get_test_item_id(x):
    """ custom test ids """
    if isinstance(x, bool) or x is None:
        return "swagger_format={}".format(x)
    else:
        return "call_mode={}".format(x)
def get_font_face(font_name: str, part: str, unicode: str, subset_filename: str):
    """Format @font-face string.

    Parameters:
        font_name: Font name
        part: subset part
        unicode: unicode range of the part
        subset_filename: woff2 filename of the subset

    Returns:
        css string of @font-face.
    """
    return f"""/* {font_name} [{part}] */
@font-face {{
  font-family: '{font_name.split("-")[0]}';
  font-style: normal;
  font-weight: 300;
  font-display: swap;
  src: url('./woff2/{subset_filename}') format('woff2');
  unicode-range: {unicode}
}}
"""
def sci_notation(x: float):
    """
    Format scientifically as 10^

    Parameters
    ----------
    x : float

    Returns
    -------
    y : str
    """
    a, b = '{:.2e}'.format(x).split('e')
    return r'${}\times10^{{{}}}$'.format(a, int(b))
def default_to_list(value):
    """
    Ensures non-list objects are added to a list for easy parsing.

    Args:
        value (object): value to be returned as-is if it is a list, or
            encapsulated in a list if not.
    """
    if not isinstance(value, list) and value is not None:
        value = [value]
    elif value is None:
        value = []
    return value
def check_equal(dictionary_of_key_values):
    """Just a useful function to make nose stdout readable."""
    print("-" * 30)
    print("Testing")
    result = False
    print(result)
    print("-" * 30)
    return result
def get_value_or_404(json: dict, key: str):
    """If a key is present in a dict, return the value of the key, else None."""
    try:
        return json[key]
    # Catch only the missing-key case (the original caught BaseException,
    # which would also swallow unrelated errors).
    except KeyError:
        # print(f'{key} not found')
        return None
def processtext(tokens):
    """ Preprocessing token list for filtering '(' and ')' in text

    :type tokens: list
    :param tokens: list of tokens
    """
    identifier = '_!'
    within_text = False
    for (idx, tok) in enumerate(tokens):
        if identifier in tok:
            for _ in range(tok.count(identifier)):
                within_text = not within_text
        if ('(' in tok) and within_text:
            tok = tok.replace('(', '-LB-')
        if (')' in tok) and within_text:
            tok = tok.replace(')', '-RB-')
        tokens[idx] = tok
    return tokens
def split_class_str(name):
    """construct output info"""
    return str(name).split('\'')[1].split('.')[-1] + '-> '
def cleanPath(path):
    """Clean the path, remove '//', './', '../'."""
    stack, i, n = [], 0, len(path)
    while i < n:
        string = []
        while i < n and path[i] != '/':
            string.append(path[i])
            i += 1
        i += 1
        string = ''.join(string)
        if string == '..':
            if stack:
                stack.pop()
        elif string and string != '.':
            stack.append(string)
    path = '/{}{}'.format('/'.join(stack), '/' * (path[-1] == '/'))
    return '/' if path == '//' else path
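# Quick check of cleanPath: '.' segments are dropped, '..' pops one level,
# and a trailing slash is preserved.
print(cleanPath('/a/./b/../c/'))  # '/a/c/'
print(cleanPath('/../'))          # '/'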
def nws_api_gave_error(response):
    """Checks to see if the API gave an error by checking to see if
    ``correlationId`` is in the keys. There should be a more robust way
    to check to see if it's a bad API response.

    :param response: The response object from the ``requests`` module.
    :type response: requests.Response
    :return: True if it was a bad request, False otherwise.
    :rtype: bool
    """
    # A requests.Response has no keys() method, so inspect the decoded
    # JSON body instead.
    if 'correlationId' in response.json():
        return True
    return False
def heun_step(u, delta_t, t, du):
    """
    Implementation of Heun's Method (Trapezoid) approximation for systems
    of coupled ODEs

    Parameters:
    -----------
    u: array-like
        vector of initial conditions
    delta_t: float
        time step size
    t: float
        current time
    du: lambda
        vector-valued function for differential equation

    Returns:
    --------
    tuple of floats
        vector of values for the function at the next time step
    """
    # One estimate using Euler's method; the average slope will be used
    u_tilde = u + delta_t * du(u, t)
    return u + delta_t / 2 * (du(u, t) + du(u_tilde, t + delta_t))
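# Usage sketch for heun_step (the test ODE is an illustrative assumption):
# one step of du/dt = -u from u(0) = 1 with delta_t = 0.1.
u_next = heun_step(1.0, 0.1, 0.0, lambda u, t: -u)
print(u_next)  # 0.905, vs. the exact exp(-0.1) ~= 0.904837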
def check_bounds(x0, mn, mx):
    """
    Checks that bounds are valid; returns False (so the caller can emit a
    usable warning message) if any component of x0 lies outside [mn, mx].
    """
    for i in range(len(x0)):
        if not (mn[i] <= x0[i] and mx[i] >= x0[i]):
            return False
    return True
def _get_date_columns_selector(available_date_offsets, required_date_offsets):
    """
    Return a numpy "fancy index" which maps one set of matrix columns to
    another. Specifically, it will take a matrix with the "available" date
    columns and transform it to one with the "required" columns.

    If any of the required dates are greater than the latest available date
    then the last available date will be used in its place.

    Example:

    >>> available = {'2019-01': 0, '2019-02': 1, '2019-03': 2}
    >>> required = {'2019-02': 0, '2019-03': 1, '2019-04': 2}
    >>> index = _get_date_columns_selector(available, required)
    >>> matrix = numpy.array([
    ...     [1, 2, 3],
    ...     [4, 5, 6],
    ... ])
    >>> matrix[index]
    array([[2, 3, 3],
           [5, 6, 6]])
    """
    max_available_date = max(available_date_offsets)
    columns = []
    for date in sorted(required_date_offsets):
        if date > max_available_date:
            date = max_available_date
        columns.append(available_date_offsets[date])
    # We want all rows
    rows = slice(None, None, None)
    return rows, columns
def bcost(action):
    """Returns the cost (a number) of an action in the bridge problem."""
    # An action is an (a, b, arrow) tuple; a and b are
    # times; arrow is a string.
    a, b, arrow = action
    return max(a, b)
def lr_schedule(epoch):
    """Learning Rate Schedule

    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.

    # Arguments
        epoch (int): The number of epochs

    # Returns
        lr (float32): learning rate
    """
    lr = 0.001
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print("lr:", lr)
    return lr
def get_printable_object_name(obj):
    """
    Get a human-readable object name for our reports. This function tries to
    balance useful information (such as the `__repr__` of an object) with the
    shortest display (to make reports visually understandable). Since this
    text is only read by humans, it can take any form whatsoever.
    """
    try:
        if isinstance(obj, dict):
            return "Dict ID={}: first-10-keys={}".format(id(obj), list(obj.keys())[:10])
        else:
            return "{}: {}".format(type(obj), str(obj))
    except TypeError:
        return "Foreign object (raised TypeError): {}, ID={}".format(type(obj), id(obj))
    except ModuleNotFoundError:
        return "Foreign object (raised ModuleNotFoundError): {}, ID={}".format(type(obj), id(obj))
def load_urls_from_text(text):
    """Load urls from text, one per line; ignore lines with #; ignore duplicates."""
    urls = set()
    lines = text.split('\n')
    for line in lines:
        # Ignore all surrounding white characters
        url = line.strip()
        # Take the url only if it is non-empty and not commented out
        if url and not url.startswith("#"):
            urls.add(url)
    return urls
def extract_path(x):
    """
    extract_path(x)

    Return the path from the circuit `x`.
    Assumption: The start city is city 0.
    """
    n = len(x)
    start = 0  # we start at city 0
    path = []
    for _i in range(n):
        start = x[start]
        path.append(start)
    return path
def format_sale(sale):
    """Formats the results to a dictionary"""
    sale = {
        "id": sale[0],
        "books": sale[1],
        "total": sale[2],
        "created_by": sale[3],
        "attendant_name": sale[5],
        "created_at": str(sale[4])
    }
    return sale
def list_intersect(a, b):
    """ return the intersection of two lists """
    return list(set(a) & set(b))
def dummy_hash(x):
    """ Supplies a constant dummy hash """
    return 'dummy_hash'.encode('utf-8')
def column(matrix, i):
    """
    Return all the values in a specific column.

    Parameters:
        matrix: the input matrix
        i: the column index

    Return value:
        a list with the desired column
    """
    return [row[i] for row in matrix]
def isiterable(x):
    """check if an object is iterable"""
    # try:
    #     from collections import Iterable
    #     return isinstance(x, Iterable)
    # except ImportError:
    try:
        iter(x)
        return True
    except TypeError:
        return False
def line2dict(inline):
    """
    Take a line of a todo.txt file and convert to a helpful dict.
    """
    from datetime import datetime

    result = {}
    if inline[0:2] == 'x ':
        result['done'] = True
        # see if there's a completed date (todo.txt places it right after the
        # 'x'); the original left this unfinished, so the 'completed' key
        # below is a completion of that evident intent
        parts = inline.split()
        if len(parts) > 1:
            try:
                result['completed'] = datetime.strptime(parts[1], '%Y-%m-%d').date()
            except ValueError:
                pass
    else:
        result['done'] = False
    return result
def extract_between(source: str, start: str, end: str) -> str:
    """Extract all of the characters between start and end.

    :param source: The input string from which to extract a substring.
    :param start: The substring that marks the extraction starting place.
    :param end: The substring that marks the place where extraction will end.
    :return: A substring that is extracted from ``source``.
    :note: The ``start`` and ``end`` strings are not included in the result.
    """
    begin = source.find(start) + len(start)
    # Search for `end` only after `start`, so an earlier occurrence of `end`
    # cannot truncate the result.
    return source[begin:source.find(end, begin)]
def pair_greedy(pairs, label_budgets, budget, mapper):
    """
    pairs: (obj_val, id)
    label_budgets: label -> budget
    budget: int
    mapper: id -> label
    """
    label_budgets = {k: v for k, v in label_budgets.items() if v is not None}
    result = set()
    for val, v in pairs:
        if len(result) >= budget:
            break
        label = mapper(v)
        if label not in label_budgets:
            result.add(v)
        else:
            # Add greedily until budget runs out for a particular label
            if label_budgets[label] > 0:
                result.add(v)
                label_budgets[label] -= 1
    return result
def is_happy(num):
    """Process of finding happy numbers"""
    eval_set = set()
    while num not in eval_set and num > 0:
        num_string = str(num)
        squares = [pow(int(i), 2) for i in num_string]
        num_sum = sum(squares)
        if num_sum != 1:
            eval_set.add(num)
            num = num_sum
        else:
            return True
    return False
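# Quick check of is_happy: 19 -> 82 -> 68 -> 100 -> 1, so 19 is happy;
# 4 enters the well-known 4 -> 16 -> 37 -> ... -> 4 cycle, which the
# eval_set detects.
print(is_happy(19))  # True
print(is_happy(4))   # False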
def compute_f1(actual, predicted):
    """
    Computes the F1 score of your predictions. Note that we use 0.5 as the
    cutoff here.
    """
    num = len(actual)

    true_positives = 0
    false_positives = 0
    false_negatives = 0
    true_negatives = 0

    for i in range(num):
        if actual[i] >= 0.5 and predicted[i] >= 0.5:
            true_positives += 1
        elif actual[i] < 0.5 and predicted[i] >= 0.5:
            false_positives += 1
        elif actual[i] >= 0.5 and predicted[i] < 0.5:
            false_negatives += 1
        else:
            true_negatives += 1

    try:
        precision = true_positives / (true_positives + false_positives)
        recall = true_positives / (true_positives + false_negatives)
        F1 = 2 * precision * recall / (precision + recall)
    except ZeroDivisionError:
        F1 = 0.0

    return F1
def write_clusters(filehandle, clusters, max_clusters=None, min_size=1, header=None):
    """Writes clusters to an open filehandle.

    Inputs:
        filehandle: An open filehandle that can be written to
        clusters: An iterator generated by function `clusters` or a dict
        max_clusters: Stop printing after this many clusters [None]
        min_size: Don't output clusters smaller than N contigs
        header: Commented one-line header to add

    Outputs:
        clusternumber: Number of clusters written
        ncontigs: Number of contigs written
    """
    if not hasattr(filehandle, 'writable') or not filehandle.writable():
        raise ValueError('Filehandle must be a writable file')

    # True if clusters is a dict (or anything else that is not an iterator)
    if iter(clusters) is not clusters:
        clusters = clusters.items()

    if max_clusters is not None and max_clusters < 1:
        raise ValueError('max_clusters must be at least 1.')

    if header is not None and len(header) > 0:
        if '\n' in header:
            raise ValueError('Header cannot contain newline')
        if header[0] != '#':
            header = '# ' + header
        print(header, file=filehandle)

    clusternumber = 0
    ncontigs = 0

    for clustername, contigs in clusters:
        if len(contigs) < min_size:
            continue

        clustername = 'cluster_' + str(clusternumber + 1)

        for contig in contigs:
            print(clustername, contig, sep='\t', file=filehandle)
        filehandle.flush()

        clusternumber += 1
        ncontigs += len(contigs)

        # Stop once exactly max_clusters clusters have been written; the
        # original `clusternumber + 1 == max_clusters` stopped one early.
        if clusternumber == max_clusters:
            break

    return clusternumber, ncontigs
def pack_op_args(inputs, outputs, attrs):
    """
    flatten inputs, outputs and attrs
    """
    op_args = (inputs, outputs, attrs)
    return [item for arg in op_args for item in arg]
def align_data(data):
    """Given dict with lists, creates aligned strings

    Adapted from Assignment 3 of CS224N

    Args:
        data: (dict) data["x"] = ["I", "love", "you"]
              (dict) data["y"] = ["O", "O", "O"]

    Returns:
        data_aligned: (dict) data_align["x"] = "I love you "
                             data_align["y"] = "O O    O   "
    """
    spacings = [max([len(seq[i]) for seq in data.values()])
                for i in range(len(data[list(data.keys())[0]]))]
    data_aligned = dict()

    # for each entry, create aligned string
    for key, seq in data.items():
        str_aligned = ""
        for token, spacing in zip(seq, spacings):
            str_aligned += token + " " * (spacing - len(token) + 1)
        data_aligned[key] = str_aligned

    return data_aligned
def chain_dict_update(*ds):
    """Updates multiple dictionaries into one dictionary.

    If the same key appears multiple times, then the last appearance wins.

    >>> m, n, o = {'a':10}, {'b':7}, {'a':4}
    >>> chain_dict_update(m, n, o)
    {'a': 4, 'b': 7}
    """
    dct = {}
    for d in ds:
        dct.update(d)
    return dct
def to_unicode(value):
    """Converts a string argument to a unicode string.

    If the argument is already a unicode string or None, it is returned
    unchanged. Otherwise it must be a byte string and is decoded as utf8.
    """
    if isinstance(value, (str, type(None))):
        return value
    if not isinstance(value, bytes):
        raise TypeError("Expected bytes, unicode, or None; got %r" % type(value))
    return value.decode("utf-8")
def extractBits(number, numBits, position):
    """
    Extract bits from a large number and return the number.

    Parameters:
        number   the number to extract from
        numBits  the amount of bits to extract
        position the position to start from (1-indexed, counting from the
                 least significant bit)

    Returns:
        int  the int of the extracted bits
    """
    return ((1 << numBits) - 1) & (number >> (position - 1))
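# Quick check of extractBits: take 3 bits of 0b110110 starting at
# (1-indexed) position 2, i.e. (0b110110 >> 1) & 0b111 = 0b011.
print(extractBits(0b110110, 3, 2))  # 3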
def _dict_to_other_case(d, switch_case_func):
    """Apply switch_case_func to the first-layer keys of a dict."""
    r = dict()
    for key, value in d.items():
        key = switch_case_func(key)
        r[key] = value
    return r
def isASubset(setToTest, pileList):
    """
    Check if setToTest is an ordered subset of pileList in O(n)

    @ In, setToTest, list, set that needs to be tested
    @ In, pileList, list, pile of sets
    @ Out, isASubset, bool, True if setToTest is a subset
    """
    if len(pileList) < len(setToTest):
        return False
    index = 0
    for element in setToTest:
        try:
            index = pileList.index(element, index) + 1
        except ValueError:
            return False
    return True
def lorentz_transform(p_in, px_in, py_in, pz_in, gamma, beta, nx, ny, nz):
    """
    Perform a Lorentz transform of the 4-momentum (p_in, px_in, py_in, pz_in)
    and return the results

    Parameters
    ----------
    p_in, px_in, py_in, pz_in: floats
        The coordinates of the 4-momentum

    gamma, beta: floats
        Lorentz factor and corresponding beta of the Lorentz transform

    nx, ny, nz: floats
        Coordinates of *normalized* vector that indicates
        the direction of the transform
    """
    p_parallel_in = nx * px_in + ny * py_in + nz * pz_in
    p_out = gamma * (p_in - beta * p_parallel_in)
    p_parallel_out = gamma * (p_parallel_in - beta * p_in)

    px_out = px_in + nx * (p_parallel_out - p_parallel_in)
    py_out = py_in + ny * (p_parallel_out - p_parallel_in)
    pz_out = pz_in + nz * (p_parallel_out - p_parallel_in)

    return (p_out, px_out, py_out, pz_out)
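# Sanity check for lorentz_transform (units with c = 1 are an assumption made
# here): boosting a particle at rest with energy p_in = m along +z should give
# E' = gamma*m and pz' = -gamma*beta*m.
m, gamma = 1.0, 2.0
beta = (1 - 1 / gamma**2) ** 0.5
print(lorentz_transform(m, 0.0, 0.0, 0.0, gamma, beta, 0.0, 0.0, 1.0))
# (2.0, 0.0, 0.0, -1.732...)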
def replaceSuffix(path, old_suffix, new_suffix=None, params=None):
    """Replace the last part of a URL path with something else.

    Also appends an optional list of query parameters. Used for replacing,
    for example, one link ID at the end of a relative URL path with another.

    Args:
        path: HTTP request relative URL path (with no query arguments)
        old_suffix: expected suffix at the end of request.path component;
            if any False value (such as None), the empty string '' is used
        new_suffix: if non-False, appended to request.path along with a
            '/' separator (after removing old_suffix if necessary)
        params: an optional dictionary of query parameters to append to
            the redirect target; appended as ?<key1>=<value1>&<key2>=...

    Returns:
        /path/with/new_suffix?a=1&b=2
    """
    if not old_suffix:
        old_suffix = ''

    old_suffix = '/' + old_suffix

    if path.endswith(old_suffix):
        # also removes any trailing '/' if old_suffix was empty
        path = path[:-len(old_suffix)]

    if new_suffix:
        # if present, appends new_suffix, after '/' separator
        path = '%s/%s' % (path, new_suffix)

    if params:
        # appends any query parameters, after a '?' and separated by '&'
        # (items() replaces the Python 2-only iteritems())
        path = '%s?%s' % (path, '&'.join(
            ['%s=%s' % (p, v) for p, v in params.items()]))

    return path
def prob2(limit=4000000):
    """
    Each new term in the Fibonacci sequence is generated by adding the
    previous two terms. By starting with 1 and 2, the first 10 terms will be:

        1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...

    By considering the terms in the Fibonacci sequence whose values do not
    exceed four million, find the sum of the even-valued terms.
    """
    a = 0
    n1 = 1
    n2 = 2
    while True:
        t = n1 + n2
        n1 = n2
        n2 = t
        if n2 > limit:
            break
        if n2 % 2 == 0:
            a += n2
    # The loop starts past the initial even term 2, so add it back here.
    return a + 2
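# Quick check of prob2: the even Fibonacci terms up to 100 are 2, 8 and 34,
# so the sum is 44.
print(prob2(100))  # 44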
def process_problem_name(problem_name):
    """
    Remove commas and replace space with underscore in problem name

    @param problem_name :: name of the problem that is being considered

    @returns :: the processed problem name
    """
    problem_name = problem_name.replace(',', '')
    problem_name = problem_name.replace(' ', '_')

    return problem_name
def short_type(obj: object) -> str:
    """Return the last component of the type name of an object.

    If obj is None, return 'nil'. For example, if obj is 1, return 'int'.
    """
    if obj is None:
        return 'nil'
    t = str(type(obj))
    return t.split('.')[-1].rstrip("'>")
def TiltToTime(Tilt):
    """
    'Tilt' is in degrees and this function outputs the hours and minutes
    """
    TiltTime = ((Tilt % 360) / 360) * 12
    Hrs = int(TiltTime)
    if Hrs == 0:
        Hrs = 12
    mins = int(round(TiltTime * 60) % 60)
    return (Hrs, mins)
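# Quick check of TiltToTime on the clock-face mapping (360 degrees = 12 hours):
# 90 degrees is a quarter turn, i.e. 3 o'clock.
print(TiltToTime(90))  # (3, 0)
print(TiltToTime(30))  # (1, 0)
print(TiltToTime(0))   # (12, 0), since hour 0 is reported as 12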
def is_license_plate(string):
    """
    Returns true when the string is an Ontario license plate in a format
    such as ABCD-012: 4 letters, followed by a hyphen, and 3 numbers.
    """
    # Guard the length first so the index checks below cannot raise
    # IndexError on short input (and 9+ character strings are rejected too).
    if len(string) != 8:
        return False
    return (string[0].isalpha() and string[1].isalpha()
            and string[2].isalpha() and string[3].isalpha()
            and string[4] == '-'
            and string[5].isdigit() and string[6].isdigit()
            and string[7].isdigit())
def find_argmax(topics_list):
    """
    returns the maximum probability topic id in a
    [(topic_id, topic_prob)...] topics distribution
    """
    m = -1.
    r_tid = -1
    for tid, tprob in topics_list:
        if tprob > m:
            m = tprob
            r_tid = tid
    return r_tid
def drange(start, stop, step):
    """
    Generate a sequence of numbers.
    """
    values = []
    r = start
    while r <= stop:
        values.append(r)
        r += step
    return values
def space(string, length):
    """
    :param string: '556e697432332d41'
    :param length: 4
    :return: 556e 6974 3233 2d41
    """
    return ' '.join(
        string[i:i + length] for i in range(0, len(string), length))
def sizeof_fmt(num, suffix='b'):
    """
    Format a number of bytes into a human readable size
    """
    for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
        if abs(num) < 1024.0:
            return "%3.1f%s%s" % (num, unit, suffix)
        num /= 1024.0
    # Use 'Y' to stay consistent with the non-"i" units above.
    return "%.1f%s%s" % (num, 'Y', suffix)
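# Quick check of sizeof_fmt: 1536 bytes is 1.5 * 1024.
print(sizeof_fmt(1536))           # '1.5Kb'
print(sizeof_fmt(3221225472))     # '3.0Gb'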
def trim_irrelevant_characters(text: list, delimiter: str) -> list:
    """
    :param text: a list of words
    :param delimiter: the delimiter character value ("," / " " / "\\n")
    :return: a list of words stripped from irrelevant characters
    """
    new_text = []
    for word in text:
        if delimiter == ',':
            word = word.replace('\n', '')
            word = word.replace(' ', '')
        if delimiter == ' ':
            word = word.replace('\n', '')
            word = word.replace(',', '')
        new_text.append(word)
    return new_text
def validate_params_dict(params):
    """validates params data type

    Args:
        params: dict. Data that needs to be validated.

    Returns:
        dict. Returns the params argument in dict form.
    """
    if not isinstance(params, dict):
        raise Exception('Expected dict, received %s' % params)
    # The params argument does not represent any domain class, hence the dict
    # form of the data is returned from here.
    return params
def generate_media_name(file_name: str) -> str:
    """Camel cased media name

    Addition of "media" is due to the "upload_to" argument in the media model
    used - `file = models.FileField(upload_to="media", verbose_name=_("file"))`
    """
    name = file_name.replace(" ", "_")
    return f"media/{name}"
def get_fake_pcidevice_required_args(slot='00:00.0', class_id='beef',
                                     vendor_id='dead', device_id='ffff'):
    """Get a dict of args for lspci.PCIDevice"""
    return {
        'slot': slot,
        'class_id': class_id,
        'vendor_id': vendor_id,
        'device_id': device_id
    }
def prayer_beads(data=None, nprays=0):
    """
    Implement a prayer-bead method to estimate parameter uncertainties.

    Parameters
    ----------
    data: 1D float ndarray
        A time-series dataset.
    nprays: Integer
        Number of prayer-bead shifts. If nprays=0, set to the number of
        data points.

    Notes
    -----
    Believing in a prayer bead is a mere act of faith, please don't do that,
    we are scientists for god's sake!
    """
    print(
        "Believing in prayer beads is a mere act of faith, please don't use it"
        "\nfor published articles (see Cubillos et al. 2017, AJ, 153).")
    return None
def _remove_startswith(chunk: str, connector: str) -> str:
    """
    Returns the first string with the second string removed from its start.

    @param chunk: String to be updated
    @param connector: String to be removed from the beginning of the chunk
    @return: Updated 'chunk' string
    """
    if chunk.startswith(f'{connector} '):
        chunk = chunk[len(connector) + 1:]
    elif chunk.startswith(f' {connector} '):
        chunk = chunk[len(connector) + 2:]
    return chunk
def _comment(s):
    """Returns a new string with "#" inserted before each line in 's'."""
    if not s:
        return "#"
    res = "".join(["#" + line for line in s.splitlines(True)])
    if s.endswith("\n"):
        return res + "#"
    return res
def compareTripletsSFW(a, b):
    """Same as compareTriplets, implemented straightforwardly"""
    result = [0, 0]
    for i in range(min(len(a), len(b))):
        if a[i] > b[i]:
            result[0] = result[0] + 1
        elif b[i] > a[i]:
            result[1] = result[1] + 1
    return result