def begin_import_marker(import_file: str) -> str:
    """
    Creates a begin import marker based on the import file path.

    :param import_file: The path of the import file.
    :returns: The begin import marker.
    """
    return f'# nb--import-begin "{import_file}"\n'
def main(textlines, messagefunc, config):
    """KlipChop func to convert CSV into separate lines"""
    result = list()
    count = 0
    for line in textlines():
        if line not in result:
            result.extend(line.split(config['separator']))
            count += 1
    if config['sort']:
        result = sorted(result)
        count = len(result)
    result = '\n'.join(result)
    messagefunc(f'Text converted into {count} lines.')
    return result
def ODEStep(u, um, t, dt, F):
    """2nd order explicit scheme for u''=F(u,t)."""
    up = 2*u - um + dt*dt*F(u, t)
    return up
def __validate_integer_fields(value: int, error_msg: str) -> int:
    """Validate integer values from a dictionary.

    Parameters
    ----------
    value : int
        Value to be validated.
    error_msg : str
        Error message for an invalid value.

    Returns
    -------
    int
        Validated value.

    Raises
    ------
    TypeError
        Raised when the value is not valid (namely, when it is data that
        cannot be cast to int).
    """
    try:
        int_field = int(value)
        return int_field
    except ValueError:
        raise TypeError(error_msg)
def get(identifier):
    """Returns an activation function from a string. Returns its input if it
    is callable (already an activation for example).

    Args:
        identifier (str or Callable or None): the activation identifier.

    Returns:
        :class:`nn.Module` or None
    """
    if identifier is None:
        return None
    elif callable(identifier):
        return identifier
    elif isinstance(identifier, str):
        cls = globals().get(identifier)
        if cls is None:
            raise ValueError("Could not interpret activation identifier: " + str(identifier))
        return cls
    else:
        raise ValueError("Could not interpret activation identifier: " + str(identifier))
def surround(text: str, tag: str):
    """Surround provided text with a tag.

    :param text: text to surround
    :param tag: tag to surround the text with
    :return: original text surrounded with tag
    """
    return "<{tag}>{text}</{tag}>".format(tag=tag, text=text)
def hasNonPropertyAttr(obj, attr):
    """Return true if obj has a non-property attribute 'attr'."""
    if hasattr(obj, attr) and (
            not hasattr(type(obj), attr)
            or not isinstance(getattr(type(obj), attr), property)):
        return True
    else:
        return False
def _is_valid_password(password):
    """
    Evaluates if a given password is valid. Its length must be higher than 0.

    :param str password: Password to be evaluated.
    :return: Returns True if the password is valid and False otherwise.
    :rtype: bool
    """
    if password is None or len(password) == 0:
        return False
    return True
def generate_urls(layer, unit, k):
    """Generate fake URLs for the given layer and unit."""
    return [
        f'https://images.com/{layer}/{unit}/im-{index}.png'
        for index in range(k)
    ]
def to_byte(val):
    """Cast an int to a byte value."""
    return val.to_bytes(1, 'little')
def reverse_complement(dna_sequence):
    """Return reverse complement DNA sequence."""
    complement = [{'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}[base] for base in dna_sequence]
    reverse_complement = reversed(complement)
    return ''.join(reverse_complement)
def idx2coord(vec_idx):
    """Convert row index in response data matrix into 3D coordinate in
    (original) ROI volume.
    """
    data_size = (18, 64, 64)
    coord_z = vec_idx % data_size[2]
    # use floor division so the returned coordinates are integers
    coord_x = vec_idx // (data_size[1]*data_size[2])
    coord_y = (vec_idx % (data_size[1]*data_size[2])) // data_size[2]
    return (coord_x, coord_y, coord_z)
def add_n_smooth(identifier, smooth_input, smooth_time, initial_value, order,
                 subs, subscript_dict):
    """
    Constructs stock and flow chains that implement the calculation of
    a smoothing function.

    Parameters
    ----------
    identifier: basestring
        the python-safe name of the stock
    smooth_input: <string>
        Reference to the model component that is the input to the smoothing function
    smooth_time: <string>
        Can be a number (in string format) or a reference to another model
        element which will calculate the delay. This is calculated throughout
        the simulation at runtime.
    initial_value: <string>
        This is used to initialize the stocks that are present in the delay.
        We initialize the stocks with equal values so that the outflow in the
        first timestep is equal to this value.
    order: string
        The number of stocks in the delay pipeline. As we construct the delays
        at build time, this must be an integer and cannot be calculated from
        other model components. Anything else will yield a ValueError.
    subs: list of strings
        List of strings of subscript indices that correspond to the list of
        expressions, and collectively define the shape of the output
    subscript_dict: dictionary
        Dictionary describing the possible dimensions of the stock's subscripts

    Returns
    -------
    reference: basestring
        reference to the smooth object `__call__` method, which will return
        the output of the smooth process
    new_structure: list
        list of element construction dictionaries for the builder to assemble
    """
    stateful = {
        'py_name': '_smooth_%s' % identifier,
        'real_name': 'Smooth of %s' % smooth_input,
        'doc': 'Smooth time: %s \n Smooth initial value %s \n Smooth order %s' % (
            smooth_time, initial_value, order),
        'py_expr': 'functions.Smooth(lambda: %s, lambda: %s, lambda: %s, lambda: %s)' % (
            smooth_input, smooth_time, initial_value, order),
        'unit': 'None',
        'lims': 'None',
        'eqn': 'None',
        'subs': '',
        'kind': 'stateful',
        'arguments': ''
    }

    return "%s()" % stateful['py_name'], [stateful]
def add_to_list_if_new(dep_list, new_dep):
    """Add new_dep to dep_list if not already present"""
    new_dep_in_list = False
    for dep in dep_list:
        if dep == new_dep:
            new_dep_in_list = True
            break  # the original bare 'exit' was a no-op; 'break' stops the scan
    if not new_dep_in_list:
        dep_list.append(new_dep)
    return dep_list
def rescale_values(x, input_min=-1, input_max=1, output_min=0, output_max=255):
    """Rescale the range of values of `x` from [input_min, input_max] ->
    [output_min, output_max]."""
    assert input_max > input_min
    assert output_max > output_min
    return (x - input_min) * (output_max - output_min) / (input_max - input_min) + output_min
def perim_area_centroid(perim):
    """Calculates the area and centroid of sections defined by closed 2D
    polylines (a list of 2D tuples - e.g. [(x1, y1), (x2, y2), ...]).

    It will also accept 3D coordinates [(x1, y1, z1), (x2, y2, z2), ...] but
    ignores the z-coordinate in calculating the area (and centroid).

    Example:
    >>> area, _ = perim_area_centroid([(-1, -2, 0), (2, -1, 0), (3, 2, 0)])
    >>> area
    4.0
    """
    res = [0.0, 0.0, 0.0]
    x1, y1, *_ = perim[0]
    # close the polyline if necessary
    if perim[0] != perim[-1]:
        perim.append(perim[0])
    for p2 in perim[1:]:
        x2, y2, *_ = p2
        area = (x1 * y2 - x2 * y1) / 2.0
        res[0] += area
        res[1] += (x1 + x2) * area / 3.0
        res[2] += (y1 + y2) * area / 3.0
        x1, y1 = x2, y2
    return (0.0, (0.0, 0.0)) if res[0] == 0.0 else (res[0], (res[1]/res[0], res[2]/res[0]))
def multi_bracket_validation(input):
    """should take a string as its only argument, and should return a boolean
    representing whether or not the brackets in the string are balanced"""
    # A stack-based check replaces the original pairwise membership tests,
    # which mis-handled interleaved brackets such as '([)]'.
    pairs = {')': '(', '}': '{', ']': '['}
    stack = []
    for char in input:
        if char in pairs.values():
            stack.append(char)
        elif char in pairs:
            if not stack or stack.pop() != pairs[char]:
                return False
    return not stack
def _pytest_get_option(config, name, default):
    """Get pytest options in a version independent way, with allowed defaults."""
    try:
        value = config.getoption(name, default=default)
    except Exception:
        try:
            value = config.getvalue(name)
        except Exception:
            return default
    return value
def list_is_unique(ls):
    """Check if every element in a list is unique

    Parameters
    ----------
    ls: list

    Returns
    -------
    bool
    """
    return len(ls) == len(set(ls))
def update_max_for_sim(m, init_max, max_allowed):
    """
    updates the current maximal number allowed by a factor

    :param m: multiplication factor
    :param init_max: initial maximum chromosome number allowed
    :param max_allowed: previous maximum chromosome number allowed
    :return: the current updated maximum for next iteration
    """
    max_for_sim = 200 * m + init_max
    if max_for_sim < max_allowed:
        max_for_sim = max_allowed
    return max_for_sim
def merge_sort2(lst):
    """Recursive merge sort returning a new sorted list."""
    if len(lst) <= 1:
        return lst

    def merge(left, right):
        x = 0
        y = 0
        z_list = []
        while len(left) > x and len(right) > y:
            if left[x] > right[y]:
                z_list.append(right[y])
                y += 1
            else:
                z_list.append(left[x])
                x += 1
        # append leftovers from the current offsets; the original slices
        # (e.g. left[x - len(left) - 1:]) re-copied already-merged elements
        if len(left) != x:
            z_list += left[x:]
        if len(right) != y:
            z_list += right[y:]
        return z_list

    middle = int(len(lst) // 2)
    left = merge_sort2(lst[:middle])
    right = merge_sort2(lst[middle:])
    return merge(left, right)
def check_param_validation(parsed_param, valid_values):
    """Check whether parsed_param contains any valid param value listed in
    valid_values. If yes, return the valid param value appearing in
    parsed_param; if no, return False."""
    if not valid_values:
        return parsed_param
    for value in valid_values:
        if value in parsed_param:
            return value
    return False
def threshold_matrix(threshold, score_matrix):
    """Returns a matrix indicating which pairs are good according to the given threshold"""
    matrix = []
    for row in score_matrix:
        scores = []
        for entry in row:
            if entry >= threshold:
                scores.append(1)
            else:
                scores.append(0)
        matrix.append(scores)
    return matrix
def convertd2b_(res, decimals):
    """Round the result."""
    cad = str(round(res, decimals))
    return cad
def validate(s):
    """Validate that a given square is valid."""
    # START SOLUTION

    # Base case: it's a simple square, so as long as it's either 0 or 1
    # it's valid.
    if type(s) == int:
        return s == 0 or s == 1

    # Base case: if not a list of 4, it's invalid
    if type(s) != list or len(s) != 4:
        return False

    # It's a split square:
    # Recurse into quadrants and check each, "failing fast".
    #
    # Note: alternately, we could write the rest of this function in
    # one pretty line by using the awesome `all(iterable)` function:
    #
    #   return all(validate(q) for q in s)
    for q in s:
        if not validate(q):
            return False
    return True

    # It's questionable style, as it's probably less readable, but
    # this entire function could be written as
    #
    #   return (
    #       (type(s) == int and (s == 0 or s == 1)) or
    #       (type(s) == list and
    #        len(s) == 4 and
    #        all(validate(q) for q in s))
    #   )
    # END SOLUTION
def _get_model(vehicle):
    """Clean the model field. Best guess."""
    model = vehicle['model']
    model = model.replace(vehicle['year'], '')
    model = model.replace(vehicle['make'], '')
    return model.strip().split(' ')[0]
def get_numeric_value(string_value):
    """parses string_value and returns only number-like part"""
    num_chars = ['.', '+', '-']
    number = ''
    for c in string_value:
        if c.isdigit() or c in num_chars:
            number += c
    return number
def time_formatter(milliseconds: int) -> str:
    """Inputs time in milliseconds, to get beautified time, as string"""
    seconds, milliseconds = divmod(int(milliseconds), 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    tmp = (
        ((str(days) + "d, ") if days else "")
        + ((str(hours) + "h, ") if hours else "")
        + ((str(minutes) + "m, ") if minutes else "")
        + ((str(seconds) + "s, ") if seconds else "")
        + ((str(milliseconds) + "ms, ") if milliseconds else "")
    )
    return tmp[:-2]
def group(requestContext, *seriesLists):
    """
    Takes an arbitrary number of seriesLists and adds them to a single
    seriesList. This is used to pass multiple seriesLists to a function which
    only takes one.
    """
    seriesGroup = []
    for s in seriesLists:
        seriesGroup.extend(s)
    return seriesGroup
def parse_mask(value, mask_enum):
    """Parse a mask into individual mask values from enum.

    :params ``int`` value: (Combined) mask value.
    :params ``enum.IntEnum`` mask_enum: Enum of all possible mask values.
    :returns: ``list`` - List of enum values and optional remainder.
    """
    masks = []
    for mask in mask_enum:
        if value & mask:
            masks.append(mask.name)
            value ^= mask
    if value:
        masks.append(hex(value))
    return masks
def color_negative_red(value):
    """
    Colors elements in a dataframe green if positive and red if negative.
    Does not color NaN values.
    """
    if value < 0:
        color = "red"
    elif value > 0:
        color = "green"
    else:
        color = "black"
    return "color: %s" % color
def fib(n):
    """Prints the Fibonacci sequence; pass a count n to fib(n)."""
    a, b = 0, 1  # Note multiple assignment!
    counter = 1
    while counter < n:
        print(a, end=' ')
        a, b = b, a + b
        counter += 1
    print(a)
    print(__name__)
    return 0
def intersection(lst1, lst2):
    """returns the intersection between two lists"""
    if lst1 is None or lst2 is None:
        return []
    lst3 = [value for value in lst1 if value in lst2]
    return lst3
def _count_dependencies(graph):
    """Assigns the total number of dependencies to each node."""
    dependency_counter = {parent: 0 for parent in graph}
    for current_node in graph:
        stack = [[current_node, iter(graph[current_node])]]
        visited = set()
        visited.add(current_node)
        while stack:
            parent, children_iter = stack[-1]
            try:
                child = next(children_iter)
                if child not in visited:
                    visited.add(child)
                    dependency_counter[current_node] += 1
                    stack.append([child, iter(graph[child])])
            except StopIteration:
                stack.pop()
    return dependency_counter
def y(filen):
    """
    Returns the integer in the filename 'filen'. For example, for
    'image_13.npy', this function returns 13 as an integer.
    """
    cname = ''
    for c in filen:
        if c.isdigit():
            cname += c
    return int(cname)
def create_image(src, size=None):
    """Creates HTML image tag. Optionally, twitter size param can be passed."""
    if size is None:
        return "<image src=\"{}\" />".format(src)
    else:
        return "<image src=\"{}:{}\" />".format(src, size)
def order_columns(pks, columns):
    """
    Orders column list to include primary keys first and then non primary key columns

    :param pks: primary key list
    :param columns: columns
    :return: primary key columns + non primary key columns ordered
    """
    pk_list = []
    non_pk_list = []
    for c in columns:
        for pk in pks:
            if c.get("name") == pk:
                pk_list.append(c)
                break
            elif pks[-1] == pk:
                non_pk_list.append(c)
    return pk_list + non_pk_list
def cleanfield(value):
    """remove surrounding spaces; empty values become None"""
    # values like 0 should not be converted to None, so cast to str before
    # stripping instead of calling .strip() on a possibly non-string value
    if value is None:
        return None
    value = str(value).strip()
    if value == "":
        return None
    return value
def note2num(note_str):
    """
    The function NOTE2NUM takes in one string called NOTE_STR and returns
    the tone number associated to the given note.
    """
    # Below is the note dictionary. We will add to this as
    # we process each jazz standard.
    note_list = {'C': 0, 'C#': 1, 'D-': 1, 'D': 2, 'D#': 3, 'E-': 3, 'E': 4,
                 'E#': 5, 'F-': 4, 'F': 5, 'F#': 6, 'G-': 6, 'G': 7, 'G#': 8,
                 'A-': 8, 'A': 9, 'A#': 10, 'B-': 10, 'B': 11, 'B#': 0, 'C-': 11}
    note_out = note_list.get(note_str, 'None')
    if type(note_out) == type('None'):
        note_out = 'Add ' + note_str + ' to the note list.'
    return note_out
def float2latex(f, ndigits=1):
    """
    USAGE
    -----
    texstr = float2latex(f, ndigits=1)

    Converts a float input into a latex-formatted string with 'ndigits'
    (defaults to 1).

    Adapted from:
    http://stackoverflow.com/questions/13490292/format-number-using-latex-notation-in-python
    """
    float_str = "{0:.%se}" % ndigits
    float_str = float_str.format(f)
    base, exponent = float_str.split("e")
    # raw string so that the '\t' in '\times' is not interpreted as a tab
    return r"${0} \times 10^{{{1}}}$".format(base, int(exponent))
def get_help(format, tool_name, rule_id, test_name, issue_dict):
    """
    Constructs a full description for the rule

    :param format: text or markdown
    :param tool_name:
    :param rule_id:
    :param test_name:
    :param issue_dict:
    :return: Help text
    """
    issue_text = issue_dict.get("issue_text", "")
    if format == "text":
        issue_text = issue_text.replace("`", "")
    return issue_text
def _name_clash(candidate, path_list, allowed_occurences=1):
    """Determine if candidate leads to a name clash.

    Args:
        candidate (tuple): Tuple with parts of a path.
        path_list (list): List of pathlib.Paths.
        allowed_occurences (int): How often a name can occur before we call
            it a clash.

    Returns:
        bool
    """
    duplicate_counter = -allowed_occurences
    for path in path_list:
        parts = tuple(reversed(path.parts))
        if len(parts) >= len(candidate) and parts[:len(candidate)] == candidate:
            duplicate_counter += 1
    return duplicate_counter > 0
def _get_timeout_ms(timeout, payload_size):
    """Conservatively assume minimum 5 seconds or 3 seconds per 1MB."""
    if timeout is not None:
        return int(1000 * timeout)
    return int(1000 * max(3 * payload_size / 1024 / 1024, 5))
def parse_port_pin(name_str):
    """Parses a string and returns a (port-num, pin-num) tuple."""
    if len(name_str) < 3:
        raise ValueError("Expecting pin name to be at least 3 characters.")
    if name_str[0] != 'P':
        raise ValueError("Expecting pin name to start with P")
    if name_str[1] < 'A' or name_str[1] > 'K':
        raise ValueError("Expecting pin port to be between A and K")
    port = ord(name_str[1]) - ord('A')
    pin_str = name_str[2:]
    if not pin_str.isdigit():
        raise ValueError("Expecting numeric pin number.")
    return (port, int(pin_str))
def _digi_bounds(fmt):
    """
    Return min and max digital values for each format type.

    Parameters
    ----------
    fmt : str, list
        The WFDB dat format, or a list of them.

    Returns
    -------
    tuple (int, int)
        The min and max WFDB digital value per format type.
    """
    if isinstance(fmt, list):
        return [_digi_bounds(f) for f in fmt]

    if fmt == '80':
        return (-128, 127)
    elif fmt == '212':
        return (-2048, 2047)
    elif fmt == '16':
        return (-32768, 32767)
    elif fmt == '24':
        return (-8388608, 8388607)
    elif fmt == '32':
        return (-2147483648, 2147483647)
def drill_for_value(record, fields):
    """Descends into record using an array of fields, returning final value or None"""
    result = record
    for f in fields:
        if result is None:
            break
        else:
            result = result.get(f)
    return result
def solution(n):
    """Returns the largest palindrome made from the product of two 3-digit
    numbers which is less than n.

    >>> solution(20000)
    19591
    >>> solution(30000)
    29992
    >>> solution(40000)
    39893
    """
    # fetches the next number
    for number in range(n - 1, 10000, -1):
        # converts number into string.
        strNumber = str(number)

        # checks whether 'strNumber' is a palindrome.
        if strNumber == strNumber[::-1]:
            divisor = 999

            # if 'number' is a product of two 3-digit numbers
            # then number is the answer, otherwise fetch next number.
            while divisor != 99:
                if (number % divisor == 0) and (len(str(int(number / divisor))) == 3):
                    return number
                divisor -= 1
def max_hcc_value(nelem, nlayers, slope):
    """Computes the correct hcc value"""
    dx = 1.0/nelem
    dz = 1.0/nlayers
    hcc = slope*dx/dz
    return hcc
def duration_cost(y, y_pred):
    """A loss function for duration costs"""
    return (y - y_pred)**2
def postprocess_html(html, metadata):
    """Returns processed HTML to fit into the slide template format."""
    if metadata.get('build_lists') and metadata['build_lists'] == 'true':
        html = html.replace('<ul>', '<ul class="build">')
        html = html.replace('<ol>', '<ol class="build">')
    return html
def check_image_extension(filename):
    """Checks filename extension."""
    ext = ['.jpg', '.jpeg', '.png']
    for e in ext:
        if filename.endswith(e):
            return True
    return False
def group_mod(group_id, gtype, buckets):
    """group mod"""
    return dict(
        type=gtype,
        group_id=group_id,
        buckets=buckets() if callable(buckets) else buckets,
    )
def set_difference(tree, context, attribs):
    """A meta-feature that will produce the set difference of two boolean
    features (will have keys set to 1 only for those features that occur in
    the first set but not in the second).

    @rtype: dict
    @return: dictionary with keys for keys occurring with the first feature \
        but not the second, and values equal to 1
    """
    ret = {}
    for key, val in context['feats'][attribs[0]].items():
        if key not in context['feats'][attribs[1]]:
            ret[key] = val
    return ret
def _value_properties_are_referenced(val):
    """
    val is a dictionary

    :param val:
    :return: True/False
    """
    if ((u'properties' in val.keys()) and
            (u'$ref' in val['properties'].keys())):
        return True
    return False
def linkers(sequence_descriptors):
    """Provide possible linker component input data."""
    return [
        {
            "component_type": "linker_sequence",
            "linker_sequence": sequence_descriptors[0]
        },
        {
            "component_type": "linker_sequence",
            "linker_sequence": sequence_descriptors[1]
        },
        {
            "component_type": "linker_sequence",
            "linker_sequence": sequence_descriptors[2]
        }
    ]
def knuth_morris_pratt(s, t):
    """Find a substring by Knuth-Morris-Pratt

    :param s: the haystack string
    :param t: the needle string
    :returns: index i such that s[i: i + len(t)] == t, or -1
    :complexity: O(len(s) + len(t))
    """
    assert t != ''
    len_s = len(s)
    len_t = len(t)
    r = [0] * len_t
    j = r[0] = -1
    for i in range(1, len_t):
        while j >= 0 and t[i - 1] != t[j]:
            j = r[j]
        j += 1
        r[i] = j
    j = 0
    for i in range(len_s):
        while j >= 0 and s[i] != t[j]:
            j = r[j]
        j += 1
        if j == len_t:
            return i - len_t + 1
    return -1
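# Hedged usage sketch for knuth_morris_pratt above; the example strings are
# illustrative, not from the original source.
assert knuth_morris_pratt("abracadabra", "cad") == 4   # "abracadabra"[4:7] == "cad"
assert knuth_morris_pratt("abracadabra", "zzz") == -1  # needle not present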
def _invalid_particle_errmsg(argument, mass_numb=None, Z=None):
    """
    Return an appropriate error message for an
    `~plasmapy.utils.InvalidParticleError`.
    """
    errmsg = f"The argument {repr(argument)} "
    if mass_numb is not None or Z is not None:
        errmsg += "with "
    if mass_numb is not None:
        errmsg += f"mass_numb = {repr(mass_numb)} "
    if mass_numb is not None and Z is not None:
        errmsg += "and "
    if Z is not None:
        errmsg += f"integer charge Z = {repr(Z)} "
    errmsg += "does not correspond to a valid particle."
    return errmsg
def _standardize_markups(markups):
    """When terminals don't support the extended set of markup values"""
    newmarkups = []
    if markups:
        if not isinstance(markups, list):
            markups = [markups]
        for markup in markups:
            if markup > 90:
                newmarkups.append(markup - 60)
            else:
                newmarkups.append(markup)
    return newmarkups
def parse_tuple(my_str):
    """Parse input parameters which can be tuples."""
    # remove any kind of parenthesis
    for c in (")", "]", "}"):
        my_str = my_str.rstrip(c)
    for c in ("(", "[", "{"):
        my_str = my_str.lstrip(c)
    # split tuple elements if any
    str_list = my_str.split(",")
    # remove trailing whitespaces
    str_list = [s.rstrip() for s in str_list]
    str_list = [s.lstrip() for s in str_list]
    return str_list
def get_file(filename, result):
    """Get a file by its filename from the results list."""
    return next((f for f in result if f['filename'] == filename), None)
def cpp_type_name(type_as_str):
    """Returns a human readable form of the cpp type."""
    return {
        'uint8_t': 'UInt8',
        'uint16_t': 'UInt16',
        'uint32_t': 'UInt32',
        'uint64_t': 'UInt64',
        'int8_t': 'Int8',
        'int16_t': 'Int16',
        'int32_t': 'Int32',
        'int64_t': 'Int64',
        'bool': 'Boolean',
        'float': 'FloatingPoint32',
        'double': 'FloatingPoint64',
        'const char *': 'UTF8String',
        'nl::SerializedByteString': 'ByteString',
    }.get(type_as_str, 'Structure')  # .get keeps the same behavior as setdefault without mutating the throwaway dict
def response_plain_text_promt(output, reprompt_text, endsession):
    """create a simple JSON plain text response"""
    return {
        'outputSpeech': {
            'type': 'PlainText',
            'text': output
        },
        'reprompt': {
            'outputSpeech': {
                'type': 'PlainText',
                'text': reprompt_text
            }
        },
        'shouldEndSession': endsession
    }
def filter_empty(input_list, target_list):
    """Filter empty inputs or targets

    Arguments:
        input_list {list} -- input list
        target_list {list} -- target list

    Returns:
        input_list, target_list -- data after filter
    """
    return_input, return_target = [], []
    for inp, tar in zip(input_list, target_list):
        if inp and tar:
            return_input.append(inp)
            return_target.append(tar)
    return return_input, return_target
def _process_app_path(app_path):
    """Return an app path HTML argument to add to a Bokeh server URL.

    Args:
        app_path (str):
            The app path to add. If the app path is ``/`` then it will be
            ignored and an empty string returned.
    """
    if app_path == "/":
        return ""
    return "&bokeh-app-path=" + app_path
def encode_string_to_url(str):
    """
    takes a string (like a name) and replaces the spaces with underscores to
    make a string for a url

    :param str: string
    :return: encoded string with underscores for spaces

    >>> encode_string_to_url('hello')
    'hello'
    >>> encode_string_to_url('hello world')
    'hello_world'
    >>> encode_string_to_url(' hello world ')
    'hello_world'
    """
    s = str.strip()
    return s.replace(' ', '_')
def formatted_number(number):
    """try to format a number to formatted string with thousands"""
    try:
        number = int(number)
        if number < 0:
            return '-' + formatted_number(-number)
        result = ''
        while number >= 1000:
            number, number2 = divmod(number, 1000)
            result = ",%03d%s" % (number2, result)
        return "%d%s" % (number, result)
    except Exception:
        return ""
def obj_to_dict(obj, *args, _build=True, _module=None, _name=None,
                _submodule=None, **kwargs):
    """Encodes an object in a dictionary for serialization.

    Args:
        obj: The object to encode in the dictionary.

    Other parameters:
        _build (bool): Whether the object is to be built on deserialization.
        _module (str): Custom name for the module containing the object.
        _name (str): Custom name of the object.
        _submodule (str): Name of a submodule (e.g. the class holding a
            classmethod). Only used when defined.
        args: If the object is to be built, the arguments to give on creation.
        kwargs: If the object is to be built, the keyword arguments to give
            on creation.

    Returns:
        dict: The dictionary encoding the object.
    """
    d = {
        "_build": _build,
        "__module__": _module if _module else obj.__class__.__module__,
        "__name__": _name if _name else obj.__class__.__name__,
    }
    if _build:
        d["__args__"] = args
        d["__kwargs__"] = kwargs
    if _submodule:
        d["__submodule__"] = _submodule
    return d
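# Hedged usage sketch for obj_to_dict above; the Fraction example is
# illustrative, not from the original source.
from fractions import Fraction

encoded = obj_to_dict(Fraction(1, 3), 1, 3)
# encoded == {"_build": True, "__module__": "fractions", "__name__": "Fraction",
#             "__args__": (1, 3), "__kwargs__": {}}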
def as_unicode(b):
    """
    Convert a byte string to a unicode string

    :param b: byte string
    :return: unicode string
    """
    try:
        b = b.decode()
    except (AttributeError, UnicodeDecodeError):
        pass
    return b
def largest_product(number, series):
    """Calculate the largest product from continuous substring of given length."""
    if len(number) < series or series < 0:
        raise ValueError
    if not series:
        return 1
    maximum = 0
    for i in range(len(number) + 1 - series):
        partial_sum = int(number[i])
        for j in number[i + 1:i + series]:
            partial_sum *= int(j)
        maximum = max(maximum, partial_sum)
    return maximum
def uniquecounts(rows):
    """
    Returns the belonging of the instances to the classes

    param rows -- the dataset in the current branch, the class is the
    argument in the last column
    """
    results = {}
    for row in rows:
        r = row[len(row) - 1]
        if r not in results:
            results[r] = 0
        results[r] += 1
    return results
def get_positions(start_idx, end_idx, length):
    """Get subj/obj position sequence."""
    return (list(range(-start_idx, 0)) + [0] * (end_idx - start_idx + 1)
            + list(range(1, length - end_idx)))
def invert_mask(bin_mask: str) -> str:
    """
    Return inverted binary mask from binary mask (bin_mask).

    >>> invert_mask('11111111.11111111.11111111.00000000')
    '00000000.00000000.00000000.11111111'
    """
    inverted_mask = ''
    for char in bin_mask:
        if char == "1":
            inverted_mask += '0'
        elif char == '0':
            inverted_mask += '1'
        else:
            inverted_mask += '.'
    return inverted_mask
def score_to_flag(score: float):
    """Classify an inappropriateness probability into discrete categories.

    :param score: float (0.0 -> 1.0), required.

    Returns:
        int: 0 = OK, 1 = REVIEW, 2 = BLOCK
    """
    assert isinstance(score, float), f'score type ({type(score)}) not float.'
    assert score >= 0.0 and score <= 1.0, \
        f'Score ({score}) outside acceptable range (0->1).'
    if score < 0.2:
        return 0
    elif score < 0.95:
        return 1
    else:
        return 2
def test_for_blank_line(source: str) -> bool:
    """
    Returns True if 'source' is effectively a blank line, either "\\n", " ",
    or "", or any combination thereof. Returns False, otherwise.
    """
    return not bool(source.strip())
def sql_cleaner(s):
    """Replace period and space with underscore

    Args:
        s (string): string to be cleaned

    Returns:
        string: cleaned sql string
    """
    return s.replace(".", "_").replace(" ", "_")
def clean_up_spacing(sentence: str) -> str:
    """Clean up leading and trailing space characters from the string.

    :param sentence: str - a sentence to clean of leading and trailing space characters.
    :return: str - a sentence that has been cleaned of leading and trailing space characters.
    """
    return sentence.lstrip().rstrip()
def wfc3_biasfile_filter(kmap):
    """Filter to customize WFC3 BIASFILE for hst_gentools/gen_rmap.py.

    Adds precondition_header() hook to rmap header. Driven by CDBS special case code.
    """
    header_additions = {
        "hooks": {
            "precondition_header": "precondition_header_wfc3_biasfile_v1",
        },
    }
    return kmap, header_additions
def predecessors_query(var_name, node_id, node_label, edge_label,
                       predecessor_label=None):
    """Generate query for getting the ids of all the predecessors of a node.

    Parameters
    ----------
    var_name
        Name of the variable corresponding to the node to match
    node_id
        Id of the node to match
    node_label
        Label of the node to match
    edge_label
        Label of the edge to match
    predecessor_label
        Label of the predecessors we want to find. node_label if None.
    """
    if predecessor_label is None:
        predecessor_label = node_label
    query = (
        "OPTIONAL MATCH (pred:{})-[:{}]-> (n:{} {{id : '{}'}})\n".format(
            predecessor_label, edge_label, node_label, node_id) +
        "RETURN pred.id as pred"
    )
    return query
def char2code(c):
    """Convert character to unicode numeric character reference."""
    try:
        return "&#%03d;" % ord(c)
    except Exception:  # fallback; pylint: disable=broad-except
        return c
def write_file(file_to_write, text):
    """Write a file specified by 'file_to_write' and return (True, None) in
    case of success or (False, <error message>) in case of failure"""
    try:
        with open(file_to_write, 'w') as f:
            f.write(text)
    except Exception as e:
        return (False, str(e))
    return (True, None)
def get_fashion_mnist_labels(labels):  #@save
    """Return text labels for the Fashion-MNIST dataset."""
    text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
    return [text_labels[int(i)] for i in labels]
def two_arrays(k, A, B):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/two-arrays/problem

    Consider two n-element arrays of integers, A = [A[0], A[1], ..., A[n-1]] and
    B = [B[0], B[1], ..., B[n-1]]. You want to permute them into some A' and B'
    such that the relation A'[i] + B'[i] >= k holds for all i where 0 <= i < n.
    For example, if A = [0, 1], B = [0, 2], and k = 1, a valid A', B' satisfying
    our relation would be A' = [1, 0] and B' = [0, 2], 1 + 0 >= 1 and 0 + 2 >= 1.

    You are given q queries consisting of A, B, and k. For each query, print YES
    on a new line if some permutation A', B' satisfying the relation above
    exists. Otherwise, print NO.

    Solve: We sort the two arrays, A in ascending order, and B in descending
    order, so we can iterate through once and compare each position i to see if
    A[i] + B[i] is greater or equal to k.

    Args:
        k (int): an integer
        A (list): first array of integers
        B (list): second array of integers

    Returns:
        str: "YES" or "NO" depending on whether we can find permutation A', B'
            that satisfy the relationship
    """
    A.sort()
    B.sort(reverse=True)
    for i in range(len(A)):
        if A[i] + B[i] < k:
            return "NO"
    return "YES"
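# Hedged usage sketch for two_arrays above, reusing the docstring's example
# plus one failing query (the second query is illustrative, not from the source):
assert two_arrays(1, [0, 1], [0, 2]) == "YES"
assert two_arrays(10, [1, 2, 3], [7, 8, 8]) == "NO"  # 1 + 8 < 10 after sorting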
def matrix_to_string(matrix, header):
    """
    Note: this function is from:
    http://mybravenewworld.wordpress.com/2010/09/19/print-tabular-data-nicely-using-python/
    I modified it a bit. ;-)

    Return a pretty, aligned string representation of a nxm matrix.

    This representation can be used to print any tabular data, such as
    database results. It works by scanning the lengths of each element in each
    column, and determining the format string dynamically.

    @param matrix: Matrix representation (list with n rows of m elements).
    @param header: Optional tuple with header elements to be displayed.
    """
    lengths = []
    matrix = [header] + matrix
    for row in matrix:
        # enumerate instead of row.index(column), which returned the wrong
        # position whenever a row contained duplicate values
        for i, column in enumerate(row):
            cl = len(str(column))
            try:
                ml = lengths[i]
                if cl > ml:
                    lengths[i] = cl
            except IndexError:
                lengths.append(cl)
    lengths = tuple(lengths)

    format_string = ""
    for length in lengths:
        format_string += "%-" + str(length) + "s "
    format_string += "\n"

    matrix_str = ""
    for row in matrix:
        matrix_str += format_string % tuple(row)

    return matrix_str
def build_error_msg(error, code=10):
    """Builds an error message to add to the queue"""
    msg = {
        'jsonrpc': '2.0',
        'error': {
            'code': code,
            'message': f'{error}'
        },
        'id': 'app_error'
    }
    return msg
def from_wandb_format(config: dict) -> dict:
    """
    Create a usable config from the wandb config format.

    Args:
        config: The config to convert.

    Returns:
        The converted config.
    """
    wandb_config = {}
    for key, value in config.items():
        if '.' in key:
            sub_keys = key.split('.')
            curr_dict = wandb_config
            for sub_key in sub_keys[:-1]:
                if sub_key not in curr_dict:
                    curr_dict[sub_key] = {}
                curr_dict = curr_dict[sub_key]
            if isinstance(value, dict) and 'value' in value:
                curr_dict[sub_keys[-1]] = value['value']
            else:
                curr_dict[sub_keys[-1]] = value
        elif isinstance(value, dict) and 'value' in value:
            wandb_config[key] = value['value']
        else:
            wandb_config[key] = value
    return wandb_config
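# Hedged usage sketch for from_wandb_format above, showing how dotted keys are
# expanded into nested dicts and 'value' wrappers are unwrapped (the config
# values are illustrative, not from the original source):
raw = {"optimizer.lr": {"value": 0.001}, "epochs": {"value": 10}, "seed": 42}
assert from_wandb_format(raw) == {"optimizer": {"lr": 0.001}, "epochs": 10, "seed": 42}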
def clean_name(s):
    """Strip accents and remove interpunctuation."""
    import unicodedata
    new = s.split("-")[0]
    return ''.join(c for c in unicodedata.normalize('NFD', new)
                   if unicodedata.category(c) != 'Mn')
def next_with_custom_error(iterator, custom_error, *args):
    """Get next element of iterator and call a custom function when failing"""
    try:
        return next(iterator, None)
    except Exception as error:
        custom_error(error, *args)
        raise Exception(error)
def fib(n):
    """Return a list of the Fibonacci series up to n."""
    a, b = 0, 1
    c = []
    while b < n:
        c.append(b)
        a, b = b, a + b
    return c
def irb_decay_to_gate_error(irb_decay: float, rb_decay: float, dim: int):
    """
    Eq. 4 of [IRB]_, which provides an estimate of the error of the
    interleaved gate, given both the observed interleaved and standard decay
    parameters.

    :param irb_decay: Observed decay parameter in irb experiment with desired
        gate interleaved between Cliffords
    :param rb_decay: Observed decay parameter in standard rb experiment.
    :param dim: Dimension of the Hilbert space, 2**num_qubits
    :return: Estimated gate error of the interleaved gate.
    """
    return ((dim - 1) / dim) * (1 - irb_decay / rb_decay)
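# Hedged numeric sketch for irb_decay_to_gate_error above; the decay values
# are made up for illustration, not measured data. For one qubit (dim = 2):
#   ((2 - 1) / 2) * (1 - 0.95 / 0.98) = 0.5 * 0.0306... ~= 0.0153
error = irb_decay_to_gate_error(irb_decay=0.95, rb_decay=0.98, dim=2)
assert abs(error - 0.0153) < 1e-3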
def extract_tables_from_query(sql_query: str):
    """return a list of table_names"""
    return [word for word in sql_query.split(" ") if len(word.split(".")) == 3]
def convert_to_resource_name(string):
    """
    Deletes commas and equal signs, replaces colons with dashes and * with "any"
    """
    return (
        string.replace(":", "-").replace(",", "").replace("=", "").replace("*", "any")
    )
def _parse_version(version):
    """Parses a VHDL version string or int, 2- or 4-digit style, to a full
    4-digit version identifier integer."""
    if version is None:
        return None
    version = int(version)
    if version < 70:
        version += 2000
    elif version < 100:
        version += 1900
    return version
def hello(name):
    """Custom function."""
    return {"text": "Hello, {}".format(name)}
def has_scheduled_methods(cls):
    """Decorator; use this on a class for which some methods have been
    decorated with :func:`schedule` or :func:`schedule_hint`. Those methods
    are then tagged with the attribute `__member_of__`, so that we may
    serialise and retrieve the correct method.

    This should be considered a patch to a flaw in the Python object model."""
    for member in cls.__dict__.values():
        if hasattr(member, '__wrapped__'):
            member.__wrapped__.__member_of__ = cls
    return cls
def tag_contents_xpath(tag, content):
    """Constructs an xpath matching element with tag containing content"""
    content = content.lower()
    return '//{}[contains(translate(*,"ABCDEFGHIJKLMNOPQRSTUVWXYZ","abcdefghijklmnopqrstuvwxyz"),"{}")]'.format(tag, content)
def P_Murn(V, a):
    """
    As :py:func:`E_MurnV` but input parameters are given as a single list
    *a=[a0,a1,a2,a3]* and it returns the pressure not the energy from the EOS.
    """
    return a[2]/a[3]*(pow(a[1]/V, a[3])-1.0)
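# Small sanity sketch for P_Murn above. The coefficients are illustrative and
# their physical reading (a1 as equilibrium volume, a2/a3 as bulk modulus and
# its derivative) is an assumption; the check itself only uses the formula:
# at V == a[1] the bracket vanishes, so the returned pressure is zero.
a = [0.0, 100.0, 50.0, 4.0]
assert P_Murn(100.0, a) == 0.0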
def pullCountryName(countryObj):
    """
    Pull out the country name from a country id.
    If there's no "name" property in the object, returns None.
    """
    try:
        return countryObj['name']
    except (KeyError, TypeError):
        return None
def build_note(text, level=1, limit=180, strip=True, keyword="NOTE"):
    """Format a note for gedcom output"""
    note = []
    key = int(level)
    tag = keyword
    data = text
    if strip:
        data = data.strip()
    while data != "":
        index = limit
        if len(data) < limit:
            index = len(data)
        else:
            while data[index - 1] == " " and index > 0:
                index = index - 1
        chunk = data[:index]
        data = data[index:]
        entry = "{0} {1} {2}".format(key, tag, chunk)
        note.append(entry)
        tag = "CONC"
        key = int(level) + 1
    return note
def isAncestor(types, name, base):
    """
    Returns true if 'base' is an ancestor of 'name'. Particularly useful for
    checking if a given Vulkan type descends from VkDevice or VkInstance.
    """
    if name == base:
        return True
    type = types.get(name)
    if type is None:
        return False
    parent = type.get('parent')
    if not parent:
        return False
    return isAncestor(types, parent, base)
def lookuptable_decoding(training_results, real_results):
    """
    Calculates the logical error probability using postselection decoding.
    This postselects all results with trivial syndrome.

    Args:
        training_results: A results dictionary, as produced by the
            `process_results` method of a code.
        real_results: A results dictionary, as produced by the
            `process_results` method of a code.

    Returns:
        logical_prob: Dictionary of logical error probabilities for each of
            the encoded logical states whose results were given in the input.

    Additional information:
        Given two dictionaries of results, as produced by a code object, the
        logical error probability is calculated for lookup table decoding.
        This is done using `training_results` as a guide to which syndrome is
        most probable for each logical value, and the probability is
        calculated for the results in `real_results`.
    """
    logical_prob = {}
    for log in real_results:
        shots = 0
        incorrect_shots = 0
        for string in real_results[log]:
            p = {}
            for testlog in ['0', '1']:
                if string in training_results[testlog]:
                    p[testlog] = training_results[testlog][string]
                else:
                    p[testlog] = 0
            shots += real_results[log][string]
            if p['1' * (log == '0') + '0' * (log == '1')] > p[log]:
                incorrect_shots += real_results[log][string]
        logical_prob[log] = incorrect_shots / shots
    return logical_prob
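# Hedged usage sketch for lookuptable_decoding above; the toy results
# dictionaries are illustrative, not produced by a real code object.
training = {'0': {'00': 90, '11': 10}, '1': {'00': 5, '11': 95}}
real = {'0': {'00': 8, '11': 2}, '1': {'00': 1, '11': 9}}
probs = lookuptable_decoding(training, real)
# probs == {'0': 0.2, '1': 0.1}: the share of shots whose outcome string is
# more probable under the opposite logical value in the training data.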