def _dat_str(day, month, year):
    """Create date string."""
    dd = int(day)
    if dd < 10:
        sd = '0' + str(dd)
    else:
        sd = str(dd)
    mm = int(month)
    if mm < 10:
        sm = '0' + str(mm)
    else:
        sm = str(mm)
    sy = str(int(year))
    return sd + '.' + sm + '.' + sy
def nonsmall_diffs(linelist1, linelist2, small=0.0):
    """
    Return True if line lists differ significantly; otherwise return False.
    Significant numerical difference means one or more numbers differ (between
    linelist1 and linelist2) by more than the specified small amount.
    """
    # embedded function used only in nonsmall_diffs function
    def isfloat(value):
        """
        Return True if value can be cast to float; otherwise return False.
        """
        try:
            float(value)
            return True
        except ValueError:
            return False
    # begin nonsmall_diffs logic
    assert isinstance(linelist1, list)
    assert isinstance(linelist2, list)
    if len(linelist1) != len(linelist2):
        return True
    assert 0.0 <= small <= 1.0
    epsilon = 1e-6
    smallamt = small + epsilon
    for line1, line2 in zip(linelist1, linelist2):
        if line1 == line2:
            continue
        tokens1 = line1.replace(',', '').split()
        tokens2 = line2.replace(',', '').split()
        for tok1, tok2 in zip(tokens1, tokens2):
            tok1_isfloat = isfloat(tok1)
            tok2_isfloat = isfloat(tok2)
            if tok1_isfloat and tok2_isfloat:
                if abs(float(tok1) - float(tok2)) <= smallamt:
                    continue
                return True
            elif not tok1_isfloat and not tok2_isfloat:
                if tok1 == tok2:
                    continue
                return True
            else:
                return True
    return False
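# Illustrative usage sketch (assumed example, not from the original source): two
# lines whose only difference is a numeric value inside the default tolerance are
# not a significant diff, while a larger numeric gap is.
assert nonsmall_diffs(["tax 100.000001"], ["tax 100.000002"]) is False
assert nonsmall_diffs(["tax 100.0"], ["tax 101.0"]) is True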
def _model_name(name):
    """
    Extracts the main component of a model name, removing suffixes such as
    ``Classifier``, ``Regressor``, ``CV``.

    @param name string
    @return shorter string
    """
    if name.startswith("Select"):
        return "Select"
    if name.startswith("Nu"):
        return "Nu"
    modif = 1
    while modif > 0:
        modif = 0
        for suf in ['Classifier', 'Regressor', 'CV', 'IC', 'Transformer']:
            if name.endswith(suf):
                name = name[:-len(suf)]
                modif += 1
    return name
def bytes_to_string(bytes):
    """Converts number of bytes to a string.

    Based on old code here: http://dev.jmoiron.net/hg/weechat-plugins/file/tip/mp3.py
    Uses proc-like units (capital B, lowercase prefix). This only takes a few
    microseconds even for numbers in the terabytes.
    """
    units = ['B', 'kB', 'mB', 'gB', 'tB']
    negate = bytes < 0
    if negate:
        bytes = -bytes
    factor = 0
    while bytes / (1024.0 ** (factor + 1)) >= 1:
        factor += 1
    return '%s%0.1f %s' % ('-' if negate else '',
                           bytes / (1024.0 ** factor),
                           units[factor])
def fib(n):
    """Return the nth Fibonacci number."""
    i = 0  # the first Fibonacci number
    j = 1  # the second Fibonacci number
    n = n - 1
    while n >= 0:
        i, j = j, i + j  # each step adds the preceding number
        n = n - 1
    return i
def qte(lne):
    """Quotes the given string."""
    return "'" + lne + "'"
def arithmetic_sum_n(a_1, d, n):
    """Calculate the sum of an arithmetic series.

    Parameters:
        a_1     The first element of the series
        d       The difference between elements
        n       The number of elements in the series to sum

    Return:
        The sum of n numbers in the arithmetic series.
    """
    a_n = a_1 + d*(n - 1)
    return n*(a_1 + a_n) // 2
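# Quick sanity check (assumed example): the arithmetic series 1 + 2 + ... + 10.
assert arithmetic_sum_n(1, 1, 10) == 55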
def measure(function, xs, ys, popt, weights):
    """Measure the quality of a fit."""
    m = 0
    n = 0
    for x in xs:
        try:
            if len(popt) == 2:
                m += (ys[n] - function(x, popt[0], popt[1]))**2 * weights[n]
            elif len(popt) == 3:
                m += (ys[n] - function(x, popt[0], popt[1], popt[2]))**2 * weights[n]
            else:
                raise NotImplementedError
            n += 1
        except IndexError:
            raise RuntimeError('y does not exist for x = ', x,
                               ' this should not happen')
    return m
def get_matching_entities_in_inning(entry, inning, entities):
    """
    Method to get matching entities in an inning with the summary
    :param entry:
    :param inning:
    :param entities:
    :return:
    """
    plays = entry["play_by_play"]
    entities_in_inning = set()
    for top_bottom in ["top", "bottom"]:
        # inning may be of a previous match like "He got the victory Friday when he
        # got David Ortiz to hit into an inning-ending double play in the 11th inning"
        if str(inning) in plays:
            inning_plays = plays[str(inning)][top_bottom]
            for inning_play in inning_plays:
                for attrib in ["batter", "pitcher", "fielder_error"]:
                    if attrib in inning_play:
                        entities_in_inning.add(inning_play[attrib])
                for attrib in ["scorers", "b1", "b2", "b3"]:
                    if attrib in inning_play and len(inning_play[attrib]) > 0 \
                            and inning_play[attrib][0] != "N/A":
                        for baserunner_instance in inning_play[attrib]:
                            entities_in_inning.add(baserunner_instance)
    intersection = entities_in_inning.intersection(entities)
    return intersection
def cleanLocals(locals_dict):
    """
    When passed a locals() dictionary, clean out all of the hidden keys and return.
    """
    return dict((k, v) for (k, v) in locals_dict.items()
                if not k.startswith("__") and k != "self")
def _unresr_input(endfin, pendfin, pendfout, mat,
                  temperatures=[293.6], sig0=[1e10],
                  iprint=False, **kwargs):
    """Write unresr input.

    Parameters
    ----------
    endfin : `int`
        tape number for input ENDF-6 file
    pendfin : `int`
        tape number for input PENDF file
    pendfout : `int`
        tape number for output PENDF file
    mat : `int`
        MAT number
    temperatures : iterable of `float`
        iterable of temperature values in K (default is 293.6 K)
    sig0 : iterable of `float`
        iterable of dilution values in barns (default is 1e10 b)
    iprint : `bool`
        print option (default is `False`)

    Returns
    -------
    `str`
        unresr input text
    """
    text = ["unresr"]
    text += ["{:d} {:d} {:d} /".format(endfin, pendfin, pendfout)]
    text += ["{:d} {:d} {:d} {:d} /".format(mat, len(temperatures), len(sig0), int(iprint))]
    text += [" ".join(map("{:.1f}".format, temperatures)) + " /"]
    text += [" ".join(map("{:.2E}".format, sig0)) + " /"]
    text += ["0 /"]
    return "\n".join(text) + "\n"
def _self_updating_js(ident):
    """Create JS code for a Callback that keeps a 1d CDS selection up to date."""
    return """
    // Define a callback to capture errors on the Python side
    function callback(msg){
        console.log("Python callback returned unexpected message:", msg)
    }
    callbacks = {iopub: {output: callback}};
    var selected_str = JSON.stringify(cb_obj.selected['1d'])
    var cmd = "cds_hack['""" + ident + """']['source'].selected['1d'].update(" + selected_str + ")"
    // Execute the command on the Python kernel
    var kernel = IPython.notebook.kernel;
    kernel.execute(cmd, callbacks, {silent : false});
    var cmd = "for link in cds_hack['""" + ident + """']['linked']: link.update()"
    kernel.execute(cmd, callbacks, {silent : false});
    """
def generate_attributes(num_replications, num_global_memory_banks=32):
    """
    Generates the kernel attributes for the global memory. They specify in which
    global memory the buffer is located. The buffers will be placed using a round
    robin scheme using the available global memory banks and the number of
    replications that should be generated (e.g. if a global memory contains
    multiple banks).

    @param num_replications Number of kernel replications
    @param num_global_memory_banks Number of global memory banks that should be
           used for generation

    @return Array of strings that contain the attributes for every kernel
    """
    return ["__attribute__((buffer_location(\"host\")))"
            for _ in range(num_replications)]
def clean_name(_name):
    """Remove unsupported characters from field names.

    Converts any "desirable" separators to underscore, then removes all
    characters that are unsupported in Python class variable names. Also
    removes leading numbers and underscores.

    Arguments:
        _name {str} -- Initial field name

    Returns:
        {str} -- Cleaned field name
    """
    import re

    # same separator set as before, written as a plain character class
    _replaced = re.sub(r"[-.@~:/|\s]", "_", _name)
    _scrubbed = "".join(re.findall(r"([a-zA-Z]\w+|\_+)", _replaced))
    return _scrubbed.lower()
def data_error(campo, msg):
    """
    Method that returns the standard error dict for validate_data.

    campo: name of the field with the error
    msg: error message.

    return: a dict
    """
    data = {
        "error_message": {
            campo: [msg]
        }
    }
    return data
def filterMatches(matches, prevKeypoints, currKeypoints, threshold=30):
    """
    This function discards matches between consecutive frames based on the
    distance between keypoints, which has to be below a certain threshold, and
    based on whether their scale increases (keypoints that don't increase in
    scale are not useful for TTC).

    In the paper, the threshold for the L2 distance (Euclidean) was 0.25.
    For ORB, most inliers are included (and most outliers excluded) with a
    threshold of 64.
    """
    # queryIdx refers to prevKeypoints, trainIdx refers to currKeypoints
    filteredMatches = list(filter(
        lambda m: m.distance < threshold
        and currKeypoints[m.trainIdx].size > prevKeypoints[m.queryIdx].size,
        matches))
    # Check if length is reasonable
    # print(len(filteredMatches))
    return filteredMatches
def get_prefix4WOA18(input):
    """Convert WOA18 variable name string into the prefix used on files."""
    d = {
        'temperature': 't',
        'salinity': 's',
        'phosphate': 'p',
        'nitrate': 'n',
        'oxygen': 'o',
        'silicate': 'i',
    }
    return d[input]
def gen_addrmap(regmap):
    """
    Collect all addresses as keys to the addrmap dict. Values are the names.
    """
    addrmap = dict()
    for key, item in regmap.items():
        if "base_addr" in item:
            addr = item["base_addr"]
            aw = item["addr_width"]
            addri = int(str(addr), 0)
            if addri in addrmap:
                raise ValueError("Double assigned localbus address!")
            if addri >= (0x01000000 >> 2):
                raise ValueError("Localbus address outside valid range!")
            if 0 < aw <= 6:
                # cutoff array generation if length > 32.
                for ix in range(1 << aw):
                    addrmap[addri + ix] = key + '_' + str(ix)
            elif aw == 0:
                addrmap[addri] = key
            else:
                print("Large array ignored (len>32), key: {}".format(key))
    return addrmap
def foundInErrorMessages(message):
    """Validate if the message is found in the error messages.

    Args:
        message (String): message to find

    Returns:
        bool: found in the error messages
    """
    return message in [
        "2001: NO EXISTEN REGISTROS EN LA CONSULTA"
    ]
def midi_filter(midi_info):
    """Return True for qualified midi files and False for unwanted ones."""
    if midi_info['first_beat_time'] > 0.0:
        return False
    elif midi_info['num_time_signature_change'] > 1:
        return False
    elif midi_info['time_signature'] not in ['4/4']:
        return False
    return True
def midpointint(f, a, b, n):
    """Approximate the integral of f over [a, b] with the midpoint rule.

    Takes a function, the interval endpoints and the number of subintervals
    to be made in that interval, and returns the approximate value of the
    integral of the given function in the given interval.
    """
    h = (b - a) / float(n)
    s = 0.0
    # sum f at the midpoint of each of the n subintervals
    for i in range(n):
        s += f(a + (i + 0.5) * h)
    midpoint = h * s
    return midpoint
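# Illustrative check (assumed example): the midpoint rule applied to f(x) = x**2
# on [0, 1] with 100 subintervals should be very close to the exact value 1/3.
assert abs(midpointint(lambda x: x * x, 0, 1, 100) - 1.0 / 3.0) < 1e-4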
def replace_conditional(record: dict, field: str, value: str, replacement: str):
    """
    Function to conditionally replace a value in a field.

    Parameters
    ----------
    record : dict
        Input record.
    field : str
        Key of field to be conditionally altered.
    value : str
        Value to identify and replace.
    replacement : str
        Value to insert on replacement.

    Returns
    -------
    type
        Record with specified key altered if `record[key] == value`.
        Otherwise, the original record is returned.
    """
    if record[field] == value:
        record[field] = replacement
    return record
def strbool(val):
    """
    Create bool object from string. None is returned if input is not determinable.

    @param val: the string to check.
    @type val: str
    @return: True/False/None.
    @rtype: bool
    """
    try:
        return bool(int(val))
    except (ValueError, TypeError):
        pass
    val = val.lower()
    if val in ('true', 'on'):
        return True
    elif val in ('false', 'off'):
        return False
    else:
        return None
def table(title, headers, data_node):
    """
    Returns a dictionary representing a new table element. Tables are specified
    with two main pieces of information, the headers and the data to put into
    the table. Rendering of the table is the responsibility of the Javascript
    in the resources directory. When the data does not line up with the headers
    given this should be handled within the Javascript itself, not here.

    Args:
        title: The title to display
        headers: The columns to put into the table
        data_node: A dictionary with the form::

            {'case' : {'subcase' : { 'header' : data } } }

    Returns:
        A dictionary with the metadata specifying that it is to be rendered
        as a table.
    """
    tb = {
        'Type': 'Table',
        'Title': title,
        'Headers': headers,
        'Data': data_node,
    }
    return tb
def get_alert_queries(entities: list) -> list:
    """
    Create the value for the Prometheus query parameter based on the entities.
    This is also very rudimentary and must probably be adapted to each one's use case.
    """
    instance_entities = [e for e in entities if e["type"] == "instance"]
    if instance_entities:
        queries = []
        for entity in instance_entities:
            instance = entity["entity"].replace(" ", "")
            queries.append(f'ALERTS{{instance="{instance}"}}')
        return queries

    # if we do not have specific search targets we use the less specific ones
    service_name_entities = [e for e in entities if e["type"] == "service-name"]
    if service_name_entities:
        queries = []
        for entity in service_name_entities:
            queries.append(f'ALERTS{{service="{entity["entity"]}"}}')
        return queries

    return []
def set_payment_id_to_tx_extra_nonce(payment_id):
    """
    Sets payment ID to the extra
    :param payment_id:
    :return:
    """
    return b"\x00" + payment_id
def is_empty(value):
    """Check whether value is empty."""
    if (type(value) == str and value.strip() == "") or \
            (type(value) == list and len(value) == 0):
        return True
    return False
def _curl_https_get_code(host):
    """
    Create a curl command for a given host that outputs HTTP status code.
    """
    return (
        '/opt/mesosphere/bin/curl '
        '-s -o /dev/null -w "%{{http_code}}" '
        'https://{host}'
    ).format(host=host)
def _get_expected_types(keyword_type, scope_type):
    """
    Get the actual type of the keyword based on the defined keyword types,
    and the scope type that is only useful when the defined type is 'inherited'.
    """
    if keyword_type == 'inherited':
        assert scope_type is not None, ("Scope type should not be None "
                                        "when checking inherited value")
        return [scope_type]
    if type(keyword_type) is list:
        return keyword_type
    else:
        return [keyword_type]
def is_valid_git_sha1(hash):
    """Check if a string is a valid git sha1 string.

    Input:
        hash: string to validate
    Output:
        True if the string has 40 characters and is an hexadecimal number,
        False otherwise.
    """
    if len(hash) != 40:
        return False
    try:
        int(hash, 16)
    except ValueError:
        return False
    return True
def transformTernary(string, location, tokens):
    """Evaluates a ParseResult as a ternary expression."""
    # pylint: disable=eval-used
    return eval(str(tokens[2]) + " if " + str(tokens[0]) + " else " + str(tokens[4]))
def _union(xval, iunion, ix):
    """Helper method for ``filter``."""
    import numpy as np

    # only merge when a flag value is set and a previous union already exists
    if xval and iunion:
        iunion = np.union1d(iunion, ix)
    return iunion
def build_all_reduce_device_prefixes(job_name, num_tasks):
    """Build list of device prefix names for all_reduce.

    Args:
        job_name: 'worker', 'ps' or 'localhost'.
        num_tasks: number of jobs across which device names should be generated.

    Returns:
        A list of device name prefix strings. Each element spells out the full
        host name without adding the device. e.g. '/job:worker/task:0'
    """
    if job_name != 'localhost':
        return ['/job:%s/task:%d' % (job_name, d) for d in range(0, num_tasks)]
    else:
        assert num_tasks == 1
        return ['/job:%s' % job_name]
def get_validations(action_id):
    """Stub to return validations."""
    return [{
        'id': '43',
        'action_id': '12345678901234567890123456',
        'validation_name': 'It has shiny goodness',
        'details': 'This was not very shiny.'
    }]
def next_batch(targ_capacity, curr_capacity, num_up_to_date, batch_size, min_in_service):
    """Return details of the next batch in a batched update.

    The result is a tuple containing the new size of the group and the number
    of members that may receive the new definition (by a combination of
    creating new members and updating existing ones).

    Inputs are the target size for the group, the current size of the group,
    the number of members that already have the latest definition, the batch
    size, and the minimum number of members to keep in service during a
    rolling update.
    """
    assert num_up_to_date <= curr_capacity

    efft_min_sz = min(min_in_service, targ_capacity, curr_capacity)
    efft_bat_sz = min(batch_size, max(targ_capacity - num_up_to_date, 0))

    new_capacity = efft_bat_sz + max(min(curr_capacity,
                                         targ_capacity - efft_bat_sz),
                                     efft_min_sz)

    return new_capacity, efft_bat_sz
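# Illustrative walk-through (assumed numbers): growing a group from 4 to 6 members
# with no member up to date yet, a batch size of 2 and at least 3 members kept in
# service yields a new capacity of 6 and a batch of 2 members to update.
assert next_batch(6, 4, 0, 2, 3) == (6, 2)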
def tap_up(tap, max_tap):
    """Go to the next upper tap position."""
    if tap + 1 <= max_tap:
        return tap + 1
    else:
        return tap
def get_per_dial_emb_file_path(data_dir: str, image_id: int, ext: str = 'npz') -> str:
    """
    Call as get_per_dial_emb_file_path(data_dir=data_dir, image_id=image_id)
    :param data_dir:
    :param image_id:
    :param ext:
    :return:
    """
    file_path = f"{data_dir}/{str(image_id)}.{ext}"
    return file_path
def rttm2simple(rttm: list) -> list:
    """
    Convert rttm-like list to a simple hypothesis list compatible with the
    simple DER library.

    Parameters
    ----------
    rttm: list
        List of strings

    Returns
    ----------
    list
        List of tuples containing unique ID, start and end time in seconds
    """
    output = list()
    for line in rttm:
        _, _, _, start, duration, _, _, label, _, _ = line.split()
        end = float(start) + float(duration)
        output.append((f"{label}", float(start), end))
    return output
def _upper_case_first_letter(name):
    """
    Convert the first character of name to UPPER case.

    :param name: String whose first character will be converted to UPPER case
    :return: String with first character in UPPER case
    """
    indices = set([0])
    return "".join(c.upper() if i in indices else c for i, c in enumerate(name))
def generator(self, arg, *args, **kwargs):
    """Just return the first arg as the results. Ignores any other params."""
    # print 'generator: {}'.format(arg)
    return arg
def _render_sources(dataset, tables):
    """Render the source part of a query.

    Args:
        dataset: the data set to fetch log data from.
        tables: the tables to fetch log data from.

    Returns:
        a string that represents the from part of a query.
    """
    return "FROM " + ", ".join(
        ["[%s.%s]" % (dataset, table) for table in tables])
def get_uuid(data):
    """Returns the Universally unique identifier (UUID) of a managed object.

    It is expressed in the format of object type:object ID. For example, if a
    controller's object type number is "207" and the controller ID is "0A",
    the UUID is "207:0A". The UUID is often needed for querying performance
    statistics, for example.
    """
    return '{}:{}'.format(data['TYPE'], data['ID'])
def clamp(min, max, value):
    """Restricts a number to be within a range.

    Also works for other ordered types such as Strings and Dates.
    """
    if max < min:
        raise ValueError(
            "min must not be greater than max in clamp(min, max, value)"
        )
    if value > max:
        return max
    if value < min:
        return min
    return value
def _validate_requirements(requirements):
    """Validate the requirements.

    Args:
        requirements(list): List of requirements

    Returns:
        List of strings of invalid requirements
    """
    invalid_requirements = []
    for requirement in requirements:
        invalid_params = []
        if not requirement.get("namespace"):
            invalid_params.append("namespace")
        if not requirement.get("name"):
            invalid_params.append("name")
        if not requirement.get("display_name"):
            invalid_params.append("display_name")
        if "criteria" not in requirement:
            invalid_params.append("criteria")
        if invalid_params:
            invalid_requirements.append(
                "{requirement} has missing/invalid parameters: {params}".format(
                    requirement=requirement,
                    params=invalid_params,
                )
            )
    return invalid_requirements
def pull_service_id(arn):
    """Pull the ecs service id from the full arn."""
    return arn.split('service/', 1)[-1]
def getBaseUrl(serverType, serverFqdn, serverPort):
    """Return the base url of a server based on the protocol and port it is running on."""
    # dict mapping SOAP server type to protocol
    type2ProtoDict = {'GSI': 'https', 'gsi': 'https',
                      'SSL': 'https', 'ssl': 'https',
                      'HTTP': 'http', 'http': 'http'}
    # make sure we recognize the server type
    if serverType not in type2ProtoDict:
        raise RuntimeError("Server type %s not recognized." % serverType)
    # get protocol from server type
    serverProtocol = type2ProtoDict[serverType]
    return serverProtocol + '://' + serverFqdn + ':%s' % str(serverPort)
def get_symlink_command(from_file, dest):
    """Returns a symlink command.

    :param from_file:
    :param dest:
    """
    return 'ln -sf %s %s' % (from_file, dest)
def infer_angular_variance_spherical(var_r, phi_c, var_q_min):
    """Calculate angular variance given properties of a beam with quadratic phase corrected by a lens.

    The lens which removes the spherical component has focal length
    f = -k*var_r/(2*phi_c). This means that -phi_c is the phase of the quadratic
    component at r = var_r**0.5. The formula is (19) of Siegman IEEE J. Quantum
    Electronics, vol. 27 1991, with k = 2*pi/lambda. For consistency with the
    rest of the program, I use slightly different definitions. The derivation is
    on p120 of Dane's Fathom notebook 2. It is valid for any refractive index.

    Args:
        var_r (scalar): real space variance.
        phi_c (scalar): real-space curvature - see above.
        var_q_min (scalar): angular variance of the beam after its curvature
            has been removed.

    Returns:
        var_q (scalar): angular variance of the beam after the lens.
    """
    var_q = var_q_min + 4*phi_c**2/var_r
    return var_q
def codepipeline_approval(message):
    """Uses Slack's Block Kit."""
    console_link = message['consoleLink']
    approval = message['approval']
    pipeline_name = approval['pipelineName']
    action_name = approval['actionName']
    approval_review_link = approval['approvalReviewLink']
    expires = approval['expires']
    return (
        {
            'type': 'section',
            'text': {
                'type': 'plain_text',
                'text': f'Pipeline "{pipeline_name}" is waiting for approval.',
            },
            'accessory': {
                'type': 'button',
                'text': {
                    'type': 'plain_text',
                    'text': 'Open in :aws: Console',
                    'emoji': True,
                },
                'url': console_link,
            },
        },
        {
            'type': 'section',
            'fields': [
                {
                    'type': 'mrkdwn',
                    'text': f'*Action name*:\n{action_name}',
                },
                {
                    'type': 'mrkdwn',
                    'text': f'*Expires:* {expires}',
                },
            ],
        },
        {
            'type': 'actions',
            'elements': [
                {
                    'type': 'button',
                    'text': {
                        'type': 'plain_text',
                        'emoji': False,
                        'text': 'Review approve',
                    },
                    'style': 'primary',
                    'url': approval_review_link,
                },
            ],
        },
    )
def mask_to_list(mask):
    """Converts the specified integer bitmask into a list of indexes of bits
    that are set in the mask."""
    size = len(bin(mask)) - 2  # because of "0b"
    return [size - i - 1 for i in range(size) if mask & (1 << size - i - 1)]
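# Illustrative usage sketch (assumed example): bits 1, 2 and 4 are set in 0b10110,
# reported from the most significant set bit down.
assert mask_to_list(0b10110) == [4, 2, 1]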
def range_overlap(a_min, a_max, b_min, b_max):
    """Check if ranges [a_min, a_max] and [b_min, b_max] overlap."""
    return (a_min <= b_max) and (b_min <= a_max)
def table_score(person, table):
    """
    Returns score of table based on personal preferences and table data.
    see test() in score.py for more information.
    """
    table_person = set(table['person'])
    friend = set(person.get('friend', [])).intersection(table_person)
    acquaintance = set(person.get('acquaintance', [])).intersection(table_person)
    enemy = set(person.get('enemy', [])).intersection(table_person)
    # default weight
    weight = person.get('weight', {
        'friend': 2,
        'acquaintance': 1,
        'empty': 0,
        'stranger': -1,
        'enemy': -2
    })
    if len(table_person) == 0:
        return weight['empty']
    score = 0
    score += len(friend) * weight['friend']
    score += len(acquaintance) * weight['acquaintance']
    score += len(enemy) * weight['enemy']
    # the remaining, unclassified people at the table count as strangers
    n_stranger = len(table_person) - len(friend) - len(acquaintance) - len(enemy)
    score += n_stranger * weight['stranger']
    return score
def gcd(a, b):
    """Compute the greatest common divisor of a and b."""
    while b > 0:
        a, b = b, a % b
    return a
def str_candset(candset, names=None):
    """Nicely format a single committee."""
    if names is None:
        namedset = [str(c) for c in candset]
    else:
        namedset = [names[c] for c in candset]
    return "{" + ", ".join(map(str, namedset)) + "}"
def format_date_c19(date_in):
    """Formats "m/d/y" to "yyyymmdd"."""
    month, day, year = date_in.split('/')
    return '20%s%02i%02i' % (year, int(month), int(day))
def compare_names(namepartsA, namepartsB):
    """Takes two name-parts lists (as lists of words) and returns a score."""
    complement = set(namepartsA) ^ set(namepartsB)
    intersection = set(namepartsA) & set(namepartsB)
    score = float(len(intersection)) / (len(intersection) + len(complement))
    return score
def _column_tup_to_str(ind):
    """
    Convert tuple of MultiIndex to string.

    Parameters
    ----------
    ind : tuple
        ind[0]: either 'sleep' or 'activity'
        ind[1]: int that is the day number
        ind[2]: bool, True being light, False being dark

    Returns
    -------
    output : str
        Conversion to a single string representing info in tuple. E.g.,
        ('activity', 6, True) gets converted to
        'total seconds of activity in day 6'.
    """
    if ind[0] == 'activity':
        string = 'total seconds of activity in '
    elif ind[0] == 'sleep':
        string = 'total minutes of sleep in '
    elif ind[0] == 'latency':
        string = 'minutes of sleep latency in '
    else:
        raise RuntimeError('%s is invalid MultiIndex' % ind[0])

    if ind[2]:
        return string + 'day ' + str(ind[1])
    else:
        return string + 'night ' + str(ind[1])
def pkcs7_pad(data: bytes, blocksize: int = 16) -> bytes:
    """
    Implements PKCS7 padding

    :param data: The data to pad
    :param blocksize: The block size to calculate padding based on
    :return: Padded data
    """
    d = bytearray(data)
    padding_len = blocksize - len(d) % blocksize
    d.extend(padding_len for _ in range(padding_len))
    return bytes(d)
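# Usage sketch echoing the classic PKCS#7 example (assumed, not from the source):
# 16 bytes padded to a 20-byte block gain four 0x04 bytes.
assert pkcs7_pad(b"YELLOW SUBMARINE", 20) == b"YELLOW SUBMARINE\x04\x04\x04\x04"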
def as_float(val):
    """Tries to convert a string to a float. Returns None if string is empty."""
    try:
        return float(val)
    except ValueError:
        return None
def interpret_string_as_boolean(str_value):
    """Tries to interpret a string as a boolean. Raises a ValueError upon failure."""
    if str_value in ("TRUE", "True", "true", "1"):
        return True
    elif str_value in ("False", "FALSE", "false", "0"):
        return False
    else:
        raise ValueError(f"Could not interpret '{str_value}' as a boolean. Valid values are: "
                         f"TRUE, True, true, 1, FALSE, False, false, 0")
def bubble_sort(array, length: int = 0):
    """
    :param array: the array to be sorted.
    :param length: the length of array.
    :return: sorted array.

    >>> import random
    >>> array = random.sample(range(-50, 50), 100)
    >>> bubble_sort(array) == sorted(array)
    True
    >>> import string
    >>> array = random.choices(string.ascii_letters + string.digits, k = 100)
    >>> bubble_sort(array) == sorted(array)
    True
    >>> array = [random.uniform(-50.0, 50.0) for i in range(100)]
    >>> bubble_sort(array) == sorted(array)
    True
    """
    length = length or len(array)
    swapped = False
    for i in range(length - 1):
        if array[i] > array[i + 1]:
            array[i], array[i + 1] = (
                array[i + 1],
                array[i],
            )
            swapped = True
    return array if not swapped else bubble_sort(array, length - 1)
def levenshtein_distance(lhs, rhs):
    """Return the Levenshtein distance between two strings."""
    if len(lhs) > len(rhs):
        rhs, lhs = lhs, rhs
    if not lhs:
        return len(rhs)

    prev = range(len(rhs) + 1)
    for lidx, lch in enumerate(lhs):
        curr = [lidx + 1]
        for ridx, rch in enumerate(rhs):
            cost = (lch != rch) * 2
            curr.append(min(prev[ridx + 1] + 1,  # deletion
                            curr[ridx] + 1,      # insertion
                            prev[ridx] + cost))  # substitution
        prev = curr

    return prev[-1]
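# Illustrative checks (assumed examples); note that this variant weights a
# substitution as 2 (equivalent to one deletion plus one insertion).
assert levenshtein_distance("abc", "abc") == 0
assert levenshtein_distance("abc", "abcd") == 1
assert levenshtein_distance("a", "b") == 2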
def new_state(current_state, direction):
    """Calculates new heading from current heading and relative direction."""
    new_axis = (current_state['axis'] + 1) & 1
    multiplier = -1 if current_state['axis'] == 0 and direction == 1 or \
        current_state['axis'] == 1 and direction == -1 else 1
    new_sign = current_state['sign'] * multiplier
    return {'axis': new_axis, 'sign': new_sign}
def generate_parameter_field_number(parameter, used_indexes, field_name_suffix=""):
    """Get unique field number for field corresponding to this parameter in proto file.

    If field number is not stored in metadata of parameter, get the next
    unused integer value.
    """
    field_name_key = f"grpc{field_name_suffix}_field_number"
    if field_name_key in parameter:
        field_number = parameter[field_name_key]
    else:
        field_number = next(i for i in range(1, 100) if i not in used_indexes)
    used_indexes.append(field_number)
    return field_number
def fixdate(longdate):
    """
    Many podcasts use a super long date format that I hate. This will cut them
    down to a simple format. Here's the original:
    "Mon, 08 Jul 2019 00:00:00 -0800"
    """
    tdate = longdate.split(" ")
    months = {"JAN": "01", "FEB": "02", "MAR": "03", "APR": "04",
              "MAY": "05", "JUN": "06", "JUL": "07", "AUG": "08",
              "SEP": "09", "OCT": "10", "NOV": "11", "DEC": "12"}
    return "%s-%s-%s" % (tdate[3], months[tdate[2].upper()], tdate[1])
def emptyCoords():
    """Returns a unit square camera with LL corner at the origin."""
    return [[(0.0, 0.0), (0.0, 0.0), (0.0, 0.0)]]
def percentile(sorted_values, p):
    """Calculate the percentile using the nearest rank method.

    >>> percentile([15, 20, 35, 40, 50], 50)
    35
    >>> percentile([15, 20, 35, 40, 50], 40)
    20
    >>> percentile([], 90)
    Traceback (most recent call last):
        ...
    ValueError: Too few data points (0) for 90th percentile
    """
    size = len(sorted_values)
    idx = (p / 100.0) * size - 0.5
    if idx < 0 or idx > size:
        raise ValueError('Too few data points ({}) for {}th percentile'.format(size, p))
    return sorted_values[int(idx)]
def cast_list(*elements) -> list:
    """Transforms whatever is passed in into a list, e.g.:

    cast_list("a", "b") returns: ["a", "b"]
    cast_list([1, 2]) returns: [1, 2]
    """
    # when * is used all the arguments are passed as a tuple
    # means just one element was passed as arg
    if len(elements) == 1:
        # means nothing was passed as arg
        if elements[0] is None:
            return []
        # means a tuple or list was passed as argument
        if elements[0].__class__ in [list, tuple]:
            # transform this into a list
            return list(elements[0])
        # else make a list with element 0
        # this is when a str or int is passed
        return [elements[0]]
    else:
        # if it's a larger element, just cast with list
        return list(elements)
def _select_entries_param(list, param):
    """Takes a list of logfile_entry objects and returns a subset of it.

    The subset includes all entries that have a source attribute that occurs
    in the param argument.

    Positional arguments:
    list -- a list of logfile_entry objects
    param -- a list of strings, representing sources
    """
    if not param or len(param) == 0:
        return [x for x in list]
    return [x for x in list if x.source in param]
def intersect(t1, t2):
    """Assumes t1 and t2 are tuples.
    Returns a tuple containing elements that are in both t1 and t2."""
    result = ()
    for e in t1:
        if e in t2:
            result += (e,)
    return result
def get_index_of_tuple(list_of_tuple, index_of_tuple, value):
    """
    Determine how far through the list to find the value. If the value does
    not exist in the list, then return the length of the list.

    Args:
        list_of_tuple: a list of tuples i.e. [(index1, index2, index3)]
        index_of_tuple: which index in the tuple you want to compare the value to
        value: the value to search

    Return:
        the number of items in the list it has compared
    """
    for index_of_list, tupl in enumerate(list_of_tuple):
        if tupl[index_of_tuple] == value:
            return index_of_list + 1
    # could not find value in list_of_tuple, so return the length of the list
    return len(list_of_tuple)
def create_bzip2(archive, compression, cmd, verbosity, interactive, filenames):
    """Create a BZIP2 archive."""
    cmdlist = [cmd, 'a']
    if not interactive:
        cmdlist.append('-y')
    cmdlist.extend(['-tbzip2', '-mx=9', '--', archive])
    cmdlist.extend(filenames)
    return cmdlist
def human_int(s):
    """Returns human readable time rounded to the second."""
    s = int(round(s))
    if s <= 60:
        return '%ds' % s
    m = s / 60
    if m <= 60:
        return '%dm%02ds' % (m, s % 60)
    return '%dh%02dm%02ds' % (m / 60, m % 60, s % 60)
def azLimit(az):
    """Limits azimuth to +/- 180."""
    azLim = (az + 180) % 360
    if azLim < 0:
        azLim = azLim + 360
    return azLim - 180
def clamp(val, minval, maxval):
    """Clamps the given value to lie between the given `minval` and `maxval`."""
    return min(max(val, minval), maxval)
def _flatten_list(this_list: list) -> list:
    """
    Flatten nested lists.

    :param this_list: List to be flattened
    :return: Flattened list
    """
    return [item for sublist in this_list for item in sublist]
def get_res_num(line):
    """Get residue number from line starting with ATOM."""
    return int(line[22:26])
def ansicolored(string, colour):
    """Show a colour output in the absence of termcolor."""
    colormap = {
        'pink': '\033[95m',
        'blue': '\033[94m',
        'green': '\033[92m',
        'yellow': '\033[93m',
        'red': '\033[91m',
        'end': '\033[0m',
    }
    return colormap.get(colour, '') + string + colormap.get('end')
def preTagProc(word):
    """Preprocessing before tagging."""
    # tagger tags almost everything as NE if it's in capitals
    if len(word) > 1 and word.isupper():
        word = word.lower()
    return word
def valid_exception(error):
    """There are certain Exceptions raised that indicate successful authorization.

    This method will return True if one of those Exceptions is raised.
    """
    VALID_EXCEPTIONS = [
        'DryRunOperation',
        # S3
        'NoSuchCORSConfiguration',
        'ServerSideEncryptionConfigurationNotFoundError',
        'NoSuchConfiguration',
        'NoSuchLifecycleConfiguration',
        'ReplicationConfigurationNotFoundError',
        'NoSuchTagSet',
        'NoSuchWebsiteConfiguration',
        'NoSuchKey',
        # EC2
        'InvalidTargetArn.Unknown',
    ]
    for exception in VALID_EXCEPTIONS:
        if exception in str(error):
            return True
    return False
def largest_permutation(k, arr):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/largest-permutation/problem

    You are given an unordered array of unique integers incrementing from 1. You can swap any
    two elements a limited number of times. Determine the largest lexicographical value array
    that can be created by executing no more than the limited number of swaps.

    For example, if arr = [1, 2, 3, 4] and the maximum swaps k = 1, the following arrays can be
    formed by swapping the 1 with the other elements:

    [2,1,3,4]
    [3,2,1,4]
    [4,2,3,1]

    The highest value of the four (including the original) is [4, 2, 3, 1]. If k >= 2, we can
    swap to the highest possible value: [4, 3, 2, 1].

    Solve:

    We store the values in a dictionary so we can access the position of each value in O(1)
    speed. We then iterate through the list starting at the highest value, and swap it IF it
    isn't already in the correct position. We then update the dictionary with the swapped
    values, and then proceed to the next value to swap.

    Args:
        k (int): Number of swaps to perform
        arr (list): List of numbers where 1 <= arr[i] <= n

    Returns:
        list containing the largest lexicographical value array after k swaps
    """
    sorted_array = sorted(arr, reverse=True)
    vals = {v: idx for idx, v in enumerate(arr)}
    c = 0
    m = len(arr)
    while k > 0 and c < len(arr):
        if arr[c] != sorted_array[c]:
            # Swap the current highest value
            swap = arr[c]
            arr[c] = m
            arr[vals[m]] = swap
            # Update dictionary
            prev = vals[m]
            vals[m] = c
            vals[swap] = prev
            k -= 1
        m -= 1
        c += 1
    return arr
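# Usage checks taken from the worked example in the docstring above:
assert largest_permutation(1, [1, 2, 3, 4]) == [4, 2, 3, 1]
assert largest_permutation(2, [1, 2, 3, 4]) == [4, 3, 2, 1]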
def fake_gauss_2(num_list):
    """Fake Gauss v3"""
    a = num_list[0]
    b = num_list[-1]
    return int(b * (a + b) / 2)
def intersect(left, right):
    """:yaql:intersect

    Returns set with elements common to left and right sets.

    :signature: left.intersect(right)
    :receiverArg left: left set
    :argType left: set
    :arg right: right set
    :argType right: set
    :returnType: set

    .. code::

        yaql> set(0, 1, 2).intersect(set(0, 1))
        [0, 1]
    """
    return left.intersection(right)
def MAE_dust_caponi(wavelengths, formulation="libya25"):
    """Dust Mass Absorption Efficiency (MAE) for several dust sources
    according to Caponi et al. 2017.
    """
    MAE400_AAE = {
        # PM2.5
        # Sahara
        'libya25': (110., 4.1),
        'marocco25': (90., 2.6),
        'algeria25': (73., 2.8),
        # Sahel
        'mali25': (630., 3.4),
        # Middle East
        'saudi_arabia25': (130., 4.5),
        'kuwait25': (310., 3.4),
        # Southern Africa
        'namibia25': (135., 5.1),
        # Eastern Asia
        'china25': (180., 3.2),
        # Australia
        'australia25': (293., 2.9),
        # PM10
        # Sahara
        'libya10': (77., 3.2),
        'algeria10': (82., 2.5),
        # Sahel
        'bodele10': (27., 3.3),
        # Middle East
        'saudi_arabia10': (81., 4.1),
        # Southern Africa
        'namibia10': (50., 4.7),
        # Eastern Asia
        'china10': (59., 3.),
        # North America
        'arizona10': (103., 3.1),
        # South America
        'patagonia10': (83., 2.9),
        # Australia
        'australia10': (124., 2.9)
    }

    if formulation not in MAE400_AAE.keys():
        raise ValueError("Bad caponi formulation")

    MAE400, AAE = MAE400_AAE[formulation]
    MAE = MAE400 * (wavelengths / 400e-9)**(-AAE)
    return MAE
def decode_text(text):
    """Decode a given text string to a Unicode string.

    If `text` can't be decoded to a common encoding, it will be decoded to
    UTF-8 passing the "replace" option.
    """
    for enc in ('utf-8', 'iso-8859-15', 'iso-8859-1', 'ascii'):
        try:
            return text.decode(enc)
        except UnicodeDecodeError:
            continue
    # fallback
    return text.decode('utf-8', 'replace')
def validVer(ver):
    """
    This helper function validates Android version code. It must be an integer
    but Google didn't say it must be positive only:
    https://developer.android.com/guide/topics/manifest/manifest-element.html#vcode
    """
    if ver is None or not isinstance(ver, int):
        return False
    else:
        return True
def line_value(line):
    """Return the value associated with a line (series of # and . chars)."""
    val = 0
    for char in line:
        val <<= 1
        val += char == "#"
    return val
def remove_first_slash_from(resource_path):
    """
    Some REST API calls are written to have resource path without the '/' in
    the beginning, so this removes it.

    :param resource_path: e.g. /1/0/1
    :return: Resource path without the first '/'
    """
    if resource_path[0] == '/':
        return resource_path[1:]
    return resource_path
def format_read_request(address):
    """
    Format a read request for the specified address.

    :param address: address at which to read data from the FPGA.
    :return: formatted request.
    """
    if address >= 2**(4 * 8):
        raise ValueError(f'Address {address} is too large (max 4 bytes).')
    if address < 0:
        raise ValueError(f'Address {address} cannot be negative.')
    buff = bytearray(5)
    buff[0] = 0
    buff[1:] = int.to_bytes(address, length=4, byteorder="little")
    return buff
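# Illustrative usage sketch (assumed address): the request is a zero command byte
# followed by the address in little-endian order.
assert format_read_request(0x0104) == b"\x00\x04\x01\x00\x00"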
def _has_exclude_patterns(name, exclude_patterns):
    """Checks if a string contains substrings that match patterns to exclude."""
    for p in exclude_patterns:
        if p in name:
            return True
    return False
def get_ratio_hard(our_percentage):
    """
    Return value is between 0 and 20:
    0 -> 20; 0.5 -> 1; 1 -> 0
    """
    return 100**(0.5 - our_percentage) * (1 - our_percentage) * 2
def get_current_valid_edges(current_nodes, all_edges):
    """Returns edges that are present in Cytoscape: its source and target
    nodes are still present in the graph.
    """
    valid_edges = []
    node_ids = {n['data']['id'] for n in current_nodes}
    for e in all_edges:
        if e['data']['source'] in node_ids and e['data']['target'] in node_ids:
            valid_edges.append(e)
    return valid_edges
def get_number_of_auctions_to_run(q: int, t: int, lmbda: int) -> int:
    """Return the number of auctions to run for the given q, t and lmbda selection.

    Parameters
    ----------
    q:
        description
    t:
        description
    lmbda:
        description

    Returns
    -------
    The appropriate integer for that selection.
    """
    retval: int = 0
    if t <= 2:
        retval = 2400
    elif t <= 8:
        retval = 1600
    elif t <= 18:
        retval = 1200
    elif t <= 60:
        retval = 800
    elif t <= 108:
        retval = 600
    elif t <= 144:
        retval = 500
    else:
        retval = 400
    if (q == 1) or (lmbda == 1):
        retval *= 2
    return retval
def tag_nocolor(tag):
    """Removes the color information from a Finder tag."""
    return tag.rsplit('\n', 1)[0]
def is_valid_pbc(pbc):
    """Checks pbc parameter."""
    values = ['none', 'mol', 'res', 'atom', 'nojump', 'cluster', 'whole']
    return pbc in values
def get_exitcode(return_code):
    """
    Calculate and return exit code from a return code.

    :param return_code: code in os.wait format
    :return: a tuple (signal, exit code)
    """
    signal = return_code & 0x00FF
    exitcode = (return_code & 0xFF00) >> 8
    return signal, exitcode
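# Illustrative usage sketch (assumed status value): an os.wait()-style status of
# 0x0200 means no signal and an exit code of 2.
assert get_exitcode(0x0200) == (0, 2)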
def getHOGBlocks(rect, blocksizes, stride=1.0):
    """Returns a set of blocks (rects), with the given (fractional) stride among blocks."""
    x0, y0, x1, y1 = rect[:4]
    ret = []
    for b in blocksizes:
        ss = int(b * stride)
        for y in range(y0, y1 + 1, ss):
            if y + b - 1 > y1:
                break
            for x in range(x0, x1 + 1, ss):
                if x + b - 1 > x1:
                    break
                ret.append([x, y, x + b - 1, y + b - 1])  # -1 to make it an inclusive box
    return ret
def count_all(a, b):
    """Count all occurrences of a in b."""
    return len([1 for w in b if w == a])
def symbol_type(symbol: str) -> str:
    """Determines the type of the asset the symbol represents.

    This can be 'STOCK', 'CRYPTO', or 'OPTION'.
    """
    if len(symbol) > 6:
        return "OPTION"
    elif symbol[0] == "@" or symbol[:2] == "c_":
        return "CRYPTO"
    else:
        return "STOCK"
def is_proxy(obj):
    """Return True if `obj` is an array proxy."""
    try:
        return obj.is_proxy
    except AttributeError:
        return False