def get_slice_coord(bounds, n):
    """
    Given the bounds of an actor, return the point that corresponds
    to the n% of the bounds range

    :param bounds: should be a list of two floats
    :param n: n should be a float in range 0, 1
    """
    if not isinstance(bounds, (list, tuple)) or not isinstance(bounds[0], float) or not isinstance(bounds[1], float):
        raise ValueError("bounds should be a list or tuple of floats: {}".format(bounds))
    if not isinstance(n, (int, float)):
        raise ValueError("n should be a float")
    if n < 0 or n > 1:
        raise ValueError("n should be in range [0, 1]")

    b0, b1 = bounds
    delta = b1 - b0

    return b0 + delta * n
def energy_calc(edot, t_int):
    """
    Calculates magnetar giant flare energy for a given flare interval,
    based on the energy injection rate and a flare interval (directly
    from the magnetar's age).

    Args:
        - edot: Energy injection rate at a specific age
        - t_int: Flare interval at a given age

    Returns:
        - e_f: Flare energy for a given E-dot and flare interval at a specific age
    """
    e_f = edot * t_int  # As E-dot is defined as e_f/t_int

    return e_f
def parse(h):
    """entity [A|B]----B"""
    if "[" in h and "|" in h and "]" in h:
        return h.split("|")[1].split("]")[0]
    else:
        return h
def deltatime_str(deltatime_seconds):
    """
    Convert an elapsed time in seconds to a formatted string.

    Parameters
    ----------
    deltatime_seconds : float
        Elapsed time, in seconds.

    Returns
    -------
    time_str : String
        Represents time in a format hh:mm:ss.---.

    Examples
    --------
    >>> from pymove import datetime
    >>> datetime.deltatime_str(1082.7180936336517)
    "00:18:02.718"

    Notes
    -----
    Output example if more than 24 hours: 25:33:57.123
    https://stackoverflow.com/questions/3620943/measuring-elapsed-time-with-the-time-module
    """
    time_int = int(deltatime_seconds)
    time_dec = int((deltatime_seconds - time_int) * 1000)
    times = (time_int // 3600, time_int % 3600 // 60, time_int % 60, time_dec)
    time_str = '%02d:%02d:%02d.%03d' % times
    return time_str
def decode_position(s):
    """
    Parameters
    ----------
    s : str

    Returns
    -------
    int
    """
    return int(s.replace("K", "000").replace("k", "000").replace("M", "000000").replace("m", "000000"))
def get_method_class(function):
    """ Get the class of a method by analyzing its name. """
    cls_name = function.__qualname__.rsplit('.', 1)[0]
    if '<locals>' in cls_name:
        return None
    return eval(cls_name, function.__globals__)
def _sort(data):
    """Sort by downloads"""
    # Only for lists of dicts, not a single dict
    if isinstance(data, dict):
        return data
    data = sorted(data, key=lambda k: k["downloads"], reverse=True)
    return data
def F(y, t, spring_constant=1.0, mass=1.0):
    """
    Return derivatives for harmonic oscillator:
        y'' = -(k/m) * y
    y = displacement in [m]
    k = spring_constant in [N/m]
    m = mass in [kg]
    """
    dy = [0, 0]  # array to store derivatives
    dy[0] = y[1]
    dy[1] = -(spring_constant / mass) * y[0]
    return dy
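A minimal usage sketch for F above, not part of the original record: it assumes SciPy is available and that F is integrated with scipy.integrate.odeint (the (y, t) argument order matches odeint's convention). The initial state, time grid, and parameter values are invented for illustration.

import numpy as np
from scipy.integrate import odeint

# assumed initial state: displacement 1 m, velocity 0 m/s
y0 = [1.0, 0.0]
t = np.linspace(0.0, 10.0, 101)

# integrate y'' = -(k/m) * y with assumed k = 4 N/m, m = 1 kg
solution = odeint(F, y0, t, args=(4.0, 1.0))
displacement = solution[:, 0]  # first column is y, second is y'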
def person_is_buyer(name):
    """If a person's name ends in 'm', then they will buy a CS:GO skin."""
    return name[-1] == 'm'
def coerce_list_of_ints(val):
    """
    If single value, try to parse as integer,
    else try to parse as list of integer
    """
    if isinstance(val, list):
        return [int(x) for x in val]
    else:
        return [int(val)]
def format_currency(value, decimals=2):
    """
    Return a number suitably formatted for display as currency, with
    thousands separated by commas and up to two decimal points.

    >>> format_currency(1000)
    '1,000'
    >>> format_currency(100)
    '100'
    >>> format_currency(999.95)
    '999.95'
    >>> format_currency(99.95)
    '99.95'
    >>> format_currency(100000)
    '100,000'
    >>> format_currency(1000.00)
    '1,000'
    >>> format_currency(1000.41)
    '1,000.41'
    >>> format_currency(23.21, decimals=3)
    '23.210'
    >>> format_currency(1000, decimals=3)
    '1,000'
    >>> format_currency(123456789.123456789)
    '123,456,789.12'
    """
    number, decimal = (('%%.%df' % decimals) % value).split('.')
    parts = []
    while len(number) > 3:
        part, number = number[-3:], number[:-3]
        parts.append(part)
    parts.append(number)
    parts.reverse()
    if int(decimal) == 0:
        return ','.join(parts)
    else:
        return ','.join(parts) + '.' + decimal
def rename_entity_id(old_name):
    """
    Given an entity_id, rename it to something else. Helpful if ids changed
    during the course of history and you want to quickly merge the data.
    Beware that no further adjustment is done, also no checks whether the
    referred sensors are even compatible.
    """
    rename_table = {
        "sensor.old_entity_name": "sensor.new_entity_name",
    }
    if old_name in rename_table:
        return rename_table[old_name]
    return old_name
def symbol_restore(symbol):
    """Restore symbol for XueQiu."""
    return f'{symbol[2:]}.{symbol[:2]}'
def busqueda(lista, comienzo, final, objetivo):
    """Search for a number in a sorted list of integers.

    Big-O Complexity: O(log n)"""
    if comienzo > final:
        return False
    medio = (comienzo + final) // 2
    if lista[medio] == objetivo:
        return True
    elif lista[medio] < objetivo:
        return busqueda(lista, medio + 1, final, objetivo)
    else:
        return busqueda(lista, comienzo, medio - 1, objetivo)
def text_to_int(textnum):
    """Convert numbers in word form to integers."""
    numwords = {}
    units = [
        "zero", "one", "two", "three", "four", "five", "six", "seven", "eight",
        "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
        "sixteen", "seventeen", "eighteen", "nineteen",
    ]
    tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
    modifiers = ["", "quarter", "half", "three-quarters"]

    numwords["and"] = 0
    numwords["a"] = 0
    for idx, word in enumerate(units):
        numwords[word] = idx
    for idx, word in enumerate(tens):
        numwords[word] = idx * 10
    for idx, word in enumerate(modifiers):
        numwords[word] = idx * .25

    current = 0
    for word in textnum.split():
        if word not in numwords:
            raise ValueError("Illegal word: " + word)
        current += numwords[word]
    return current
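A brief usage sketch for text_to_int; the inputs below are invented examples, not from the original record.

# "twenty" -> 20, "five" -> 5, "and"/"a" -> 0, "half" -> 0.5
assert text_to_int("twenty five") == 25
assert text_to_int("two and a half") == 2.5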
def get_strand_color(is_rev):
    """
    Get color for forward and reverse reads
    :param is_rev: True if read is reversed
    :return:
    """
    if 238 <= is_rev <= 244:
        return 1
    else:
        return 0
def integer(roman):
    """
    Function to convert a roman numeral to integer.
    :type roman: str
    :rtype: int
    """
    # Initialize a dictionary of symbol and values
    symbol_value = {'M': 1000, 'D': 500, 'C': 100, 'L': 50, 'X': 10, 'V': 5, 'I': 1}
    second_last_index = len(roman) - 1
    result = 0
    # Now traverse the roman string from index 0 to the second last index.
    # Compare value of the present symbol with the value of the next symbol.
    # If the present value is smaller than the next value, reduce the
    # present value from the result. Else add it with the result.
    for i in range(second_last_index):
        present_value = symbol_value[roman[i]]
        next_value = symbol_value[roman[i + 1]]
        if present_value < next_value:
            result -= present_value
        else:
            result += present_value
    # At last, add the value of the last symbol.
    result += symbol_value[roman[-1]]
    return result
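A small usage sketch for integer; the numerals below are illustrative and not taken from the original record.

assert integer("XIV") == 14        # X + (V - I)
assert integer("MCMXCIV") == 1994  # 1000 + (1000 - 100) + (100 - 10) + (5 - 1)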
def _fixencoding(input, encoding, final=False):
    """
    Replace the name of the encoding in the charset rule at the beginning of
    ``input`` with ``encoding``. If ``input`` doesn't start with a charset
    rule, ``input`` will be returned unmodified.

    If the encoding can't be found yet, ``None`` is returned. ``final``
    specifies whether more data will be available in later calls or not.
    If ``final`` is true, ``_fixencoding()`` will never return ``None``.
    """
    prefix = '@charset "'
    if len(input) > len(prefix):
        if input.startswith(prefix):
            pos = input.find('"', len(prefix))
            if pos >= 0:
                if encoding.replace("_", "-").lower() == "utf-8-sig":
                    encoding = "utf-8"
                return prefix + encoding + input[pos:]
            # we haven't seen the end of the encoding name yet => fall through
        else:
            return input  # doesn't start with prefix, so nothing to fix
    elif not prefix.startswith(input) or final:
        # can't turn out to be a @charset rule later (or there is no "later")
        return input
    if final:
        return input
    return None
def generateFromSitePaymentObject(signature: str, account_data: dict, data: dict) -> dict:
    """[summary]
    Creates object for from-site charge request

    Args:
        signature (str): signature hash string
        account_data (dict):
            merchant_account: str
            merchant_domain: str
        data (dict): order + personal data to create charge
            orderReference (str): timestamp
            amount (float): order total amount
            currency (str): 'USD', 'UAH', 'RUB'
            card (str): user card number
            expMonth (str): card expires month
            expYear (str): card expires year
            cardCvv (str): card cvv
            cardHolder (str): full name of card holder "Test test"
            productName (list[str]): product names list
            productPrice (list[float]): product price list
            productCount (list[int]): product count list
            clientFirstName (str): client first name
            clientLastName (str): client last name
            clientCountry (str): client country
            clientEmail (str): client email
            clientPhone (str): client phone

    Returns:
        dict: [description]
    """
    return {
        "transactionType": "CHARGE",
        'merchantAccount': account_data['merchant_account'],
        "merchantAuthType": "SimpleSignature",
        'merchantDomainName': account_data['merchant_domain'],
        "merchantTransactionType": "AUTH",
        "merchantTransactionSecureType": "NON3DS",
        'merchantSignature': signature,
        "apiVersion": 1,
        'orderReference': str(data['orderReference']),
        'orderDate': str(data['orderReference']),
        "amount": data["amount"],
        'currency': data['currency'],
        "card": data['card'],
        "expMonth": data['expMonth'],
        "expYear": data['expYear'],
        "cardCvv": data['cardCvv'],
        "cardHolder": data['cardHolder'],
        'productName': list(map(str, data['productName'])),
        'productPrice': list(map(float, data['productPrice'])),
        'productCount': list(map(int, data['productCount'])),
        "clientFirstName": data['clientFirstName'],
        "clientLastName": data['clientLastName'],
        "clientCountry": data['clientCountry'],
        "clientEmail": data['clientEmail'],
        "clientPhone": data['clientPhone'],
    }
def _vector_similarity(encode1: list, encode2: list) -> float:
    """assume the length of encode1 and encode2 are n,
    time complexity is O(n), space complexity is O(n)
    """
    sim_score = sum([x * y for x, y in zip(encode1, encode2)])
    return sim_score
def calculate_row_format(columns, keys=None):
    """
    Calculate row format.

    Args:
        columns (dict): the keys are the column name and the value the max length.
        keys (list): optional list of keys to order columns as well as to filter for them.

    Returns:
        str: format for table row
    """
    row_format = ''
    if keys is None:
        keys = columns.keys()
    else:
        keys = [key for key in keys if key in columns]

    for key in keys:
        if len(row_format) > 0:
            row_format += "|"
        row_format += "%%(%s)-%ds" % (key, columns[key])
    return '|' + row_format + '|'
def get_field_name_from_message_and_descriptor(
        message: str, field_descriptor: str, topic: str = 'innovation') -> str:
    """
    return the actual field name for a field descriptor
    e.g. message: ekf2_innovations; field_descriptor: magnetometer_innovations -> mag_innov
    :param ulog:
    :return: str (if field not found, None will be returned)
    """
    field_name = ''
    if message in ['estimator_innovations', 'estimator_innovation_variances',
                   'estimator_innovation_test_ratios']:
        field_name = field_descriptor
    elif message in ['ekf2_innovations', 'estimator_status']:
        if topic == 'innovation':
            msg_lookUp_dict = {
                'aux_hvel': 'aux_vel_innov',
                'mag_field': 'mag_innov',
                'heading': 'heading_innov',
                'airspeed': 'airspeed_innov',
                'beta': 'beta_innov',
                'flow': 'flow_innov',
                'hagl': 'hagl_innov',
                'drag': 'drag_innov',
            }
            field_name = msg_lookUp_dict[field_descriptor]
        elif topic == 'innovation_variance':
            msg_lookUp_dict = {
                'aux_hvel': 'aux_vel_innov_var',
                'mag_field': 'mag_innov_var',
                'heading': 'heading_innov_var',
                'airspeed': 'airspeed_innov_var',
                'beta': 'beta_innov_var',
                'flow': 'flow_innov_var',
                'hagl': 'hagl_innov_var',
                'drag': 'drag_innov_var',
            }
            field_name = msg_lookUp_dict[field_descriptor]
        elif topic == 'innovation_test_ratio':
            msg_lookUp_dict = {
                'pos': 'pos_test_ratio',
                'vel': 'vel_test_ratio',
                'hgt': 'hgt_test_ratio',
                'mag_field': 'mag_test_ratio',
                'airspeed': 'tas_test_ratio',
                'beta': 'beta_test_ratio',
                'hagl': 'hagl_test_ratio',
            }
            field_name = msg_lookUp_dict[field_descriptor]
        else:
            raise NotImplementedError('topic {:s} not supported'.format(topic))
    else:
        raise NotImplementedError('message {:s} not supported'.format(message))

    return field_name
def get_deletions_y(parsed_mutations):
    """Get y coordinates of deletion markers to overlay in heatmap.

    These are the linear y coordinates used in the Plotly graph object.
    i.e., the indices of data["heatmap_y_strains"]

    :param parsed_mutations: A dictionary containing multiple merged
        ``get_parsed_gvf_dir`` return "mutations" values.
    :type parsed_mutations: dict
    :return: List of y coordinate values to display deletion markers
    :rtype: list[str]
    """
    ret = []
    for y, strain in enumerate(parsed_mutations):
        for pos in parsed_mutations[strain]:
            for mutation in parsed_mutations[strain][pos]:
                deletion = mutation["mutation_type"] == "deletion"
                hidden = mutation["hidden_cell"]
                if deletion and not hidden:
                    ret.append(y)
    return ret
def truncate(string, length):
    """ Truncates a string to a given length with "..." at the end if needed """
    return string[:(length - 3)].ljust(length, ".")
def INVALID_NAME(name):
    """Error message for invalid names."""
    return "invalid name '{}'".format(name)
def int_to_hex_string(value: int, length: int = 64) -> str:
    """returns a hex string of an int"""
    hex_string = str(hex(value))[2:]
    return "".join("0" * (length - len(hex_string))) + hex_string
def kamenetsky(number):
    """
    :param number: the number whose factorial is taken
    :return: the number of digits of the factorial of number.
    """
    import math
    if number < 0:  # does not exist
        return 0
    elif number <= 1:
        return 1
    digits = (number * math.log10(number / math.e)) + (math.log10(2 * math.pi * number) / 2)
    return math.floor(digits) + 1
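A quick usage sketch for kamenetsky, with invented inputs: Kamenetsky's formula counts the digits of n! without computing the factorial.

assert kamenetsky(5) == 3     # 5! = 120 has 3 digits
assert kamenetsky(100) == 158  # 100! has 158 digits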
def secs_to_text(secs):
    """
    Convert number of seconds to human string - *d*h*m*s
    """
    secs = int(secs)
    min_time = 60
    hour_time = 3600
    day_time = 3600 * 24
    days = 0
    hours = 0
    mins = 0
    if secs >= day_time:
        days = int(secs / day_time)
        secs = secs % day_time
    if secs >= hour_time:
        hours = int(secs / hour_time)
        secs = secs % hour_time
    if secs >= min_time:
        mins = int(secs / min_time)
        secs = secs % min_time
    str_time = []
    if days:
        str_time.append("{0}d".format(days))
    if hours:
        str_time.append("{0}h".format(hours))
    if mins:
        str_time.append("{0}m".format(mins))
    if not len(str_time) or secs > 0:
        str_time.append("{0}s".format(secs))
    return " ".join(str_time)
def example3(S):
    """Return the sum of the prefix sums of sequence S."""
    n = len(S)
    total = 0
    for j in range(n):           # loop from 0 to n-1
        for k in range(1 + j):   # loop from 0 to j
            total += int(S[k])
    return total
def _resolve_global_alignment_table(alignment_table, ref_sequence_left, ref_sequence_right,
                                    align_char='|', gap_char='-', space_char=' ', **kwargs):
    """
    Resolves global alignment, returns the two sequences and the score.
    :param alignment_table:
    :param ref_sequence_left:
    :param ref_sequence_right:
    :return:
    """
    # Get corner
    max_left = max([l for l, _ in alignment_table.keys()])
    max_right = max([r for _, r in alignment_table.keys()])

    # Get score
    score = alignment_table[(max_left, max_right)][0]

    # Traverse until we get to (0,0)
    left = max_left
    right = max_right
    sequence_left = ''
    sequence_right = ''
    matched = ''
    while (left, right) != (0, 0):
        prev_left, prev_right = alignment_table[(left, right)][1]
        if left == prev_left:
            sequence_left += gap_char
            sequence_right += ref_sequence_right[right - 1]
            matched += space_char
        elif right == prev_right:
            sequence_left += ref_sequence_left[left - 1]
            sequence_right += gap_char
            matched += space_char
        else:
            if ref_sequence_left[left - 1] == ref_sequence_right[right - 1]:
                matched += align_char
            else:
                matched += space_char
            sequence_left += ref_sequence_left[left - 1]
            sequence_right += ref_sequence_right[right - 1]
        left = prev_left
        right = prev_right

    return '\n'.join([sequence_left[::-1], matched[::-1], sequence_right[::-1]]), score
def permission_for_creatable_media_perm(permissions):
    """ This is the permission that should be used to create a creatable
    media_perm record."""
    for p in permissions:
        if p.name == 'Perm 5':
            return p
def format_s3_location(user, key, authurl, bucket, obj):
    """Helper method that returns a S3 store URI given the component pieces."""
    scheme = 's3'
    if authurl.startswith('https://'):
        scheme = 's3+https'
        authurl = authurl[len('https://'):]
    elif authurl.startswith('http://'):
        authurl = authurl[len('http://'):]
    authurl = authurl.strip('/')
    return "%s://%s:%s@%s/%s/%s" % (scheme, user, key, authurl, bucket, obj)
def get_interface_index_from_api(data, name):
    """Process data from sw_interface_dump API and return index
    of the interface specified by name.

    :param data: Output of interface dump API call.
    :param name: Name of the interface to find.
    :type data: list
    :type name: str
    :returns: Index of the specified interface.
    :rtype: int
    :raises RuntimeError: If the interface is not found.
    """
    for iface in data:
        if iface["sw_interface_details"]["interface_name"] == name:
            return iface["sw_interface_details"]["sw_if_index"]
    else:
        raise RuntimeError(
            "Interface with name {name} not found in dump. "
            "Dumped data: {data}".format(name=name, data=data))
def find_whole_word(word: str, string: str) -> bool:
    """Returns true if specified word is in the string."""
    return word in string.split()
def format_value(value, tag=False):
    """Quote a value if it isn't quoted yet.

    :param value: the string to quote
    :type value: Any
    :param tag: if this is a tag, then don't quote it, but make sure it has no spaces.
    :type tag: bool
    :returns: quoted value
    :rtype: str
    """
    if tag:
        if not isinstance(value, str):
            value = str(value)
        return value.replace(' ', '-')
    if isinstance(value, str):
        if value.startswith('"') and value.endswith('"'):
            return value
    return '"{}"'.format(value)
def _get_opengraph(parsed_query):
    """Return OpenGraph data for `parsed_query`"""
    url = parsed_query['request_url'] or ""
    pic_url = url.replace('?', '_')
    return (
        '<meta property="og:image" content="%(pic_url)s_0pq.png" />'
        '<meta property="og:site_name" content="wttr.in" />'
        '<meta property="og:type" content="profile" />'
        '<meta property="og:url" content="%(url)s" />'
    ) % {
        'pic_url': pic_url,
        'url': url,
    }
def tested_function(x):
    """ Test function """
    return x**2 + 2 * x - 20
def get_time_with_unit(time):
    """This method converts seconds into minutes, hours, or days."""
    sec_in_min = 60
    sec_in_hour = 60 * 60
    sec_in_day = 24 * 60 * 60

    if time % sec_in_day == 0:
        time = time / sec_in_day
        unit = 'days'
    elif time % sec_in_hour == 0:
        time = time / sec_in_hour
        unit = 'hours'
    else:
        time = time / sec_in_min
        unit = 'minutes'
    return "%s %s" % (time, unit)
def unescape_node_name(node_name: str) -> str:
    """Unescapes any special characters in an ontology node name"""
    return node_name.replace(r"\|", r"|").replace(r"\.", r".")
def left(x_pos: int, distance: int):
    """
    Find the pixel a given distance from the virtual agent location

    Params
    ------
    x_pos: int
        An index along the x axis that will be used as the starting point
    distance: int
        The distance we want to travel along the x axis from that point.

    Returns
    -------
    An integer for the new location
    """
    return x_pos - distance + 1
def picture_set(A, L):
    """
    This is needed in the find_hexad function below.

    EXAMPLES::

        sage: from sage.games.hexad import *
        sage: M = Minimog(type="shuffle")
        sage: picture_set(M.picture00, M.cross[2])
        {5, 7, 8, 9, 10}
        sage: picture_set(M.picture02, M.square[7])
        {2, 3, 5, 8}
    """
    return set([A[x] for x in L])
def format_attribute(attr):
    """Format state attributes"""
    res = attr.replace("-", " ")
    res = res.capitalize()
    res = res.replace(" ip ", " IP ")
    res = res.replace(" mac ", " MAC ")
    res = res.replace(" mtu", " MTU")
    return res
def _item_to_value_identity(iterator, item):
    """An item to value transformer that returns the item un-changed."""
    # pylint: disable=unused-argument
    # We are conforming to the interface defined by Iterator.
    return item
def get_signed_headers(headers):
    """
    Get signed headers.

    :param headers: input dictionary to be sorted.
    """
    return sorted([h.lower().strip() for h in headers])
def VARIABLE(name):
    """Get string representation containing the reference to a variable with
    given name. This string is intended to be used as a template parameter
    reference within workflow specifications in workflow templates.

    Parameters
    ----------
    name: string
        Template parameter name

    Returns
    -------
    string
    """
    return '$[[{}]]'.format(name)
def get_length_c(points):
    """
    Function to calculate the length of a path determined by several points.

    Input:
        - points <list> : list of p_i points where p_i = x_i + J*y_i.
    Output:
        - <float> : the length.
    """
    L = 0.
    err = 0
    for j in range(0, len(points) - 1):
        dl = abs(points[j] - points[j + 1])
        L = L + dl
    return L
def find_max_where(predicate, prec=1e-5, initial_guess=1, fail_bound=1e38):
    """Find the largest value for which a predicate is true,
    along a half-line. 0 is assumed to be the lower bound."""

    # {{{ establish bracket

    mag = 1
    if predicate(mag):
        mag *= 2
        while predicate(mag):
            mag *= 2

            if mag > fail_bound:
                raise RuntimeError("predicate appears to be true "
                                   "everywhere, up to %g" % fail_bound)

        lower_true = mag/2
        upper_false = mag
    else:
        mag /= 2
        while not predicate(mag):
            mag /= 2

            if mag < prec:
                return mag

        lower_true = mag
        upper_false = mag*2

    # }}}

    # {{{ refine

    # Refine a bracket between *lower_true*, where the predicate is true,
    # and *upper_false*, where it is false, until *prec* is satisfied.

    assert predicate(lower_true)
    assert not predicate(upper_false)

    while abs(lower_true - upper_false) > prec:
        mid = (lower_true + upper_false) / 2
        if predicate(mid):
            lower_true = mid
        else:
            upper_false = mid
    else:
        return lower_true

    # }}}
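A minimal usage sketch for find_max_where with an invented predicate: it doubles until the predicate fails, then bisects, so it should land near the largest x with x**2 <= 100.

largest = find_max_where(lambda x: x * x <= 100.0)
assert abs(largest - 10.0) < 1e-4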
def get_letter(caracter, cifru):
    """ Decrypt a character using the cipher """
    if caracter.isalpha():
        caracter = ord(caracter) - cifru
        if caracter < ord('a'):
            caracter = ord('z') - abs(ord('a') - caracter) + 1
        return chr(caracter)
    else:
        return caracter
def linear_interpolate(x, y, x_desired):
    """
    Given two lists or arrays x and y, linearly interpolates between adjacent
    x-values to give an estimated y-value for any particular x

    Arguments:
        x = 1-D array or list of floats
        y = 1-D array or list of floats containing values matching the positions in x
        x_desired = the x value to interpolate at

    Returns:
        interpolated y-value
    """
    index = 0
    while x[index] <= x_desired:
        index = index + 1
    x_left = x[index - 1]
    x_right = x[index]
    y_interp = ((y[index - 1] * (x_right - x_desired)) +
                (y[index] * (x_desired - x_left))) / (x_right - x_left)
    return y_interp
def mult_matrix(M, N):
    """Multiply square matrices of same dimension M and N"""
    # Converts N into a list of tuples of columns
    # (materialized as a list so it can be iterated once per row of M)
    tuple_N = list(zip(*N))

    # Nested list comprehension to calculate matrix multiplication
    return [[sum(el_m * el_n for el_m, el_n in zip(row_m, col_n)) for col_n in tuple_N]
            for row_m in M]
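A quick usage sketch for mult_matrix with small invented matrices; it assumes the column tuples are materialized as a list, as done above.

A = [[1, 2], [3, 4]]
I = [[1, 0], [0, 1]]
assert mult_matrix(A, I) == [[1, 2], [3, 4]]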
def hist_int(array):
    """Return a histogram of integers as a list of counts."""
    hist = [0] * (max(array) + 1)
    negative = []
    for i in array:
        if (i >= 0):
            hist[i] += 1
        else:
            negative.append(i)
    return hist
def split_files(files, split_train, split_val, use_test):
    """Splits the files along the provided indices
    """
    files_train = files[:split_train]
    files_val = files[split_train:split_val] if use_test else files[split_train:]

    li = [(files_train, 'train'), (files_val, 'val')]

    # optional test folder
    if use_test:
        files_test = files[split_val:]
        li.append((files_test, 'test'))
    return li
def cmdline2list(cmdline):
    """Build an argv list from a Microsoft shell style cmdline str

    The reverse of list2cmdline that follows the same MS C runtime rules.
    """
    whitespace = ' \t'
    # count of preceding '\'
    bs_count = 0
    in_quotes = False
    arg = []
    argv = []

    for ch in cmdline:
        if ch in whitespace and not in_quotes:
            if arg:
                # finalize arg and reset
                argv.append(''.join(arg))
                arg = []
                bs_count = 0
        elif ch == '\\':
            arg.append(ch)
            bs_count += 1
        elif ch == '"':
            if not bs_count % 2:
                # Even number of '\' followed by a '"'. Place one
                # '\' for every pair and treat '"' as a delimiter
                if bs_count:
                    del arg[-(bs_count // 2):]
                in_quotes = not in_quotes
            else:
                # Odd number of '\' followed by a '"'. Place one '\'
                # for every pair and treat '"' as an escape sequence
                # by the remaining '\'
                del arg[-(bs_count // 2 + 1):]
                arg.append(ch)
            bs_count = 0
        else:
            # regular char
            arg.append(ch)
            bs_count = 0

    # A single trailing '"' delimiter yields an empty arg
    if arg or in_quotes:
        argv.append(''.join(arg))

    return argv
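A short usage sketch for cmdline2list with an invented command line; quoted segments are kept as a single argument.

assert cmdline2list('a "b c" d') == ['a', 'b c', 'd']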
def format_filepath(filepath: str, run_name: str) -> str:
    """Prefixes filepath with run_name"""
    if run_name == '':
        return filepath
    else:
        return '_'.join([run_name, filepath])
def template_directory_path(instance, filename):
    """
    Static location for Returns template.

    :param instance: Request.
    :param filename: Name of file.
    :return: file path.
    """
    return 'wildlifecompliance/returns/template/{0}'.format(filename)
def get_token_index(char_positions, text):
    """
    Get the index of a token in the text.

    :param char_positions: Position of the begin character.
    :param text: Text of the sentence.
    :return: A list of indices.
    """
    space = [' ', '\n', '\t']
    res = []
    index = 0
    start = False
    for i in range(len(text) - 1):
        if text[i] not in space:
            start = True
        if text[i] in space and text[i + 1] not in space and start:
            index += 1
        if i in char_positions:
            res.append(index)
    if len(text) - 1 in char_positions:
        res.append(index)
    return res
def ek_R56Q_RPR(cell):
    """
    Returns the R56Q-RPR reversal potential (in mV) for the given
    integer index ``cell``.
    """
    reversal_potentials = {
        1: -92.2,
        2: -89.0,
        3: -90.1,
        4: -95.0,
        5: -93.1,
        6: -93.4,
    }
    return reversal_potentials[cell]
def datetime_fields(fields):
    """Returns datetime fields from a dict of fields
    """
    return {k: v for k, v in list(fields.items())
            if v.get("optype", False) == "datetime"}
def get_value_by_ref(schema, ref):
    """ Get value referenced by ref """
    value = schema.get(ref[0])
    if value is None:
        return None
    for key in ref[1].split('/')[1:]:
        try:
            value = value[key]
        except KeyError:
            raise Exception(
                'Ref "{}" not found in file "{}"'.format(ref[1][1:], ref[0]),
            )
    if value is None:
        return None
    return value
def extract_undergrad(school, undergrad):
    """
    Extracts information about a LinkedIn user's undergraduate school

    Parameters
    ----------
    school: the undergraduate school
    undergrad: a list of other undergraduate schools

    Returns
    -------
    The updated list of undergraduate schools and the undergraduate year of graduation
    """
    undergrad.append(school['name'])
    date_range = school['date_range']
    if date_range is None:
        undergrad_yr = 'N/A'
    elif 'Present' not in date_range:
        undergrad_yr = int(date_range[-4:])
    elif date_range[0:4].isnumeric():
        undergrad_yr = int(date_range[0:4]) + 4
    else:
        undergrad_yr = int(date_range[4:8])
    return undergrad, undergrad_yr
def mGCD(a, b, m=1):
    """
    Calculates (a ** b) % m for very large b
    Uses squaring for calculating residue
    Complexity: O(log(b))
    """
    a1 = a % m
    p = 1
    while b > 0:
        if b % 2 == 1:
            p *= a1
            p = p % m
        b //= 2
        a1 = (a1 * a1) % m
    return p
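A small usage sketch with invented values; despite the name, the loop above performs modular exponentiation by repeated squaring.

assert mGCD(3, 4, 5) == 1  # 3**4 = 81, and 81 % 5 == 1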
def was_always_present(db_entry, arches):
    """Returns whether the symbol has always been present or not."""
    for arch in arches:
        is_64 = arch.endswith('64')
        introduced_tag = 'introduced-' + arch

        if introduced_tag not in db_entry:
            return False

        if is_64 and db_entry[introduced_tag] != 21:
            return False
        elif not is_64 and db_entry[introduced_tag] != 9:
            return False

    # Else we have the symbol in this arch and was introduced in the first
    # version of it.
    return True
def int_2_bigend(p_int, p_len):
    """ Convert an integer to bigendian byte string.
    """
    l_ret = p_int.to_bytes(p_len, byteorder='big')
    # struct.pack('>', p_int)
    # l_ret = bytearray(p_len)
    # l_max = int((256 ** p_len) / 2)
    # if l_max < p_int:
    #     print('Too Big {} > {} {}'.format(l_max, p_int, l_ret))
    return l_ret
def humanreadablesize(kbytes):
    """Returns sizes in human-readable units. Input is kbytes"""
    try:
        kbytes = float(kbytes)
    except (TypeError, ValueError, UnicodeDecodeError):
        return "unknown"
    units = [(" KB", 2**10), (" MB", 2**20), (" GB", 2**30), (" TB", 2**40)]
    for suffix, limit in units:
        if kbytes > limit:
            continue
        else:
            return str(round(kbytes / float(limit / 2**10), 1)) + suffix
def is_index_like(s):
    """ Looks like a Pandas Index.

    ** Borrowed from dask.dataframe.utils **
    """
    typ = type(s)
    return (
        all(hasattr(s, name) for name in ("name", "dtype"))
        and "index" in typ.__name__.lower()
    )
def company_dict(obj):
    """Creates dictionary for a company field."""
    if obj is None:
        return None

    return {
        'id': str(obj.id),
        'name': obj.name,
        'trading_names': obj.trading_names,
    }
def group_by_compatibility(thermodynamic_states):
    """Utility function to split the thermodynamic states by compatibility.

    Parameters
    ----------
    thermodynamic_states : list of ThermodynamicState
        The thermodynamic state to group by compatibility.

    Returns
    -------
    compatible_groups : list of list of ThermodynamicState
        The states grouped by compatibility.
    original_indices : list of list of int
        The indices of the ThermodynamicStates in the original list.
    """
    compatible_groups = []
    original_indices = []
    for state_idx, state in enumerate(thermodynamic_states):
        # Search for compatible group.
        found_compatible = False
        for group, indices in zip(compatible_groups, original_indices):
            if state.is_state_compatible(group[0]):
                found_compatible = True
                group.append(state)
                indices.append(state_idx)

        # Create new one.
        if not found_compatible:
            compatible_groups.append([state])
            original_indices.append([state_idx])
    return compatible_groups, original_indices
def affected_gene_result(total, contained, at_breakpoint, omim_genes):
    """Provides expected result for affected_genes calcprop."""
    result = {
        "contained": str(contained) + "/" + str(total),
        "at_breakpoint": str(at_breakpoint) + "/" + str(total),
        "omim_genes": str(omim_genes) + "/" + str(total),
    }
    return result
def as_string(string):
    """
    Ensure that whatever type of string is incoming, it is returned as an
    actual string, versus 'bytes' which Python 3 likes to use.
    """
    if isinstance(string, bytes):
        # we really ignore here if we can't properly decode with utf-8
        return string.decode('utf-8', 'ignore')
    return string
def concat_variable(gx, g_input):
    """concatenate the inputs to a tuple of variable

    Inputs:
        None
        ~chainer.Variable
        tuple of variable
    Outputs:
        None: When both of gx and g_input is None
        tuple of variable: Otherwise
    """
    sum_gx = ()
    if isinstance(gx, tuple):
        sum_gx = gx
    elif gx is not None:
        sum_gx = gx,

    if isinstance(g_input, tuple):
        sum_gx += g_input
    elif g_input is not None:
        sum_gx += g_input,

    if len(sum_gx) == 0:
        sum_gx = None,

    return sum_gx
def physical_shape_3d_from_topology_proto_4d(mesh_shape):
    """Convert a 4d shape that we get from TPU estimator to a 3d shape.

    Args:
        mesh_shape: a list of length 4

    Returns:
        a list of length 3
    """
    if len(mesh_shape) != 4 or mesh_shape[2] != 1:
        raise ValueError("Expected a 4d shape [x, y, 1, core]")
    return [mesh_shape[1], mesh_shape[0], mesh_shape[3]]
def until(val, substr):
    """
    Helper function that returns the substring of a string until a certain pattern.
    """
    if substr not in val:
        return val
    return val[:val.find(substr)]
def add_years(date, years, round='down'):
    """Return a date that's `years` years after the date (or datetime) object `date`.

    Return the same date (month and day) in the destination year, if it exists,
    otherwise use an adjacent day (thus changing February 29 to Feb 28 when
    rounding down, or March 1 otherwise).
    """
    if date is None:
        return None
    else:
        try:
            return date.replace(year=date.year + years)
        except ValueError:
            if round == 'down':
                return date.replace(year=date.year + years, day=date.day - 1)
            else:
                return date.replace(year=date.year + years, month=date.month + 1, day=1)
def features_axis_is_np(is_np, is_seq=False):
    """
    B - batch
    S - sequence
    F - features

    :param d: torch.Tensor or np.ndarray of shape specified below
    :param is_seq: True if d has sequence dim
    :return: axis of features
    """
    if is_np:
        # in case of sequence (is_seq == True): (S, F)
        # in case of sample: (F)
        return 0 + is_seq
    else:
        # in case of sequence: (B, S, F)
        # in case of sample: (B, F)
        return 1 + is_seq
def find_x_overlap(rect1, rect2):
    """ Return left_x and width of overlapping x of two rects """
    r1left = rect1['left_x']
    r1right = r1left + rect1['width']
    r2left = rect2['left_x']
    r2right = r2left + rect2['width']
    highest_start_point = r1left if r1left >= r2left else r2left
    lowest_end_point = r1right if r1right <= r2right else r2right

    if highest_start_point < lowest_end_point:
        return highest_start_point, lowest_end_point - highest_start_point
    else:
        return None
def is_filelike(obj):
    """Return whether ``obj`` is a file-like object."""
    return hasattr(obj, 'read')
def exponential_growth(level, constant=1):
    """
    The number of samples in an exponentially growing 1D quadrature rule of
    a given level.

    Parameters
    ----------
    level : integer
        The level of the quadrature rule

    Return
    ------
    num_samples_1d : integer
        The number of samples in the quadrature rule
    """
    if level == 0:
        return 1
    return constant * 2**(level + 1) - 1
def findCovCostRatio(trajStrajDist, c1, c3, strajCov):
    """ Calculates coverage cost ratio for pathlet.

    Args:
        trajStrajDist: dict having subtrajectory for a trajectory ID with
            distance from the pathlet.
        c1, c3 (float): parameters of the greedy algorithm.
        strajCov: dict storing points of subtrajs.

    Returns:
        coverage-cost ratio of the pathlet.
    """
    curCov, curCost = 0, c1
    for k, v in list(trajStrajDist.items()):
        straj, dist = v[0], v[1]
        if straj is None:
            continue
        curCov += strajCov[straj]
        curCost += 1.0 * c3 * dist
    if curCov == 0:
        return 0
    else:
        return (1.0 * curCov) / (1.0 * curCost)
def clean_cases(text: str) -> str:
    """Makes text all lowercase.

    Arguments:
        text: The text to be converted to all lowercase.

    Returns:
        The lowercase text.
    """
    return text.lower()
def extract_modified_bs_sequence(sax_sequence):
    """
    This function extracts the modified Behaviour Subsequence (BS) sequence list,
    which is the original sax word sequence where every run of consecutive equal
    sax words is fused into a single sax word.

    :param sax_sequence: list of original sax words
    :type sax_sequence: list of str
    :return:
        - bs_sequence (:py:class:`list of str`) - list of modified sax words
        - bs_lengths (:py:class:`list of int`) - list of lengths of each modified sax word
    """
    bs_sequence = []
    bs_lengths = []
    curr_len = 1
    for i in range(len(sax_sequence)):
        curr_bs = sax_sequence[i]
        if i < len(sax_sequence) - 1:
            next_bs = sax_sequence[i + 1]
        else:
            next_bs = ''
        if curr_bs == next_bs:
            curr_len = curr_len + 1
        else:
            bs_sequence.append(curr_bs)
            bs_lengths.append(curr_len)
            curr_len = 1
    return bs_sequence, bs_lengths
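A quick usage sketch for extract_modified_bs_sequence with an invented input: equal neighbours collapse into one word and the run lengths are returned alongside.

words, lengths = extract_modified_bs_sequence(["a", "a", "b", "b", "b", "c"])
assert words == ["a", "b", "c"]
assert lengths == [2, 3, 1]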
def _resolve_nested(level_count, source):
    """ resolve multiple levels of generators """
    assert level_count > 0
    if level_count > 1:
        return [_resolve_nested(level_count - 1, item) for item in source]
    else:
        return list(source)
def _GenerateLocalProperties(sdk_dir):
    """Returns the data for project.properties as a string."""
    return '\n'.join([
        '# Generated by //build/android/gradle/generate_gradle.py',
        'sdk.dir=%s' % sdk_dir,
        ''])
def partextras(labels):
    """Return a dictionary of extra labels for use in prompts to the user

    Intended use is in strings of the form "(l)ocal%(l)s".
    """
    if labels is None:
        return {
            "l": "",
            "o": "",
        }

    return {
        "l": " [%s]" % labels[0],
        "o": " [%s]" % labels[1],
    }
def solve(result, answer):
    """ Validate user answer """
    try:
        return result == int(answer)
    except ValueError:
        return False
def strip_annotations(ev):
    """Strip all annotations from event."""
    if "_humilis" in ev:
        del ev["_humilis"]
    return ev
def bool_from_string(subject):
    """
    Interpret a string as a boolean.

    Any string value in: ('True', 'true', 'On', 'on', '1') is interpreted as a
    boolean True. Useful for JSON-decoded stuff and config file parsing
    """
    if isinstance(subject, bool):
        return subject
    if hasattr(subject, 'startswith'):  # str or unicode...
        if subject.strip().lower() in ('true', 'on', '1'):
            return True
    return False
def getHttpStatusCode(v):
    """Return HTTP response code as integer, e.g. 204."""
    if hasattr(v, "value"):
        return int(v.value)  # v is a DAVError
    else:
        return int(v)
def get_type(line):
    """
    Returns either a string, indicating whether this is a wired, wireless,
    or loopback interface, or None if this can not be determined.

    :param line: line of output from: ip a
    """
    if "fq_codel" in line:
        return "wired"
    if "mq" in line:
        return "wireless"
    if "noqueue" in line:
        return "loopback"
    return None
def get_bool_from_ini(ini_param):
    """
    Get boolean value from M$ INI style configuration.

    :param ini_param:
    :return: True, False, or None.
    """
    if not isinstance(ini_param, str):
        return None
    if ini_param.lower() in ['0', 'off', 'false']:
        return False
    elif ini_param.lower() in ['1', 'on', 'true']:
        return True
    else:
        return None
def insert(rcd, insert_at_junctions, genotype):
    """
    Given the genotype (ie the junction that was chosen), returns the
    corresponding insert
    """
    junctions = ["x", "y", "z"]
    if genotype[1] != "-":
        j = junctions.index(genotype[1])
        return insert_at_junctions[j]
    else:
        return "-"
def _nipype_logging_config(cachedir):
    """
    This function takes in...

    :param wfrun: cachedir
    :return:
    """
    return {
        "workflow_level": "INFO",    # possible options:
        "filemanip_level": "INFO",   # INFO (default) | DEBUG
        "interface_level": "INFO",
        "log_directory": cachedir,
    }
def postprocess(preds, src):
    """
    since bertMaskedLM output "a,x,b,c,noise,noise", we truncate them
    """
    res = []
    for i in range(len(src)):
        res.append("".join(preds[i].split())[:len(src[i]["input_ids"]) - 2])
    return res
def _format_number_list(number_list):
    """Return string representation of a list of integers, using ``,`` as a
    separator and ``:`` as a range separator.
    """
    ret = ""
    first_in_range = number_list[0]
    last_in_range = number_list[0]
    previous = number_list[0]
    for number in number_list[1:]:
        if number - previous != 1:
            # reached end of range
            if last_in_range > first_in_range:
                ret += "%d:%d," % (first_in_range, last_in_range)
            # passed isolated number
            else:
                ret += "%d," % previous
            # reinitialize range
            first_in_range = number
            last_in_range = number
        else:
            # still inside a continuous range
            last_in_range = number
        previous = number

    # last number
    if last_in_range > first_in_range:
        ret += "%d:%d" % (first_in_range, last_in_range)
    else:
        ret += "%d" % previous
    return ret
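A short usage sketch for _format_number_list with an invented input: consecutive runs collapse to ranges, isolated values stay single.

assert _format_number_list([1, 2, 3, 5, 7, 8]) == "1:3,5,7:8"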
def t_add(t, v):
    """
    Add value v to each element of the tuple t.
    """
    return tuple(i + v for i in t)
def _validate_dev_idx(dev_idx: int) -> bool:
    """Validate given D2XX device index.

    Args:
        dev_idx: D2XX device index

    Returns:
        bool: Is given device index valid?
    """
    return 0 <= dev_idx < (2 ** 31)
def format_track(index, data):
    """Returns a formatted line of text describing the track."""
    return "{}. {artist} - {album} - {track}\n".format(index, **data)
def HexToByte(hexStr):
    """
    Convert a string hex byte values into a byte string. The Hex Byte values may
    or may not be space separated.
    """
    # The list comprehension implementation is fractionally slower in this case
    #
    #    hexStr = ''.join( hexStr.split(" ") )
    #    return ''.join( ["%c" % chr( int ( hexStr[i:i+2],16 ) ) \
    #        for i in range(0, len( hexStr ), 2) ] )

    bytes = []

    hexStr = ''.join(hexStr.split(" "))

    for i in range(0, len(hexStr), 2):
        bytes.append(chr(int(hexStr[i:i + 2], 16)))

    return ''.join(bytes)
def get_original_columns_used(cols_not_removed, cols_used_after_removal):
    """If a matrix is subset down to only have columns indexed by cols_not_removed,
    and then is further subset to only contain cols_used_after_removal, in that
    order, then this method returns the indices of the columns in the old matrix
    that correspond to the columns in the new matrix."""
    return [cols_not_removed[x] for x in cols_used_after_removal]
def typeList(a_list):
    """assumes a_list is a list with at least 1 element
    returns a list of the types of all the elements of a_list"""
    type_list = []
    for elem in a_list:
        type_list.append(type(elem))
    return type_list
def parse_code(code):
    """Method to parse the code to extract line number and snippets
    """
    code_lines = code.split("\n")

    # The last line from the split has nothing in it; it's an artifact of the
    # last "real" line ending in a newline. Unless, of course, it doesn't:
    last_line = code_lines[len(code_lines) - 1]
    last_real_line_ends_in_newline = False
    if len(last_line) == 0:
        code_lines.pop()
        last_real_line_ends_in_newline = True

    snippet_lines = []
    first = True
    first_line_number = 1
    for code_line in code_lines:
        number_and_snippet_line = code_line.split(" ", 1)
        if first:
            first_line_number = int(number_and_snippet_line[0])
            first = False
        snippet_line = number_and_snippet_line[1] + "\n"
        snippet_lines.append(snippet_line)

    if not last_real_line_ends_in_newline:
        last_line = snippet_lines[len(snippet_lines) - 1]
        snippet_lines[len(snippet_lines) - 1] = last_line[: len(last_line) - 1]

    return first_line_number, snippet_lines