content
stringlengths
42
6.51k
def generate_timestamp_format(date_mapper: dict) -> str:
    """Build a "month/day/year" strftime format string.

    Each value of ``date_mapper`` describes one date component via its
    ``date_type`` ("day", "month" or "year") and ``time_format`` (an
    strftime directive). Components absent from the mapping fall back
    to "%d", "%m" and "%y" respectively.

    Parameters
    ----------
    date_mapper: dict
        Schema mapping (JSON) filtered to Day/Month/Year entries.

    Output
    ------
    e.g. "%m/%d/%Y"
    """
    # Defaults used when a component is missing from the mapping.
    parts = {"day": "%d", "month": "%m", "year": "%y"}
    for spec in date_mapper.values():
        date_type = spec["date_type"]
        if date_type in parts:
            parts[date_type] = spec["time_format"]
    return "{}/{}/{}".format(parts["month"], parts["day"], parts["year"])
def convert_question_context_to_standard_format(questions, contexts, qas_ids):
    """Convert parallel lists into a list of SQuAD-style dicts.

    Args:
        questions : list of question
        contexts : list of context
        qas_ids : list of qa_ids

    Returns:
        list of dict with keys "paragraph_text", "question_text", "qas_id"
    """
    # Index-based access (rather than zip) so a shorter contexts/qas_ids
    # list raises IndexError instead of silently truncating.
    return [
        {
            "paragraph_text": contexts[i],
            "question_text": questions[i],
            "qas_id": qas_ids[i],
        }
        for i in range(len(questions))
    ]
def compare_svg(
    current,
    previous,
    up_color="default",
    down_color="default",
    neutral_color="default",
):
    """Compare current and previous value and return an SVG based on the result.

    Returns an up-arrow SVG when current > previous, a down-arrow when
    current < previous, and a rotated (neutral) arrow when the values are
    equal or when ``previous`` is falsy (no baseline to compare against —
    NOTE(review): a legitimate previous value of 0 also takes the neutral
    branch; confirm that is intended).

    The *_color parameters select a palette entry ('good', 'bad',
    'neutral', 'default'); an unknown name makes colors.get() return None,
    which is interpolated literally into the SVG style.
    """
    # Named palette for the arrow fill/stroke.
    colors = {
        "good": "#5e9732",
        "bad": "#c41230",
        "neutral": "#fdc010",
        "default": "#006c9c",
    }
    up_svg = f"""
    <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 22" style="width: 0.25in">
      <g id="Layer_2" data-name="Layer 2">
        <g id="Layer_1-2" data-name="Layer 1">
          <polygon style="
              fill: {colors.get(up_color)};
              stroke: {colors.get(up_color)};
              stroke-linecap: round;
              stroke-linejoin: round;
              stroke-width: 2px;
            "
            points="31 14.78 23.5 7.89 16 1 8.5 7.89 1 14.78 6.66 14.78 6.66 21 10.32 21 22.04 21 25.42 21 25.42 14.78 31 14.78"
          />
        </g>
      </g>
    </svg>
    """
    down_svg = f"""
    <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 22" style="width: 0.25in; height: 0.25in">
      <g id="Layer_2" data-name="Layer 2">
        <g id="Layer_1-2" data-name="Layer 1">
          <polygon style="
              fill: {colors.get(down_color)};
              stroke: {colors.get(down_color)};
              stroke-linecap: round;
              stroke-linejoin: round;
              stroke-width: 2px;
              width: .25in;
            "
            points="1 7.22 8.5 14.11 16 21 23.5 14.11 31 7.22 25.34 7.22 25.34 1 21.68 1 9.96 1 6.58 1 6.58 7.22 1 7.22"
          />
        </g>
      </g>
    </svg>
    """
    # Same arrow shape as up_svg, rotated 90 degrees to point sideways.
    neutral_svg = f"""
    <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 22" style="width: 0.25in; height: 0.25in;">
      <g id="Layer_2" data-name="Layer 2">
        <g id="Layer_1-2" data-name="Layer 1">
          <polygon style="
              fill: {colors.get(neutral_color)};
              stroke: {colors.get(neutral_color)};
              stroke-linecap: round;
              stroke-linejoin: round;
              stroke-width: 2px;
            "
            points="31 14.78 23.5 7.89 16 1 8.5 7.89 1 14.78 6.66 14.78 6.66 21 10.32 21 22.04 21 25.42 21 25.42 14.78 31 14.78"
            transform="rotate(90, 17, 11)"
          />
        </g>
      </g>
    </svg>
    """
    # No previous value (None, 0, "") -> neutral.
    if not previous:
        return neutral_svg
    if current > previous:
        return up_svg
    if current < previous:
        return down_svg
    if current == previous:
        return neutral_svg
def generate_defines_buf(defines_dict):
    """XXX: This function is not ready to be used, the defines need to be
    organized (DO NOT USE)

    Generate a buffer with the specified Verilog defines.

    Args:
        defines_dict (dictionary): define values in the format 'name':'value'

    Returns:
        (string): buffer with the defines specified, empty string for an
        empty dict

    Raises:
        Nothing
    """
    if not defines_dict:
        return ""
    body = "".join(
        "`define %s %s\n" % (name, value) for name, value in defines_dict.items()
    )
    # Leading blank line before the defines, trailing blank line after.
    return "\n" + body + "\n"
def yang(x, epsilon):
    """Yang benchmark function: sum of epsilon[i] * |x[i]|**(i+1).

    x and epsilon are R^5 vectors, x[i] <= 5 (?)
    """
    # epsilon is indexed (not zipped) so a shorter epsilon raises IndexError,
    # matching the original behaviour.
    return sum(epsilon[i] * abs(value) ** (i + 1) for i, value in enumerate(x))
def recurse_data(data, keys, bucket=None, handler=None):
    """
    Performs a recursive search in data for values named by any key.
    If no such keys are present at root level, goes deeper into
    bucket values. If handler given, calls handler with each found
    value and key, otherwise returns the first found value.
    Both data and bucket contents can be dicts or lists or tuples.
    """
    if not isinstance(data, (dict, list, tuple)):
        return None
    datas = data if isinstance(data, (list, tuple)) else [data]
    for item in [x for x in datas if isinstance(x, dict)]:
        for key in keys:
            if key in item:
                if handler:
                    handler(item, key)
                else:
                    return item[key]
        if bucket in item:
            # BUG FIX: handler was previously dropped here, so matches
            # found below the first nesting level were returned instead
            # of being passed to the handler.
            return recurse_data(item[bucket], keys, bucket, handler)
    return None
def iterative_levenshtein(s, t):
    """
    iterative_levenshtein(s, t) -> (ldist, edit_ops)

    ldist is the Levenshtein distance between the strings
    s and t. For all i and j, dist[i,j] will contain the Levenshtein
    distance between the first i characters of s and the
    first j characters of t.

    Also backtracks through the DP table to recover an edit-operation
    string: '-' keep, 'd' delete, 'i' insert, 's' substitute.

    Credit: https://www.python-course.eu/levenshtein_distance.php
    """
    rows = len(s)+1
    cols = len(t)+1
    # dist[i][j] = distance between s[:i] and t[:j].
    dist = [[0 for x in range(cols)] for x in range(rows)]
    # source prefixes can be transformed into empty strings
    # by deletions:
    for i in range(1, rows):
        dist[i][0] = i
    # target prefixes can be created from an empty source string
    # by inserting the characters
    for i in range(1, cols):
        dist[0][i] = i
    row, col = 0, 0
    for col in range(1, cols):
        for row in range(1, rows):
            if s[row-1] == t[col-1]:
                cost = 0
            else:
                cost = 1
            dist[row][col] = min(dist[row-1][col] + 1,       # deletion
                                 dist[row][col-1] + 1,       # insertion
                                 dist[row-1][col-1] + cost)  # substitution
    # After the loops, (row, col) is the bottom-right cell.
    ldist = dist[row][col]
    # Backtrack from the bottom-right corner, preferring "no change",
    # then deletion, insertion, substitution. ldist2 recounts the cost
    # as a sanity check against ldist.
    edit_ops = list()
    dist_last = ldist
    ldist2 = 0
    while row > 0 or col > 0:
        # Out-of-range neighbours get ldist+1 so they can never win min().
        dist_diag = dist[row-1][col-1] if row > 0 and col > 0 else ldist + 1
        dist_up = dist[row-1][col] if row > 0 else ldist + 1
        dist_left = dist[row][col-1] if col > 0 else ldist + 1
        dist_min = min(dist_diag, dist_up, dist_left)
        if dist_diag == dist_min and dist_min == dist_last:
            # no change
            row -= 1
            col -= 1
            edit_ops.insert(0, "-")
        elif dist_up == dist_min:
            # deletion
            row -= 1
            ldist2 += 1
            edit_ops.insert(0, "d")
        elif dist_left == dist_min:
            # insertion
            col -= 1
            ldist2 += 1
            edit_ops.insert(0, "i")
        elif dist_diag == dist_min and dist_min < dist_last:
            # substitution
            row -= 1
            col -= 1
            ldist2 += 1
            edit_ops.insert(0, "s")
        dist_last = dist_min
    # Debug guard: recounted cost must equal the DP distance.
    # NOTE(review): exits the whole process on mismatch — harsh for
    # library code; consider raising instead.
    if ldist != ldist2:
        print(f"WRONG!!! {ldist}/{ldist2}")
        for r in range(rows):
            print(dist[r])
        exit(-1)
    return ldist, ''.join(edit_ops)
def is_email(raw):
    """Returns true if the raw string represents an "AT"-separated email
    address (i.e. contains the literal substring "AT").

    Arguments:
        raw {str} -- A raw string.

    Returns:
        [bool] -- whether "AT" occurs in the string.
    """
    return "AT" in raw
def get_new_position(old_position, move):
    """
    Get the new [x, y] position based on the man's current position and a
    move direction: 'l'/'r' change x, 'u'/'d' change y. An unknown move
    prints 'invalid move' and returns the original position object.
    """
    deltas = {'l': (-1, 0), 'r': (1, 0), 'u': (0, -1), 'd': (0, 1)}
    if move not in deltas:
        print('invalid move')
        return old_position
    dx, dy = deltas[move]
    return [old_position[0] + dx, old_position[1] + dy]
def GetPad(offset, alignment):
    """Returns the pad necessary to reserve space so that |offset + pad|
    equals to some multiple of |alignment|."""
    # -offset mod alignment is the distance up to the next multiple.
    return -offset % alignment
def advanced_overflow(val):
    """
    Check advanced (signed) overflow for 32-bit values: XOR of the
    carry into bit 31 and the carry out of it (bits 30 and 31 of val).
    """
    sign_bit = val[31]
    next_bit = val[30]
    return sign_bit ^ next_bit
def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs):
    """A reply handler for commands that haven't been added to the reply
    list.

    Accepts (and ignores) anything; returns empty strings for stdout and
    stderr.
    """
    stdout, stderr = '', ''
    return stdout, stderr
def recursive_log(num):
    """Solution to exercise C-4.10.

    Describe a recursive algorithm to compute the integer part of the
    base-two logarithm of n using only addition and integer division.

    Raises
    ------
    ValueError
        If num < 1. (BUG FIX: the original recursed forever for such
        inputs, since num // 2 never reaches the base case of 1.)
    """
    if num < 1:
        raise ValueError("num must be a positive integer")

    def recurse(num, count):
        if num == 1:
            return count  # Base case
        return recurse(num // 2, count + 1)
    return recurse(num, 0)
def get_static_data(modelSpec):
    """
    Return a dictionary of static values that all objects of this model
    have.

    This applies only to kubernetes resources where ``kind`` and
    ``apiVersion`` are statically determined by the resource, via the
    `x-kubernetes-group-version-kind` extension. For example for a v1
    Deployment we return::

        {'kind': 'Deployment', 'apiVersion': 'apps/v1'}

    Returns {} when the extension is absent or lists more than one
    group/version/kind.
    """
    if 'x-kubernetes-group-version-kind' not in modelSpec:
        return {}
    values = modelSpec['x-kubernetes-group-version-kind']
    if len(values) != 1:
        return {}
    gvk = values[0]
    group = gvk.get('group', '')
    # Core-group resources ('') have no "group/" prefix in apiVersion.
    prefix = group + '/' if group else ''
    return {'kind': gvk['kind'], 'apiVersion': prefix + gvk['version']}
def get_x_y_coordinates_as_list(line_segments):
    """
    Returns x-coordinates and y-coordinates as separate lists.

    :param line_segments: Line segments exposing get_endpoint_x_coordinates()
        and get_endpoint_y_coordinates(), each returning a list.
    """
    xs = []
    ys = []
    for segment in line_segments:
        xs.extend(segment.get_endpoint_x_coordinates())
        ys.extend(segment.get_endpoint_y_coordinates())
    return xs, ys
def euler_int(f, x, u, T):
    """
    Perform one explicit (forward) Euler integration step.

    NOTE(review): the original docstring called this "trapezoidal", but
    x + T*f(x, u) is the forward Euler rule, not the trapezoidal rule.

    The function to integrate is called here as f(x, u), where the state
    variables are collected in x and we assume a constant input vector u
    over time interval T > 0. (The original docstring mentions an
    f(x, u, params) signature — confirm against callers.)
    """
    x_new = x + T * f(x, u)
    return x_new
def svg_filter(svg_xml):
    """
    Extract the bare <svg>...</svg> element from inline SVG markup,
    dropping any DOCTYPE / XML-declaration preamble (and trailing text).

    Raises ValueError if "<svg" or "</svg>" is not present.
    """
    joined = "".join(svg_xml)
    start = joined.index("<svg")
    stop = joined.index("</svg>") + len("</svg>")
    return joined[start:stop]
def jwt_response_payload_handler(token, user=None, request=None):
    """
    Returns the response data for both the login and refresh views.
    Override to return a custom response such as including the serialized
    representation of the User.

    Deprecated: as some info can be within token xxx.yyy.zzz payload =>
    yyy (base64 encoded). Currently only the token itself is returned;
    user and request are accepted for interface compatibility but unused.
    """
    payload = {'token': token}
    return payload
def generatePolicy(effect, principalId, resource):
    """Generate an AWS API Gateway authorizer policy based on input.

    Returns a dict with principalId, an IAM policyDocument allowing or
    denying execute-api:Invoke on the resource, and a context marker.
    """
    statement = {
        'Action': 'execute-api:Invoke',
        'Effect': effect,
        'Resource': resource,
    }
    return {
        'principalId': principalId,
        'policyDocument': {
            'Version': '2012-10-17',
            'Statement': [statement],
        },
        'context': {'policyGenerator': 'authorizer.authorize'},
    }
def testFloat(val): """ Test value for float. Used to detect use of variables, strings and none types, which cannot be checked. """ try: return type(float(val)) == float except Exception: return False
def merge(h1, h2):
    """
    Returns a new dictionary containing the key-value pairs of ``h1``
    combined with the key-value pairs of ``h2``. Duplicate keys from
    ``h2`` overwrite keys from ``h1``.

    :param h1: A python dict
    :param h2: A python dict
    :return: A new python dict containing the merged key-value pairs
    """
    return {**h1, **h2}
def filter_by_interface(objects, interface_name):
    """
    Filter the object paths based on their support for the specified
    interface.

    :param objects: mapping of object path -> {interface name -> ...}
    :return: list of paths whose interface dict contains interface_name
    """
    return [
        path
        for path, interfaces in objects.items()
        if interface_name in interfaces
    ]
def create_record(name, telephone, address):
    """Create a contact record dict with keys 'name', 'phone', 'address'."""
    return {
        'name': name,
        'phone': telephone,
        'address': address,
    }
def clean_text(text):
    """
    Remove non-ascii characters from a string.

    Args:
        text (str): A string.

    Returns:
        str: A string with all characters with ord() >= 128 removed.
    """
    return ''.join(ch for ch in text if ord(ch) < 128)
def cardinal_to_ordinal(n):
    """Return ordinal string version of cardinal int `n`.

    Parameters
    ----------
    n : int
        Cardinal to convert to ordinal. Must be >= 0.

    Returns
    -------
    str
        Ordinal version of cardinal `n`.

    Raises
    ------
    ValueError
        If `n` is less than 0.

    Examples
    --------
    >>> cardinal_to_ordinal(0)
    '0th'
    >>> cardinal_to_ordinal(1)
    '1st'
    >>> cardinal_to_ordinal(2)
    '2nd'
    >>> cardinal_to_ordinal(3)
    '3rd'
    """
    if n < 0:
        raise ValueError("Cannot convert negative integer %d to ordinal "
                         "string." % n)
    last_digit = n % 10
    # 11th, 12th, 13th (and 111th, ...) are exceptions to the st/nd/rd rule.
    is_teen = (n // 10) % 10 == 1
    if is_teen or last_digit == 0 or last_digit > 3:
        suffix = "th"
    else:
        suffix = {1: "st", 2: "nd", 3: "rd"}[last_digit]
    return "%d%s" % (n, suffix)
def calc_hash_for_message(message):
    """
    Given an EMDR message string, calculate the hash.

    :param str message: A compressed or uncompressed EMDR message string.
    :rtype: int
    :returns: The hash to use for deduping. Uses Python's built-in
        hashing — fast and simple, but note that str hashes are salted
        per process, so values are not stable across runs.
    """
    digest = hash(message)
    return digest
def pf(basic):
    """Provident fund: 12% of the basic salary."""
    rate_percent = 12
    return basic * rate_percent / 100
def checkEachLineCount(mat):
    """
    Assert that each line has a constant number of ratings.

    @param mat The matrix checked
    @return The number of ratings (sum of the first row)
    @throws AssertionError If lines contain different number of ratings
    """
    expected = sum(mat[0])
    assert all(sum(row) == expected for row in mat[1:]), \
        "Line count != %d (n value)." % expected
    return expected
def getattrib(node, attr_name):
    """Get an attribute of an XML DOM node; None when node is None."""
    if node is None:
        return None
    return node.getAttribute(attr_name)
def _process_messages(msg): """ Sanitize the message to something friendlier to the encryption program @type msg: str @rtype: None """ cleaned_message = '' for char in msg.upper(): if char.isalpha(): cleaned_message += char return cleaned_message
def _item_by_key_postfix(dictionary, key_prefix):
    """Return the first value whose key ENDS with ``key_prefix``.

    NOTE(review): despite the parameter name ``key_prefix`` (and the
    original docstring saying "begins with"), the match uses
    ``str.endswith`` — i.e. a suffix match, consistent with the function
    name ``_item_by_key_postfix``.

    Returns {} when no key matches.
    """
    for key, value in dictionary.items():
        if key.endswith(key_prefix):
            return value
    return {}
def isB(ner_tag: str):
    """
    True when the tag opens a new entity span.

    We store NER tags as strings with a coarse BIO prefix and a label,
    e.g. "B-PER"; this checks only the leading "B".

    :param ner_tag:
    :return:
    """
    begin_marker = "B"
    return ner_tag.startswith(begin_marker)
def stackhunter(cookie = 0x7afceb58):
    """Args: [cookie = 0x7afceb58]

    Returns an an egghunter, which searches from esp and upwards
    for a cookie. However to save bytes, it only looks at a single
    4-byte alignment. Use the function stackhunter_helper to generate
    a suitable cookie prefix for you.

    The default cookie has been chosen, because it makes it possible to
    shave a single byte, but other cookies can be used too.
    """
    cookie = int(cookie)
    # Special case: if the low three bytes of the cookie are 0xfceb58,
    # `jne stackhunter+1` can re-enter the loop one byte in, reusing the
    # cookie bytes themselves as the `pop eax` — saving one byte.
    if (cookie & 0xffffff) == 0xfceb58:
        return """
            stackhunter:
                cmp dword eax, 0x%08x
                jne stackhunter+1
                jmp esp
        """ % cookie
    else:
        # Generic version: pop the next stack dword and compare with the
        # cookie; on a match, jump to the data just above it.
        return """
            stackhunter:
                pop eax
                cmp dword eax, 0x%08x
                jne stackhunter
                jmp esp
        """ % cookie
def hklpattern_applies(hkl, condhkl):
    """Determine if Miller indices fit a certain pattern.

    Parameters
    ----------
    hkl : sequence of three integers
        Miller indices
    condhkl : str
        condition string similar to 'hkl', 'hh0', '-h-h0' or '0k0'

    Returns
    -------
    int
        1 if hkl fulfills the pattern, 0 otherwise

    BUG FIX: the second-index check previously tested
    ``hkl[1] != -hkl[0]`` even when no '-' preceded the 'h', so a
    pattern like 'hh0' wrongly rejected e.g. (1, 1, 0). The '-' now
    only negates the 'h' condition that follows it.
    """
    n = 0
    # First index: a literal '0' requires h == 0.
    if (condhkl[n] == '0' and hkl[0] != 0):
        return 0
    n = n + 1
    # Optional '-' negates the following 'h' condition (k == -h).
    negate = False
    if (condhkl[n] == '-'):
        negate = True
        n = n + 1
    if (condhkl[n] == 'h'):
        if negate:
            if (hkl[1] != -hkl[0]):
                return 0
        elif (hkl[1] != hkl[0]):
            return 0
    elif (condhkl[n] == '0' and hkl[1] != 0):
        return 0
    # Last index: a literal '0' requires l == 0.
    if (condhkl[len(condhkl)-1] == '0' and hkl[2] != 0):
        return 0
    return 1
def clean_up_spacing(sentence):
    """
    Clean up spacing.

    :param sentence: str
        a sentence to clear of leading and trailing whitespace.
    :return: str
        the sentence with leading/trailing whitespace removed.
    """
    stripped = sentence.strip()
    return stripped
def find_index(column_name, row, error_msg):
    """
    Find what column has the specified label. The search is case
    insensitive (cells are stripped and lowercased before comparing).

    Args:
        column_name: Column label to search for
        row: The row with the headers for this table, typically the first.
        error_msg: Error message to raise with ValueError.

    Returns:
        Index of the column with the specified name.

    Raises:
        ValueError: If no column named column_name can be found.
    """
    target = column_name.lower()
    for index, cell in enumerate(row):
        if cell.strip().lower() == target:
            return index
    raise ValueError(error_msg)
def bwtc_scaling_factor(order):
    """Return the appropriate scaling factor for bandwidth to timeconstant
    conversion for the provided demodulator order (1-8).

    Raises RuntimeError for any other order.
    """
    factors = {
        1: 1.0,
        2: 0.643594,
        3: 0.509825,
        4: 0.434979,
        5: 0.385614,
        6: 0.349946,
        7: 0.322629,
        8: 0.300845,
    }
    try:
        return factors[order]
    except KeyError:
        raise RuntimeError('Error: Order (%d) must be between 1 and 8.\n'
                           % order)
def clear_end(line: str, chars: list):
    """
    Clears line's end from unwanted chars, one trailing character at a
    time.

    Parameters
    ----------
    line : str
        Line to be cleared.
    chars : list of chars
        Unwanted chars.

    Returns
    -------
    line : str
        Given line, cleared from unwanted chars.
    """
    # str.endswith accepts a tuple of suffixes (returns False for ()).
    unwanted = tuple(chars)
    while line.endswith(unwanted):
        line = line[:-1]
    return line
def get_position_in_scaffold(exon_list, position):
    """Calculate the position of the nucleotide on the scaffold.

    exon_list holds (start, end) pairs (inclusive, possibly as strings);
    position is a 0-based offset within the concatenated exons.
    Returns -1 when position lies beyond the total exon length.
    """
    consumed = 0  # total exon length seen so far
    for exon in exon_list:
        length = int(exon[1]) - int(exon[0]) + 1
        if position < consumed + length:
            return position + int(exon[0]) - consumed
        consumed += length
    return -1
def populate_full_albums_table(db_session, l):
    """Save all albums to the albums table.

    Clears the table first, then inserts every row of ``l`` (each an
    (artist_id, album_id, name, year, precise_rating) sequence).
    Returns False without touching the database when ``l`` is falsy,
    True after a successful commit.
    """
    if not l:
        return False
    cursor = db_session.cursor()
    cursor.execute("""DELETE FROM albums""")
    insert_sql = """INSERT INTO albums
                        (artist_id, album_id, name, year, precise_rating)
                        VALUES (?,?,?,?,?)"""
    for album_row in l:
        cursor.execute(insert_sql, album_row)
    db_session.commit()
    cursor.close()
    return True
def _get_params_name(prefix, item): """Makes the params name for the k,v pair.""" return prefix + '_'+ item
def get_edge_resolution(pixel_x, width, distance, wavelength):
    """Calculate the resolution at the detector edge.

    Returns 0. when the detector distance is zero.
    """
    from math import atan, sin

    distance = float(distance)
    if abs(distance) <= 0.0:
        return 0.
    # Half of the detector extent along x.
    half_extent = 0.5 * float(pixel_x) * int(width)
    return float(wavelength) / (2 * sin(0.5 * atan(half_extent / distance)))
def handle_hw_info(line, hw_info):
    """Handle the hardware information.

    Returns True when any board entry equals the first 1, 2 or 3
    whitespace-split tokens of ``line`` (re-joined with single spaces).
    """
    tokens = line.split()
    prefixes = {
        " ".join(tokens[0:1]),
        " ".join(tokens[0:2]),
        " ".join(tokens[0:3]),
    }
    return any(board_line in prefixes for board_line in hw_info)
def partition(alist, lower, higher):
    """Lomuto partition of alist[lower..higher] around pivot alist[higher].

    alist - a list to partition (modified in place)
    lower - index of the lower end in list to start partitioning from
    higher - index of higher end in list to end the partitioning

    Returns the final index of the pivot.
    """
    pivot = alist[higher]
    boundary = lower  # first index of the "> pivot" region
    for j in range(lower, higher):
        if alist[j] <= pivot:
            alist[boundary], alist[j] = alist[j], alist[boundary]
            boundary += 1
    # Put the pivot between the two regions.
    alist[boundary], alist[higher] = alist[higher], alist[boundary]
    return boundary
def noNamespace(id: str) -> str:
    """Remove the namespace part of an identifier and return the
    remainder. Example: 'm2m:cnt' -> 'cnt'.

    With zero or more than one ':' the part before the first ':' (or the
    whole string) is returned.
    """
    parts = id.split(':')
    if len(parts) == 2:
        return parts[1]
    return parts[0]
def _get_youtube_x(insta_x, fa_youtube_width, fa_insta_width): """ Get YouTube icon's x position given Instagram x for centre-alignment. """ return insta_x - (fa_youtube_width - fa_insta_width) // 2
def is_switchport_default(existing):
    """Determines if switchport has a default config based on mode.

    Args:
        existing (dict): existing switchport configuration from Ansible mod

    Returns:
        boolean: True if switchport has OOB Layer 2 config, i.e.
            access/native vlan 1, trunk all (1-4094) and mode 'access'
    """
    return (
        str(existing['access_vlan']) == '1'
        and str(existing['native_vlan']) == '1'
        and existing['trunk_vlans'] == '1-4094'
        and existing['mode'] == 'access'
    )
def get_entry_by_filename(entries, fn, key=None):
    """Search for an entry in entries where key(entry['file']) matches
    key(fn) and return it. If key is None the file names are compared
    directly. Returns None if no entry matches.
    """
    if key is None:
        key = lambda value: value
    for entry in entries:
        # key(fn) is recomputed per entry, matching the original exactly
        # (in case key is not a pure function).
        if key(fn) == key(entry['file']):
            return entry
    return None
def theheader(n1, n2):
    """Return the csv header as a list of column names.

    NOTE(review): n1 and n2 are accepted but unused — the last two
    columns are always 'file1' and 'file2'. Confirm whether the
    parameters were meant to be interpolated instead.
    """
    header = "Object Key, Object Name, Field Name, %s, %s" % ('file1', 'file2')
    return header.split(',')
def format_treevisitor_str(treevisitor_str: str):
    """
    Transform a string like "(message & author)" into ["message", "author"]
    or "(message)" into ["message"]. Input without a leading "(" is
    returned unchanged as a one-element list.
    """
    if not treevisitor_str.startswith("("):
        return [treevisitor_str]
    inner = treevisitor_str.removeprefix("(").removesuffix(")")
    if "&" in inner:
        # Strip surrounding quotes/spaces from each '&'-separated word.
        return [part.strip('" ') for part in inner.split("&")]
    return [inner.strip().removeprefix('"').removesuffix('"')]
def search_results(results):
    """Display search results: the first three titles wrapped in \\x02
    (IRC bold), plus an "and N more..." suffix for the rest."""
    titles = [page.title for page in results]
    shown = ', '.join('\x02{}\x02'.format(title) for title in titles[:3])
    hidden = titles[3:]
    if hidden:
        shown += ' and {} more...'.format(len(hidden))
    return shown
def get_scalebin(x, rmin=0, rmax=100, tmin=0, tmax=100, step=10):
    """
    Scale variable x from the r-domain to the t-domain and bin it with
    the given step size; return (bin_label, bin_index).

    rmin/rmax is the range of the measurement; tmin/tmax is the desired
    target range. Values scaling past tmax fall into the ">=tmax" bin.
    """
    scaled = (x - rmin) / (rmax - rmin) * (tmax - tmin) + tmin
    pos = 0
    for pos, lower in enumerate(range(tmin, tmax, step)):
        if scaled < lower + step:
            return "[%d,%d)" % (lower, lower + step), pos
    return ">=%d" % (tmax), pos + 1
def is_all_unique_dict(s):
    """Uses a dictionary of character counts to test whether every
    character in s is unique."""
    counts = {}
    for ch in s:
        counts[ch] = counts.get(ch, 0) + 1
        if counts[ch] > 1:
            return False
    return True
def complement(seq):
    """Return the DNA complement of a sequence.

    Parameters
    ----------
    seq : array_like, or str
        Sequence over the alphabet A/C/G/T; any other character raises
        KeyError.

    Returns
    -------
    str
    """
    pairs = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
    return ''.join(pairs[base] for base in seq)
def bgOM(r, ra, b0, bi):
    """
    Generalized (Osipkov 1979; Merritt 1985) anisotropy profile.

    Parameters
    ----------
    r : array_like, float
        Distance from center of the system (nonzero; r = 0 divides by
        zero for scalar input).
    ra : float
        Anisotropy radius.
    b0 : float
        Anisotropy at r = 0.
    bi : float
        Anisotropy at r -> Infinity.

    Returns
    -------
    b : array_like, float
        Anisotropy profile.
    """
    ratio_sq = (ra / r) * (ra / r)
    return b0 + (bi - b0) / (1 + ratio_sq)
def accuracy(y_true, y_pred):
    """
    Function to calculate accuracy.

    :param y_true: list of true values
    :param y_pred: list of predicted values
    :return: accuracy score (raises ZeroDivisionError for empty y_true)
    """
    correct = sum(1 for yt, yp in zip(y_true, y_pred) if yt == yp)
    return correct / len(y_true)
def only_events(posts):
    """
    Goes through the category elements in each post and looks for an
    "event"-like tag. Returns the list of matching posts (each at most
    once) and, as a second value, the text of every category seen across
    all posts — duplicates included.
    """
    categories = []
    actual_events = []
    for post in posts:
        cats = post.getElementsByTagName('category')
        for c in cats:
            # First text node of the <category> element.
            categories.append(c.childNodes[0].data)
            # Case-insensitive substring match ("Events", "event-ish", ...).
            if "event" in c.childNodes[0].data.lower():
                if post not in actual_events:  # avoid duplicate posts
                    actual_events.append(post)
    return actual_events, categories
def job_dict(ts_epoch, request_template_dict, date_trigger_dict):
    """A date job represented as a dictionary."""
    job = dict(
        name='job_name',
        id='job_id',
        trigger_type='date',
        trigger=date_trigger_dict,
        request_template=request_template_dict,
        misfire_grace_time=3,
        coalesce=True,
        next_run_time=ts_epoch,
        success_count=0,
        error_count=0,
        status='RUNNING',
    )
    return job
def norm_float(value):
    """Converts a float whose decimal part is zero to an integer;
    everything else is returned unchanged."""
    is_whole_float = isinstance(value, float) and value.is_integer()
    return int(value) if is_whole_float else value
def missing_interger(A):
    """
    Find the smallest positive integer (>= 1) missing from array A.

    BUG FIX: the original sorted A in place, mutating the caller's list;
    this version iterates a sorted copy instead. (The misspelled name is
    kept for interface compatibility.)
    """
    missing_int = 1
    for value in sorted(A):
        # Negative values, duplicates and smaller values simply fail this
        # comparison and are skipped.
        if value == missing_int:
            missing_int += 1
    return missing_int
def cln_txt(str_inp: str) -> str:
    """Remove special characters from a string."""
    # Normalise non-breaking spaces and line breaks to plain spaces.
    str_inp = str_inp.replace(u'\xa0', u' ')
    str_inp = str_inp.replace(u'\n', u' ')
    str_inp = str_inp.replace(u'\r', u' ')
    # Drop most ASCII punctuation (note: comma, period, slash and colon
    # are NOT in this blacklist and are kept).
    txt = ''.join([s for s in str_inp
                   if not s in '!"#$%&\'()*+-;<=>?@[\\]^_`{|}~'])
    # NOTE(review): this removes ALL spaces, gluing words together and
    # making the following strip() a no-op — was a double-space collapse
    # (replace('  ', ' ')) intended? Confirm against callers.
    return txt.replace(' ', '').strip()
def is_on(a, b, c):
    """Return true if point c is exactly on the line segment from a to b."""
    def collinear(p, q, r):
        "True iff p, q and r all lie on the same line (zero cross product)."
        return (q[0] - p[0]) * (r[1] - p[1]) == (r[0] - p[0]) * (q[1] - p[1])

    def within(lo, mid, hi):
        "True iff mid is between lo and hi (inclusive, either order)."
        return lo <= mid <= hi or hi <= mid <= lo

    if not collinear(a, b, c):
        return False
    # For a vertical segment x-coordinates are all equal, so compare y
    # instead (this also handles the degenerate all-coincident case).
    if a[0] != b[0]:
        return within(a[0], c[0], b[0])
    return within(a[1], c[1], b[1])
def users_can_join_group(groupId, reasonNeeded=False):
    """
    Given a group, return whether the group is in a joinable state.

    Groups are always joinable: returns True, or with reasonNeeded the
    tuple (True, (100, 'Users can always join')).

    NOTE(review): input validation via assert is stripped under `-O`;
    consider raising ValueError instead.
    """
    assert groupId, "No group ID provided"
    if reasonNeeded:
        result = (True, (100, 'Users can always join'))
    else:
        result = True
    assert result
    return result
def format_default(value: object) -> str:
    """
    Format a default parameter value for display in the usage
    documentation.

    :param value: The value to format.
    :return: A formatted string, with special handling of bools and
        strings to make them more clear.
    """
    # bool must be tested before general formatting (it is also an int).
    if isinstance(value, bool):
        return "Enabled by default." if value else "Disabled by default."
    if isinstance(value, str):
        return f"Default is \"{value}\""
    return f"Default is {value}"
def sections_intersect(sec1, sec2):
    """
    Check if two closed sections [start, end] intersect: one section's
    start must fall inside the other.
    """
    return bool(
        sec1[0] <= sec2[0] <= sec1[1] or sec2[0] <= sec1[0] <= sec2[1]
    )
def is_attr_protected(attrname):
    """Return True if the attribute name is protected: it starts with a
    single underscore but is neither the bare '_' nor a dunder
    ('__x__'). False otherwise.
    """
    if attrname[0] != '_' or attrname == '_':
        return False
    is_dunder = attrname.startswith('__') and attrname.endswith('__')
    return not is_dunder
def _family_and_clan_to_just_clan( family_and_clan): """Converts family_and_clan to just a clan if there is one. Args: family_and_clan: a set of either just a family, or a family and its associated clan. Returns: If family_and_clan is only a family, return family_and_clan. If family_and_clan has a clan, only return the clan. Raises: ValueError if len(family_and_clan != 1 or 2. Also raises if len(family-and_clan) == 2 and there's no clan in it. """ if len(family_and_clan) == 1: return frozenset(family_and_clan) if len(family_and_clan) == 2: for f_or_c in family_and_clan: if f_or_c.startswith('Pfam:CL'): return frozenset([f_or_c]) raise ValueError('family_and_clan was length 2, but did not have a clan in ' 'it. family_and_clan was {}'.format(family_and_clan)) raise ValueError('Expected either one or two values for family_and_clan. ' 'Was {}'.format(family_and_clan))
def stroke_width(width: float):
    """
    Returns an SVG stroke width attribute using the given width.

    :param width: `float` stroke width
    :return: stroke-width="<width>"
    """
    # f-string interpolation stringifies the number directly.
    return f'stroke-width="{width}"'
def _Cycle(value, unused_context, args): """Cycle between various values on consecutive integers.""" # @index starts from 1, so used 1-based indexing return args[(value - 1) % len(args)]
def get_converted_base(base, unconverted_base, converted_base):
    """
    Apply a single-base conversion (e.g. bisulfite C->T), case-insensitively.

    :param base: base that is subject to conversion
    :param unconverted_base: base that should be converted
    :param converted_base: base that should be converted to
    :return: converted_base uppercased when base matches unconverted_base,
        otherwise base uppercased
    """
    if base.upper() == unconverted_base.upper():
        return converted_base.upper()
    return base.upper()
def jsnl_to_augmented_data_dict(jsnlRatings):
    """
    Input: json lines with 'in0' ([user_id]), 'in1' ([book_id]) and
    'label' (rating).

    Output:
        Users dictionary: user ID -> list of (book_id, rating) tuples.
        Books dictionary: book ID -> list of user IDs that rated it.

    BUG FIX: the books dictionary previously appended the whole
    ``row['in0']`` list for existing keys while seeding new keys with its
    elements, producing a mixed flat/nested list. Both paths now store
    the user id (``row['in0'][0]``).
    """
    to_users_dict = dict()
    to_books_dict = dict()
    for row in jsnlRatings:
        user_id = row['in0'][0]
        book_id = row['in1'][0]
        to_users_dict.setdefault(user_id, []).append((book_id, row['label']))
        to_books_dict.setdefault(book_id, []).append(user_id)
    return to_users_dict, to_books_dict
def scale(val, src, dst):
    """
    Scale the given value from the scale of src to the scale of dst.

    val: float or int
    src: tuple (source min, source max)
    dst: tuple (target min, target max)

    example: scale(99, (0.0, 99.0), (-1.0, +1.0)) -> 1.0
    """
    # Normalise into [0, 1] over src, then re-span over dst.
    normalized = float(val - src[0]) / (src[1] - src[0])
    return normalized * (dst[1] - dst[0]) + dst[0]
def prettify_model(m):
    """Prettifies model representation.

    Transposes m into pairs and labels each pair by truthiness:
    both -> "AandB", first only -> "A", second only -> "B",
    neither -> "M".
    """
    def label(pair):
        first, second = pair[0], pair[1]
        if first and second:
            return "AandB"
        if first:
            return "A"
        if second:
            return "B"
        return "M"

    return [label(pair) for pair in zip(*m)]
def shared_secret(a, B, p):
    """Diffie-Hellman shared secret: B**a mod p.

    a is your private key, B is partner's public key, p is the modulus.
    """
    # Three-argument pow performs efficient modular exponentiation.
    return pow(B, a, p)
def string_to_list(line, sep=','):
    """Convert a comma (or sep) separated string to a list.

    If line is None (or any falsy value), return [].
    """
    if not line:
        return []
    return line.split(sep)
def copy_bodyplan(bodyplan):
    """Copy the bodyplan of an arbitrary feed forward network model.

    Args:
        bodyplan: A list of L layer dictionaries with keys "layer", "n",
            "activation", "regval", "lreg" and "desc" (layer index,
            number of units, activation name, L2 regularization
            parameter, regularization order, and description).

    Returns:
        A new list of new layer dicts with those six keys shallow-copied
        (any other keys present in the input are intentionally dropped,
        matching the original behaviour).

    Fix: removed an unused ``import numpy as np``.
    """
    import copy

    copied_keys = ("layer", "n", "activation", "regval", "lreg", "desc")
    return [
        {key: copy.copy(mlayer[key]) for key in copied_keys}
        for mlayer in bodyplan
    ]
def get_embedding_tids(tids, mapping):
    """Obtain token IDs based on our own tokenization, through the
    mapping to BERT tokens; the per-token id lists are flattened."""
    flattened = []
    for tid in tids:
        flattened.extend(mapping[tid])
    return flattened
def normalize_extend_ids(extend_ids):
    """
    Get extend id from the string provided.

    For URL-style ids (containing "http") only the last path segment is
    kept; all other ids pass through unchanged.

    Parameters:
        extend_ids (obj): List of extend ids.

    Returns:
        normalized_ids (obj): Processed list of ids.
    """
    normalized_ids = []
    for extend_id in extend_ids:
        if "http" in extend_id:
            normalized_ids.append(extend_id.split("/")[-1])
        else:
            normalized_ids.append(extend_id)
    return normalized_ids
def _missing_parameter(params_required, params_actual, parent=''): """Recursively search for the first missing parameter.""" for param_name in params_required: if param_name not in params_actual: return '{}.{}'.format(parent, param_name) param_required = params_required.get(param_name) param_actual = params_actual.get(param_name) if isinstance(param_required, dict): param_missing = _missing_parameter(param_required, param_actual, param_name) if param_missing is not None: return '{}.{}'.format(parent, param_missing) return None
def format_long_time(timespan):
    """
    Formats a long timespan (seconds) in a human-readable form, e.g.
    '1d 2h 3m 4s', stopping as soon as the remainder drops below one
    second.
    """
    parts = []
    for suffix, unit_seconds in (('d', 86400), ('h', 3600), ('m', 60), ('s', 1)):
        count = int(timespan / unit_seconds)
        if count > 0:
            timespan %= unit_seconds
            parts.append('%i%s' % (count, suffix))
        if timespan < 1:
            break
    return ' '.join(parts)
def compare_dicts(dict1, dict2):
    """Compare two dictionaries to see what changed.

    Returns {key: (value_in_dict1, value_in_dict2)} for every
    differing/missing key (missing side reported as None), or False when
    the dicts are identical.
    """
    diff = {}
    # Common keys with differing values.
    for key in set(dict1) & set(dict2):
        if dict1[key] != dict2[key]:
            diff[key] = (dict1[key], dict2[key])
    # Keys only in dict1.
    for key in set(dict1) - set(dict2):
        diff[key] = (dict1[key], None)
    # Keys only in dict2.
    for key in set(dict2) - set(dict1):
        diff[key] = (None, dict2[key])
    return diff if diff else False
def find_dict_if_matched_key_val(dict_tmp, key, value):
    """
    Check if a key/value pair matches in a given dictionary.

    Parameters
    ----------
    dict_tmp: (dict) the dictionary to be tested
    key: (str) a key string to look for in dict_tmp
    value: (object) any python object

    Returns
    -------
    bool: True if the key/value pair has been found in dict_tmp
    """
    return key in dict_tmp and dict_tmp[key] == value
def str2list(string):
    """Split a comma-separated string into a list of stripped strings."""
    return [piece.strip() for piece in string.split(",")]
def get_gvk(crd):
    """Extract group, version(s), kind data from a CRD manifest."""
    spec = crd["spec"]
    # Only the first dotted component of the group is kept.
    group = spec["group"].split(".")[0]
    kind = spec["names"]["kind"].lower()
    try:
        version = spec["version"]    # v1beta1 CRDs carry a single version
    except KeyError:
        version = spec["versions"]   # v1 CRDs carry a list of versions
    return group, version, kind
def is_payload_valid(payload, rules):
    """Check whether a payload satisfies the given rules.

    Each rule is ``(key, allowed_types)`` or ``(key, allowed_types,
    optional)``; a rule that is itself a list is a group of alternatives,
    at least one of which must match.

    args: payload, rules
    ret: is_valid
    """
    if type(payload) != dict:
        return False
    for rule in rules:
        if type(rule) == list:
            # Alternative group: at least one member rule must match.
            if not any(
                alt[0] in payload and type(payload[alt[0]]) in alt[1]
                for alt in rule
            ):
                return False
        else:
            satisfied = rule[0] in payload and type(payload[rule[0]]) in rule[1]
            optional = len(rule) > 2 and rule[2]
            if not satisfied and not optional:
                return False
    return True
def _make_specific_identifier(param_name, identifier): # type: (str, str) -> str """ Only adds an underscore between the parameters. """ return "{}_{}".format(param_name, identifier)
def lfnGroup(job):
    """
    _lfnGroup_

    Determine the lfnGroup from the job counter and the agent number
    provided in the job baggage; both default to 0. The result is the
    agent number followed by the counter/1000 zero-padded to 4 digits.
    """
    agent = str(job.get("agentNumber", 0))
    thousands = job.get("counter", 0) // 1000
    return agent + str(thousands).zfill(4)
def backend_address_pool_id(subscription_id, resource_group_name, load_balancer_name, name):
    """Generate the ARM resource id for a load-balancer backend address pool."""
    return (
        f'/subscriptions/{subscription_id}'
        f'/resourceGroups/{resource_group_name}'
        f'/providers/Microsoft.Network/loadBalancers/{load_balancer_name}'
        f'/backendAddressPools/{name}'
    )
def micro_avg_precision(guessed, correct, empty=None):
    """Micro-averaged precision over non-empty guesses.

    Tests:
    >>> micro_avg_precision(['A', 'A', 'B', 'C'],['A', 'C', 'C', 'C'])
    0.5
    >>> round(micro_avg_precision([0,0,0,1,1,1],[1,0,1,0,1,0]), 6)
    0.333333
    """
    predicted = 0
    hits = 0
    for position, guess in enumerate(guessed):
        # Positions guessed as `empty` do not count as predictions.
        if guess == empty:
            continue
        predicted += 1
        if guess == correct[position]:
            hits += 1
    return hits / predicted if predicted > 0 else 0
def directional_transitions_no_closes(a1, a2, h1, h2):
    """Classify the joint movement of the two lines (a, h).

    Returns a list of booleans, one per possible combination of
    flat/up/down movement for `a` and `h` (closes, i.e. -1, excluded).
    """
    a_flat, h_flat = a1 == a2, h1 == h2
    a_up, h_up = a1 < a2, h1 < h2
    a_down, h_down = a1 > a2, h1 > h2
    return [
        a_flat and h_flat,
        a_up and h_flat,
        a_down and h_flat,
        a_flat and h_up,
        a_flat and h_down,
        a_up and h_up,
        a_down and h_down,
        a_up and h_down,
        a_down and h_up,
    ]
def _dict_with_extra_specs(inst_type_query): """Takes an instance, volume, or instance type query returned by sqlalchemy and returns it as a dictionary, converting the extra_specs entry from a list of dicts: 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] to a single dict: 'extra_specs' : {'k1': 'v1'} """ inst_type_dict = dict(inst_type_query) extra_specs = dict([(x['key'], x['value']) for x in inst_type_query['extra_specs']]) inst_type_dict['extra_specs'] = extra_specs return inst_type_dict
def translate(string):
    """Strip out invalid chars ('&', '.', '#') in a single pass."""
    return string.translate(str.maketrans('', '', '&.#'))
def get_space_indices(plaintext):
    """Description: for a given string input, returns a list containing
    the indices in which spaces occur.

    Arguments:
        plaintext (string): string used to either encode or decode.

    Returns:
        space_indices (list): list contains all indices in which spaces occur.
    """
    return [index for index, char in enumerate(plaintext) if char == ' ']
def obsmode_name(mode):
    """Return full name of the observing mode.

    Accepts a single mode string or a list of them; unknown modes are
    returned unchanged.
    """
    full_names = {'fiducial': 'Fiducial',
                  'binospec': 'Binospec',
                  'hectochelle': 'Hectochelle',
                  'desi': 'DESI-like',
                  'gaia': 'Gaia-like',
                  'exgal': 'Extragalactic'}
    if type(mode) is not list:
        mode = [mode]
    # Fall back to the raw mode string when no full name is registered.
    return [full_names.get(m, m) for m in mode]
def replace_country(record: dict, country_name: str, area_name: str):
    """
    Replace country name with an `area_covered` name.

    Promote a string in `area_covered` to `country_territory_area`.
    Applies to records where a WHO recognised country is defined as an
    administrative region of a different country.

    Parameters
    ----------
    record : dict
        Input record.
    country_name : str
        Country name to be matched.
    area_name : str
        Area name to be matched.

    Returns
    -------
    type
        Record with country `area_covered` promotion applied.
    """
    matches = (record['country_territory_area'] == country_name
               and record['area_covered'] == area_name)
    if matches:
        # Promote the area to country level and clear the area field.
        record['country_territory_area'] = area_name
        record['area_covered'] = None
    return record
def check_result(result):
    """Return True when *result* is a dict carrying an 'error' key."""
    return isinstance(result, dict) and 'error' in result
def merge_dicts(*ds):
    """Given any number of dicts, shallow copy and merge into a new dict,
    precedence goes to key value pairs in latter dicts.

    Bug fix: previously the first input dict was used as the accumulator
    and mutated in place (contradicting the docstring's "new dict"), and
    calling with no arguments raised IndexError. Now always builds a
    fresh dict; zero arguments yields ``{}``.

    Notes
    -----
    http://stackoverflow.com/questions/38987/how-to-merge-two-python-dictionaries-in-a-single-expression
    """
    result = {}
    for d in ds:
        result.update(d)
    return result
def delete_comma(sentence): """ This function to delete ',' if there is at the end of sentence Input=sentence Output=sentence """ #init i = 0 #There is a comma in the end, we have to delete it if sentence[-2].endswith(","): sentence[-2] = sentence[-2][:-1] #If the comma is as a string, we append it at the end of the previous word while i < len(sentence): if sentence[i] == ',': sentence = sentence[:i - 1] + [sentence[i - 1] + ','] + sentence[i + 1:] i += 1 return sentence
def check_legacy_headers(headers, legacy_headers):
    """Gather a value from the first matching legacy header.

    Returns the last comma-separated element of the first legacy header
    found in *headers* (looked up lower-cased), or None when none match.
    """
    for header_name in legacy_headers:
        try:
            raw = headers[header_name.lower()]
        except KeyError:
            continue
        return raw.split(',')[-1].strip()
    return None
def _d4(E, f, d3): """ Solve Eq. 23 """ return -f[0] / (f[1] + 0.5 * d3 * f[2] + (d3**2) * f[3] / 6.)