def delete_key(ls, key):
    """
    Question 6.5: Given an array and an input key, remove the key from the array
    """
    write_idx = 0
    for idx, elt in enumerate(ls):
        if elt != key:
            ls[write_idx] = elt
            write_idx += 1
    while write_idx < len(ls):
        ls[write_idx] = None
        write_idx += 1
    return ls
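A quick usage sketch (values made up) showing that non-matching elements are compacted to the front and the freed slots are None-padded:

print(delete_key([3, 1, 3, 2, 3], 3))  # [1, 2, None, None, None]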
def scale_down(threshold, dim):
    """Rescale 0 <= output <= 1"""
    out = float(dim/threshold) if threshold else 0.0
    return out
def subnetToList(listoflist): """ :param listoflist: :return: """ d_temp = [] for l in listoflist: d_temp.append(l.encode('ascii')) print(d_temp) return d_temp
def _validate_import_name(import_name): """ Validates the given `import_name` value. Parameters ---------- import_name : `str` The import name's value. Returns ------- import_name : `str` The validated import name. Raises ------ TypeError If `import_name` was not given as `str` instance. ValueError If `import_name` is an empty string. """ if type(import_name) is str: pass elif isinstance(import_name, str): import_name = str(import_name) else: raise TypeError( f'`import_name` can be given as `str` instance, got {import_name.__class__.__name__}.' ) if not import_name: raise ValueError(f'`import_name` cannot be given as empty string.') return import_name
def get_safe_filename(title): """ Get a safe string to use as a filename :param title: your target file title :return: the filename to use :rtype: str """ keep_chars = (' ', '.', '_') return "".join(c for c in title if c.isalnum() or c in keep_chars).rstrip()
def CYAN(obj): """Format an object into string of cyan color in console. Args: obj: the object to be formatted. Returns: None """ return '\x1b[1;36m' + str(obj) + '\x1b[0m'
def generate_dhm_request(public_key: int) -> str: """Generate DHM key exchange request :param: client's DHM public key :return: string according to the specification """ return "DHMKE:" + str(public_key)
def average(numlist): """ Find average of a list of numbers. Uses for loop to iterate through parameter NUMLIST and adds the sum of each element until the end of the list. AVERAGE then returns the sum divided by the length of NUMLIST. """ numlist_sum = 0 # initialize sum to zero # Iterate over NUMLIST and add each element to the sum for num in numlist: numlist_sum += num # Return NUMLIST_SUM divided by LEN(NUMLIST) to calculate average return numlist_sum / len(numlist)
def _is_fix_comment(line, isstrict): """ Check whether line is a comment line in fixed format Fortran source. :param str line: Line of code to check :param bool isstrict: Whether we are strictly enforcing fixed/free fmt References ---------- :f2008:`3.3.3` """ if line: if line[0] in '*cC!': return True if not isstrict: i = line.find('!') if i != -1: start = line[:i].lstrip() if not start: if i == 5: # line continuation return False return True else: # inline comment or ! is used in character context # inline comments are handled elsewhere pass elif line == '': return True return False
def _noisepeak(amp, npk1): """ Private function intended to insert a new RR interval in the buffer. ---------- Parameters ---------- amp : int Amplitude of the peak under analysis. npk1 : int Actual value of NPK1 parameter defined in Pan-Tompkins real-time R peak detection algorithm (named noise peak). Returns ------- npk1 : float Updated value of NPK1 parameter. """ npk1 = 0.125 * amp + 0.875 * npk1 # npk1 is the running estimate of the noise peak return npk1
def _check_list(str_or_list): """ If `str_or_list` is a list will return it as it is. If it is a str, will return a list with the str inside. Parameters ---------- str_or_list: str or list Returns ------- list """ if str_or_list is None: return None if isinstance(str_or_list, list): return str_or_list if isinstance(str_or_list, str): return [str_or_list] raise ValueError('Expected a `str` or a `list`, ' \ 'got {}.'.format(type(str_or_list)))
def median_rank(PESSI_SORT, OPTI_SORT, A): """ Calculates the median rank of each action. :param PESSI_SORT: Dictionary containing the actions classified according to the pessimistic procedure :param OPTI_SORT: Dictionary containing the actions classified according to the optimistic procedure. :param A: List containing the names of the actions as strings. :return med_rank: Dictionary containing the median rank of each action. The keys are the names of the actions and the values are the median ranks. """ med_rank = {} for a in A: med_rank[a] = (OPTI_SORT[1][a] + PESSI_SORT[1][a]) / 2 return med_rank
def get_alphabetical_value(string: str) -> int: """Get alphabetical value of a given string `string`. Example: 'COLIN' -> 53 = 3 + 15 + 12 + 9 + 14 """ return sum([(ord(char) - ord('a') + 1) for char in string.lower()])
def check_uniqueness_in_rows(board: list): """ Check buildings of unique height in each row. Return True if buildings in a row have unique length, False otherwise. >>> check_uniqueness_in_rows(['***21**', '412453*', '423145*',\ '*543215', '*35214*', '*41532*', '*2*1***']) True >>> check_uniqueness_in_rows(['***21**', '452453*', '423145*',\ '*543215', '*35214*', '*41532*', '*2*1***']) False >>> check_uniqueness_in_rows(['***21**', '412453*', '423145*',\ '*553215', '*35214*', '*41532*', '*2*1***']) False """ for row in board[1:-1]: set_row = set(row[1:-1]) if len(row[1:-1]) != len(set_row): return False return True
def norm_suffix(f, suffix): """ Add a suffix if not present """ p = str(f) e = p[-len(suffix):] if e != suffix: p = p + suffix return p
def percent_used(maximum, used, accuracy=2): """Calculates percentage to a certain accuracy""" int_percent = round(used/maximum*100, accuracy) percentage = str(int_percent) + "%" return percentage
def OwnWork(pagetext): """Check if own work by uploader.""" LowerCasePageText = pagetext.lower() if (LowerCasePageText.find('{{own}}') != -1): return True elif (LowerCasePageText.find('own work') != -1): return True return False
def __sign_str_concat(email, expiration_str): """This is the method to concatenate strings that are to be used in HMAC signature generation. Email should NOT be url encoded. """ return ' '.join([email, expiration_str])
def flatten(L, maxdepth=100):
    """Flatten a list. Stolen from
    http://mail.python.org/pipermail/tutor/2001-January/002914.html"""
    # 2009-09-10 16:54 IJC: Input.
    if type(L) != type([]):
        return [L]
    if L == [] or maxdepth <= 0:
        return L
    else:
        maxdepth -= 1
        # Pass the remaining depth to both recursive calls so maxdepth actually bounds recursion.
        return flatten(L[0], maxdepth=maxdepth) + flatten(L[1:], maxdepth=maxdepth)
def truncate(obj, nlen): """ Convert 'obj' to string and truncate if greater than length""" str_value = str(obj) if len(str_value) > nlen: return str_value[:nlen-3] + '...' return str_value
def listFiles(wildcardstr):
    """
    List the file names in the current directory using the wildcard argument

    eg te.listFiles('*.xml')

    :param wildcardstr: Wildcard used during the file search
    :returns: list of file names that match the wildcard
    """
    import glob
    return glob.glob(wildcardstr)
def rev_reg_id(cd_id: str, tag: str) -> str: """ Given a credential definition identifier and a tag, return the corresponding revocation registry identifier, repeating the issuer DID from the input identifier. :param cd_id: credential definition identifier :param tag: tag to use :return: revocation registry identifier """ return '{}:4:{}:CL_ACCUM:{}'.format(cd_id.split(':', 1)[0], cd_id, tag)
def parse_ucsc_file_index(stream, base_url): """Turn a UCSC DCC files.txt index into a dictionary of name-value pairs """ file_index = {} for line in stream: filename, attribute_line = line.split('\t') filename = base_url + filename attributes = {} for assignment in attribute_line.split(';'): name, value = assignment.split('=') attributes[name.strip()] = value.strip() file_index[filename] = attributes return file_index
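A minimal sketch of the expected input: each line is a tab-separated filename plus a semicolon-separated attribute list. The base URL, file name, and attribute names here are made up for illustration:

lines = ["wgEncodeFile.bam\tmd5sum=abc123; size=1024"]
print(parse_ucsc_file_index(lines, "http://example.org/"))
# {'http://example.org/wgEncodeFile.bam': {'md5sum': 'abc123', 'size': '1024'}}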
def find_by_name(seq, name): """Returns the first element in seq with the given name.""" for item in seq: if item['name'] == name: return item return None
def clusters_are_identical(one, two): """Tests if two collections of Subject objects are identical. This function compares clusters element-wise for IPG numbers, returning True if all are identical. """ if not len(one) == len(two): return False for subA, subB in zip(one, two): if not subA.ipg or not subB.ipg: return False if subA.ipg != subB.ipg: return False return True
def remove_spaces(line): """ Removes double spaces """ line = line.lstrip() result = "" for i, ch in enumerate(line): if i+1 == len(line): result = result + ch elif (ch == " " and (line[i+1] == " " or line[i+1] == "\n")): pass else: result = result + ch return result
def parse_plot_args(*args, **options): """Parse the args the same way plt.plot does.""" x = None y = None style = None if len(args) == 1: y = args[0] elif len(args) == 2: if isinstance(args[1], str): y, style = args else: x, y = args elif len(args) == 3: x, y, style = args return x, y, style
def styles(): """Creates a list of standard styles.""" return [ ("STANDARD", "txt"), ("OpenSans-Light", "OpenSans-Light.ttf"), ("OpenSans-Light-Italic", "OpenSans-LightItalic.ttf"), ("OpenSans", "OpenSans-Regular.ttf"), ("OpenSans-Italic", "OpenSans-Italic.ttf"), ("OpenSans-SemiBold", "OpenSans-SemiBold.ttf"), ("OpenSans-SemiBoldItalic", "OpenSans-SemiBoldItalic.ttf"), ("OpenSans-Bold", "OpenSans-Bold.ttf"), ("OpenSans-BoldItalic", "OpenSans-BoldItalic.ttf"), ("OpenSans-ExtraBold", "OpenSans-ExtraBold.ttf"), ("OpenSans-ExtraBoldItalic", "OpenSans-ExtraBoldItalic.ttf"), ("OpenSansCondensed-Bold", "OpenSansCondensed-Bold.ttf"), ("OpenSansCondensed-Light", "OpenSansCondensed-Light.ttf"), ("OpenSansCondensed-Italic", "OpenSansCondensed-LightItalic.ttf"), ("LiberationSans", "LiberationSans-Regular.ttf"), ("LiberationSans-Bold", "LiberationSans-Bold.ttf"), ("LiberationSans-BoldItalic", "LiberationSans-BoldItalic.ttf"), ("LiberationSans-Italic", "LiberationSans-Italic.ttf"), ("LiberationSerif", "LiberationSerif-Regular.ttf"), ("LiberationSerif-Bold", "LiberationSerif-Bold.ttf"), ("LiberationSerif-BoldItalic", "LiberationSerif-BoldItalic.ttf"), ("LiberationSerif-Italic", "LiberationSerif-Italic.ttf"), ("LiberationMono", "LiberationMono-Regular.ttf"), ("LiberationMono-Bold", "LiberationMono-Bold.ttf"), ("LiberationMono-BoldItalic", "LiberationMono-BoldItalic.ttf"), ("LiberationMono-Italic", "LiberationMono-Italic.ttf"), ]
def escape_path(path):
    """Returns path with escaped space and apostrophe"""
    # Use "\\ " so the backslash is an explicit escape, not an invalid escape sequence.
    return path.replace("'", "\\'").replace(" ", "\\ ")
def dictionary_table1(): """Creates dictionary to rename variables for summary statistics table. :return: dic """ dic = {"gewinn_norm": "Rank improvement (normalized)", "listenplatz_norm": "Initial list rank (normalized)", "age": "Age", 'non_university_phd': "High school", 'university': 'University', 'phd': 'Phd', 'architect': 'Architect', 'businessmanwoman': "Businesswoman/-man", 'engineer': "Engineer", 'lawyer': "Lawyer", 'civil_administration': "Civil administration", "teacher": "Teacher", 'employed': "Employed", 'selfemployed': "Self-employed", "student": "Student", 'retired': "Retired", 'housewifehusband': "Housewife/-husband"} return dic
def get_attribute(obj, value): """ Normally the result of list_items for listviews are a set of model objects. But when you want a GROUP_BY query (with 'values' method), than the result will be a dict. This method will help you find an item for either objects or dictionaries. """ if type(obj) == dict: return dict.get(obj, value) else: return getattr(obj, value)
def add_prefix(inputs, prefix): """Add prefix for dict. Args: inputs (dict): The input dict with str keys. prefix (str): The prefix to add. Returns: dict: The dict with keys updated with ``prefix``. """ outputs = dict() for name, value in inputs.items(): outputs[f"{prefix}.{name}"] = value return outputs
def viewitems(obj, **kwargs): """ Function for iterating over dictionary items with the same set-like behaviour on Py2.7 as on Py3. Passes kwargs to method.""" func = getattr(obj, "viewitems", None) if not func: func = obj.items return func(**kwargs)
def guesscol(code): """ Not used in fine, used to validate given examples before I realized that the whole boardingpass was a binary number when I re-read the instructions carefully. >>> guesscol("RLR") 5 >>> guesscol("RRR") 7 >>> guesscol("RLL") 4 """ code = code.translate(code.maketrans("RL", "10")) return int(code, 2)
def setCompare(iter1, iter2): """ Compares two groups of objects, returning the sets: onlyIn1, inBoth, onlyIn2 """ s1 = set(iter1) s2 = set(iter2) intersect = s1 & s2 return s1 - intersect, intersect, s2 - intersect
def freq_to_compare(f): """Allow a frequency of type [float] to be compared with ==""" f = int(round(f)) return f
def _get_num_to_fold(scale_factor: float, ngates: int) -> int:
    """Returns the number of gates to fold to achieve the desired (approximate) scale factor.

    Args:
        scale_factor: Floating point value to scale the circuit by.
        ngates: Number of gates in the circuit to fold.
    """
    return int(round(ngates * (scale_factor - 1.0) / 2.0))
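The formula appears to assume that each fold adds two gates, so folding num_to_fold gates grows the count from ngates to roughly scale_factor * ngates. A small check with made-up numbers:

# 10 gates, target scale factor 3.0: fold all 10 gates (10 + 2*10 = 30 = 3.0 * 10).
print(_get_num_to_fold(3.0, 10))  # 10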
def cut_arguments(url: str) -> str: """Delete query arguments from the link. """ mark = url.find('?') if mark != -1: url = url[:mark] return url
def _format_data_as_table(data): """Format data as a table """ if isinstance(data, dict): data = [data] # Get common keys common_keys = {key for key, val in data[0].items() if isinstance(val, str)} for idx in range(1, len(data)): common_keys = common_keys.intersection(set(data[idx].keys())) common_keys = sorted(common_keys) # Construct output as table column_width = {val: len(data[0][val]) for val in common_keys} row_format = ''.join(['{:' + str(width) + '}\t\t' for _, width in column_width.items()]) title = row_format.format(*column_width.keys()) separator_column_width = ['-' * width for _, width in column_width.items()] separator = row_format.format(*separator_column_width) formatted_data = title + '\n' + separator # Construct each row data for entry in data: row_data = [entry[key] for key in common_keys] formatted_data += '\n' + row_format.format(*row_data) return formatted_data
def WaitForResponseMiddleware(app, wsgi_env, start_response): """WSGI middleware wrapper that waits until response is ready. Some middlewares here and some external middlewares rely on behavior that app calls finish when response is ready and they do post-processing that should be done when handler has returned all response. This middleware added for compatibility with such middlewares, once all users migrated to Python3, all middlewares should use `yield app()` instead of `return app()` and this one should be removed. Args: app: (callable) a WSGI app per PEP 3333. wsgi_env: see PEP 3333 start_response: see PEP 3333 Returns: A wrapped <app>, which is also a valid WSGI app. """ return list(app(wsgi_env, start_response))
def get_from(items, data): """Recursively get value from dictionary deep key. Args: items (list): List of dictionary keys data (dict): Portion of dictionary to operate on Returns object: Value from dictionary Raises: KeyError: If key does not exist """ if not isinstance(data, dict): raise KeyError item = items.pop(0) data = data[item] return get_from(items, data) if items else data
def safe_get(dct, *keys, **kwargs): """Return the dict value for the given ordered keys. Args: dct (dict): the dictionary that will be consulted. *keys: positional arguments which contains the key path to the wanted value. **kwargs: default -> If any key is missing, default will be returned. Examples: >>> my_dict = {"a": {"b": {"c": "my_value"}}} >>> safe_get(my_dict, "a", "b", "c") 'my_value' >>> safe_get(my_dict, "a", "z") >>> safe_get(my_dict, "a", "z", default="my_other_value") 'my_other_value' >>> my_other_dict = {"a": ["first"]} >>> safe_get(my_other_dict, "a", 0) 'first' >>> safe_get(my_other_dict, "a", 1) >>> safe_get(my_other_dict, "a", 1, default="second") 'second' Returns: Any: the dictionary value for the given ordered keys. """ default_value = kwargs.get("default") for key in keys: try: dct = dct[key] except (KeyError, IndexError): return default_value return dct
def split_alpha_numeric(s):
    """Take a string containing letters followed by numbers and split them."""
    not_numbers = s.rstrip("0123456789")
    numbers = s[len(not_numbers):]
    return not_numbers, int(numbers)
def question_answer_to_evidence(question_struct_item, observation_value):
    """Return new evidence obtained via answering the one item contained in a question
    with the given observation value (status)."""
    return [{'id': question_struct_item['id'], 'choice_id': observation_value}]
def _encodeMode(ori_str, mode_dict): """Replace the ori_str to corresponding mode in mode_dict""" for mode, value in mode_dict.items(): ori_str = ori_str.replace(mode, value) return ori_str.strip(",")
def align_origin(origin, w, h, align=('left', 'top')):
    """Calculates size of text box and returns an origin as if aligned from the given origin.
    Accepted alignments are: left, center, right; top, center, bottom"""
    if align == ('left', 'top'):
        return origin
    aligns = {'left': 0, 'center': .5, 'right': 1, 'top': 0, 'bottom': 1}
    ox, oy = origin
    x_align, y_align = align
    return (ox - (w * aligns[x_align]), oy + (h * aligns[y_align]))
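For example, with a made-up box of width 50 and height 20, aligning center/bottom shifts the origin left by half the width and down by the full height:

print(align_origin((100, 100), 50, 20, align=('center', 'bottom')))  # (75.0, 120)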
def _infer_coreml_output_shape(tf_shape): """Infer CoreML output shape from TensorFlow shape. """ shape = [] if len(tf_shape) == 1: shape = [tf_shape[0], 1, 1] elif len(tf_shape) == 2: if tf_shape[0] == 1: # (B,C) shape = [tf_shape[1]] else: shape = None elif len(tf_shape) == 3: # since output shape is not required by CoreML and rank-3 tensor in TF is ambiguous, we do not assign a shape shape = None elif len(tf_shape) == 4: assert tf_shape[0] == 1, "Output 4D tensor's first dimension (Batch) " + \ "must be 1." shape = [tf_shape[3], tf_shape[1], tf_shape[2]] #(C,H,W) elif len(tf_shape) == 0: # scalar shape = [1] else: raise ValueError('Unrecognized TensorFlow output shape ' + str(tf_shape)) return shape
def reorder_by_run_ids(p_files, p_order):
    """Function used to reorder images by their run-id IDS tag.

    The images are sorted according to the input parameters. If more images are available,
    the remaining ones are sorted in ascending order.

    Parameters
    ----------
    p_files : list of string
        List of image paths - containing a 'run-' id tag, to be reordered
    p_order : list of int
        List of expected run id order.

    Examples
    --------
    >>> in_files = ['sub-01_run-2_T2w.nii.gz', 'sub-01_run-5_T2w.nii.gz', 'sub-01_run-3_T2w.nii.gz', 'sub-01_run-1_T2w.nii.gz']
    >>> my_order = [1,5,3]
    >>> reorder_by_run_ids(in_files, my_order)
    """
    orig_order = [[int(f.split('_run-')[1].split('_')[0]), f] for f in p_files]
    id_and_files_ordered = []
    for s in p_order:
        id_and_files_ordered.append(
            [[s, f[1]] for f in orig_order if ('run-' + str(s)) in f[1]][0])
    # Todo: this if statement could be ignored to remove extra series.
    # if len(p_files) > len(p_order):
    #     remainings = [s for s in orig_order if s[0] not in p_order]
    #     remainings.sort()
    #     id_and_files_ordered = id_and_files_ordered + remainings
    return [i[1] for i in id_and_files_ordered]
def get_regression_predictions(input_feature, intercept, slope): """ Purpose: Compute predictions Input : input_feature (x), intercept (w0), slope (w1) Output : Predicted output based on estimated intercept, slope and input feature """ predicted_output = intercept + slope * input_feature return(predicted_output)
def get_unknown_labels_to_keep(labels_curr, pattern_dict):
    """
    Filter out the labels that match the configuration file

    :param labels_curr: known labels
    :param pattern_dict: dictionary containing all the match rules
    :type labels_curr: list
    :type pattern_dict: dictionary
    :returns: list of labels
    :rtype: list
    """
    ret = []
    pat = []
    for entry in pattern_dict:
        pat.append(entry)
    for l in labels_curr:
        if l in pat:
            continue
        else:
            ret.append(l)
    return ret
def _stringify(values): """internal method: used to convert values to a string suitable for an xml attribute""" if type(values) == list or type(values) == tuple: return " ".join([str(x) for x in values]) elif type(values) == type(True): return "1" if values else "0" else: return str(values)
def get_horizontal_table_type(freq_inds, most_common_diff, num_cols, num_diff, count_disorders):
    """
    - get the type of a horizontal table; the type will decide the following extraction process.
      count_disorders is not very useful currently, because we know that all tables we collected
      contain 'disorder', but it is added for better scalability.

    Output: an integer table-type label (tbl_label). A standard table type means we can go ahead
    to extract adrs; other labels mean that some necessary exception handling steps are needed.
    """
    # If soc_label equals 0, there is no separate column for the system of class itself,
    # therefore:
    soc_label = num_cols - len(freq_inds)
    if most_common_diff != 1:
        # mislabelled table type, the table should be vertical
        tbl_label = -1
    elif soc_label > 1:
        # a type of exception that is similar to 7236, the extraction can still be easily done,
        # but a careful check of the frequency list is needed
        tbl_label = 2
    elif soc_label == 0 and count_disorders > 0:
        # 'system of class' is not put into a separate column
        tbl_label = 0
    elif num_diff == 2:
        # standard horizontal table, very easy to extract
        tbl_label = 1
    elif num_diff < 0 and num_diff > -5:
        # one more step of checking the table structure, then just like standard table extraction
        tbl_label = 1
    else:
        # first we need to use soc names as marks, and for rows that exceed the number of columns
        # we simply remove some cells, 410, 1922
        tbl_label = 3
    return tbl_label
def _edge_label(ci: int, pi: int, debug: bool) -> str: """Generate edge label given""" return '<<font color=\"' + ('green' if debug else 'white') + '\">' + \ 'ci=' + str(ci) + ' pi=' + str(pi) + '</font>>'
def scrub_db_specific_data(document): """ :type document: dict :return: """ document.pop('_id', None) document.pop('_rev', None) return document
def split_tagblock(nmea): """ Split off the tagblock from the rest of the message Note that if the nmea is a concatenated multipart message then only the tagblock of the first message will be split off """ tagblock = '' if nmea.startswith("\\") and not nmea.startswith("\\!"): tagblock, nmea = nmea[1:].split("\\", 1) return tagblock, nmea
def update(event, context): """Noop.""" return event["PhysicalResourceId"], {}
def parse_channel_info(data): """ Expected format: `<channel_id>,<from_addr>,<api_url>;<channel_id>,<from_addr>,<api_url>;` Returns a tuple. The first item is the assumed default junebug channel which can be used as a default fallback for JUNEBUG_DEFAULT_CHANNEL_ID. The second item is the dictionary that can be passed to JUNEBUG_CHANNELS :returns: tuple """ channels = {} default_channel_id = None for channel in data.strip().split(';'): channel_id, from_addr, api_url = channel.split(',') if not default_channel_id: default_channel_id = channel_id.strip() channels[channel_id.strip()] = { 'FROM_ADDRESS': from_addr.strip(), 'API_URL': api_url.strip(), } return default_channel_id, channels
def Base64Length(byte_length):
    """
    Calculate the maximum length needed to stock a base64 encoded stream of bytes.

    @param byte_length: Length of byte stream.
    @return: The length needed in byte.
    """
    # Integer division so the result is a whole number of bytes, rounded up to a full 4-byte block.
    return ((byte_length + 3 - (byte_length % 3)) // 3) * 4
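A hedged sanity check against the standard library: since the function advertises a maximum length, the estimate should never be smaller than the real encoded size.

import base64
for n in range(1, 16):
    assert Base64Length(n) >= len(base64.b64encode(b"\x00" * n))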
def substructure_dict_to_substructure_vec(substructure_dict): """convert the substructure dictionary to vector""" substructure_vec = [] for i in sorted([int(j) for j in list(substructure_dict.keys())]): print(i, len(substructure_dict[str(i)])) substructure_vec.extend(substructure_dict[str(i)]) print(len(substructure_vec)) return substructure_vec
def find_old_list_count(list_id, all_lists_state): """ Returns the last list size saved for the provided list :param list_id: :param all_lists_state: :return: """ last_size = 0 for x in all_lists_state: if x['id'] == list_id: last_size = x['member_count'] return last_size
def write_file(file_path, data): """ Write data to a specified file path by appending """ try: with open(file_path, 'a+') as fp: fp.write(data) return True except Exception as err: raise err return False
def list_shape(obj): """retrieves the shape of list - (len(list),)""" return (len(obj),)
def make_album(artist_name, album_title, num_of_songs = None): """Builds a dictionary describing a music album""" album = {'Artist': artist_name.title(), 'Album': album_title.title()} if num_of_songs: album['num_of_songs'] = num_of_songs return album
def _IsCustomMeta(header): """Returns true if header (which must be lowercase) is a custom header.""" return header.startswith('x-goog-meta-') or header.startswith('x-amz-meta-')
def calculate_precision_recall(tp_totals, fp_totals, fn_totals): """ Calculate list of precision and recall values from lists of true pos., false pos., false neg. values. """ precision = [tp / (tp+fp) if (tp+fp)>0 else 0 for (tp, fp) in zip(tp_totals, fp_totals)] recall = [tp / (tp+fn) if (tp+fn)>0 else 0 for (tp, fn) in zip(tp_totals, fn_totals)] return precision, recall
def linear_reward_function(choice_history_buffer, causal_prefetch_item, use_window): """ Linear reward function, positive for item that was used, negative one if not used """ if causal_prefetch_item is None: return -1 prefetch_delay = choice_history_buffer.step - causal_prefetch_item.step return (use_window - prefetch_delay) / use_window
def find_neighbor(x, y): """ :param x: (int) X coordinates of cur_letter :param y: (int) Y coordinates of cur_letter :return: (lst) Coordinates(Tuple) of all neighbor letters """ neighbor_lst = [] for i in range(-1, 2): for j in range(-1, 2): if 0 <= x + i < 4: if 0 <= y + j < 4: if not i == j == 0: # Not itself neighbor_lst.append((x + i, y + j)) return neighbor_lst
def generateContentRange(tup): """tup is (rtype, start, end, rlen) rlen can be None. """ rtype, start, end, rlen = tup if rlen == None: rlen = '*' else: rlen = int(rlen) if start == None and end == None: startend = '*' else: startend = '%d-%d' % (start, end) return '%s %s/%s' % (rtype, startend, rlen)
def assoc(k, v, orig): """Given an original dictionary orig, return a cloned dictionary with `k` set to `v`""" out = orig.copy() out[k] = v return out
def parse_peprec_mods(mods, ptm_list): """Parse PEPREC modification string out of MSP Mod string.""" if mods.split("/")[0] != "0": num_mods = mods[0] mod_list = [mod.split(",") for mod in mods.split("/")[1:]] peprec_mods = [] for location, aa, name in mod_list: if not (location == "0" and name == "iTRAQ"): location = str(int(location) + 1) peprec_mods.append(location) peprec_mods.append(name) if name not in ptm_list: ptm_list[name] = 1 else: ptm_list[name] += 1 peprec_mods = "|".join(peprec_mods) else: peprec_mods = "-" return peprec_mods
def count_bigram(sequence, first, second): """ counts the number of occurrences of a given bigram in a sequence. Parameters ---------- sequence : str The DNA sequence to be analysed. first : char the first letter of the bigram to be considered. second : char the second letter of the bigram to be considered. Returns ------- int the number of occurrences of the bigram. """ count = 0 cmp = False for char in sequence: if cmp is True: cmp = False if char == second: count += 1 if char == first: cmp = True return count
def _inside_this_control_function(cf, file, line): """Determine action to which string belong.""" if cf['file'] == file and cf['begin'] <= line <= cf['end']: return True else: return False
def as_list(x): """A function to convert an item to a list if it is not, or pass it through otherwise Parameters ---------- x : any object anything that can be entered into a list that you want to be converted into a list Returns ------- list a list containing x """ if isinstance(x, list): return x return [x]
def slice(d, keys): """return dictionary with given keys""" result = dict() for k in keys: if k in d: result[k] = d[k] return result
def __ConvertOSBGToLocal(easting, northing, Eo, No, one_over_CSF):
    """
    Convert OSBG36 Easting-Northing to local grid coordinates

    :param easting: easting in OSBG36
    :param northing: northing in OSBG36
    :param Eo: delta easting of local grid
    :param No: delta northing of local grid
    :param one_over_CSF: reciprocal CSF (combined scale factor, = 1/CSF)
    :returns: todo
    """
    # x-coord in local grid
    x_local = (easting - Eo) * one_over_CSF
    # y-coord in local grid
    y_local = (northing - No) * one_over_CSF
    # return a tuple of coordinates
    return (x_local, y_local)
def remove_empty_from_dict(d): """Removes empty keys from dictionary""" if type(d) is dict: return dict((k, remove_empty_from_dict(v)) for k, v in d.items() if v and remove_empty_from_dict(v)) elif type(d) is list: return [remove_empty_from_dict(v) for v in d if v and remove_empty_from_dict(v)] else: return d
def __extract_network_from_docker_container(docker_container):
    """Extract network information for a docker container."""
    network_bindings = docker_container.get("networkBindings")
    bindings = []
    if network_bindings:
        bindings = [
            network["bindIP"] + " (" + str(network["hostPort"]) + "[host] -> "
            + str(network["containerPort"]) + "[network])"
            for network in network_bindings
        ]
    # `bindings is []` would never match a fresh list literal, so test emptiness instead.
    if not bindings:
        bindings = "no network binding"
    else:
        bindings = ", ".join(bindings)
    return docker_container["name"] + " -> " + bindings
def get_attr_values(data):
    """Slice all the attribute lists, which are lists of single key/value dicts mapped to the keys
    in a sofine data set, to return the value from each dict in each list.

    * `data` - `dict of string -> list of dict`. The data type of the attributes associated with
      a key in a returned data set."""
    out = []
    attrs_rows = data.values()
    for attrs_row in attrs_rows:
        for attr in attrs_row:
            # Wrap in list() so this also works on Python 3, where dict.values() is a view.
            out.append(list(attr.values())[0])
    return out
def GetCppPtrType(interface_name): """Returns the c++ type associated with interfaces of the given name. Args: interface_name: the name of the interface you want the type for, or None. Returns: the c++ type which wayland will generate for this interface, or void* if the interface_name is none. We use "struct foo*" due to a collision between typenames and function names (specifically, wp_presentation has a feedback() method and there is also a wp_presentation_feedback interface). """ if not interface_name: return 'void*' return 'struct ' + interface_name + '*'
def andGate(a, b): """ Input: a, b Two 1 bit values as 1/0 Returns: 1 if both incoming bits are 1 otherwise 0 """ if a == 1 and b == 1: return 1 else: return 0
def _slugify(text: str) -> str: """Turns the given text into a slugified form.""" return text.replace(" ", "-").replace("_", "-")
def egg_drop(n: int, k: int) -> int:
    """
    What is the minimum number of trials we need to drop eggs to determine which floors of a
    building are safe for dropping eggs, given n eggs and k floors?

    :param n: number of eggs
    :param k: number of floors
    :return: the minimum number of trials

    >>> egg_drop(1, 5)
    5
    >>> egg_drop(2,36)
    8
    """
    # Base cases.
    # If we have one egg, we need to try each floor.
    if n == 1:
        return k
    # If we have one floor, we need to try it.
    if k == 1 or k == 0:
        return k
    # Drop an egg from floor x:
    # 1. If it breaks, then we know the critical floor is < x, and we have n-1 eggs left to search
    #    the x-1 floors below, so E(n-1, x-1).
    # 2. If it doesn't break, then we know the critical floor is > x (which means k-x floors to try)
    #    and we have n eggs left to do it, so E(n, k-x).
    return 1 + min([max(egg_drop(n-1, x-1), egg_drop(n, k-x)) for x in range(1, k+1)])
def url_gen_dicter(inlist, filelist): """ Prepare name:URL dicts for a given pair of names and URLs. :param inlist: List of dictionary keys (OS/radio platforms) :type inlist: list(str) :param filelist: List of dictionary values (URLs) :type filelist: list(str) """ pairs = {title: url for title, url in zip(inlist, filelist)} return pairs
def prime_factorize(loop_bounds):
    """
    Factorize the original loop bounds into a list of prime factors.

    Input: a list of loop bounds
    Output: a super-list of prime factor lists
    """
    prime_factor_list = []
    for loop_bound in loop_bounds:
        prime_factors = []
        while loop_bound % 2 == 0:
            prime_factors.append(2)
            loop_bound //= 2  # integer division keeps loop_bound an int
        if loop_bound > 3:
            for i in range(3, loop_bound, 2):
                while loop_bound % i == 0:
                    prime_factors.append(i)
                    loop_bound //= i
        if loop_bound > 2:
            prime_factors.append(loop_bound)
        prime_factor_list.append(prime_factors)
    return prime_factor_list
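A small usage example with made-up bounds:

print(prime_factorize([12, 7, 9]))  # [[2, 2, 3], [7], [3, 3]]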
def zern_name(i):
    """Return a human-readable text name corresponding to some Zernike term as specified by `i`,
    the index

    Only works up to term 22, i.e. 5th order spherical aberration.
    """
    names = ['Null', 'Piston', 'Tilt X', 'Tilt Y', 'Focus', 'Astigmatism 45', 'Astigmatism 0',
             'Coma Y', 'Coma X', 'Trefoil Y', 'Trefoil X', 'Spherical', '2nd Astig 0',
             '2nd Astig 45', 'Tetrafoil 0', 'Tetrafoil 22.5', '2nd coma X', '2nd coma Y',
             '3rd Astig X', '3rd Astig Y', 'Pentafoil X', 'Pentafoil Y', '5th order spherical']
    if i < len(names):
        return names[i]
    else:
        return "Z%d" % i
def normalize_line_endings(string, eol): """Return a string with consistent line endings.""" string = string.replace("\r\n", "\n").replace("\r", "\n") if eol != "\n": string = string.replace("\n", eol) return string
def dotprod(K, L):
    """dot product of two lists

    Parameters:
        K (Iterable): a numerical list
        L (Iterable): a numerical list
    Return:
        p (float): dot product of K and L
    """
    if len(K) == len(L):
        try:
            return sum([x * y for x, y in zip(K, L)])
        except TypeError:
            # multiplying non-numeric elements raises TypeError, not ValueError
            print('elements of K and L are not all numeric')
            exit()
    else:
        print("K and L are not of the same length")
def first(iterable, default=None, key=None): """ Return the first element in the iterable """ if key is None: for el in iterable: return el else: for key_, el in iterable: if key == key_: return el return default
def sentence_case(sentence, exciting=False): """Capitalise the first letter of the sentence and add a full stop.""" sentence = sentence[0].upper() + sentence[1:] if sentence[-1] in {'.', '!', '?'}: return sentence elif exciting: return sentence + "!" else: return sentence + "."
def strip_str(string): """Strip string.""" return string.strip('"').strip("'").strip()
def extract_destination_address(params): """ Extract 'destination' item address (index or UID) from the list of parameters. Returns the list of remaining parameters. The source address is represented by 1 or 2 parameters: if 1st parameter is a keywords ``front``, ``back``, or integer number, then the parameter represents a positional address (index); if the 1st parameter is a keyword ``before`` or ``after``, the 2nd parameter is considered to represent item UID (string). If the 1st parameter can not be converted to ``int`` or equal to one of the keywords, it is considered that the address is not found. Parameters ---------- params : list(str) List of parameters. The 1st and optionally the 2nd parameters are interpreted as a destination address. Returns ------- dict Dictionary that contains destination address. Elements: ``pos`` - positional address (value is int or a string from the set ``front``, ``back``), ``before_uid`` or ``after_uid`` - uid of the item preceding or following the destination for the item in the queue (value is a string representing UID). Empty dictionary if no address is found. list(str) List of the remaining parameters. Raises ------ IndexError Insufficient number of parameters is provided. """ n_used = 0 pos, uid, uid_key = None, None, None if params[0] in ("front", "back"): pos = params[0] n_used = 1 elif params[0] in ("before", "after"): uid = params[1] # Keys are "before_uid" and "after_uid" uid_key = f"{params[0]}_uid" n_used = 2 else: try: pos = int(params[0]) n_used = 1 except Exception: ... if pos is not None: addr_param = {"pos": pos} elif uid is not None: addr_param = {uid_key: uid} else: addr_param = {} return addr_param, params[n_used:]
def max_common_sequence_length(seq, indices, cur_size): """ Return the length of the longest commmon sequence for all subseq starting at index indices[0], indices[1], ....etc """ seqlen = len(seq) j = cur_size+1 while j < seqlen-indices[-1]: firstnt = seq[indices[0]+j] if not all(seq[i+j]==firstnt for i in indices[1:]): return j j += 1 return j
def manhat_dist(a, b): """Return manhattan distance between a and b""" return abs(a[0] - b[0]) + abs(a[1] - b[1]) + abs(a[2] - b[2])
def isOdd(x): """ Returns True if number x is odd :param x: :return: """ if x % 2 == 0: return False else: return True
def to_local_datetime(dt): """ datetime.isoformat does not append +0000 when using UTC, javascript needs it, or the date is parsed as if it were in the local timezone """ if not dt: return None ldt = dt.isoformat() return ldt if ldt[-6] == "+" else "%s+0000" % ldt
def get_topic_name(prefix, table, operation): """Create a topic name. The topic name needs to be synced between the agents. The agent will send a fanout message to all of the listening agents so that the agents in turn can perform their updates accordingly. :param prefix: Common prefix for the agent message queues. :param table: The table in question (TUNNEL, LOOKUP). :param operation: The operation that invokes notification (UPDATE) :returns: The topic name. """ return '%s-%s-%s' % (prefix, table, operation)
def scale_to_total(value):
    """\
    Convert a mapping of distinct quantities to a mapping of proportions of the total quantity.
    """
    total = float(sum(value.values()))
    return {k: (v / total) for k, v in value.items()}
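For example, converting made-up counts to proportions:

print(scale_to_total({"a": 1, "b": 3}))  # {'a': 0.25, 'b': 0.75}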
def is_run_el(obj): """Object contains executable method 'run'.""" return hasattr(obj, 'run') and callable(obj.run)
def opcode_modes(instruction):
    """
    Return (opcode, (mode_1st, mode_2nd, mode_3rd)) from an instruction
    """
    digits = f"{instruction:05}"  # five digits e.g. 101 => '00101'
    opcode = int(digits[-2:])
    modes = (int(digits[-3]), int(digits[-4]), int(digits[-5]))
    return (opcode, modes)
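For example, the Intcode-style instruction 1002 decodes to opcode 2 with the second parameter in immediate mode:

print(opcode_modes(1002))  # (2, (0, 1, 0))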
def compute_merged_total_length(ref, hyp): """Compute the total length of the union of reference and hypothesis. Args: ref: a list of tuples for the ground truth, where each tuple is (speaker, start, end) of type (string, float, float) hyp: a list of tuples for the diarization result hypothesis, same type as `ref` Returns: a float number for the union total length """ # Remove speaker label and merge. merged = [(element[1], element[2]) for element in (ref + hyp)] # Sort by start. merged = sorted(merged, key=lambda element: element[0]) i = len(merged) - 2 while i >= 0: if merged[i][1] >= merged[i + 1][0]: max_end = max(merged[i][1], merged[i + 1][1]) merged[i] = (merged[i][0], max_end) del merged[i + 1] if i == len(merged) - 1: i -= 1 else: i -= 1 total_length = 0.0 for element in merged: total_length += element[1] - element[0] return total_length
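A tiny worked example with two overlapping segments (speaker labels are made up and ignored by the merge): the union of [0, 2] and [1, 3] has length 3.

ref = [("spk_A", 0.0, 2.0)]
hyp = [("spk_1", 1.0, 3.0)]
print(compute_merged_total_length(ref, hyp))  # 3.0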