def getEditDist(str1, str2): """ return edit distance between two strings of equal length >>> getEditDist("HIHI", "HAHA") 2 """ assert(len(str1)==len(str2)) str1 = str1.upper() str2 = str2.upper() editDist = 0 for c1, c2 in zip(str1, str2): if c1!=c2: editDist +=1 return editDist
def _clean_accounting_column(x: str) -> float:
    """
    Perform the logic for the "accounting" cleaning style.

    This is a private function, not intended to be used outside of
    ``currency_column_to_numeric``. It is intended to be used in a pandas
    `apply` method.

    :param x: A string representing currency.
    :returns: A float representing currency.
    """
    y = x.strip()
    y = y.replace(",", "")
    y = y.replace(")", "")
    y = y.replace("(", "-")
    if y == "-":
        return 0.00
    return float(y)
def bencode(data):
    """
    Encoder implementation of the Bencode algorithm (Bittorrent).
    """
    if isinstance(data, int):
        return b'i%de' % data
    elif isinstance(data, (bytes, bytearray)):
        return b'%d:%s' % (len(data), data)
    elif isinstance(data, str):
        return b'%d:%s' % (len(data), data.encode())
    elif isinstance(data, (list, tuple)):
        encoded_list_items = b''
        for item in data:
            encoded_list_items += bencode(item)
        return b'l%se' % encoded_list_items
    elif isinstance(data, dict):
        encoded_dict_items = b''
        keys = data.keys()
        for key in sorted(keys):
            encoded_dict_items += bencode(key)
            encoded_dict_items += bencode(data[key])
        return b'd%se' % encoded_dict_items
    else:
        raise TypeError("Cannot bencode '%s' object" % type(data))
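A quick illustrative check of the encoder above; the sample values are made up and simply exercise the integer, string, list and dict branches:

# Illustrative values only -- they exercise each branch of bencode()
assert bencode(42) == b'i42e'
assert bencode('spam') == b'4:spam'
assert bencode(['spam', 42]) == b'l4:spami42ee'
assert bencode({'bar': 'spam', 'foo': 42}) == b'd3:bar4:spam3:fooi42ee'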
def replace_cr_with_newline(message: str):
    """
    TQDM and requests use carriage returns to get the training line to update for each batch
    without adding more lines to the terminal output. Displaying those in a file won't work
    correctly, so we'll just make sure that each batch shows up on its own line.
    :param message: the message to permute
    :return: the message with carriage returns replaced with newlines
    """
    if '\r' in message:
        message = message.replace('\r', '')
        if not message or message[-1] != '\n':
            message += '\n'
    return message
def rtnd(number, n):
    """
    Round number to a max of n digits after the decimal point
    :param number: Given number
    :param n: Requested number of digits after the decimal points
    :return: number with a max of n digits after the decimal point
    """
    # Note: int() truncates toward zero, so extra digits are dropped rather
    # than rounded half-up.
    return int(number * 10 ** n) / 10 ** n
def strip_js_window_open(js): """Strips the javascript window.open function from a link. """ function_start = js.find('window.open(') function_end = js.find(');') arguments = js[function_start:function_end] broken = arguments.split(',') link = broken[0].split('(')[1:] link = '('.join(link) link = link[1:-1] return link
def get_value_of_field(field): """ Returns the point value for the current field """ ret = 0 for y in range(4): for x in range(4): if field[x][y] == 0: ret += 1 return ret
def dead_end(accessible_domains):
    """Returns True if any domain is empty; the assignment is then a dead-end

    Parameters:
        accessible_domains (dict) Represents domains as dict(variables:values)

    Returns (boolean) True if an assignment is a dead-end, False otherwise
    """
    return any(value == [] for value in accessible_domains.values())
def converter(L): """Takes picobot code, as a list, and returns a picobot dictionary""" picobotDict = {} for item in L: key = (int(item[0]), str(item[2:6])) value = (str(item[10]), int(item[12])) picobotDict[key] = value return picobotDict
def MSE(x, y):
    """Computation of the mean-square error.

    Note: this returns the (elementwise) squared error; for array inputs the
    caller still has to average it to obtain the mean-square error.
    """
    return (x - y)**2
def prolog_args(json_term): """ Return the arguments from json_term if json_term is in the Prolog JSON format. See `swiplserver.prologserver` for documentation on the Prolog JSON format. """ return json_term["args"]
def get_simple_nominal_time(data_object, sample, delta=None):
    """
    Returns a float.

    Returns the nominal time of the specified sample number without using any
    time packet information, i.e. only the header information is used.
    """
    hdr = data_object['HEADER']
    if delta is None:
        delta = hdr['TIME']['DELTA']
    start = hdr['TIME']['START']
    return start + delta * sample
def float_or_0(data, default=0.0): """Helper fnc. Returns data casted to a float value if possible, else to 0. >>> float_or_0(5.0) 5.0 >>> float_or_0(None) 0.0 >>> float_or_0(0) 0.0 >>> float_or_0(-1) -1.0 >>> float_or_0([]) 0.0 >>> float_or_0(tuple()) 0.0 """ if not data: return default try: return float(data) except (TypeError, ValueError): # FIXME do we really want to allow to convert anything to zero here or is it better, not to # catch the type and value errors an let casting fail? return default
def pressure(v, t, n): """Compute the pressure in pascals of an ideal gas. Applies the ideal gas law: http://en.wikipedia.org/wiki/Ideal_gas_law v -- volume of gas, in cubic meters t -- absolute temperature in degrees kelvin n -- particles of gas """ k = 1.38e-23 # Boltzmann's constant return n * k * t / v
def count_leading_empty_lines(cell):
    """Count the number of leading empty lines."""
    lines = cell.splitlines(keepends=True)
    if not lines:
        return 0
    for i, line in enumerate(lines):
        if line and not line.isspace():
            return i
    return len(lines)
def filter_inactive_guides(rows): """Filter two inactive guides. For some reason, two guides were completely inactive -- probably a technical issue. Filter these out. Returns: rows with two inactive guides filtered """ inactive_guides = ['block18_guide0', 'block7_guide13'] rows_filtered = [] for row in rows: if row['crRNA'] in inactive_guides: # Verify this is inactive assert float(row['median']) < -2.5 else: # Keep it rows_filtered += [row] return rows_filtered
def getText (nodelist): """ Helper to retrieve text from an XML node list """ rc = [] for node in nodelist: if node.nodeType == node.TEXT_NODE: rc.append(node.data) return ''.join(rc)
def any(p, xs): """Returns true if at least one of elements of the list match the predicate, false otherwise. Dispatches to the any method of the second argument, if present. Acts as a transducer if a transformer is given in list position""" for x in xs: if p(x): return True return False
def extract_non_tty_phone(public_liaison): """ Extract a non-TTY number if one exists, otherwise use the TTY number. If there are multiple options, for now pick the first one. Return None if no phone number """ if 'phone' in public_liaison: non_tty = [p for p in public_liaison['phone'] if 'TTY' not in p] if len(non_tty) > 0: return non_tty[0] elif len(public_liaison['phone']) > 0: return public_liaison['phone'][0]
def intpol(y1, y2, x1, x2, x): """Interpolate between (*x1*, *y1*) and (*x2*, *y2*) at *x*. Parameters ---------- y1, y2 : float or pd.Series x1, x2, x : int """ if x2 == x1 and y2 != y1: print('>>> Warning <<<: No difference between x1 and x2,' 'returned empty!!!') return [] elif x2 == x1 and y2 == y1: return y1 else: y = y1 + ((y2 - y1) / (x2 - x1)) * (x - x1) return y
def clean_text(text, patterns): """ Applies the given patterns to the input text. Ensures lower-casing of all text. """ txt = text for pattern in patterns: txt = pattern[0].sub(pattern[1], txt) txt = ''.join([letter for letter in txt if (letter.isalnum() or letter.isspace())]) return txt.lower()
def get_company_by_email(domains_index, email): """Get company based on email domain Automatically maps email domain into company name. Prefers subdomains to root domains. :param domains_index: dict {domain -> company name} :param email: valid email. may be empty :return: company name or None if nothing matches """ if not email: return None name, at, domain = email.partition('@') if domain: parts = domain.split('.') for i in range(len(parts), 1, -1): m = '.'.join(parts[len(parts) - i:]) if m in domains_index: return domains_index[m] return None
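A small hypothetical example of the subdomain-before-root matching described in get_company_by_email; the domain map and addresses below are invented:

domains_index = {'ibm.com': 'IBM', 'research.ibm.com': 'IBM Research'}  # hypothetical mapping
assert get_company_by_email(domains_index, 'joe@research.ibm.com') == 'IBM Research'  # subdomain wins
assert get_company_by_email(domains_index, 'jane@fr.ibm.com') == 'IBM'  # falls back to the root domain
assert get_company_by_email(domains_index, '') is None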
def mk_chks_and_noise_tags(chks_and_tags_enum, thresh_chks): """ Returns the chunks that are listed in thresh_chks, along with noise_tags which indicates False if a chunk is in thresh_chks (and is then not background noise) and True if a chunk is not in thresh_chks >>> chks = [[1,1], [2,2], [3,3]] >>> tags = ['foo', 'bar', 'cho'] >>> chks_and_tags_enum = enumerate(zip(chks, tags)) >>> thresh_chks = [1] >>> chks, noise_tags = mk_chks_and_noise_tags(chks_and_tags_enum, thresh_chks) >>> assert chks[0] == [2,2] >>> assert noise_tags == {0: True, 1: False, 2: True} """ chks = [] noise_tags = {} for idx, chk_tag in chks_and_tags_enum: if idx in thresh_chks: noise_tags[idx] = False chks.append(chk_tag[0]) else: noise_tags[idx] = True return chks, noise_tags
def get_sub_repo_abbrev_origin_path(repo_name, origin_path):
    """Return abbrev path for sub repo based on `repo_name` and `origin_path`.

    Arguments:
    - `repo_name`:
    - `origin_path`:
    """
    if len(origin_path) > 20:
        abbrev_path = origin_path[-20:]
        return repo_name + '/...' + abbrev_path
    else:
        return repo_name + origin_path
def pybool_to_js(val): """Change 1/True to 'true' and 0/None/False to 'false'""" return 'true' if val else 'false'
def rayleigh_range(w0, k): """ Computes the rayleigh range, which is the distance along the propagation direction of a beam from the waist to the place where the area of cross section is doubled. Args: w0: waist radius of beam k: wave number in the direction of propagation of beam Returns: rayleigh range """ return 0.5 * k * w0**2
def get_column(data, index): """ Get single column """ return list(map(lambda item: item[index], data))
def get_list_from_str(text): """ helper function for get_funghi_type_dict_from_csv() Parameters ---------- text: str text containing an attribute value in Python list format Return ------------ list of strs or list of floats list of the values as direct translation from the text nominal attributes as strs, metrical attributes as floats Example ------------- text = "['x', 'f']" -> return ['x', 'f'] text = "[10.0, 20.0]" -> return [10.0, 20.0] """ remove_strs = ['[', ']', ' ', '\n'] for remove_str in remove_strs: text = text.replace(remove_str, '') if ',' in text: result_list = text.split(',') else: result_list = [text] # if elements are not numbers returns as str, otherwise converts to float return result_list if not result_list[0].isdigit() else [float(n) for n in result_list]
def transpose_list_of_lists(list_of_lists): """ Transposes a list of lists. """ return [list(i) for i in zip(*list_of_lists)]
def get_min_max(ints): """ Return a tuple(min, max) out of list of unsorted integers. Args: ints(list): list of integers containing one or more integers """ if len(ints) == 0: return min_num = ints[0] max_num = ints[0] for num in ints: if num < min_num: min_num = num elif num > max_num: max_num = num return min_num, max_num
def to_upper(string: str) -> str: """ Converts :string: to upper case. Intended to be used as argument converter. Args: string: string to format Returns: string to upper case """ return string.upper()
def count_not_none(*args): """ Returns the count of arguments that are not None. """ return sum(x is not None for x in args)
def wrap_values(values): """ Transforms form values to format, used in model and rendering functions :param values: values from form in {key: value} format :return: wrapped dict """ return { key: {'value': values[key]} for key in values }
def align_down(number, alignment=16): """ Subtract from *number* so it is divisible by *alignment*. :param int number: The value to decrease. :param int alignment: The value to make *number* divisible by. :return: The modified value. :rtype: int """ return number - (number % alignment)
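For example, with the default 16-byte alignment align_down simply drops the remainder (values picked arbitrarily):

assert align_down(0x1234) == 0x1230          # 0x1234 % 16 == 4, so 4 is subtracted
assert align_down(100, alignment=8) == 96
assert align_down(96, alignment=8) == 96     # already aligned values are unchanged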
def mission_identifier(target): """Identifies the mission ('Kepler', 'K2' or 'TESS') from the identifier of the target. Parameters ---------- target : str or int Name of the target as a string, e.g. "TOI-175" or, if mission is passed as the numerical identifier of the input catalog. Returns ------- mission : str 'Kepler', 'K2' or 'TESS' Raises ------ ValueError If the target was not resolved or linked to a supported mission """ if not isinstance(target, str): target = str(target) # Deal with dot as a separator. e.g "TOI-175.01" target = target.split('.')[0] if target[:3].upper() == 'TIC': mission = 'TESS' elif target[:3].upper() == 'TOI': mission = 'TESS' elif target[:3].upper() == 'KIC': mission = 'Kepler' elif target[:3].upper() == 'KEP': mission = 'Kepler' elif target[:4].upper() == 'EPIC': mission = 'K2' elif target[:2].upper() == 'K2': mission = 'K2' else: raise ValueError( "targname {} could not be linked to a supported mission " "('Kepler', 'K2' or 'TESS')".format(str(target))) return mission
def analyze_user(user, verbosity, error_level): """analyze AD user object user -- user object to be analyzed verbosity - NAGIOS verbosity level - ignored error_level - ignored Returns the Nagios error code (always 0) and error message (user defn) """ return 0, '{principal name:%s, displayName: %s, objectId:%s, given name:%s,'\ ' surname:%s, mail nickname:%s}' % (user['userPrincipalName'], user['displayName'], user['objectId'], user['givenName'], user['surname'], user['mailNickname'])
def test_side_effect(x): """ int -> int """ y,a = 1,0 #z: int z = x return z
def read_lwostring(raw_name): """Parse a zero-padded string.""" i = raw_name.find(b'\0') name_len = i + 1 if name_len % 2 == 1: # Test for oddness. name_len += 1 if i > 0: # Some plugins put non-text strings in the tags chunk. name = raw_name[0:i].decode("utf-8", "ignore") else: name = "" return name, name_len
def _is_pdas(filename): """ Checks whether a file is a PDAS file or not. :type filename: str :param filename: Name of file to be checked. :rtype: bool :return: ``True`` if a PDAS file. """ try: with open(filename, "rb") as fh: header_fields = [fh.readline().split()[0].decode() for i_ in range(11)] expected_headers = ['DATASET', 'FILE_TYPE', 'VERSION', 'SIGNAL', 'DATE', 'TIME', 'INTERVAL', 'VERT_UNITS', 'HORZ_UNITS', 'COMMENT', 'DATA'] if header_fields == expected_headers: return True else: return False except Exception: return False
def first_missing_positive_int(integers): """Finds the first missing positive integer in the unsorted list Returns: num(int): missing int in the sequence(unsorted) """ seen_nums = [] for integer in integers: if integer < 0: continue if integer + 1 > len(seen_nums): extended_size = integer - len(seen_nums) + 1 seen_nums.extend([False] * extended_size) seen_nums[integer] = True # return seen_nums # finding the actual number miss_int = 0 for i in range(1, len(seen_nums)): if seen_nums[i] is False: miss_int = i return miss_int # if we never found the missing integer and seen_nums is empty # then we know first missing positive int is 1 if miss_int == 0 and len(seen_nums) == 0: return 1 else: return len(seen_nums)
def is_namedtuple_class(c): """check if c is a namedtuple class""" if not isinstance(c, type): return False # should have only tuple as superclass bases = c.__bases__ if len(bases) != 1 or bases[0] != tuple: return False # should have _make method if not hasattr(c, '_make'): return False # should have _fields that is all string fields = getattr(c, '_fields', None) if not isinstance(fields, tuple): return False return all(isinstance(f, str) for f in fields)
def getProjectNames(): """Returns an unsorted collection of strings, where each string represents the name of a project on the Gateway. If no projects exist, returns an empty list. This function only ever returns project names, ignoring project titles. The function also ignores the "enabled" property, including disabled projects in the results. Returns: list[str]: A list containing string representations of project names on the Gateway. """ return ["MyProject", "DisabledProject"]
def _valueWithType(tag, tagValue): """Return tagValue, handling some type conversions.""" tagType = tag.get('type') if tagType == 'int': tagValue = int(tagValue) elif tagType == 'float': tagValue = float(tagValue) return tagValue
def zimmermann(x): """ Zimmermann function: a non-continuous function, Equation (24-26) of [2] minimum is f(x)=0.0 at x=(7.0,2.0) """ x0, x1 = x #must provide 2 values (x0,y0) f8 = 9 - x0 - x1 c0,c1,c2,c3 = 0,0,0,0 if x0 < 0: c0 = -100 * x0 if x1 < 0: c1 = -100 * x1 xx = (x0-3.)*(x0-3) + (x1-2.)*(x1-2) if xx > 16: c2 = 100 * (xx-16) if x0 * x1 > 14: c3 = 100 * (x0*x1-14.) return max(f8,c0,c1,c2,c3)
def _filter_files(members, type='tar'): """Filter out non-csv files in zip file.""" if type == "tar": new_members = filter( lambda member: member.name.endswith('.csv'), members) else: new_members = filter(lambda member: member.endswith('.csv'), members) new_members = list(new_members) return new_members
def path_in_cc(path, cc): """Determines whether all vertices of a given path belong to a given connected component. :param path: :param cc: list of vertices representing a connected component in a graph :return: True if the path vertices are found in the connected component, False otherwise """ for node in path: if node not in cc: return False return True
def bitcount(i): """ Count set bits of the input. """ res = 0 while i > 0: res += i&1 i>>=1 return res
def globulize_filepath(filepath): """ Take a filepath defined and if a specific file is not specified, make it greedy in glob format. """ # remove leading slash if filepath[0] == "/": filepath = filepath[1:] # is targeting a file specifically, no change needed if "." in filepath.split("/")[-1]: return filepath # /src/ --> /src/* if filepath[-1] == "/": filepath += "*" # /src --> /src/* elif filepath[-1] != "*": filepath += "/*" return filepath
def matchDirectImageToGrismImage(direct, grism): """ Matches direct images to grism images in an extremely dummy way. Will just check that either RA or DEC are equal. This can potentially lead to crazy results as it does not guarantee that there is overlap. Moreover, if one has applied a tiny offset to the grism image to for example account for the fact that the dispersion is always towards the same direction, then this matching would fail. One should probably use some header keyword information such as POSTARGs together with the RA and DEC to find the closest matches. However, for the testing that will be performed in April 2011 this dummy way of matching should be sufficient. """ match = {} for key1 in direct: for key2 in grism: if direct[key1][2] == grism[key2][2] or\ direct[key1][3] == grism[key2][3]: #found the exact same coordinates #in reality this matching wouldn't #be enough as there might be offset #so such info should be taken from #the header and used if key1 in match.keys(): match[key1] += [key2, ] else: match[key1] = [key2, ] return match
def flatten(l): """Flatten a list of lists. Parameters ---------- l : list of lists A nested list Returns ------- list A flat list """ return [item for sublist in l for item in sublist]
def get_amount_and_variables(settings): """Read amount and species from settings file""" # species for which results are expected as amounts amount_species = settings['amount'] \ .replace(' ', '') \ .replace('\n', '') \ .split(',') # IDs of all variables for which results are expected/provided variables = settings['variables'] \ .replace(' ', '') \ .replace('\n', '') \ .split(',') return amount_species, variables
def get_incoming_grammatical_relation(sdp, event_token, event_index):
    """First check whether this is a collapsed conj or prep,
    then search for it in the rest of the parse.
    """
    prep_or_conj = [e for e in sdp if e[0].endswith("_" + event_token)]
    if len(prep_or_conj) > 0:
        return prep_or_conj[0][0]
    for e in sdp:
        if e[3] == event_token and e[4] == event_index:
            return e[0]
    raise Exception("This can't be happening. This sdp " + str(sdp) +
                    " has no incoming grammatical relation for " + event_token +
                    " " + str(event_index))
def smart_lower(value): """ >>> smart_lower('RequestFriendHandler') 'request_friend_handler' """ result = [value[0].lower()] for c in value[1:]: if c >= 'A' and c <= 'Z': result.append('_') result.append(c.lower()) return ''.join(result)
def prepare_data_for_cyclomatic_complexity_chart(cyclomatic_complexity): """Prepare data (values, labels, colors) for the cyclomatic complexity chart.""" filtered_complexity = {k: v for k, v in cyclomatic_complexity.items() if v > 0 and k != "status"} labels = sorted(filtered_complexity.keys()) fractions = [filtered_complexity[key] for key in labels] colors = ('#60a060', 'yellow', 'orange', 'red', 'magenta', 'black') return labels, fractions, colors
def decode_int(astring, alphabet): """Decode a Base X encoded string into the number Arguments: - `astring`: The encoded string - `alphabet`: The alphabet to use for decoding """ strlen = len(astring) base = len(alphabet) num = 0 idx = 0 for char in reversed(astring): power = (strlen - (idx + 1)) num += alphabet.index(char) * (base ** power) idx += 1 return num
def recognize_source(line: str) -> bool: """ Recognizes .po file source string. """ if line.startswith("msgid"): return True return False
def get_test_diff(test_a, test_b): """ Finding difference in 2 test results :param test_1: dict, {finalstatus, subresults} - previous. :param test_2: dict, {finalstatus, subresults} - current. :return: index """ state_index = {'active': 3, 'succeeded': 2, 'finished': 1, 'failed': 0} result_translation = { -1: 'active', 0: 'ok', 1: 'warning_b', 2: 'warning', 3: 'warning_w', 4: 'alert', } diff_matrix = [ [4, 2, 0, -1], [4, 9, 0, -1], [4, 2, 0, -1], [-1, -1, -1, -1] ] result = diff_matrix[state_index[test_a['finalresult']]][state_index[test_b['finalresult']]] if result == 9: # compare substep results is_diff = None for step in range(0, len(test_b['subresults'])): try: if len(test_a['subresults']) - 1 > step and test_a['subresults'][step]['name'] == test_b['subresults'][step]['name']: if test_a['subresults'][step]['result'] != test_b['subresults'][step]['result']: is_diff = test_b['subresults'][step]['result'] - test_a['subresults'][step]['result'] break except: print('ddd') if is_diff is not None: result = 1 if is_diff < 0 else 3 else: result = 2 return result_translation[result]
def _matrix_M_entry(row, col): """Returns one entry for the matrix that maps alpha to theta. Args: row (int): one-based row number col (int): one-based column number Returns: (float): transformation matrix entry at given row and column """ # (col >> 1) ^ col is the Gray code of col b_and_g = row & ((col >> 1) ^ col) sum_of_ones = 0 while b_and_g > 0: if b_and_g & 0b1: sum_of_ones += 1 b_and_g = b_and_g >> 1 return (-1) ** sum_of_ones
def check_envelope(new, old): """expand old envelope to max extents of new envelope """ if len(new) == len(old) and len(new) == 4: # update envelope if polygon extends beyond bounds if new[0] < old[0]: old[0] = new[0] if new[1] > old[1]: old[1] = new[1] if new[2] < old[2]: old[2] = new[2] if new[3] > old[3]: old[3] = new[3] elif len(old) == 0 and len(new) == 4: # initialize envelope for x in new: old.append(x) return old
def grading_genes(gene2family, gene_ids, qvalues, pvalues): """Classify genes according to the q value and check for which family it belongs. """ family_class=dict() for i,fid in enumerate(gene_ids): if qvalues[i]<0.05: # 5% threshold value if fid in gene2family: for family in gene2family[fid]: if family in family_class: if 'dif_exp' in family_class[family].keys(): family_class[family]['dif_exp']+=1 else: family_class[family]['dif_exp']=1 else: family_class[family]={'dif_exp':1} else: if 'other' in family_class: if 'dif_exp' in family_class['other'].keys(): family_class['other']['dif_exp']+=1 else: family_class['other']['dif_exp']=1 else: family_class['other']={'dif_exp':1} elif qvalues[i]>0.05: # expressed or not-expressed if pvalues[i]<0.05: # based on p-value accept alternative hypothesis:- Ho:not expressed, Ha:expressed if fid in gene2family: for family in gene2family[fid]: if family in family_class: if 'mod_exp' in family_class[family].keys(): family_class[family]['mod_exp']+=1 else: family_class[family]['mod_exp']=1 else: family_class[family]={'mod_exp':1} else: if 'other' in family_class: if 'mod_exp' in family_class['other'].keys(): family_class['other']['mod_exp']+=1 else: family_class['other']['mod_exp']=1 else: family_class['other']={'mod_exp':1} elif pvalues[i]>0.05: # not-expressed if fid in gene2family: for family in gene2family[fid]: if family in family_class: if 'not_exp' in family_class[family].keys(): family_class[family]['not_exp']+=1 else: family_class[family]['not_exp']=1 else: family_class[family]={'not_exp':1} else: if 'other' in family_class: if 'not_exp' in family_class['other'].keys(): family_class['other']['not_exp']+=1 else: family_class['other']['not_exp']=1 else: family_class['other']={'not_exp':1} return family_class
def _isNumeric(j):
    """
    Check if the input object is a numerical value, i.e. a float
    :param j: object
    :return: boolean
    """
    try:
        x = float(j)
    except (ValueError, TypeError):
        # also catch TypeError so non-castable objects (e.g. None, lists)
        # report False instead of raising
        return False
    return True
def _filter_treatment(dataset, seed): """Mapping function. Constructs bool variable (treated = True, control = False) Args: dataset: tf.data.Dataset seed: int Returns: dataset: tf.data.Dataset """ t = False if dataset[f'image/sim_{seed}_pi/value'] == 1: t = True dataset['t'] = t return dataset
def _parse_sigmak(line, lines): """Parse Energy, Re sigma xx, Im sigma xx, Re sigma zz, Im sigma zz""" split_line = line.split() energy = float(split_line[0]) re_sigma_xx = float(split_line[1]) im_sigma_xx = float(split_line[2]) re_sigma_zz = float(split_line[3]) im_sigma_zz = float(split_line[4]) return {"energy": energy, "re_sigma_xx": re_sigma_xx, "im_sigma_xx": im_sigma_xx, "re_sigma_zz": re_sigma_zz, "im_sigma_zz": im_sigma_zz}
def cast_ulonglong(value): """ Cast value to 64bit integer """ value = value & 0xffffffffffffffff return value
def is_float4x4(items): """Verify that the sequence contains 4 sequences of each 4 :obj:`float`. Parameters ---------- items : iterable The sequence of items. Returns ------- bool """ return ( len(items) == 4 and all( len(item) == 4 and all(isinstance(i, float) for i in item) for item in items ) )
def contains_recursive(text, pattern, index=None):
    """Return a boolean indicating whether pattern occurs in text."""
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)
    if index is None:
        index = 0
    max_index = len(text) - (len(pattern) - 1)
    if index > max_index:
        return False
    curr_range = index + len(pattern)
    curr_letters = text[index:curr_range]
    if curr_letters == pattern:
        return True
    else:
        return contains_recursive(text, pattern, index + 1)
def is_private(path): """ Determine if a import path, or fully qualified is private. that usually implies that (one of) the path part starts with a single underscore. """ for p in path.split("."): if p.startswith("_") and not p.startswith("__"): return True return False
def OldFracturesGuids(fracture_list): """ Function to return the list of old guids of fractures in the domain before trimming Parameter -------- fracture_list: list list of fracture objects """ list = [fracture_list[i].fracture_GUID for i in range(len(fracture_list))] return list
def create_content_item_id_set(id_set_list: list) -> dict: """ Given an id_set.json content item list, creates a dictionary representation""" res = dict() for item in id_set_list: for key, val in item.items(): res[key] = val return res
def get_deep(config, key_seq): """Get a value from a clang config or template using the given sequence of keys.""" if 1 == len(key_seq): return config[key_seq[0]] else: return get_deep(config[key_seq[0]], key_seq[1:])
def modular_exp(a, b, n): """Computes a^b mod n. Complexity O(log(b))""" res = 1 q = a while b > 0: if b % 2 == 1: res = q*res % n q = q*q % n b //= 2 return res
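A quick sanity check of the square-and-multiply loop above, using the textbook example 4^13 mod 497:

assert modular_exp(4, 13, 497) == 445                 # 4**13 % 497
assert modular_exp(4, 13, 497) == pow(4, 13, 497)     # agrees with Python's built-in pow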
def count_vowels(string): """ Function which returns the count of all vowels in the string str """ vowels = "aeiouy" counter = 0 if string: for ch in string.lower(): if ch in vowels: counter += 1 return counter
def compute_padding(J_pad, N): """ Computes the padding to be added on the left and on the right of the signal. It should hold that 2**J_pad >= N Parameters ---------- J_pad : int 2**J_pad is the support of the padded signal N : int original signal support size Returns ------- pad_left: amount to pad on the left ("beginning" of the support) pad_right: amount to pad on the right ("end" of the support) References ---------- This is a modification of https://github.com/kymatio/kymatio/blob/master/kymatio/scattering1d/utils.py Kymatio, (C) 2018-present. The Kymatio developers. """ N_pad = 2**J_pad if N_pad < N: raise ValueError('Padding support should be larger than the original ' 'signal size!') to_add = 2**J_pad - N pad_right = to_add // 2 pad_left = to_add - pad_right return pad_left, pad_right
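For instance, padding a signal of length 200 up to a support of 2**8 = 256 splits the 56 extra samples evenly; with an odd remainder the extra sample goes to the left:

assert compute_padding(8, 200) == (28, 28)   # 256 - 200 = 56 -> 28 on each side
assert compute_padding(8, 201) == (28, 27)   # odd remainder: one extra sample on the left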
def _guess_type(name):
    """
    guess the type based on the given file name and returns one of the
    following: fasta, fastq, map or None if no type could be detected
    @param name: the name of the file
    @type name: string

    @return: one of fasta, fastq, map or None
    @rtype: string
    """
    name = name.upper()
    if name.endswith(".GZ"):
        name = name[:-3]
    if name.endswith(".FASTA") or name.endswith("FA"):
        return "fasta"
    elif name.endswith(".FASTQ") or name.endswith("FQ"):  # fixes issue #5
        return "fastq"
    elif name.endswith(".MAP"):
        return "map"
    elif name.endswith(".SAM"):
        return "sam"
    elif name.endswith(".BAM"):
        return "bam"
    return None
def other_count(string): """Regular dict is used.""" r = {} for c in string: if c in r: r[c] += 1 else: r[c] = 1 return r
def text2int(textnum, numwords={}): """Convert text words into an integer. Source: https://stackoverflow.com/questions/493174/is-there-a-way-to-convert-number-words-to-integers""" if not numwords: units = [ "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen", ] tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"] scales = ["hundred", "thousand", "million", "billion", "trillion"] numwords["and"] = (1, 0) for idx, word in enumerate(units): numwords[word] = (1, idx) for idx, word in enumerate(tens): numwords[word] = (1, idx * 10) for idx, word in enumerate(scales): numwords[word] = (10 ** (idx * 3 or 2), 0) current = result = 0 for word in textnum.split(): if word not in numwords: raise Exception("Illegal word: " + word) scale, increment = numwords[word] current = current * scale + increment if scale > 100: result += current current = 0 return result + current
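A couple of illustrative conversions with text2int; the running `current` value is flushed into `result` whenever a scale word above one hundred, such as "thousand", is seen:

assert text2int("seven hundred and twenty five") == 725
assert text2int("two thousand and nineteen") == 2019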
def interprete_math_expr(operands, expr): """ :param operands: list of operands :param expr: the expression to use: + or - or * :return: mathematical result """ result = operands[0] for operand in operands[1:]: if expr == '+': result += operand elif expr == '-': result -= operand elif expr == '*': result *= operand else: raise Exception('[*] Exception parsing math expression: Unknown Value \'%s\'!' % expr) return result
def is_telemarketer(phone_number):
    """Determine whether the given telephone number is a known telemarketer"""
    return phone_number[:3] == '140'
def find_layer_name(lines):
    """
    Find the layer name
    :param lines:
    :return:
    """
    layer_name = None
    top_name = None
    flag_count = 0
    first_line = lines[0]
    # use equality, not identity, when comparing string literals
    assert first_line.split()[1] == '{', 'Something is wrong'
    brack_count = 1
    for l in lines[1:]:
        if '{' in l:
            brack_count += 1
        if '}' in l:
            brack_count -= 1
            if brack_count == 0:
                break
        if 'name' in l and brack_count == 1:
            flag_count += 1
            _, layer_name = l.split()
            layer_name = layer_name[1:-1]
        if 'top' in l and brack_count == 1:
            flag_count += 1
            _, top_name = l.split()
            top_name = top_name[1:-1]
    assert layer_name is not None, 'no name of a layer found'
    return layer_name, top_name
def find_std(arr): """ This function determines the standard deviation of the given array. Args: arr = numpy array for which the standard deviation and means are to be determined Returns: std = standard deviation of the given array mean = mean value of the given array Usage: import TA_functions as taf std, mean = taf.find_std(y_positions) """ N = float(len(arr)) mean = sum(arr) / N diff2meansq_list = [] for a in arr: diff = a - mean diffsq = diff * diff diff2meansq_list.append(diffsq) std = (1.0 / N * sum(diff2meansq_list)) ** 0.5 #print ('sigma = ', std, ' mean = ', mean) return std, mean
def _check_tgr_pesummary_file(f): """Check the contents of a dictionary to see if it is a pesummary TGR dictionary Parameters ---------- f: dict dictionary of the contents of the file """ labels = f.keys() if "version" not in labels: return False try: if all( "imrct" in f[label].keys() for label in labels if label != "version" and label != "history" ): return True else: return False except Exception: return False
def escape_variables(environ): """ Escape environment variables so that they can be interpreted correctly by python configparser. """ return {key: environ[key].replace('%', '%%') for key in environ}
def get_drive_letter(path_in, mnt: bool): """Return the letter of the windows drive of the path.""" if mnt: return path_in[5].lower() return path_in[0].lower()
def int_to_bytes_big_endian(x: int, n_bytes: int) -> bytearray: """Converts integer to bytes in big endian mode""" if x >= 256 ** n_bytes: raise ValueError("Conversion overflow") res = bytearray(n_bytes) shift = 0 for i in range(n_bytes - 1, -1, -1): res[i] = (x >> shift) & 0xff shift += 8 return res
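For example, converting 0x1234 into four bytes left-pads with zeros, and values that do not fit raise the overflow error:

assert int_to_bytes_big_endian(0x1234, 4) == bytearray([0x00, 0x00, 0x12, 0x34])
assert int_to_bytes_big_endian(255, 1) == bytearray([0xff])
# int_to_bytes_big_endian(256, 1) would raise ValueError("Conversion overflow")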
def formatGpuCount(gpuCount): """ Convert the GPU Count from the SLurm DB, to an int The Slurm DB will store a string in the form "gpu:1" if a GPU was requested If a GPU was not requested, it will be empty """ if gpuCount: intGpuCount = int("0"+gpuCount.split(":")[1]) return intGpuCount else: return int(0)
def group2text(group): """ Returns formatted content of namelist group ``&TRACER``. """ lines = ['&TRACER'] for key, value in group.items(): if key == '' or value == '': continue if key == 'yshort_name': value = "'%s'" % value lines.append(' %s = %s,' % (key, value)) lines.append('/\n') return '\n'.join(lines)
def connect_points(pts1, pts2): """ Connects each point in the first list with all points in the second. If the first list has N points and the second has M, the result are 2 lists with N*M points each, representing the connections. Parameters: * pts1 : list List of (x, y) coordinates of the points. * pts2 : list List of (x, y) coordinates of the points. Returns: * results : lists of lists = [connect1, connect2] 2 lists with the connected points """ connect1 = [] append1 = connect1.append connect2 = [] append2 = connect2.append for p1 in pts1: for p2 in pts2: append1(p1) append2(p2) return [connect1, connect2]
def weighted_mean(interactions, similarities):
    """Computes the mean interaction of the user (if user_based == true) or the mean
    interaction of the item (if user_based == false).

    It computes the sum of the similarities multiplied by the interactions of each
    neighbour, and then divides this sum by the sum of the similarities of the
    neighbours."""
    sim_sum, interaction_sum = 0, 0
    for interaction, similarity in zip(interactions, similarities):
        interaction_sum += similarity * interaction
        sim_sum += similarity
    return interaction_sum / sim_sum if sim_sum > 0 else None
def single_byte(bs, b): """ Implementation of a single-byte XOR cipher. """ # XOR each byte in the array with the single byte and return. return bytearray([bt ^ b for bt in bs])
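As a small illustration, XOR-ing ASCII lowercase letters with 0x20 flips them to uppercase, and applying the same key twice recovers the original bytes:

assert single_byte(b'hello', 0x20) == bytearray(b'HELLO')
assert single_byte(single_byte(b'hello', 0x55), 0x55) == bytearray(b'hello')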
def _islist(string):
    """
    Checks if a string can be converted into a list.

    Parameters
    ----------
    string : str

    Returns
    -------
    bool: True/False if the string can/can not be converted into a list.
    """
    return (list(string)[0] == "[") and (list(string)[-1] == "]")
def _Basename(path): """Returns the final component of a pathname.""" i = path.rfind("/") + 1 return path[i:]
def failure_message_from_response(response): """ Given EMR response, returns a descriptive error message """ fail_details = response['Step']['Status'].get('FailureDetails') if fail_details: return 'for reason {} with message {} and log file {}'\ .format( fail_details.get('Reason'), fail_details.get('Message'), fail_details.get('LogFile') )
def build_regex(pattern, pattern_name=None, **kwargs): """ Return regex string as a named capture group. See: https://tonysyu.github.io/readable-regular-expressions-in-python.html """ pattern = pattern.format(**kwargs) if pattern_name is not None: return r'(?P<{name}>{pattern})'.format(name=pattern_name, pattern=pattern) return pattern
def _filterfnodes(tagfnode, nodes): """return a list of unique fnodes The order of this list matches the order of "nodes". Preserving this order is important as reading tags in different order provides different results.""" seen = set() # set of fnode fnodes = [] for no in nodes: # oldest to newest fnode = tagfnode.get(no) if fnode and fnode not in seen: seen.add(fnode) fnodes.append(fnode) return fnodes
def null_node_formatter(nodetext, optionstext, caller=None): """ A minimalistic node formatter, no lines or frames. """ return nodetext + "\n\n" + optionstext
def avg_and_total(iterable):
    """Compute the average and the total over a numeric iterable."""
    items = 0
    total = 0.0
    for item in iterable:
        total += item
        items += 1
    return total / items, total
def GetNextObject(op): """Retrieve the next object in the document. Source: https://developers.maxon.net/?p=596 """ if op==None: return None if op.GetDown(): return op.GetDown() while not op.GetNext() and op.GetUp(): op = op.GetUp() return op.GetNext()
def absolute_url(relative_url): """Converts relative url to absolute.""" url = relative_url if not relative_url.startswith('https://'): if not relative_url.startswith('http://'): url = ''.join(['http://', relative_url]) else: url = ''.join(['https://', relative_url]) return url
def sanitizePath(path, failIfEmptyString=True):
    """
    converts all backslashes to forward slashes and adds a slash at the end of the given string, if not already present
    @param path the path that should be sanitized
    @return returns the sanitized path
    """
    if path == '' or path is None:
        if failIfEmptyString:
            raise Exception("path must not be empty")
        else:
            return '/'
    # str.replace returns a new string, so the result must be assigned back
    path = path.replace("\\", "/")
    if path[-1] != '/':
        path += '/'
    return path
def bitsToChar(bitSeq): """Converts each 8 bit length padded bit sequences back to a char based on its unicode value""" value = 0 for bit in bitSeq: value = (value * 2) + bit # This for loop will determine the numeric value of the binary bit sequence input return chr(value)
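For example, the 8-bit sequence for 65 decodes back to 'A':

assert bitsToChar([0, 1, 0, 0, 0, 0, 0, 1]) == 'A'   # 0b01000001 == 65
assert bitsToChar([0, 0, 1, 1, 0, 0, 0, 0]) == '0'   # 0b00110000 == 48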