def white_space_fix(text):
    """Return text in which successive whitespace characters are replaced
    by a single space character.

    Args:
        text (str): input text to process.

    Returns:
        result (str): corresponding text in which successive whitespace
            characters are replaced by a single space character.
    """
    # str.split() with no argument already splits on runs of whitespace
    # and discards leading/trailing whitespace.
    return ' '.join(text.split())

def normalize_tuple(value, n, name):
    """Transform a single integer or iterable of integers into an integer tuple.

    Args:
        value: The value to validate and convert. Could be an int, or any
            iterable of ints.
        n: The size of the tuple to be returned.
        name: The name of the argument being validated, e.g. "strides" or
            "kernel_size". This is only used to format error messages.

    Returns:
        A tuple of n integers.

    Raises:
        ValueError: If something other than an int/long or iterable thereof
            was passed.
    """
    if isinstance(value, int):
        return (value,) * n
    error_msg = (f'The `{name}` argument must be a tuple of {n} '
                 f'integers. Received: {value}')
    try:
        value_tuple = tuple(value)
    except TypeError:
        raise ValueError(error_msg)
    if len(value_tuple) != n:
        raise ValueError(error_msg)
    for single_value in value_tuple:
        try:
            int(single_value)
        except (ValueError, TypeError):
            error_msg += (f', including element {single_value} of '
                          f'type {type(single_value)}')
            raise ValueError(error_msg)
    return value_tuple

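# Usage sketch for normalize_tuple (hypothetical argument names): a scalar
# is broadcast to the requested length, an iterable is passed through, and
# a wrong-length iterable raises ValueError.
assert normalize_tuple(3, 2, 'strides') == (3, 3)
assert normalize_tuple((1, 2), 2, 'kernel_size') == (1, 2)
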
def get_hashtype(pwhash):
    """Try to figure out the type of a crypt(3) password hash."""
    # man 3 crypt
    if pwhash[0:3] == "$1$":
        return "MD5"
    if pwhash[0:3] == "$2$" or pwhash[0:4] == "$2a$":
        return "Blowfish"
    if pwhash[0:3] == "$5$":
        return "SHA-256"
    if pwhash[0:3] == "$6$":
        return "SHA-512"
    return "DES"

def get_pixdist_ratio(m_size, ant_rad):
    """Get the ratio between pixel number and physical distance.

    Returns the pixel-to-distance ratio, in pixels per physical meter.

    Parameters
    ----------
    m_size : int
        The number of pixels used along one dimension for the model
        (the model is assumed to be square)
    ant_rad : float
        The radius of the antenna trajectory during the scan, in meters

    Returns
    -------
    pix_to_dist_ratio : float
        The number of pixels per physical meter
    """
    # The model spans the diameter of the antenna trajectory (2 * ant_rad)
    pix_to_dist_ratio = m_size / (2 * ant_rad)
    return pix_to_dist_ratio

def untar(directory):
    """Build a command that untars data from stdin into the specified directory.

    :param directory: The directory to write files to.
    """
    return "tar -C %s -x" % directory

def standardize_axes_names(data_dict):
    """For all dataframes in the given dictionary, set the name of the index
    axes to "Patient_ID", because that's what they all are by that point, and
    set the name of the column axes to "Name".

    Parameters:
    data_dict (dict): The dataframe dictionary of the dataset.

    Returns:
    dict: The dataframe dictionary, with the dataframe axes' names
        standardized. Keys are str of dataframe names, values are
        pandas.DataFrame
    """
    # Loop over the keys so we can alter the values without any issues
    for name in data_dict.keys():
        df = data_dict[name]
        df.index.name = "Patient_ID"
        df.columns.name = "Name"
        data_dict[name] = df
    return data_dict

def silverman(n: int, ess: float) -> float:
    """Return Silverman's factor for KDE approximation.

    Args:
        n: The dimension of the space to construct the KDE for.
        ess: The ESS (effective sample size) of the samples.
    """
    return (ess * (n + 2) / 4) ** (-1 / (n + 4))

def expand_definition(memory_maps, name):
    """Recursively expand the '__based_on__' keys to create a 'flat'
    definition for the given MCU name.
    """
    definition = dict(memory_maps[name])  # get a copy of the dict
    try:
        base = definition.pop('__based_on__')
    except KeyError:
        pass
    else:
        definition.update(expand_definition(memory_maps, base))
    if '__name__' in definition:
        del definition['__name__']  # name was overwritten by lowest base
    definition['__name__'] = name
    return definition

def greatest_common_divisor(a: int, b: int) -> int:
    """Calculate the Greatest Common Divisor (GCD).

    >>> greatest_common_divisor(24, 40)
    8
    >>> greatest_common_divisor(1, 1)
    1
    >>> greatest_common_divisor(1, 800)
    1
    >>> greatest_common_divisor(11, 37)
    1
    >>> greatest_common_divisor(3, 5)
    1
    >>> greatest_common_divisor(16, 4)
    4
    """
    return b if a == 0 else greatest_common_divisor(b % a, a)

def test_even(value):
    """Return True if the value is even."""
    return value % 2 == 0

def compute_iom(box_a: list, box_b: list) -> float:
    """Compute the ratio of intersection area over the minimum box area."""
    # determine the (x, y)-coordinates of the intersection rectangle
    xa = max(box_a[0], box_b[0])
    ya = max(box_a[1], box_b[1])
    xb = min(box_a[2], box_b[2])
    yb = min(box_a[3], box_b[3])
    # compute the area of the intersection rectangle
    inter_area = max(0, xb - xa + 1) * max(0, yb - ya + 1)
    # compute the area of both the prediction and ground-truth rectangles
    box_a_area = (box_a[2] - box_a[0] + 1) * (box_a[3] - box_a[1] + 1)
    box_b_area = (box_b[2] - box_b[0] + 1) * (box_b[3] - box_b[1] + 1)
    return inter_area / min(box_a_area, box_b_area)

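# Usage sketch for compute_iom with hypothetical pixel boxes in
# (x_min, y_min, x_max, y_max) form: the inner box lies entirely inside
# the outer one, so intersection-over-minimum is exactly 1.0.
inner = [2, 2, 5, 5]
outer = [0, 0, 9, 9]
assert compute_iom(outer, inner) == 1.0
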
def merge_dictionaries(x, y):
    """Merge two dictionaries, with y overwriting x if a key collision is found.

    This is mainly useful for maintaining dictionary arguments to allow for
    more expressive & extensible arguments.
    https://stackoverflow.com/questions/38987/how-do-i-merge-two-dictionaries-in-a-single-expression-in-python-taking-union-o

    Args:
        x (dict): Input dictionary
        y (dict): Input dictionary

    Returns:
        The combined dictionary of x & y, with y taking preference on the
        occasion of a key collision
    """
    if x is None:
        x = {}
    if y is None:
        y = {}
    try:
        return {**x, **y}
    except TypeError:  # fall back to copy/update for dict-like objects
        z = x.copy()
        z.update(y)
        return z

def getGussVariables(project_vars: dict) -> tuple:
    """Collect from project_variables.csv the parameters related to the
    activation of the GUSS tool.

    Args:
        project_vars (dict): project variables collected from
            project_variables.csv.

    Raises:
        Exception: GUSS should be "yes" or "no" in project_variables.csv
        Exception: GUSS_parallel should be "yes" or "no" in project_variables.csv
        Exception: GUSS_parallel_threads must be an integer in project_variables.csv

    Returns:
        tuple: 3-element tuple containing

        - **guss** (*bool*): activation of the GUSS tool.
        - **guss_parallel** (*bool*): run the GUSS tool in parallel.
        - **guss_parallel_threads** (*int*): number of CPUs used to run the
          GUSS tool in parallel.
    """
    if project_vars["GUSS"].lower() in ["yes", "no"]:
        guss = project_vars["GUSS"].lower() == "yes"
    else:
        raise Exception('GUSS should be "yes" or "no"')
    if project_vars["GUSS_parallel"].lower() in ["yes", "no"]:
        guss_parallel = project_vars["GUSS_parallel"].lower() == "yes"
    else:
        raise Exception('GUSS_parallel should be "yes" or "no"')
    if isinstance(project_vars["GUSS_parallel_threads"], int):
        guss_parallel_threads = project_vars["GUSS_parallel_threads"]
    elif project_vars["GUSS_parallel_threads"].isdigit():
        guss_parallel_threads = int(project_vars["GUSS_parallel_threads"])
    else:
        raise Exception("GUSS_parallel_threads must be an integer")
    return guss, guss_parallel, guss_parallel_threads

def _static_idx(idx, size):
    """Helper function to compute the static slice start/limit/stride values."""
    assert isinstance(idx, slice)
    start, stop, step = idx.indices(size)
    if (step < 0 and stop >= start) or (step > 0 and start >= stop):
        return 0, 0, 1, False  # sliced to size zero
    if step > 0:
        return start, stop, step, False
    else:
        # Negative step: express the slice as an equivalent forward slice
        # plus a reversal flag.
        k = (start - stop - 1) % (-step)
        return stop + k + 1, start + 1, -step, True

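# Quick check of _static_idx on a hypothetical negative-step slice: for
# seq[8:2:-2] over size 10, the equivalent forward slice is start=4,
# limit=9, stride=2, followed by a reversal ([4:9:2] -> 4, 6, 8 -> 8, 6, 4).
assert _static_idx(slice(8, 2, -2), 10) == (4, 9, 2, True)
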
def prefix_connection_id(connection_id, parent_connection_id):
    """Used to distinguish connection ids when they have the same id as a
    parent_connection.
    """
    if len(connection_id) <= len(parent_connection_id):
        return parent_connection_id + connection_id
    return connection_id

def read(file):
    """Read the current contents of a file.

    :param file: the file to be read
    :return: the contents of the file
    """
    with open(str(file), "r") as f:
        return f.read()

def gg(d):
    """Map dict values to (ID, Gloss) tuples, or ("", "") for empty values."""
    return {k: (d[k][0][0], d[k][0][1]) if d[k] else ("", "") for k in d}

def response_error(error_code):
    """Build an HTTP error response."""
    err = ('HTTP/1.1 ' + error_code + '\r\n\r\n').encode('utf8')
    err += b'Sorry we could not fulfill your request.\r\n\r\n'
    return err

def canberra(v1, v2):
    """Compute the Canberra distance between two points.

    http://en.wikipedia.org/wiki/Canberra_distance
    """
    # d(p, q) = sum(|p_i - q_i| / (|p_i| + |q_i|)); the original had a
    # misplaced parenthesis in the denominator, abs(v[0] + abs(v[1])).
    return sum(abs(a - b) / (abs(a) + abs(b)) for a, b in zip(v1, v2))

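# Sanity check against the hand-computed Canberra distance for two
# hypothetical points: |1-3|/(1+3) + |2-2|/(2+2) = 0.5.
assert canberra([1, 2], [3, 2]) == 0.5
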
def recommend_test(test_param, is_norm):
    """Recommend a list of significance tests based on previous results.

    If normal, use the t test (other tests are also applicable).
    If not normal but the mean is a good measure of central tendency, use:
    - bootstrap test based on mean (t ratios) or medians
    - sign test
    - sign test calibrated by permutation (based on mean or median)
    - Wilcoxon signed-rank test
    - t test (may be okay given large samples)
    If not normal and highly skewed, use:
    - bootstrap test for median
    - sign test
    - sign test calibrated by permutation (based on median)
    - Wilcoxon signed-rank test

    @param test_param: "mean" or "median"
    @param is_norm: True or False
    @return: a list of recommended tests
    """
    if is_norm:
        return [
            ('t', 'The student t test is most appropriate for a normal sample and has the highest statistical power.'),
            ('bootstrap', 'The bootstrap test based on t ratios can be applied to a normal sample.'),
            ('permutation', 'The sign test calibrated by permutation based on mean difference is also appropriate for a normal sample, but its statistical power is relatively low due to loss of information.'),
            ('wilcoxon', 'The Wilcoxon signed-rank test can be used for a normal sample, but since it is a nonparametric test, it has relatively low statistical power. Also, the null hypothesis is that the pairwise difference has location 0.'),
            ('sign', 'The (exact) sign test can be used for a normal sample, but it has relatively low statistical power due to loss of information.'),
        ]
    if test_param == "mean":
        return [
            ('bootstrap', 'The bootstrap test based on t ratios does not assume normality, and thus is appropriate for testing for mean difference.'),
            ('permutation', 'The sign test calibrated by permutation based on mean difference is nonparametric and does not assume normality.'),
            ('wilcoxon', 'The Wilcoxon signed-rank test can be used for this case, but since it is a nonparametric test, it has relatively low statistical power. Also, the null hypothesis is that the pairwise difference has location 0.'),
            ('sign', 'The (exact) sign test can be used for this case, but it has relatively low statistical power due to loss of information. Also, the null hypothesis is that the median is 0.'),
            ('t', 'The student t test may be appropriate for non-normal data if the sample size is large enough, but the iid assumption must hold.'),
        ]
    return [
        ('bootstrap_med', 'The bootstrap test based on median is appropriate for testing for median.'),
        ('wilcoxon', 'The Wilcoxon signed-rank test is appropriate for comparing medians.'),
        ('permutation_med', 'The sign test calibrated by permutation based on median difference is appropriate for testing for median.'),
        ('sign', 'The sign test is appropriate for testing for median, but it has relatively low statistical power due to loss of information.'),
    ]

def in_corner(size, radius, x, y, base_offset=0):
    """Judge whether a point falls in a cut (rounded) corner of an icon."""
    x -= base_offset
    y -= base_offset
    center = (0, 0)
    if x < radius and y < radius:
        center = (radius, radius)
    elif x < radius and y > size - radius:
        center = (radius, size - radius)
    elif x > size - radius and y < radius:
        center = (size - radius, radius)
    elif x > size - radius and y > size - radius:
        center = (size - radius, size - radius)
    if center != (0, 0):
        # Outside the rounding circle means the point is in the cut corner.
        if (x - center[0]) ** 2 + (y - center[1]) ** 2 > radius ** 2:
            return True
    return False

def py__interpolice(ff, x):
    """Lagrange polynomial interpolation at x over the points of ff that lie
    within a window of 12 around x. Internal helper; use only through the
    public interpolation entry point.
    """
    res = 0.0
    for i in ff:
        if not (-12 < (i[0] - x) < 12):
            continue
        li = 1.0  # Lagrange basis polynomial for point i, evaluated at x
        for j in ff:
            if not (-12 < (j[0] - x) < 12):
                continue
            if i[0] != j[0]:
                z = (i[0] - j[0])
                li *= (x - j[0]) / z
        res += li * i[1]
    return res

def bacc(pos_acc, neg_acc):
    """Compute balanced accuracy: the mean of the positive- and
    negative-class accuracies.
    """
    return float(pos_acc + neg_acc) / 2

def by_name(text, hash_length):
    """Fast and shallow hash-representation validity probe."""
    hash_rep_length, base = hash_length, 16
    if len(text) != hash_rep_length:
        return False
    try:
        _ = int(text, base)
    except ValueError:
        return False
    return True

def get_voxel_coord(index, s):
    """Determine the (x, y, z) coordinates of a voxel in a 3D neighborhood,
    based on the provided integer voxel index and the lateral size of the
    neighborhood.

    :type index: int
    :param index: flat voxel index within the neighborhood
    :type s: int
    :param s: lateral size of the 3D neighborhood
    :return: (x, y, z) coordinates of the voxel
    """
    s_squared = s ** 2
    z = index // s_squared
    remainder = index - (z * s_squared)
    y = remainder // s
    x = remainder - (y * s)
    return x, y, z

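# Round-trip sketch for get_voxel_coord with a hypothetical 4x4x4
# neighborhood: flat index 27 = 1*16 + 2*4 + 3 maps to (x, y, z) = (3, 2, 1).
assert get_voxel_coord(27, 4) == (3, 2, 1)
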
def getidx_ordered(p, pmax, idmax):
    """Give ids (up to idmax) for job p of pmax jobs to do."""
    return [i for i in range(p, idmax, pmax)]

def _SymbolsWithSameOffset(profiled_symbol, name_to_symbol_info, offset_to_symbol_info):
    """Expand a profiled symbol to include all symbols which share an offset
    with that symbol.

    Args:
        profiled_symbol: the string symbol name to be expanded.
        name_to_symbol_info: {name: [symbol_info1], ...}, as returned by
            GetSymbolInfosFromBinary
        offset_to_symbol_info: {offset: [symbol_info1, ...], ...}

    Returns:
        A list of symbol names, or an empty list if profiled_symbol was not
        in name_to_symbol_info.
    """
    if profiled_symbol not in name_to_symbol_info:
        return []
    symbol_infos = name_to_symbol_info[profiled_symbol]
    expanded = []
    for symbol_info in symbol_infos:
        expanded += (s.name for s in offset_to_symbol_info[symbol_info.offset])
    return expanded

def _unquote(string):
    """Remove optional quotes (single or double) from the string.

    :type string: str or unicode
    :param string: an optionally quoted string
    :rtype: str or unicode
    :return: the unquoted string (or the input string if it wasn't quoted)
    """
    if not string:
        return string
    if string[0] in '"\'':
        string = string[1:]
    if string[-1] in '"\'':
        string = string[:-1]
    return string

def fromUnicodeSymbols(s):
    """Convert a braille string (with unicode symbols) to the representation
    used in this program. Used for debugging and as a tool to integrate the
    Nemeth code.
    """
    s_ = s.split(" ")
    retObj = []
    for wrd in s_:
        word_repr = []
        for ch in wrd:
            # Braille patterns live at U+2800..U+28FF; zero-pad the hex code
            # point to 4 digits, then drop the leading "28" so only the
            # dot-pattern byte remains.
            hex_val = hex(ord(ch)).replace("0x", "")
            while len(hex_val) < 4:
                hex_val = "0" + hex_val
            hex_val = hex_val[2:]
            # Render the dot byte as a reversed, zero-padded 6-bit string.
            raise_dot = "{0:b}".format(int(hex_val, 16))[::-1]
            while len(raise_dot) < 6:
                raise_dot += "0"
            word_repr.append(raise_dot)
        retObj.append(word_repr)
    return retObj

def version_to_sortkey(v):
    """Map a dotted version string to a sortable integer.

    Version limits: 'a.b.c.d', where each component is < 2^5.
    """
    MAX_SIZE = 4
    MAX_VERSION_SIZE_2_POW = 5
    v = v.split('.')
    res = 0
    for ind, val in enumerate(v):
        res += int(val) << ((MAX_SIZE - ind) * MAX_VERSION_SIZE_2_POW)
    return res

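# Ordering sketch: version_to_sortkey should sort dotted versions
# numerically, so '1.10.0' ranks above '1.9.0' even though it compares
# lower as a plain string.
assert version_to_sortkey('1.10.0') > version_to_sortkey('1.9.0')
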
def _parse_window_bits(bits):
    """Return the parsed integer value iff the given string conforms to the
    grammar of the window bits extension parameters.
    """
    if bits is None:
        raise ValueError('Value is required')
    # For non-integer values such as "10.0", ValueError will be raised.
    int_bits = int(bits)
    # First condition is to drop the leading-zero case, e.g. "08".
    if bits != str(int_bits) or int_bits < 8 or int_bits > 15:
        raise ValueError('Invalid value: %r' % bits)
    return int_bits

def get_memory_limit(component_limit, overall_limit):
    """Return the minimum of the component and overall limits, or None if
    neither is set.
    """
    limits = [limit for limit in [component_limit, overall_limit]
              if limit is not None]
    return min(limits) if limits else None

def get_entries_num(used, available):
    """Get the number of entries that need to be created for the 'used'
    counter to reach one percent.
    """
    # Integer division keeps the result an entry count (plain / would
    # return a float in Python 3).
    return (used + available) // 100 + 1

def remove(lst, *vals):
    """Return a copy of list `lst` with values `vals` removed."""
    delset = set(vals)
    return [i for i in lst if i not in delset]

def add(vec1, vec2):
    """Add two vectors.

    Adds a length-n list to another length-n list.

    Args:
        vec1 (list): First vector.
        vec2 (list): Second vector.

    Returns:
        Sum of vec1 and vec2.
    """
    assert len(vec1) == len(vec2)
    return [a + b for a, b in zip(vec1, vec2)]

def to_pascalcase(s: str) -> str:
    """Convert a snake_case python identifier string to camel case (the
    first word keeps its case, despite the function's name).

    Examples:
        >>> to_pascalcase('my_identifier')
        'myIdentifier'
        >>> to_pascalcase('my_long_identifier')
        'myLongIdentifier'
        >>> to_pascalcase('crab')
        'crab'
    """
    first, *other = s.split('_')
    return f'{first}{"".join([word.title() for word in other])}'

def CalcVersionValue(ver_str="0.0.0"):
    """Calculates a version value from the provided dot-formatted string.

    1) SPECIFICATION: Version value calculation AA.BBB.CCC
       - major values: < 1 (i.e. 0.0.85 = 0.850)
       - minor values: 1 - 999 (i.e. 0.1.85 = 1.850)
       - micro values: >= 1000 (i.e. 1.1.85 = 1001.850)

    @keyword ver_str: Version string to calculate value of
    """
    ver_str = ''.join([char for char in ver_str
                       if char.isdigit() or char == '.'])
    ver_lvl = ver_str.split(u".")
    if len(ver_lvl) < 3:
        return 0
    major = int(ver_lvl[0]) * 1000
    minor = int(ver_lvl[1])
    if len(ver_lvl[2]) <= 2:
        ver_lvl[2] += u'0'
    micro = float(ver_lvl[2]) / 1000
    return float(major) + float(minor) + micro

def is_ascii(s):
    """Check whether all characters in the string are ASCII."""
    return all(ord(c) < 128 for c in s)

def convert_militar_time_format_to_int_minutes(ranges):
    """Convert a list of 24-hour-format time ranges to a list of
    minutes-since-midnight ranges.

    >>> convert_militar_time_format_to_int_minutes([['9:00', '10:00'], ['12:00', '13:00']])
    [[540, 600], [720, 780]]
    """
    result = []
    for start, end in ranges:
        hours, minutes = start.split(':')
        x1 = int(hours) * 60 + int(minutes)
        hours, minutes = end.split(':')
        x2 = int(hours) * 60 + int(minutes)
        result.append([x1, x2])
    return result

def isstruct(ob):  # SSDF compatibility
    """isstruct(ob)

    Returns whether the given object is an SSDF struct.
    """
    if hasattr(ob, "__is_ssdf_struct__"):
        return bool(ob.__is_ssdf_struct__)
    else:
        return False

def tile_coords_and_zoom_to_quadKey(x, y, zoom):
    """Create a quadkey for use with certain tileservers that use them."""
    quadKey = ''
    for i in range(zoom, 0, -1):
        digit = 0
        mask = 1 << (i - 1)
        if (x & mask) != 0:
            digit += 1
        if (y & mask) != 0:
            digit += 2
        quadKey += str(digit)
    return quadKey

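# Worked quadkey example (hypothetical tile): at zoom 3, tile (3, 5) has
# bit patterns x=011 and y=101; interleaving one bit per level from the
# most significant downward gives the digits 2, 1, 3.
assert tile_coords_and_zoom_to_quadKey(3, 5, 3) == '213'
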
def calculate_carbon_from_biovolume(invalue, category):
    """Calculate the cellular carbon from the given biovolume value based on
    which category the image is assigned and how large it is. Conversion
    formulas are from Table 4 in Menden-Deuer and Lessard (2000).

    inputs:
        invalue (float) = the biovolume value from the features file,
            converted to microns
        category (str) = the category to which the image was assigned

    returns:
        carbon_value (float) = the carbon calculated from the formulas
    """
    diatoms = ['Asterionellopsis', 'Centric', 'Ch_simplex', 'Chaetoceros',
               'Corethron', 'Cylindrotheca', 'Cymatosira', 'DactFragCeratul',
               'Ditlyum', 'Eucampia', 'Eucampiacornuta', 'Guinardia',
               'Hemiaulus', 'Leptocylindrus', 'Licmophora', 'Melosira',
               'Odontella', 'Pleurosigma', 'Pseudonitzschia', 'Rhizosolenia',
               'Skeletonema', 'Thalassionema', 'Thalassiosira', 'centric10',
               'pennate']
    if category in diatoms:
        if invalue > 3000.:  # diatoms > 3000 cubic microns (um**3)
            carbon_value = (10 ** (-0.933)) * (invalue ** 0.881)
        else:
            carbon_value = (10 ** (-0.541)) * (invalue ** 0.811)
    else:
        if invalue < 3000.:  # protist plankton < 3000 cubic microns (um**3)
            carbon_value = (10 ** (-0.583)) * (invalue ** 0.860)
        else:
            carbon_value = (10 ** (-0.665)) * (invalue ** 0.939)
    return carbon_value

def if_function(condition, true_result, false_result):
    """Return true_result if condition is a true value, and false_result
    otherwise.

    >>> if_function(True, 2, 3)
    2
    >>> if_function(False, 2, 3)
    3
    >>> if_function(3 == 2, 'equal', 'not equal')
    'not equal'
    >>> if_function(3 > 2, 'bigger', 'smaller')
    'bigger'
    """
    if condition:
        return true_result
    else:
        return false_result

def map_row_to_schema(row):
    """Associate a value from a Stat CSV export with the correct identifier
    from the BQ schema.

    When first adding a client to this system we may have historical data
    that we want to import. That data comes from Stat's ranking export. We
    need to map values from Stat's data into the schema we've designed to
    interface with Data Studio. This function handles that mapping.

    Args:
        row: A dict extracted from Stat's ranking CSV, that corresponds with
            a single observation of a keyword ranking.

    Returns:
        A dict representing data for a single keyword observation that
        complies with the BQ schema of our client tables. Keys that were
        missing from Stat's response get None/NULL values.
    """
    return {
        "timestamp": row["Date"] + " 00:00",
        "keyword": row["Keyword"],
        "market": row["Market"],
        "location": row["Location"],
        "device": row["Device"],
        "rank": row["Rank"],
        "base_rank": row["Rank"],
        "url": row["URL"] if row["URL"] else None,
        "advertiser_competition": row["Advertiser Competition"],
        "gms": row["Global Monthly Searches"],
        "rms": row["Regional Monthly Searches"],
        "cpc": row["CPC"],
        "tags": [tag.strip() for tag in row["Tags"].split("/")] if row["Tags"] else [],
    }

def color_gradient(ratio, min_col='#FFFFFF', max_col='#D65F5F', output_hex=True):
    """Calculate a proportional mix between two colors."""
    min_col_hex = min_col.lstrip('#')
    min_col_rgb = tuple(int(min_col_hex[i:i + 2], 16) for i in (0, 2, 4))
    max_col_hex = max_col.lstrip('#')
    max_col_rgb = tuple(int(max_col_hex[i:i + 2], 16) for i in (0, 2, 4))
    mix_col_rgb = [int(ratio * x + (1 - ratio) * y)
                   for x, y in zip(max_col_rgb, min_col_rgb)]
    if output_hex:
        return ('#%02x%02x%02x' % tuple(mix_col_rgb)).upper()
    else:
        return mix_col_rgb

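# Endpoint sketch for color_gradient: ratio 0.0 should return min_col and
# ratio 1.0 should return max_col (default colors shown).
assert color_gradient(0.0) == '#FFFFFF'
assert color_gradient(1.0) == '#D65F5F'
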
def splitUIAElementAttribs(attribsString):
    """Split a UIA Element attributes string into a dict of attribute keys
    and values.

    An invalid attributes string does not cause an error, but strange results
    may be returned.

    @param attribsString: The UIA Element attributes string to convert.
    @type attribsString: str
    @return: A dict of the attribute keys and values, where values are strings
    @rtype: {str: str}
    """
    attribsDict = {}
    tmp = ""
    key = ""
    inEscape = False
    for char in attribsString:
        if inEscape:
            tmp += char
            inEscape = False
        elif char == "\\":
            inEscape = True
        elif char == "=":
            # We're about to move on to the value, so save the key and clear tmp.
            key = tmp
            tmp = ""
        elif char == ";":
            # We're about to move on to a new attribute.
            if key:
                # Add this key/value pair to the dict.
                attribsDict[key] = tmp
            key = ""
            tmp = ""
        else:
            tmp += char
    # If there was no trailing semi-colon, we need to handle the last attribute.
    if key:
        # Add this key/value pair to the dict.
        attribsDict[key] = tmp
    return attribsDict

def build_goat_json(mongodb_result, hug_timer):
    """Reduce the duplicate lines in the 'get_goat_movies' function."""
    combined_json_list = []
    for result in mongodb_result:
        combined_json_list.append({
            'imdbID': result['imdbID'],
            'year': result['year'],
            'title': result['title'],
            'upvotes': result['goat_upvotes'],
            'downvotes': result['goat_downvotes'],
            'success': True,
            'valid_key': True,
            'took': float(hug_timer),
        })
    return combined_json_list

def _split_lines(text):
    """Split multi-line text content into a list of stripped, non-empty
    strings.
    """
    lines = [l.strip() for l in text.split('\n') if l.strip()]
    return lines

def split_list_by(lst, key):
    """Split a list by the callable `key`: a falsy result puts the item in
    the first list, a truthy result puts it in the second list.
    """
    first, second = [], []
    for item in lst:
        if key(item):
            second.append(item)
        else:
            first.append(item)
    return (first, second)

def QuickSort(lol, start, stop):
    """Implements the QuickSort algorithm.

    Parameters
    ----------
    lol : list[list[str]]
    start : int
    stop : int

    Returns
    -------
    `lol` sorted alphabetically by the second item in each inner list.
    """
    if start < stop:
        i = start  # i is going to be the correct position for the pivot
        pivot = lol[stop][1]
        for j in range(start, stop):
            if lol[j][1] <= pivot:
                lol[i], lol[j] = lol[j], lol[i]
                i += 1
        lol[i], lol[stop] = lol[stop], lol[i]  # put pivot at the right position
        # Recursion order heuristic based on the pivot's final position.
        if i > len(lol) / 2:
            lol = QuickSort(lol, i + 1, stop)
            lol = QuickSort(lol, start, i - 1)
        else:
            lol = QuickSort(lol, start, i - 1)
            lol = QuickSort(lol, i + 1, stop)
    return lol

def online_variance(data):
    """A numerically stable (Welford) algorithm for calculating variance.

    http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#On-line_algorithm
    """
    n = 0
    mean = 0
    M2 = 0
    for x in data:
        n = n + 1
        delta = x - mean
        mean = mean + delta / n
        M2 = M2 + delta * (x - mean)
    variance_n = M2 / n        # population variance
    variance = M2 / (n - 1)    # sample variance
    return (variance, variance_n)

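# Cross-check of online_variance against the statistics module for a
# hypothetical sample; Welford's recurrence should match the two-pass
# formulas to floating-point accuracy.
import statistics
sample = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
var, var_n = online_variance(sample)
assert abs(var - statistics.variance(sample)) < 1e-9
assert abs(var_n - statistics.pvariance(sample)) < 1e-9
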
def parse_taxa_report(local_path):
    """Return a dict of taxa_name to read_counts."""
    out, abundance_sum = {}, 0
    with open(local_path) as taxa_file:
        for line_num, line in enumerate(taxa_file):
            line = line.strip()
            tkns = line.split('\t')
            if not line or len(tkns) < 2:
                continue
            if len(tkns) == 2:
                out[tkns[0]] = float(tkns[1])
                abundance_sum += float(tkns[1])
            else:
                if line_num == 0:
                    continue  # skip the header row
                out[tkns[1]] = float(tkns[3])
                abundance_sum += float(tkns[3])
    # Keep only species-level entries without strain-level annotation.
    out = {k: v for k, v in out.items() if 's__' in k and 't__' not in k}
    return out

def QLineEdit_parseVector3(lineedit):
    """Return a 3-component real vector as a list from comma-separated text
    in a QLineEdit widget, or None if invalid.
    """
    try:
        text = lineedit.text()
        values = [float(value) for value in text.split(",")]
        if len(values) == 3:
            return values
    except ValueError:
        pass
    return None

def csv2json(csvdata, header=True):
    """Convert CSV data to JSON (i.e., a list of dictionaries).

    csvdata = string containing a CSV file,
              e.g., open('filename.csv').read()
    header  = whether the data contains a header row (if not, output fields
              are named 'field0,field1,etc')

    Returns a list of dictionaries, with each dictionary corresponding to a
    row of data from the CSV data.
    """
    if not csvdata:
        return []  # no CSV data found
    row1 = csvdata.split('\n')[0]
    if header:
        fldnames = row1.split(',')  # get field names from CSV header
    else:
        # no CSV header included, so make up field names
        fldnames = ['field' + str(fieldno)
                    for fieldno, _ in enumerate(row1.split(','))]
    jsondata = []
    firstline = True
    for row in csvdata.split('\n'):
        if not row:
            continue  # skip blank lines
        if firstline and header:
            firstline = False
            continue
        values = row.split(',')
        rowdict = dict()
        for fieldno, fldname in enumerate(fldnames):
            rowdict[fldname] = values[fieldno]
        jsondata.append(rowdict)
    return jsondata

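# Usage sketch for csv2json with inline CSV text (hypothetical data);
# note that this simple splitter does not handle quoted commas.
rows = csv2json('name,age\nada,36\ngrace,45')
assert rows == [{'name': 'ada', 'age': '36'}, {'name': 'grace', 'age': '45'}]
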
def int_if_possible(val):
    """Return the integer value of val if conversion succeeds, else val."""
    if type(val) is list or type(val) is tuple:
        val = val[0]
    try:
        i = int(val)
    except ValueError:
        i = val
    return i

# def make_info_dict(callers, records, pos1, pos2):
#     """ generate 'median' results for info fields from all records """
#     callermap = {
#         "delly": {"fields": ["DV", "RV"], "tumor": 0},
#         "svaba": {"fields": ["SR", "DR"], "tumor": 1},
#         "gridss": {"fields": ["RP", "SR"], "tumor": 1},
#         "brass": {"fields": ["PS", "RC"], "tumor": 1},
#         "smoove": {"fields": ["SR", "PE"], "tumor": 0},
#     }
#     # fields = ['CHR2', 'END', 'SVTYPE', 'SVLEN'] + ["SR", "DR"] + ["DV", "RV"] + ["RP"]
#     fields = [x["fields"] for x in callermap.values()]
#     fields = [item for sublist in fields for item in sublist]
#     info = {}
#     for field in fields:
#         answers = []
#         for caller, record in zip(callers, records):
#             if caller in callermap.keys() and field in callermap[caller]["fields"]:
#                 if field in record.format:
#                     answers.append([caller, int_if_possible(record.samples[callermap[caller]["tumor"]][field])])
#                 elif field in record.info:
#                     answers.append([caller, int_if_possible(record.info[field])])
#         nanswers = len(answers)
#         if nanswers > 0:
#             # sorted_answers = sorted(answers)
#             # # doesn't quite deal with even #s correctly - can't average strings
#             # median_pos = int(nanswers / 2)
#             # median_answer = sorted_answers[median_pos]
#             # if not median_answer == 0:
#             #     info[field] = median_answer
#             for a in answers:
#                 info[a[0] + "_" + field] = a[1]
#     if "SVTYPE" in info and info["SVTYPE"] in ["DUP", "DUP:TANDEM", "DEL", "INV"]:
#         if "SVLEN" not in info:
#             info["SVLEN"] = pos2 - pos1
#     return info

def calc_density(Wfn):
    """Calculate the total atomic density.

    :param Wfn: List, each element is a wavefunction component starting with psi_+2
    :return: n, the total atomic density
    """
    return sum(abs(wfn) ** 2 for wfn in Wfn)

def proper_suffixes(word):
    """Return a list of nonempty proper suffixes of the given word (sorted
    in decreasing length).
    """
    return [word[i:] for i in range(1, len(word))]

def string_like(value, name, optional=False, options=None, lower=True):
    """Check if the object is string-like and raise if not.

    Parameters
    ----------
    value : object
        Value to verify.
    name : str
        Variable name for exceptions.
    optional : bool
        Flag indicating whether None is allowed.
    options : tuple[str]
        Allowed values for input parameter `value`.
    lower : bool
        Convert all case-based characters in `value` into lowercase.

    Returns
    -------
    str
        The validated input

    Raises
    ------
    TypeError
        If the value is not a string or None when optional is True.
    ValueError
        If the input is not in ``options`` when ``options`` is set.
    """
    if value is None:
        return None
    if not isinstance(value, str):
        extra_text = " or None" if optional else ""
        raise TypeError("{0} must be a string{1}".format(name, extra_text))
    if lower:
        value = value.lower()
    if options is not None and value not in options:
        extra_text = "If not None, " if optional else ""
        options_text = "'" + "', '".join(options) + "'"
        msg = "{0}{1} must be one of: {2}".format(
            extra_text, name, options_text
        )
        raise ValueError(msg)
    return value

def metade(preco, moeda=''):
    """Divide the given value by 2.

    :param preco: value to be divided
    :param moeda: currency symbol to display
    :return: half of the value
    """
    final = preco / 2
    if moeda == '':
        return f'{final}'.replace('.', ',')
    else:
        return f'{moeda} {final:.2f}'.replace('.', ',')

def ipv4_range_type(string):
    """Validate an IPv4 address or address range."""
    import re
    ip_format = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
    if not re.match("^{}$".format(ip_format), string):
        if not re.match("^{ip_format}-{ip_format}$".format(ip_format=ip_format), string):
            raise ValueError
    return string

def cosmic_link(variant_obj):
    """Compose a link to the COSMIC database.

    Args:
        variant_obj(scout.models.Variant)

    Returns:
        url_template(str): Link to the COSMIC database if a cosmic id is present
    """
    cosmic_ids = variant_obj.get("cosmic_ids")
    if not cosmic_ids:
        return None
    cosmic_id = str(cosmic_ids[0])
    if cosmic_id.startswith("COS"):
        url_template = "https://cancer.sanger.ac.uk/cosmic/search?q={}"
    else:
        url_template = "https://cancer.sanger.ac.uk/cosmic/mutation/overview?id={}"
    return url_template.format(cosmic_id)

def findDataType(param):
    """Convert the argument passed to its respective data type, namely int,
    float, or bytes (for anything non-numeric).

    Parameters
    ----------
    param : str
        The parameter in the query
    """
    try:
        num = int(param)
    except ValueError:
        try:
            num = float(param)
        except ValueError:
            num = str.encode(param)  # non-numeric values become UTF-8 bytes
    return num

def preprocessing(settings):
    """Check all settings."""
    columns = settings.get('columns', [])
    if not isinstance(columns, list):
        columns = [columns]
    if len(columns) == 0:
        raise Exception('`columns` must not be empty.')
    asc = settings.get('ascending', [])
    if not isinstance(asc, list):
        asc = [asc]
    n1, n2 = len(columns), len(asc)
    if n1 > n2:
        # Pad `ascending` with True up to the number of columns (the
        # original used range(n2 - n1), which is empty when n1 > n2).
        asc = asc + [True for _ in range(n1 - n2)]
    elif n2 > n1:
        asc = asc[:n1]
    settings['columns'] = columns
    settings['ascending'] = asc
    return settings

def ensure_tuple_size(tup, dim):
    """Return a copy of `tup` with `dim` values, either shortened or padded
    with zeros as necessary.
    """
    tup = tuple(tup) + (0,) * dim
    return tup[:dim]

def con_frac_per(n, d):
    """Convert a fraction n/d into a percentage."""
    x = n / d
    return x * 100

def count_bits_set(n: int) -> int:
    """
    >>> count_bits_set(0b101010101)
    5
    >>> count_bits_set(2 << 63)
    1
    >>> count_bits_set((2 << 63) - 1)
    64
    """
    return format(n, 'b').count('1')

def class_name(obj):
    """Return the name of an object's class as a string."""
    return obj.__class__.__name__

def calc_nraw(reg_dict):
    """Return the number of raw frames used in a depth frame.

    Parameters
    ----------
    reg_dict : dict
        The dictionary that contains all the register information

    Returns
    -------
    nraw : int
        The number of raw frames in a depth sequence
    """
    return reg_dict["PHASE_COUNT"][2]

def convert_to_xy(bounding_box):
    """Convert an (x, y, width, height) bounding box to
    (x_min, x_max, y_min, y_max).

    :param bounding_box: box as (x, y, width, height)
    :return: (x_min, x_max, y_min, y_max)
    """
    x_min = bounding_box[0]
    x_max = bounding_box[0] + bounding_box[2]
    y_min = bounding_box[1]
    y_max = bounding_box[1] + bounding_box[3]
    return x_min, x_max, y_min, y_max

def point_Not_In_List(point, list):
    """Return True if the point is not (approximately) in the list."""
    for l in list:
        if abs(point[0] - l[0]) < 1 and abs(point[1] - l[1]) < 1:
            return False
    return True

def MatchAlignedDGP(dgp, idxmap_aligne2seq, posindexmap, aligned_toposeq):  # {{{
    """Match dgp (a list of tuples) to the aligned toposeq.

    posindexmap is the index map from the shrinked seq to the original seq.
    idxmap_aligne2seq is a dictionary of index map from the original
    (non-shrinked) MSA to the gapless seq.
    """
    aligned_dgp = []
    lenAlignedSeq = len(aligned_toposeq)
    isShrink = len(posindexmap) != 0
    # convert dgp into a dictionary keyed by sequence index
    dgp_dt = {}
    for (idx, dg) in dgp:
        dgp_dt[idx] = dg
    for j in range(lenAlignedSeq):
        if aligned_toposeq[j] != '-':
            if isShrink:
                j_origseq = idxmap_aligne2seq[posindexmap[j]]
            else:
                j_origseq = idxmap_aligne2seq[j]
            try:
                dg = dgp_dt[j_origseq]
                aligned_dgp.append((j, dg))
            except KeyError:
                pass
    return aligned_dgp

def make_string(d, key_list):
    """Make a ';'-joined string from dictionary values using keys given as a
    list.
    """
    return ';'.join([str(d[k]) for k in key_list])

def normalizeIndex(value):
    """Normalize an index.

    * **value** must be an ``int`` or ``None``.
    * Returned value is the same type as the input value.
    """
    if value is not None:
        if not isinstance(value, int):
            raise TypeError("Indexes must be None or integers, not %s."
                            % type(value).__name__)
    return value

def in_bisect_recursion(word_list: list, value: str) -> bool:
    """Take a sorted list and a target value and return True if the word is
    in the list and False if it's not.
    """
    if len(word_list) == 0:
        return False
    middle = len(word_list) // 2
    if word_list[middle] == value:
        return True
    elif word_list[middle] > value:
        # Search the first half
        return in_bisect_recursion(word_list[:middle], value)
    else:
        # Search the second half
        return in_bisect_recursion(word_list[middle + 1:], value)

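# Usage sketch for in_bisect_recursion on a hypothetical sorted word list;
# the list must already be sorted for the bisection to be valid.
words = ['ant', 'bee', 'cat', 'dog', 'emu']
assert in_bisect_recursion(words, 'cat') is True
assert in_bisect_recursion(words, 'fox') is False
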
def quick_onePy(lis):
    """One quicksort partition step, Python style: split around the first
    element and return the partitioned list plus the pivot's index.
    """
    key = lis[0]
    smLis = []  # elements <= pivot
    bgLis = []  # elements > pivot
    for n in lis[1:]:
        if n <= key:
            smLis.append(n)
        else:
            bgLis.append(n)
    return smLis + [key] + bgLis, len(smLis)

def make_rooted(graph, u, memo=None):
    """Returns: a tree in the format (label, children) where children is a
    list of trees.
    """
    if memo is None:
        memo = set()
    memo.add(u)
    children = [make_rooted(graph, v, memo) for v in graph[u] if v not in memo]
    return (u, children)

def _update_key_number(key, d, sep='.', offset=0):
    """Update key number pairs.

    Although the original config can accept repeated header keys, a python
    dictionary cannot, so an integer suffix is added.
    """
    _num = str(sum(1 for k in d if k.startswith(key)) + offset)
    return key + sep + _num

def default_result_verbose(d):
    """Convert the terse default result `dict` returned by
    :func:`prepare_default_result_dict` into a more verbose version.
    """
    v = {
        "key": d["k"],
        "done": d["d"],
        "nodes": [{
            "id": n["i"].decode("utf8"),
            "address": n["a"].decode("utf8"),
            "expired": n["x"],
        } for n in d["n"]],
    }
    if "r" in d:
        v["result"] = d["r"]
    return v

def is_dict_like(obj):
    """Check if the object is dict-like.

    Parameters
    ----------
    obj : The object to check.

    Returns
    -------
    is_dict_like : bool
        Whether `obj` has dict-like properties.

    Examples
    --------
    >>> is_dict_like({1: 2})
    True
    >>> is_dict_like([1, 2, 3])
    False
    """
    return hasattr(obj, '__getitem__') and hasattr(obj, 'keys')

def addReference(inData, reference):
    """Add a DOI reference to the record's relatedIdentifiers if it is not
    already present; return the updated copy, or None if nothing was added.
    """
    from copy import deepcopy
    data = deepcopy(inData)
    existing_refs = [x for x in data['relatedIdentifiers']
                     if x['relationType'] == 'References']
    ref_list = [x['relatedIdentifier'] for x in existing_refs]
    if reference not in ref_list:
        print(reference, 'is NOT in existing references, adding it.')
    else:
        print(reference, 'is in existing references, doing nothing.')
        return None  # temporary
    r = {"relatedIdentifier": reference,
         "relatedIdentifierType": 'DOI',
         "relationType": 'References'}
    data['relatedIdentifiers'].append(r)
    return data

def appear_only_at_sentence_beginning(word, title, sents):
    """Return True if the token appears in the text only at the beginning of
    sentences.

    >>> title = [u"Feng", u"Chao", u"Liang", u"Blah"]
    >>> doc = [[u"Feng", u"Chao", u"Liang", u"is", u"in", u"Wuhan", u"."], [u"Chao", u"Liang", u"is", u"not", u"."], [u"Liang", u"Chao", u"is", u"not", u"."]]
    >>> appear_only_at_sentence_beginning(u"Feng", title, doc)
    True
    >>> appear_only_at_sentence_beginning(u"Chao", title, doc)
    False
    >>> appear_only_at_sentence_beginning(u"Blah", title, doc)
    False
    """
    assert word in title, "The word should be a title word"
    appear_at_sentence_beginning = False
    for sent in sents:
        sent_start = True
        for w in sent:
            if sent_start and w == word and word[0].isupper():
                appear_at_sentence_beginning = True
            elif w == word:
                # appeared capitalized in the middle of a sentence
                return False
            sent_start = False
    return appear_at_sentence_beginning

def getAllParents(node, includeNode=False):
    """Return all parents of a node.

    Args:
        node: A node to find parents for
        includeNode: A bool, whether to include the given node in the result

    Returns:
        A list of nodes
    """
    if isinstance(node, str):
        # For a '|'-delimited path string, build each ancestor path.
        split = node.split('|')
        return ['|'.join(split[:i]) for i in reversed(range(2, len(split)))]
    parents = []
    parent = node.getParent()
    if parent is not None:
        parents.append(parent)
        parents.extend(getAllParents(parent))
    if includeNode:
        parents.insert(0, node)
    return parents

def opu_version(config_d: dict) -> str:
    """Given an OPU config dict, return a string with OPU name, version,
    location, and core information.
    """
    opu_name = config_d.get('name', "NA")
    opu_version_ = config_d.get('version', "NA")
    opu_location = config_d.get('location', "NA")
    version = f"OPU {opu_name}-{opu_version_}-{opu_location}; "
    opu_type = config_d.get('core_type', "NA")
    opu_core_version = config_d.get('core_version', "NA")
    version += f"core type {opu_type}, core version {opu_core_version}"
    return version

def _process_plotsummary(x):
    """Process a plot (contributed by Rdian06)."""
    xauthor = x.get('author')
    xplot = x.get('plot', u'').strip()
    if xauthor:
        xplot += u'::%s' % xauthor
    return xplot

def fft_bin_to_hz(n_bin, sample_rate_hz, fft_size):
    """Convert FFT bin index to frequency in Hz.

    Args:
        n_bin (int or float): The FFT bin index.
        sample_rate_hz (int or float): The sample rate in Hz.
        fft_size (int or float): The FFT size.

    Returns:
        The value in Hz.
    """
    n_bin = float(n_bin)
    sample_rate_hz = float(sample_rate_hz)
    fft_size = float(fft_size)
    return n_bin * sample_rate_hz / (2.0 * fft_size)

def turn_weight_function_length(v, u, e, pred_node):
    """Weight function used in a modified version of the Dijkstra path
    algorithm.

    Weight is calculated as the sum of the edge length weight and the turn
    length weight (turn length weight keyed by predecessor node). This
    version of the function takes edge lengths keyed with 'length'.

    Args:
        v (var): edge start node
        u (var): edge end node
        e (dict): edge attribute dictionary
        pred_node (var): predecessor node

    Returns:
        calculated edge weight (float)
    """
    if pred_node is None:
        weight = e[0]['length']
    else:
        weight = e[0]['length'] + e[0]['turn_length'][pred_node]
    return weight

def _is_interesting_cell(cell):
    """See if a cell value looks interesting.

    @param cell (XLM_Object) The cell to check.

    @return (boolean) True if the cell value is interesting, False if not.
    """
    cell_str = str(cell).replace('"', '').strip()
    return len(cell_str) > 0

def remove_last_occurence(s, to_remove):
    """Remove the last occurrence of :obj:`to_remove` from :obj:`s`."""
    li = s.rsplit(to_remove, 1)
    return ''.join(li)

def rec_linear_search(sequence: list, low: int, high: int, target: int) -> int:
    """A pure Python implementation of a recursive linear search algorithm.

    :param sequence: a collection with comparable items (sorted items not
        required, as in Linear Search)
    :param low: Lower bound of the array
    :param high: Higher bound of the array
    :param target: The element to be found
    :return: Index of the key or -1 if key not found

    Examples:
    >>> rec_linear_search([0, 30, 500, 100, 700], 0, 4, 0)
    0
    >>> rec_linear_search([0, 30, 500, 100, 700], 0, 4, 700)
    4
    >>> rec_linear_search([0, 30, 500, 100, 700], 0, 4, 30)
    1
    >>> rec_linear_search([0, 30, 500, 100, 700], 0, 4, -6)
    -1
    """
    if not (0 <= high < len(sequence) and 0 <= low < len(sequence)):
        raise Exception("Invalid upper or lower bound!")
    if high < low:
        return -1
    if sequence[low] == target:
        return low
    if sequence[high] == target:
        return high
    return rec_linear_search(sequence, low + 1, high - 1, target)

def is_blank(text: str) -> bool:
    """Return whether the text is blank."""
    return len(text.strip()) == 0

def create_request_url(title):
    """Replace space characters with '+' to form a suitable query string for
    the API.
    """
    q_string = title.replace(' ', '+')
    # NOTE: the original concatenated the two parameters with no separator;
    # '&' is assumed to be the intended one here.
    return f"https://google-search3.p.rapidapi.com/api/v1/search/q={q_string}&num=2"

def find(function, list):
    """Return the first item in the list for which function(item) is True,
    None otherwise.
    """
    for item in list:
        if function(item) == True:
            return item
    return None

def _add_to(mapping, key, value):
    """Add key-value to mapping; list values are appended to an existing key."""
    if key not in mapping:
        mapping[key] = value
    elif isinstance(value, list):
        mapping[key] += value
    return mapping[key]

def blendConcaveDec(d=0.0, u=2.0, s=1.0, h=1.0):
    """Blending function, decreasing concave.

    d = delta, x = xabs - xdr
    u = uncertainty radius of xabs estimate error
    s = tuning scale factor

    eq 3.13

    returns blend
    """
    d = float(d)
    u = float(u)
    s = float(s)
    m = (u - 2 * s + d)
    if m == 0:
        return h
    if d <= s:
        return h
    else:
        b = (h * (u - s)) / m
        return b

def get_pixel_format(subsampling, depth):
    """Helper to set the pixel format from subsampling, assuming full range,
    for converting the source to yuv prior to encoding.
    """
    pixel_format = None
    if depth == '8':
        if subsampling == '420':
            pixel_format = 'yuvj420p'
        elif subsampling == '444':
            pixel_format = 'yuvj444p'
        else:
            raise RuntimeError('Unsupported subsampling ' + subsampling)
    elif depth == '16':
        if subsampling == '420':
            pixel_format = 'yuv420p16le'
        elif subsampling == '444':
            pixel_format = 'yuv444p16le'
        else:
            raise RuntimeError('Unsupported subsampling ' + subsampling)
    else:
        raise RuntimeError('Unsupported depth ' + depth)
    return pixel_format

def get_trees_max_depth(state, max_depth):
    """
    >>> s = [6, True, -5, True, 0, True, 9, True]
    >>> get_trees_max_depth(s, 2)
    ([(0, 1, 7)], [(4, False, False), (6, False, False), (2, 3, 5)])
    >>> s = [2, True, 1]
    >>> get_trees_max_depth(s, 2)
    ([(0, 1, -1), (2, -1, -1)], [])
    >>> s = [2, True, 1, True, 0]
    >>> get_trees_max_depth(s, 2)
    ([(0, 1, -1), (2, 3, -1)], [(4, False, False)])
    >>> s = [2, True, 1, True, 0, False, True, 3, False, False]
    >>> get_trees_max_depth(s, 2)
    ([], [(4, False, False), (2, 3, 5), (7, 8, 9), (0, 1, 6)])
    >>> s = [2, True, 1, True, 0, False, True, 3, False, True]
    >>> get_trees_max_depth(s, 2)
    ([(0, 1, 6), (7, 8, 9)], [(4, False, False), (2, 3, 5)])
    >>> get_trees_max_depth([-10, True, 9, False, True, -8, True], 2)
    ([(0, 1, 6)], [(5, False, False), (2, 3, 4)])
    """
    parent_stack = []
    full_trees = []

    # Utility function which puts parents in full_trees if all their
    # children are in full_trees
    def pop_finished_parents():
        for i in reversed(range(len(parent_stack))):
            positions = parent_stack[i]
            if -1 in positions:
                break
            children_positions = [pos[0] for pos in full_trees]
            left_child_pos = positions[1] + 1
            right_child_pos = positions[2] + 1
            if state[positions[1]] and left_child_pos not in children_positions:
                break
            if state[positions[2]] and right_child_pos not in children_positions:
                break
            full_trees.append(positions)
            parent_stack.pop(i)

    for i in range(len(state)):
        e = state[i]
        # Distinguish int nodes from bool child markers (bool is a subclass
        # of int, so it must be excluded explicitly).
        if isinstance(e, int) and not isinstance(e, bool):
            if len(parent_stack) < max_depth:
                parent_stack.append((i, -1, -1))
            else:
                full_trees.append((i, False, False))
                pop_finished_parents()
        else:
            parent = parent_stack[-1]
            if parent[1] == -1:
                parent_stack[-1] = (parent[0], i, -1)
            elif parent[2] == -1:
                parent_stack[-1] = (parent[0], parent[1], i)
                if not e:
                    full_trees.append(parent_stack.pop())
                    pop_finished_parents()
    parent_stack = [posns for posns in parent_stack if posns not in full_trees]
    return parent_stack, full_trees

def _increm_bias(bias, dif):
    """Increment the bias if it is successful."""
    return 0.2 * bias + 0.4 * (dif + bias)

def extract_context(*args, **kwargs):
    """General method for extracting the renderer_context.

    Used by decorators to extract the renderer_context from the arguments of
    the method they are wrapping. The renderer_context is the 4th argument
    if not specified by keyword.
    """
    if len(args) < 4 and kwargs is None:
        return None
    renderer_context = None
    if len(args) >= 4:
        renderer_context = args[3]
    elif kwargs is not None and 'renderer_context' in kwargs:
        renderer_context = kwargs['renderer_context']
    if renderer_context is None:
        return None
    # Check for presence of a Response object.
    if 'response' not in renderer_context:
        return None
    return renderer_context

def unflatten(dictionary):
    """Turn all keys with format {key}.{subkey} into nested dictionaries."""
    unflattened_dictionary = dict()
    for key, value in dictionary.items():
        parts = key.split(".")
        sub_dictionary = unflattened_dictionary
        for part in parts[:-1]:
            if part not in sub_dictionary:
                sub_dictionary[part] = dict()
            sub_dictionary = sub_dictionary[part]
        sub_dictionary[parts[-1]] = value
    return unflattened_dictionary

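# Usage sketch for unflatten with hypothetical dotted config keys.
flat = {'db.host': 'localhost', 'db.port': 5432, 'debug': True}
assert unflatten(flat) == {'db': {'host': 'localhost', 'port': 5432}, 'debug': True}
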
def _lower_case(s):
    """Convert a string to lowercase and remember its original case as a
    per-character is-lowercase mask.
    """
    return s.lower(), [c.islower() for c in s]