def remove_double_blanks(lines): """ Takes a list of lines and condenses multiple blank lines into a single blank line. """ new_lines = [] prev_line = "" # empty line here will remove leading blank space for line in lines: if len(line.strip()) or len(prev_line.strip()): new_lines.append(line) prev_line = line return new_lines
def odb_header(which): """ :param which: :return: """ all_headers = {'genes': ['odb_gene_id', 'tax_id', 'protein_id', 'uniprot_id', 'gene_name', 'ncbi_gid', 'desc'], 'OGs': ['og_id', 'level', 'og_name'], 'OG2genes': ['og_id', 'odb_gene_id']} return all_headers[which]
def home(command): """ Check if command is to go back to home screen (h | home). """ return (command.strip().lower() == "h" or command.strip().lower() == "home")
def contains(text, pattern):
    """Return a boolean indicating whether pattern occurs in text."""
    assert isinstance(text, str), 'text is not a string: {}'.format(text)
    assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)
    if len(pattern) == 0:
        return True
    # Check every possible starting position so matches that begin inside a
    # partial match (e.g. "aab" inside "aaab") are found.
    for start in range(len(text) - len(pattern) + 1):
        if text[start:start + len(pattern)] == pattern:
            return True
    return False
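# Illustrative calls for contains (inputs invented for this example).
assert contains("aaab", "aab") is True
assert contains("abcdef", "") is True   # empty pattern matches everywhere
assert contains("abc", "acb") is False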
def clean_data(record): """Cleans data to get single ',' separated byte messages.""" record = ','.join(record.split()) return record.encode('utf-8')
def fmt(message, prefix): """ Formats the given message by adding `prefix` at the start of each line. If the message is multi-line then split it according to the end-of-line terminators and add the prefix string to the start of each line. The `prefix` is not added to the beginning of a line if the line is completely empty (only whitespace). """ message = str(message).splitlines(True) if not message: return prefix output = "" for index, line in enumerate(message): if index >= 1 and not line.strip(): output += line else: output += "{} {}".format(prefix, line) return output
def merge_thread_output(data): """ merge multiple dictionaries of the same format into one """ out = {} for d in data: for cell, counts in d.items(): try: out[cell] except KeyError: out[cell] = counts else: out[cell] = [sum(x) for x in zip(counts, out[cell])] return out
def make_rewrite_entry(range, ip): """ Make single No_hop_rewrite table entry """ table_entry = dict({ "table":"ThisIngress.no_hop_lookup", "match":{ "hdr.dht.group_id": 1, "hdr.dht.id": (int(range[0]), int(range[1])) }, "priority": 1, "action_name":"ThisIngress.dht_rewrite", "action_params":{"dht_address": ip} }) return table_entry
def defrag(server, args_array, ofile, db_tbl, class_cfg, **kwargs): """Method: defrag Description: Stub holder for defrag function. Arguments: (input) server -> Mongo instance. (input) args_array -> Dict of command line options and values. (input) db_tbl -> Database and table names. (input) class_cfg -> Class configuration file. (input) kwargs: mail => Mail instance. """ flag = False errmsg = None mail = kwargs.get("mail", None) if server and args_array and ofile and db_tbl and class_cfg and mail: flag = False errmsg = None return flag, errmsg
def xp( name: str, *, suffix="_exp", adv_suffix="_advancementExp", tot_suffix="_totalExp" ) -> str: """ Generate the HTML for the Xp parts of arts & abilities """ return f"""[<input type="text" class="sheet-number_3" name="attr_{name}{suffix}" value="0"/>/<input type="text" class="sheet-number_3 advance" name="attr_{name}{adv_suffix}" value="0" readonly/>/<input type="text" class="sheet-number_3 total" name="attr_{name}{tot_suffix}" value="0" readonly/>]"""
def encode_payload(payload, key): """Returns XOR encoded payload with the given key""" encoded_payload = b'' for b in payload: encoded_payload += bytes([b ^ key]) return encoded_payload
def format_rally(testcase): """ Structure: details.[{summary.duration}] details.[{summary.nb success}] details.[{summary.nb tests}] Find data for these fields -> details.duration -> details.tests -> details.success_percentage """ details = testcase['details'] summary = None for item in details: if 'summary' in item: summary = item['summary'] if not summary: return False testcase['details'] = { 'duration': summary['duration'], 'tests': summary['nb tests'], 'success_percentage': summary['nb success'] } return True
def unmangle_string(string): """unmangle rest syntax like :foo:`bar` and *x* so an automatic translator won't fsck with them. Mangling syntax is easy, it's a base32 encoded, UTF-8, sphinx tag prefixed by 'SPX'. """ import re from base64 import b32decode def repl(match): found = match.group(0)[3:].replace('i', '=') return b32decode(found).decode('utf-8') return re.sub('SPX[A-Z0-9i]+', repl, string)
def partition_inplace(arr, low, hi):
    """
    low = start index of this sub array to partition
    hi = end index + 1 of this sub array to partition

    inplace (using the same array) restrict the array with the bounds arr[low:hi]

    1. make pivot the last element of the section we are looking at
    2. make some pointers to keep track of the part that is lower than pivot
       and greater than pivot
    3. loop over the section from low up to hi (exclusive, since hi is end index + 1)
    4. if an element is < pivot, swap it with the first element that is >= pivot
       (the element at index wall)

    invariant: everything before wall is < pivot, everything after wall that we
    have already looked at is >= pivot
    """
    pivot = arr[hi - 1]  # take pivot to be the last element
    wall = low           # everything before wall is < pivot
    for i in range(low, hi):  # scan the section arr[low:hi]
        if arr[i] < pivot:
            # swap the element at wall with the smaller element
            arr[wall], arr[i] = arr[i], arr[wall]
            wall += 1
    arr[hi - 1] = arr[wall]  # put pivot in the right place
    arr[wall] = pivot
    # array mutated, don't need to return it
    # low      = start of section < pivot
    # wall     = index of pivot (section < pivot ends at wall - 1)
    # wall + 1 = start of section >= pivot
    # hi       = end of section >= pivot
    return low, wall, wall, wall + 1, hi
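# Hypothetical driver (not from the original): a quicksort sketch built on
# partition_inplace, recursing on the half-open ranges it returns.
def quicksort_inplace(arr, low=0, hi=None):
    if hi is None:
        hi = len(arr)
    if hi - low <= 1:
        return
    _, _, pivot_idx, right_start, _ = partition_inplace(arr, low, hi)
    quicksort_inplace(arr, low, pivot_idx)
    quicksort_inplace(arr, right_start, hi)

data = [5, 2, 9, 1, 5, 6]
quicksort_inplace(data)
assert data == [1, 2, 5, 5, 6, 9]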
def locate_mfg_split(val): """Identify the split between Module Manufaturer and Model Number The CEC data is not clearly deliniated between Model Number & Mfg, so do the best you can. The rest is up to the user to sort out, when selecting a product. """ schvals = [ 'REC Solar ', 'Photronic ', 'Science & Technology ', ') ', 'USA ', 'Holdings ', 'Holding ', 'Frontier ', ' Liberty ', 'Industries ', 'Hong Kong ', 'Q CELLS ', 'Q-Cells', 'nologies ', 'Semiconductor ', 'Wind ','CO ', 'Singapore ', 'o. ', 'ogy ', 'ies ', 'ade ', 'ble ','ms ', 'nal ','ing ', 'rgy ', 'Ontario ', 'Korea ', 'are ', 'Universal ', 'nt ', 'da ', 'wer ', 'da ', 'eed ', 'le ', 'ry ', 'ica ','rik ', 'ue ', 'cis ', 'ech ', 'ics ', 'EC ', 'Solar ', 'ar ', 'oy ', 'ek ', 'BIPV ', 'den ', 'enn ', 'any ', 'tts ', 'nal ', 'eed', 'sis ', 'psun ', 'ght ', 'ASOL ', 'SEG PV ', 'son ', 'rray ', 'iva ', 'Inc. ', 'eme ', 'evo ', 'fab ', 'ray ', 'ity ', 'orld ', 'bine ', 'nnel ', 'ria ', 'max ', 'ace ', 'tec ', 'iosun ', 'gees ', 'llo ', 'ion ', 'gsu ', 'tric ', 'com ', 'umos ', 'uxco ', 'voltaic ', 'ICOR ', 'Sun ', 'iene ', 'fersa ', 'oton ', 'SPV ', 'eka ', 'Won ', 'eta ', 'MAR ', 'nix ', 'ital ', 'arp ', 'ick ', 'SDI ', 'oup ', 'BHD ', 'att ', 'olt ', ' ' ] while True: for sc in schvals: if val.find(sc) > 0: found = True return val.find(sc) + len(sc) print ('No Match for', val) return -1
def rekey(item, key, rekey_as=None): """ reindex a dict by a different key if rekey_as is provided, the original key will be inserted back into the dict under the key name specified in rekey_as """ rekeyed = {} for k in item: if rekey_as: item[k][rekey_as] = k rekeyed[item[k][key]] = item[k] return rekeyed
def linear_conflicts(start_list,goal_list): """ calculates number of moves to add to the estimate of the moves to get from start to goal based on the number of conflicts on a given row or column. start_list represents the current location and goal_list represnts the final goal. """ # Find which of the tiles in start_list have their goals on this line tiles_with_goals = [] for s in range(4): for g in range(4): if start_list[s] == goal_list[g] and start_list[s] != 0: # store tile number and the start and goal square number tiles_with_goals.append((start_list[s], s, g)) # have to have 2 tiles with goals on the same line if len(tiles_with_goals) < 2: return 0 # find the squares that each tile in tiles_with_goals # would go through to get to its goal occupied_squares = [] for t in tiles_with_goals: tile_num, start_square, goal_square = t if start_square > goal_square: smaller = goal_square larger = start_square direction = 'left' else: smaller = start_square larger = goal_square direction = 'right' squares = set() for x in range(smaller,larger+1): squares.add(x) occupied_squares.append([tile_num, squares, direction, start_square, goal_square]) # find tiles that move through the same squares and count # the conflicts conflicts = 0 while len(occupied_squares) > 0: tile_num, squares, direction, s, g = occupied_squares.pop() # find the tiles who intersect with this tile to_remove = [] for o in range(len(occupied_squares)): otile_num, osquares, odirection, os, og = occupied_squares[o] if len(osquares&squares) > 0 and not ((os == g) and (direction == odirection)): conflicts += 1 to_remove.append(occupied_squares[o]) for o in to_remove: occupied_squares.remove(o) # add 2 to estimate for each linear conflict return 2 * conflicts
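# Illustrative calls for linear_conflicts (rows invented for this example).
# With start row [2, 1, 3, 4] and goal row [1, 2, 3, 4], tiles 1 and 2 must pass
# through each other on the same line, giving one conflict and a penalty of 2.
assert linear_conflicts([2, 1, 3, 4], [1, 2, 3, 4]) == 2
assert linear_conflicts([1, 2, 3, 4], [1, 2, 3, 4]) == 0  # already in place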
def _get_key(subspace): """Get key. """ return ','.join(sorted(subspace, key=lambda k: (k.strip('-+|'), len(k), k)))
def octetsToOct(octets): """ convert a string of octets to a string of octal digits """ result = '' while octets: byte = octets[0] octets = octets[1:] result += "%.4s," % oct(ord(byte)) return result
def equal(a, b): """Tests if two suits, two colors, two cards, or two values are equal.""" return str(a) == str(b)
def version_ge(version1, version2): """ >>> from util_cplat_packages import * >>> version1 = distro_version >>> version2 = '15.03' """ import distutils.version flag = distutils.version.LooseVersion(version1) >= distutils.version.LooseVersion(version2) return flag
def hdf5pathparse(path): """ Args: path(str): Returns: tuple: (path,name) """ l = [x for x in path.split("/") if x] if len(l) == 0: return "/", "" elif len(l) == 1: return "/", l[0] else: return "/" + "/".join(l[:-1]), l[-1]
def dispatch(instance,path,): """ NOT USED ANYMORE msg format: { action: methodName/tag1/tag2, other key:value pairs will also be passed to method. } route an action to instance dispatchers """ methodName = path method = getattr(instance,methodName,None) if method==None: raise KeyError(f'Method <{methodName}> was not found on <{instance.__class__.__name__}>.') return method()
def group_position(*args): """ Get group position """ from collections import defaultdict table = defaultdict(int) result = [] for tup in zip(*args): result.append(table[tup]) table[tup] += 1 return result
def parse_response_status(status: str) -> str: """Create a message from the response status data :param status: Status of the operation. :return: Resulting message to be sent to the UI. """ message = status if status == 'SUCCESS': message = "Face authentication successful" elif status == 'NEW_USER': message = "Face signup successful" elif status == 'USER_NOT_FOUND': message = "User not registered" elif status == 'FAILED': message = "Face authentication failed" return message
def usage(err=''):
    """ Returns the usage statement for the program """
    m = '%s\n' % err
    m += ' Default usage is to get Branch status info from SalesForce using branch names in a file.\n'
    m += ' Branch name is the first space-separated value on a row of the file.\n'
    m += ' \n'
    m += ' branchls -f path_to_file \n'
    m += ' or\n'
    m += ' branchls -f filename_in_cwd \n '
    return m
def _convert_str_to_int(str): """ TypeError will take care the case that str is None ValueError will take care the case that str is empty """ if not str: return None try: return int(str) except ValueError: return int(float(str)) except TypeError: return None
def _SubtractCpuStats(cpu_stats, start_cpu_stats): """Computes number of idle wakeups that occurred over measurement period. Each of the two cpu_stats arguments is a dict as returned by the Browser.cpu_stats call. Returns: A dict of process type names (Browser, Renderer, etc.) to idle wakeup count over the period recorded by the input. """ cpu_delta = {} for process_type in cpu_stats: assert process_type in start_cpu_stats, 'Mismatching process types' # Skip any process_types that are empty. if (not cpu_stats[process_type]) or (not start_cpu_stats[process_type]): continue # Skip if IdleWakeupCount is not present. if (('IdleWakeupCount' not in cpu_stats[process_type]) or ('IdleWakeupCount' not in start_cpu_stats[process_type])): continue idle_wakeup_delta = (cpu_stats[process_type]['IdleWakeupCount'] - start_cpu_stats[process_type]['IdleWakeupCount']) cpu_delta[process_type] = idle_wakeup_delta return cpu_delta
def addIO(defaults, config): """ Simple method to update defaults from IO """ for key in config: defaults[key] = config[key] return defaults
def _split_rpdr_key_line(text_line): """Remove newline chars and split the line by bars.""" return tuple(text_line.replace('\r', '').replace('\n', '').split('|'))
def mute(color='r', percentage=1.0): """ Color muting. The opposite of Channel - mute only a specific color channel. color = a string ('r', 'g', or 'b') to mute only the specified color channel. percentage = desaturation amount; 1.0 = 100% Author: SolarLune Date Updated: 6/6/11 """ main = color.lower() subone = 'g' # Defaults to channeling Red subtwo = 'b' if main == 'g': subone = 'r' subtwo = 'b' elif main == 'b': subone = 'r' subtwo = 'g' return ( """ // Name: Color Mute // Author: SolarLune // Date Updated: 6/6/11 // Notes: Color muting; this works, but some colors are (obviously) // made up of others; for example, purple is blue AND red, so if // you mute all colors but red, purple will still show up... uniform sampler2D bgl_RenderedTexture; void main(void) { vec4 color; color = texture2D(bgl_RenderedTexture, gl_TexCoord[0].st); float gray = dot(color.rgb, vec3(0.299, 0.587, 0.114)); // The human eye is more sensitive to certain colors (like bright yellow) than others, so you need to use this specific color-formula to average them out to one monotone color (gray) vec4 desat; if ((color.""" + str(main) + """ > color.""" + str(subone) + """) && (color.""" + str( main) + """ > color.""" + str(subtwo) + """)) desat = vec4(gray, gray, gray, color.a); // If red is the dominant color in the pixel (like the green cube or dark blue // background), gray it out else desat = color; // Otherwise, if red is a dominant color, display normally; note that red is a dominant color in purple, so the purple cube also shows up correctly. gl_FragColor = mix(color, desat, """ + str(float(percentage)) + """); } """ )
def is_pos_int(num_str): """ Args: num_str (str): The string that is checked to see if it represents a positive integer (not 0) Returns: bool Examples: >>> is_pos_int("25.6") False >>> is_pos_int("-25.6") False >>> is_pos_int("0") False >>> is_pos_int("1964") True >>> is_pos_int("-1964") False >>> is_pos_int("6e5") False >>> is_pos_int("1_964") False >>> is_pos_int("NaN") False >>> is_pos_int("None") False >>> is_pos_int("27j+5") False >>> is_pos_int("abcdefg") False >>> is_pos_int("12345abcdefg") False >>> is_pos_int("~26.3") False >>> is_pos_int("^26.3") False """ assert isinstance(num_str, str) if num_str.isdigit(): return int(num_str) != 0 return False
def _state_width(state): """ Get the width of a state. :arg state: A game state. :return: The width of the input state. """ if len(state) == 0: return 0 else: return len(state[0])
def fast_adopt(g, ego, edge): """ Fast adoption function for triangle closing rule""" try: from_neighbors = set(g.node[ego]['M'][edge[0]]) # if concept 0 not in network, false if edge[1] in from_neighbors: # edge already exists return False to_neighbors = set(g.node[ego]['M'][edge[1]]) # if concept 1 not in network, false if from_neighbors & to_neighbors: # if susceptible for nb in g[ego]: # check exposed if edge in g.node[nb]['M'].edges(): return True return False except: return False
def truthy(value): """ Checks for twitter's valid "truthy" values. """ value = value.lower() if value == 't' or value == 'true' or value == '1': return True return False
def get_usergroup_status(code): """Get usergroup status from code.""" usergroup_status = {0: "Enable", 1: "Disable"} if code in usergroup_status: return usergroup_status[code] + " (" + str(code) + ")" return "Unknown ({})".format(str(code))
def my_average(dataset):
    """ (string) -> float

    returns average of values in input string values, but zeros do not count at all

    >>> my_average('23')
    2.5
    >>> my_average('203')
    2.5
    """
    count = 0
    total = 0
    for value in dataset:
        if value != '0':
            total += int(value)
            count += 1
    if count == 0:
        print('Input "{}" Invalid. No valid numbers to take an average from'.format(dataset))
        return None
    avg = total / count
    print('Among non-zeros in dataset: "{}"'.format(dataset))
    print('The Average is: ', avg)
    print()
    return avg
def get_halfway_point(p1, p2): """Returns the coordinates of the point halfway between the two given points as a tuple """ return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
def _prepare_devices(devices): """ Prepare dictionary of existing devices for saving to YAML file. """ return {k: {"classname": type(v).__name__, "module": type(v).__module__} for k, v in devices.items()}
def trend(x, slope, offset): """ Generate a trend """ return slope * x + offset
def isTapeRSE(rseName): """ Given an RSE name, return True if it's a Tape RSE (rse_type=TAPE), otherwise False :param rseName: string with the RSE name :return: True or False """ # NOTE: a more reliable - but more expensive - way to know that would be # to query `get_rse` and evaluate the rse_type parameter return rseName.endswith("_Tape")
def iou(box1, box2):
    """Implement the intersection over union (IoU) between box1 and box2

    Arguments:
    box1 -- first box, list object with coordinates (x1, y1, x2, y2)
    box2 -- second box, list object with coordinates (x1, y1, x2, y2)
    """
    # Calculate the (x1, y1, x2, y2) coordinates of the intersection of box1 and box2
    # and its area; clamp the width and height at 0 so non-overlapping boxes give 0.
    xi1 = max(box1[0], box2[0])
    yi1 = max(box1[1], box2[1])
    xi2 = min(box1[2], box2[2])
    yi2 = min(box1[3], box2[3])
    inter_area = max(xi2 - xi1, 0) * max(yi2 - yi1, 0)

    # Calculate the Union area by using Formula: Union(A,B) = A + B - Inter(A,B)
    box1_area = (box1[3] - box1[1]) * (box1[2] - box1[0])
    box2_area = (box2[3] - box2[1]) * (box2[2] - box2[0])
    union_area = box1_area + box2_area - inter_area

    # compute the IoU
    iou = inter_area / union_area
    return iou
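# Worked example for iou (boxes invented for illustration).
# Two 2x2 boxes offset by 1 in x and y: intersection 1, union 4 + 4 - 1 = 7.
assert abs(iou([0, 0, 2, 2], [1, 1, 3, 3]) - 1 / 7) < 1e-9
assert iou([0, 0, 1, 1], [2, 2, 3, 3]) == 0.0  # disjoint boxes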
def line_scan(line: str):
    """
    Call this on a line of Java code, get out regex
    :param line: A line of Java code
    :return: A regex string or None
    """
    # all the function calls containing regexes (maybe)
    functions = ["compile", "matches"]
    for func in functions:
        if func in line:
            # start and end index of the expression
            start = line.index(func) + len(func) + 1  # 1 from open paren
            end = len(line) - 1
            for i, char in enumerate(line):
                # if it's a closing quote
                # and you haven't found it yet
                # and it's not escaped
                if i > start and char == line[start] and end == len(line) - 1 and line[i - 1] != "\\":
                    end = i
            regex = line[start + 1:end]
            return regex
    # no regex-bearing call found on this line
    return None
def eh_linha(linha):
    """
    Check whether the argument is a valid line identifier.

    :param linha: string
    :return: bool

    Receives a string and checks whether it is a valid line ('1', '2' or '3').
    Returns True if it is, otherwise returns False.
    """
    return linha in ('1', '2', '3')
def whitespace_normalize_name(name): """Return a whitespace-normalized name.""" return ' '.join(name.split())
def cb_format_default_value(val): """Value formatting callback for default cell""" return "" if val is None else str(val)
def enrich(*args, **kwargs) -> None: """ Produces enriched text description for each node. Args: input_dir: A string pointing to the directory to import data from. input_nodelist: A KGX-format nodelist output_dir: A string pointing to the directory to output data to. sources: A list of sources to use for enrichment. Returns: None. """ # Doesn't do anything yet pass return None
def num_inactive_processes(spike_processes):
    """
    Gets the number of neurons that didn't fire at all.

    Args:
        spike_processes: [ np.array([indexes of spikes]) ]

    Returns:
        int
    """
    # Count the processes with no spikes at all.
    return sum(1 for fp in spike_processes if len(fp) == 0)
def quantize(data): """ Given some continuous data, quantize it into appropriately sized discrete buckets (eg, as would be suitable for constructing a histogram of the values). """ # buckets = {} return []
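# The body of quantize above is a stub. Below is a minimal sketch of one possible
# approach, assuming equal-width buckets and a caller-chosen bucket count; both
# the helper name and those assumptions are illustrative, not from the original.
def quantize_sketch(data, n_buckets=10):
    if not data:
        return {}
    lo, hi = min(data), max(data)
    width = (hi - lo) / n_buckets or 1  # avoid zero width when all values are equal
    buckets = {}
    for x in data:
        idx = min(int((x - lo) / width), n_buckets - 1)  # clamp the max value into the last bucket
        buckets[idx] = buckets.get(idx, 0) + 1
    return buckets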
def to_boolean(val, true_str=None, false_str=None):
    """Normalize a str value to a boolean value"""
    # set default true_str and false_str
    true_str = true_str or ['true', '1', 't', 'y', 'yes', 'Y', 'Yes', 'YES', 1]
    false_str = false_str or ['false', '0', 'f', 'n', 'N', 'No', 'no', 'NO', 0]
    if not isinstance(val, str):
        return bool(val)
    if val in true_str:
        return True
    elif val in false_str:
        return False
    # unrecognized strings fall through and return None
    return None
def separate_appetizers (dishes, appetizers): """ :param dishes: list of dish names :param appetizers: list of appetizer names :return: list of dish names The function should return the list of dish names with appetizer names removed. Either list could contain duplicates and may require de-duping. """ dishes_deduped = set (dishes) appetizers_deduped = set (appetizers) return dishes_deduped.difference (appetizers_deduped)
def get_power_set(my_set, set_size=None): """ my_set: list or set of strings set_size: deprecated, kept as optional for backwards compatibility returns: the power set of input strings """ p_set = set() if len(my_set) > 1: pow_set_size = 1 << len(my_set) # 2^n for counter in range(0, pow_set_size): temp = "" for j in range(0, len(my_set)): if(counter & (1 << j) > 0): if temp != "": temp = f"{temp} and {my_set[j]}" else: temp = my_set[j] if temp != "": p_set.add(temp) else: p_set = set(my_set) return p_set
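# Example of the output shape for get_power_set (inputs invented for illustration):
# each non-empty subset is rendered as an "and"-joined string.
assert get_power_set(["a", "b"]) == {"a", "b", "a and b"}
assert get_power_set(["solo"]) == {"solo"}  # single-element input falls back to set(my_set)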
def au_moins_1(list_vars):
    """Return a DIMACS clause encoding the constraint that at least one of these vars is true"""
    return [" ".join(map(str, list_vars + [0]))]
def obj_to_str(obj): """ Convert an object to a string if it's not already a string. If it's a list, join its items into a single string. Parameters ---------- obj : str or list[str] Object to convert to string Returns ------- Str Object as a string """ return ' '.join(obj) if isinstance(obj, list) else obj
def emoji_filter(indata): """add mana symbols as emoji tags to a message""" ret = indata.replace("{", ":_") ret = ret.replace("}", "_:") lastpos = None while ret.rfind(":_", 0, lastpos) != -1: lastpos = ret.rfind(":_", 0, lastpos) start = lastpos + 2 end = ret.rfind("_:") content = ret[start:end] content = content.lower() content = content.replace("/", "") ret = ret[:start] + content + ret[end:] return ret
def getEPSGStringForUTMZone(zone, isNorth): """ Get EPSG string, e.g. "EPSG:32618" for UTM zone (WGS84) @param zone Integer representing UTM zone @param isNorth True if north @return String of the pattern "^EPSG:\d+$" """ if isNorth: epsg = 32600 + zone else: epsg = 32700 + zone return "EPSG:%d" % (epsg,)
def capitalize_words(words): """Capitalize the words of an input string.""" capitalized = [] for word in words.split(' '): capitalized.append(word.lower().capitalize()) return ' '.join(capitalized)
def second_dp_hp_kernel_config(defn, hp1=5, hp2=0.1): """Sample the second level Dirichlet process parameter (alpha) using the method of Teh et al (2005). Current implementation is based on that of Gregor Heinrich available at http://bit.ly/1LkdBdX. Teh (2005) is available here: http://www.cs.berkeley.edu/~jordan/papers/hdp.pdf Heinrich says his method is based on equations 47-49 ---------- defn : LDA model definition """ return [('direct_second_dp_hp', {'hp1': hp1, 'hp2': hp2})]
def process_keys(func, *args, ignore_keys=None):
    """Apply func to the value stored under each key of a dict.

    Args:
        func: the function to apply
        args: args[0] should be the dict; the remaining args are passed to func
            as extra parameters
        ignore_keys: keys whose values are copied through unchanged
    """
    ret = {}
    for var in args[0]:
        if ignore_keys is None or var not in ignore_keys:
            ret[var] = func(args[0][var], *args[1:])
        else:
            ret[var] = args[0][var]
    return ret
def masi_distance(label1, label2): """Distance metric that takes into account partial agreement when multiple labels are assigned. >>> from nltk.metrics import masi_distance >>> masi_distance(set([1, 2]), set([1, 2, 3, 4])) 0.335 Passonneau 2006, Measuring Agreement on Set-Valued Items (MASI) for Semantic and Pragmatic Annotation. """ len_intersection = len(label1.intersection(label2)) len_union = len(label1.union(label2)) len_label1 = len(label1) len_label2 = len(label2) if len_label1 == len_label2 and len_label1 == len_intersection: m = 1 elif len_intersection == min(len_label1, len_label2): m = 0.67 elif len_intersection > 0: m = 0.33 else: m = 0 return (1 - (len_intersection / float(len_union))) * m
def normalize_interface(name): """Return the normalized interface name """ if not name: return def _get_number(name): digits = '' for char in name: if char.isdigit() or char in '/.': digits += char return digits if name.lower().startswith('gi'): if_type = 'GigabitEthernet' elif name.lower().startswith('te'): if_type = 'TenGigabitEthernet' elif name.lower().startswith('fa'): if_type = 'FastEthernet' elif name.lower().startswith('fo'): if_type = 'FortyGigabitEthernet' elif name.lower().startswith('long'): if_type = 'LongReachEthernet' elif name.lower().startswith('et'): if_type = 'Ethernet' elif name.lower().startswith('vl'): if_type = 'Vlan' elif name.lower().startswith('lo'): if_type = 'loopback' elif name.lower().startswith('po'): if_type = 'Port-channel' elif name.lower().startswith('nv'): if_type = 'nve' elif name.lower().startswith('twe'): if_type = 'TwentyFiveGigE' elif name.lower().startswith('hu'): if_type = 'HundredGigE' else: if_type = None number_list = name.split(' ') if len(number_list) == 2: number = number_list[-1].strip() else: number = _get_number(name) if if_type: proper_interface = if_type + number else: proper_interface = name return proper_interface
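# Illustrative normalizations for normalize_interface (names invented for the example).
assert normalize_interface("Gi0/1") == "GigabitEthernet0/1"
assert normalize_interface("te 1/0/1") == "TenGigabitEthernet1/0/1"
assert normalize_interface("Vlan10") == "Vlan10"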
def XORs(alen,array): #incorrect, and too slow """xors=0 donedex=[] for i in xrange(alen): for j in xrange(1,alen): if (i!=j) and ((i,j) not in donedex): if (i^j)%2==1: donedex.append((i,j)) donedex.append((j,i)) xors+=1""" #correct, but too slow #return sum([1 if (array[i]^array[j])%2==1 else 0 for i in xrange(alen) for j in xrange(i+1, alen)]) evens = sum([1 if i%2==0 else 0 for i in array]) return evens*(alen-evens)
def rem_node_prefix(node_address): """ Remove node prefix from node address. :param node_address: The prefixed node address. """ if len(node_address) >= 5: return node_address[5:] else: return node_address
def average_heart_rate(heart_rates): """Calculate the average of a list of heart rates Args: heart_rates (list): a list of heart rates Returns: float: the average heart rate """ # Compute sum and length of the input list hr_sum = sum(heart_rates) hr_length = len(heart_rates) return hr_sum/hr_length
def replace_last(s, old, new, maxtimes=1): """Replace the last (n) occurence(s) of an expression in a string""" tokens = s.rsplit(old, maxtimes) return new.join(tokens)
def get_protocol(url): """Gets the protocol of a URL string For example if url is "https://www.argosopentech.com" "https" is returned. If the protocol can't be determined None is returned Args: url (str): The URL to get the protocol of Returns: str: The string representation of the protocol or None """ protocol_end_index = url.find(":") if protocol_end_index > 0: return url[:protocol_end_index] return None
def to_range(x): """Move x within the angular range -180...180 deg.""" while x < -180: x += 360 while x > 180: x -= 360 return x
def get_disk_info(metadata): """ Modified from: https://github.com/broadinstitute/dsde-pipelines/blob/develop/scripts/calculate_cost.py """ boot_disk_size = 0.0 disk_size = 0.0 disk_type = "HDD" # GCP and AWS backends handle disk information in a different way try: if ( "runtimeAttributes" in metadata and "disks" in metadata["runtimeAttributes"] ): # GCP if "bootDiskSizeGb" in metadata["runtimeAttributes"]: boot_disk_size = metadata["runtimeAttributes"][ "bootDiskSizeGb" ] (_, disk_size, disk_type) = metadata["runtimeAttributes"][ "disks" ].split() except ValueError: if "inputs" in metadata: # AWS if ( "runtime_attr" in metadata["inputs"] and "disk_gb" in metadata["inputs"]["runtime_attr"] ): if "boot_disk_gb" in metadata["inputs"]["runtime_attr"]: boot_disk_size = metadata["inputs"]["runtime_attr"][ "boot_disk_gb" ] disk_size = metadata["inputs"]["runtime_attr"]["disk_gb"] elif "disk_size" in metadata["inputs"]: disk_size = metadata["inputs"]["disk_size"] return disk_type, float(boot_disk_size) + float(disk_size)
def letter_grade(points, total):
    """Given an amount of points and the total points available, returns
    the corresponding letter grade for the average"""
    # letter_grade(9, 10) returns 'A'
    avg = (points / total) * 100
    # more specific grades (B-, C+, etc.) can be added to scale,
    # as long as it remains in ascending order (low to high)
    scale = [
        (60, 'D-'), (64, 'D'), (67, 'D+'),
        (70, 'C-'), (74, 'C'), (77, 'C+'),
        (80, 'B-'), (84, 'B'), (87, 'B+'),
        (90, 'A-'), (94, 'A'), (97, 'A+')
    ]
    # Grade defaults to F
    grade = 'F'
    for value in scale:
        if avg >= value[0]:
            grade = value[1]
    return grade
def create_textarea(i): """ Input: { (cols) - columns, 50 by default (rows) - raws, 10 by default (spellcheck) - if !='yes', skip (name) - name (value) - default value } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 html - html for the form } """ cols=i.get('cols','') if cols=='': cols='50' rows=i.get('rows','') if rows=='': rows='10' h='<textarea cols="'+cols+'" rows="'+rows+'"' sp=i.get('spellcheck','') if sp!='yes': h+=' spellcheck="false"' name=i.get('name','') if name!='': h+=' name="'+name+'"' h+='>\n' value=i.get('value','') if value!='': h+=value h+='</textarea>' return {'return':0, 'html':h}
def get_ttt(jd:float): """ Get Julian centuries. Args: jd (float): Julan day number. Returns: (float): Julian centuries. """ return (jd - 2451545.0) / 36525.0
def exponential_decay(step, rate, decay_steps, start_step=0): """A standard exponential decay, scaling the learning rate by :obj:`rate` every :obj:`decay_steps` steps. """ return rate ** (max(step - start_step + decay_steps, 0) // decay_steps)
def calculate_tickers_set(correlated_features_filtered):
    """
    Calculate and return the number of unique tickers based on the given
    `correlated_features_filtered` DICT.

    Parameters
    ------------------------------------------------
    `correlated_features_filtered`: DICT (object, generated as result of `collect_corr_tickers`)

    Returns
    -------
    INT - number of unique tickers
    """
    total_tickers = set()
    for key, value in correlated_features_filtered.items():
        total_tickers.update([key] + value)
    return len(total_tickers)
def checkLostPackets(expected_packet, recv_packet, packets_lost): """ Preforms checks for lost packets """ if expected_packet != recv_packet: packets_lost = packets_lost + (recv_packet - expected_packet) expected_packet = recv_packet + 1 else: expected_packet = expected_packet + 1 return expected_packet, packets_lost
def isunauthenticated(f): """Checks to see if the function is marked as not requiring authentication with the @unauthenticated decorator. Returns True if decorator is set to True, False otherwise. """ return getattr(f, 'unauthenticated', False)
def format_headers(headers): """ Return a list of column names formatted as headers :param headers: :return list: """ new_headers = [] for h in headers: h: str = h if '_' in h: h = h.replace('_', ' ') # if 'id' in h: # h = h.replace('id', '') h = h.strip() h = h.capitalize() if h == 'Power output': h = 'Power output (kW)' elif h == 'Ship type': h = 'Type' elif h == 'Ship status': h = 'Status' elif h == 'Speed': h = 'Speed (kn)' new_headers.append(h) return new_headers
def add_filter(req_filter, request=''): """ Parameters ---------- req_filter : str string shaped like "key":"val" or "key":{"op":"val"} request : str (optional) request string shaped like: {"key1":{"op1":"val1"}[,"key2":{"op2":"val2"}]*} Returns ------- str a string shaped like {["key_i":{"op_i":"val_i"},]*, "key", "val"} """ if request == "": return "{%s}" % req_filter else: return request[:-1] + ',' + req_filter + '}'
def num_owned_indices_from_cyclic(dd):
    """Given a dimension dictionary `dd` with dist_type 'c', return the number
    of indices owned.
    """
    block_size = dd.get('block_size', 1)
    global_nblocks, partial = divmod(dd['size'], block_size)
    local_nblocks = ((global_nblocks - 1 - dd['proc_grid_rank']) // dd['proc_grid_size']) + 1
    local_partial = partial if dd['proc_grid_rank'] == 0 else 0
    # use the defaulted block_size so a missing 'block_size' key does not raise
    local_size = local_nblocks * block_size + local_partial
    return local_size
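# Worked example for num_owned_indices_from_cyclic (dimension dict values invented here):
# 10 indices in blocks of 2 over 3 processes; blocks 0..4 land cyclically on ranks 0..2,
# so rank 0 gets blocks {0, 3}, rank 1 gets {1, 4}, rank 2 gets {2}.
for rank, expected in [(0, 4), (1, 4), (2, 2)]:
    dd = {'size': 10, 'block_size': 2, 'proc_grid_rank': rank, 'proc_grid_size': 3}
    assert num_owned_indices_from_cyclic(dd) == expected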
def extract_values(obj, key): """ Pull all values of specified key from nested JSON. Args: obj(dict): The JSON object key(str): The JSON key to search for and extract Returns: list of matching key values """ array = [] def extract(obj, array, key): """Recursively search for values of key in JSON tree.""" if isinstance(obj, dict): for k, v in obj.items(): if k == key: array.append(v) if isinstance(v, (dict, list)): extract(v, array, key) elif isinstance(obj, list): for item in obj: extract(item, array, key) return array results = extract(obj, array, key) return results
def convert_window_size(ws):
    """
    This function converts the shorthand input window size
    and returns an integer of the same value (i.e. "100kb" == int(100000))

    Args:
        ws: window size (bp/kb/mb)

    Returns:
        Integer of window size
    """
    window_size = None
    if "bp" in ws:
        window_size = int(ws.strip("bp"))
    elif "kb" in ws:
        window_size = int(ws.strip("kb")) * 1000
    elif "mb" in ws:
        window_size = int(ws.strip("mb")) * 1000000
    return window_size
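# Illustrative conversions for convert_window_size (inputs invented for the example).
assert convert_window_size("500bp") == 500
assert convert_window_size("100kb") == 100000
assert convert_window_size("2mb") == 2000000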
def transferCounts( coverage_info, transcript_info, transcriptome_out_bed ): """ """ for transcript_id in transcript_info: for eachexon in transcript_info[transcript_id]["exons"]: start, end = int( eachexon.split( "_" )[0] ), int( eachexon.split( "_" )[1] ) exon_coverage = [] for i in range( start, end + 1 ): try: exon_coverage.append( coverage_info[transcript_info[transcript_id]["chromosome"]][i] ) except KeyError: # print(transcript_id,"KEYERROR",i,start,end) exon_coverage.append( 0 ) transcript_info[transcript_id]["bed_cov"].append( exon_coverage ) return transcript_info
def get_xi_from_ARPS_simulation(simulation): """Extract xi from full name of ARPS files""" [topo_or_wind, N, dx, xi, sigma, ext] = simulation.split('_') xi = xi.split('xi')[1] return (xi)
def width0diameter(b, d): """Calculate rate width over diameter. :param b (float): impeller width [m] :param d (float): diameter [m] :return bd (float): width over diameter """ bd = b / d return bd
def search_required(runlvl_zma, es_keyword_dct): """ Determine if MechDriver needs to search for the saddle point of some reaction. If a saddle point was found, the function assessess whether the overwrite keyword has been set to True, which would requiring a new search from scratch. :param savefs_dct: filesystem objects under SAVE prefix :type savefs_dct: dict[str: autofile.fs objects] :rtype: bool """ overwrite = es_keyword_dct['overwrite'] if runlvl_zma is None: print('Since no transition state found in filesys', f'at {es_keyword_dct["runlvl"]} level', 'proceeding to find it...') _run = True else: if overwrite: print('\nUser specified to overwrite transition state search.' 'Redoing task...') _run = True else: print('\nSince transition state found and saved previously,', 'proceeding to next task.') _run = False return _run
def parse_numerical_dict(data, r=2): """Converts dict with numerical values to consistent `r` rounded float values.""" return {k: round(float(v), r) for k, v in data.items()}
def _is_student(user_item, univer_ids): """Does the user profile indicate that he studied at a university?""" # field universities if 'universities' in user_item: for i in user_item['universities']: if i['id'] in univer_ids: return True # field occupation if 'occupation' in user_item: occ = user_item['occupation'] if occ['type'] == 'university': if occ['id'] in univer_ids: return True return False
def _alpha_to_digit(alpha): """Translates A to 0, B to 1, etc. So "AB" is value 27. """ if type(alpha) is int: return alpha if not alpha.isalpha(): raise ValueError('column name "%s" is malformed' % alpha) column = 0 for c in alpha.lower(): v = ord(c) - ord('a') + 1 column = column * 26 + v return column - 1
def check_msg(msg):
    """
    Check if message contains Error or Warning for run_analysis functions.

    Args:
        msg (str): Message from server/client code

    Returns:
        err (bool): True if there is an error
        msg (str): Message informing the user if the program is running
            properly or there is an error or warning.
    """
    err = False
    if 'Error' in msg:
        err = True
    elif "Warning" in msg:
        msg = 'Success! Warning: Image already exists. ' \
              'Processing ran on existing image'
    else:
        msg = 'Image saved successfully'
    return err, msg
def render_pressure(data, query): """ pressure (P) """ answer = data.get('pressure', '') if answer: answer += 'hPa' return answer
def number_to_name(num):
    """
    Helper function that converts a number (0 to 4) into a name
    representing one of the items of the program
    """
    name = ""
    if num == 0:
        name = "rock"
    elif num == 1:
        name = "Spock"
    elif num == 2:
        name = "paper"
    elif num == 3:
        name = "lizard"
    elif num == 4:
        name = "scissors"
    return name
def range_to_range_linear(value, min_old_range, max_old_range, min_new_range, max_new_range): """ Transform a value from one range to another linearly. :param value: quantity to be transformed :param min_old_range: min :param max_old_range: max :param min_new_range: min :param max_new_range: max """ if value < min_old_range: value = min_old_range elif value > max_old_range: value = max_old_range temp = ((value - min_old_range) / (max_old_range - min_old_range)) * \ (max_new_range - min_new_range) + min_new_range return temp
def has_correct_parentheses(string):
    """Test whether the given string is correctly parenthesized, i.e. every
    closing parenthesis is preceded by a matching opening one. Only the
    parentheses ( ) are considered. Returns True if the parenthesization is
    correct, otherwise False.
    """
    opened = 0
    for char in string:
        if char == '(':
            opened += 1
        elif char == ')':
            opened -= 1
            if opened < 0:
                # a closing parenthesis with no matching opening one
                return False
    return opened == 0
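# Illustrative checks for has_correct_parentheses (strings invented for the example).
assert has_correct_parentheses("(a(b)c)") is True
assert has_correct_parentheses(")(") is False   # closes before it opens
assert has_correct_parentheses("(()") is False  # never closed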
def get_option_list(settings, key, default=None): """ Return string `settings` value as `list` split by lines :param settings: settings dictionary :param key: dictionary key :param default: default value if `key` is not in `settings` :return: settings value :rtype: str|list[str] """ if key not in settings: return default value = settings[key] lines = value.splitlines() return list(filter(None, lines))
def _create_input_kwargs_from_args_spec( args_key, args_spec, validator): """Helper function to return kwargs for most model inputs. Args: args_key: The args key of the input from which a kwargs dict is being built. args_spec: The ARGS_SPEC object to reference. validator: The validator callable to provide to the ``validator`` kwarg for the input. Returns: A dict of ``kwargs`` to explode to an ``inputs.GriddedInput`` object at creation time. """ model_spec = args_spec['args'] return { 'args_key': args_key, 'helptext': model_spec[args_key]['about'], 'label': model_spec[args_key]['name'], 'validator': validator, }
def check_point(x_point): """ check point validity """ # set zero for non_floats try: return float(x_point) except Exception: return 0
def applyF_filterG(L, f, g): """ Assumes L is a list of integers Assume functions f and g are defined for you. f takes in an integer, applies a function, returns another integer g takes in an integer, applies a Boolean function, returns either True or False Mutates L such that, for each element i originally in L, L contains i if g(f(i)) returns True, and no other elements Returns the largest element in the mutated L or -1 if the list is empty """ new_L = [L[i] for i in range(len(L)) if g(f(L[i])) == True] L[:] = new_L if len(L) > 0: return max(L) else: return -1
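# Illustrative call for applyF_filterG with simple f and g (both invented for the example):
# keep elements whose square is below 30, then report the largest survivor.
L = [0, -10, 5, 6, -4]
assert applyF_filterG(L, lambda i: i * i, lambda i: i < 30) == 5
assert L == [0, 5, -4]  # L is mutated in place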
def extract(entrytext, lchar, rchar): """extracts all the textblocks surrounded by lchar and rchar from entrytext""" def indexlist(entrytext, char): """Creates a list of the indexes of segments to be extracted """ rl_temp = [] indexfrom = 0 for a_temp in range(entrytext.count(char)): indexat = entrytext.index(char) rl_temp.append(indexat+indexfrom) indexfrom += indexat+len(char) entrytext = entrytext.split(char, 1)[1] return rl_temp def isregular(entrytext, lchar, rchar): """checks to see if the text has an equal number of properly nested lchar and rchar """ llist = indexlist(entrytext, lchar) rlist = indexlist(entrytext, rchar) returnlist = [] if len(llist) != len(rlist) or not llist: return False rlist = iter(indexlist(entrytext, rchar)) for a_temp in llist: returnlist.append(a_temp) returnlist.append(next(rlist)) if returnlist == sorted(returnlist): return True return False if not isregular(entrytext, lchar, rchar): return [] llist = indexlist(entrytext, lchar) literator = iter(llist) riterator = iter(indexlist(entrytext, rchar)) returnlist = [] for a_temp in range(len(llist)): returnlist.append(entrytext[next(literator) +len(lchar): next(riterator)]) return returnlist
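# Illustrative calls for extract with square-bracket delimiters (text invented for the example).
assert extract("a [b] c [d]", "[", "]") == ["b", "d"]
assert extract("broken [pair", "[", "]") == []  # unbalanced delimiters yield an empty list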
def filter_list(lst, vals):
    """Filter a list by vals.

    Args:
        lst (list): The list to filter.
        vals (list): The values to remove from lst.

    Returns:
        list: The filtered list (order and duplicates are not preserved).
    """
    if any([not lst, not isinstance(lst, list), not isinstance(vals, list)]):
        return lst
    return list(set(lst).difference(set(vals)))
def extract_quoted(p_string, p_delim=b'"'): """ Used by play list extracting in pandora module. Discard characters before first p_delim extract chars between p_delim chars return all chars after second p_delim """ l_string = p_string l_1st = l_string.find(p_delim) l_2nd = l_string.find(p_delim, l_1st + 1) l_string = p_string[l_1st + 1:l_2nd].decode('utf-8') l_rest = p_string[l_2nd + 1:] return l_string, l_rest
def local(name, catalog=None): """ Create a session spec for connecting to a store on the local server. :param name: Store name. :type name: string :param catalog: Catalog name (None = root catalog). :type catalog: string :return: A session spec string. :rtype: string """ if catalog: return "<%s:%s>" % (catalog, name) else: return "<%s>" % name