content
stringlengths
42
6.51k
def sklearn_model_2_file_name(result_dir):
    """Return the path of the trained sklearn model pickle inside *result_dir*."""
    return f"{result_dir}/model_2.pkl"
def _resolve_option(option_entity, selected_dish, selected_restaurant): """ Given an option entity that could have many potential resolved values (each representing a unique customization option for a specific restaurant's dish), pick the most likely knowledge base entry for the option. Here, we choose the first option candidate that is compatible with the given dish. Args: option_entity (dict): An option entity with potentially many candidate resolved values. selected_dish (dict): Knowledge base entry for the selected dish. selected_restaurant (dict): Knowledge base entry for the selected restaurant. Returns: dict: The resolved knowledge base entry corresponding to the given option entity. """ # Can't do anything if there are no candidate values to choose from (i.e. if the NLP Entity # Resolver couldn't find any potential KB entries that matched with this entity). if 'value' not in option_entity: return None # Get all the potential resolved values for the given option entity. Each candidate represents # a different entry in the knowledge base, corresponding to a specific option for a specific # restaurant's dish. We use information about the selected dish to identify the correct # option from this candidate list. option_candidates = [value for value in option_entity['value']] # Next, get all the options that are listed for the selected dish on the restaurant's menus. all_option_groups = [groups for menu in selected_restaurant['menus'] for groups in menu['option_groups']] dish_option_groups = [group for group in all_option_groups if group['id'] in set(group_ids for group_ids in selected_dish['option_groups'])] dish_options = {option['id']: option for group in dish_option_groups for option in group['options']} # Finally, choose the first candidate that's a valid dish option listed on the menu. return next((dish_options[oc['id']] for oc in option_candidates if oc['id'] in dish_options), None)
def _divides(div, n): """ returns True if and only if div divides n which is equivalent to n = k*div with k an integer. """ return (n % div == 0)
def clean_line(line):
    """
    Clean and classify a raw comma-delimited line from the tweet file.

    Returns a tuple (cleaned_tweet, line_ref, line_type) where line_type is
    one of "tweet", "retweet" or "bad" (too few fields).
    """
    kind = "tweet"
    # NOTE(review): encoding then str() yields a "b'...'" repr string; this
    # looks unintended but is preserved to keep behaviour identical.
    raw = str(line.encode(encoding='UTF-8', errors='replace'))
    raw = raw.replace("\n", " ").replace("/n", " ")
    parts = raw.split(",")  # the file appears to be comma-delimited
    if len(parts) < 7:
        return ("", "", "bad")
    ref = parts[6]
    tweet = parts[1]
    # be careful where we lower: the RT prefix check is case-sensitive
    if tweet.startswith("RT"):
        kind = "retweet"
    return (tweet.lower(), ref, kind)
def remove_suffix(string, suffix):
    """
    Return *string* with *suffix* removed from its end if present;
    otherwise return *string* unmodified.

    An empty suffix is a no-op: without the guard, string[:-0] would
    return ''.
    """
    if not suffix:
        return string
    return string[:-len(suffix)] if string.endswith(suffix) else string
def number_to_pattern(index: int, k: int) -> str:
    """Convert an integer to its length-*k* DNA pattern representation.

    Arguments:
        index {int} -- integer to convert to pattern
        k {int} -- size of pattern

    Returns:
        str -- pattern

    Example:
        >>> number_to_pattern(11, 3)
        'AGT'
    """
    alphabet = "ACGT"
    # Iterative base-4 decoding (the recursive original peeled one symbol
    # per call); least-significant digit is the last symbol.
    symbols = []
    for _ in range(k):
        index, remainder = divmod(index, 4)
        symbols.append(alphabet[remainder])
    return "".join(reversed(symbols))
def check_sites(cleaned_data):
    """
    Return True when the instance's sites overlap its parent's sites, or
    when site parameters make no sense to compare (instance or parent has
    no sites configured). Return False only when both sides define sites
    but share no common site id.

    :return: bool
    """
    sites = cleaned_data.get('sites')
    if not sites:
        return True
    instance_ids = set(sites.all().values_list('id', flat=True))
    parent = cleaned_data.get('parent')
    # truthiness check mirrors the original: a missing/falsy parent, or a
    # parent without sites, means there is nothing to conflict with
    if parent and hasattr(parent, 'sites') and parent.sites is not None:
        parent_ids = set(parent.sites.all().values_list('id', flat=True))
        return bool(instance_ids & parent_ids)
    return True
def get_expected_python_safety() -> dict:
    """Expected python-safety configuration object."""
    files_key = "REQUIREMENTS_FILES"
    return {
        files_key: ["default-requirements.txt"],
        "quick": {files_key: ["small-requirements.txt"]},
        "slow": {files_key: ["big-requirements.txt"]},
    }
def IsLeapYear(someyear):
    """Determine whether *someyear* is a leap year.

    Args:
        someyear: the year to check.

    Returns:
        True if it is a leap year, else False.
    """
    # Gregorian rule: divisible by 4, except century years that are not
    # divisible by 400. Replaces the original four-level if/else pyramid.
    return someyear % 4 == 0 and (someyear % 100 != 0 or someyear % 400 == 0)
def div_point(p, d):
    """Return the first three components of point *p* divided by scalar *d*."""
    return [p[axis] / d for axis in range(3)]
def _percent_to_integer(percent: str) -> int: """ Internal helper for converting a percentage value to an integer between 0 and 255 inclusive. """ return int(round(float(percent.split("%")[0]) / 100 * 255))
def resolve_blocks(source: str):
    """This is a dumb lexer that turns strings of text with code blocks
    (squigly braces) into a single long string separated by semicolons. All
    code blocks are converted to strings recursively with correct escaping
    levels. The resulting string can be sent to break_commands to iterate
    over the commands."""
    result = []          # output characters, joined at the end
    in_brace = False     # inside a { ... } code block
    inside_quotes = False  # inside a "..." string literal
    i = 0
    lineno = 1           # 1-based line counter for error messages
    while i < len(source):
        if not inside_quotes:
            if source[i] == '"':
                inside_quotes = True
                # escape the quote one level deeper when inside a brace block
                result.append("\\" * int(in_brace) + '"')
            elif source[i] == "{" and not in_brace:
                # open a code block: it becomes a quoted string in the output
                result.append('"')
                in_brace = True
            elif source[i] == "}":
                if not in_brace:
                    raise ValueError(f"line {lineno}: mismatched closing brace")
                in_brace = False
                result.append('"')
            elif source[i] == "\\":
                # NOTE(review): outside a brace this appends "" — a bare
                # backslash outside quotes/braces is silently dropped.
                # Confirm this is intended.
                result.append("\\" * (int(in_brace)))
            elif source[i] == "\n" and in_brace:
                # newlines inside a block become literal \n escapes
                result.append("\\n")
            elif source[i] == "#":
                # Comment: skip to end of line
                # NOTE(review): if a comment runs to end-of-input, the
                # source[i] check below raises IndexError — appears to
                # assume input ends with a newline; confirm.
                while i < len(source) and source[i] != "\n":
                    i += 1
            else:
                result.append(source[i])
        else:
            if source[i] == '"':
                inside_quotes = False
                result.append("\\" * int(in_brace) + '"')
            elif source[i] == "\\":
                # one extra escape level for backslashes inside strings
                result.append("\\" * (in_brace + 1))
            elif source[i] == "\n":
                raise ValueError(f"line {lineno}: newlines cannot appear in strings")
            else:
                result.append(source[i])
        if source[i] == "\n":
            lineno += 1
        i += 1
    if in_brace:
        raise ValueError("mismatched braces")
    if inside_quotes:
        raise ValueError("missing ending quote")
    return "".join(result).split("\n")
def modify_data(data):
    """Augment a notification data dict for easier use in templates.

    Adds "due_soon" (merge of "due_in" and "due_today") and
    "cycle_started_tasks" (union of "my_tasks" across all cycles).

    Args:
        data (dict): notification data.

    Returns:
        dict: the received dict with the additional fields added.
    """
    due_soon = {}
    for key in ("due_in", "due_today"):
        if key in data:
            due_soon.update(data[key])
    data["due_soon"] = due_soon

    # combine "my_tasks" from multiple cycles
    started = {}
    if "cycle_data" in data:
        for cycle in data["cycle_data"].values():
            if "my_tasks" in cycle:
                started.update(cycle["my_tasks"])
    data["cycle_started_tasks"] = started
    return data
def _parse_color_string(colors, n=None, r=False, start=0, stop=1): """ Parses strings that are formatted like the following: 'RdBu_r_start=0.8_stop=0.9_n=10' 'viridis_start0.2_r_stop.5_n20' 'Greens_start0_n15' """ if isinstance(colors, str): color_settings = colors.split('_') colors = color_settings[0] for setting in color_settings[1:]: setting = setting.replace('=', '') if setting.startswith('n') and setting[1].isdigit(): n = int(setting[1:]) elif setting == 'r': r = True elif setting.startswith('start'): start = float(setting[5:]) elif setting.startswith('stop'): stop = float(setting[4:]) return colors, n, r, start, stop
def calc_prefix(_str, n):
    """Return *_str* unchanged if it fits in *n* characters, otherwise an
    n-character abbreviation of the form 'prefix...'."""
    return _str if len(_str) <= n else _str[:n - 3] + '...'
def iterate(function, value):
    """Apply *function* to *value* repeatedly until a fixed point is
    reached, and return that fixed point."""
    while True:
        successor = function(value)
        if successor == value:
            return value
        value = successor
def unindent(lines):
    """
    Strip the common leading indentation from *lines*.

    Unlike doctrim there is no special treatment of the first line. Empty
    lines are ignored when measuring; input with no non-empty lines is
    returned unchanged.
    """
    widths = [len(line) - len(line.lstrip()) for line in lines if line]
    if not widths:
        return lines
    cut = min(widths)
    return [line[cut:] for line in lines]
def filterEventList(events, LocationClient):
    """Filter fetched calendar events down to those whose location contains
    *LocationClient* (case-insensitive substring match).

    Returns a list of {'starttime': ..., 'summary': ...} dicts for the
    matching future events.
    """
    return [
        {'starttime': event['start'], 'summary': event['summary']}
        for event in events
        if LocationClient in event['location'].lower()
    ]
def is_in_list(element, the_list):
    """
    Return whether *element* occurs in *the_list*, either directly or
    inside one of its rows.

    * element can be a singleton value or a list of values.
    * the_list can be a single list or a list of lists.

    The original raised TypeError when *the_list* mixed scalars with rows
    (iterating `element in row` over a non-container row); such rows are
    now skipped.
    """
    if element in the_list:
        return True
    for row in the_list:
        try:
            if element in row:
                return True
        except TypeError:
            # row is not a container (e.g. an int) — cannot hold element
            continue
    return False
def str_for_containers(self):
    """
    Nice printing for types and method containers.
    Containers must have _container attribute containing all
    elements to be printed.
    """
    # _container lists the attribute names of the children to render;
    # objects without it render as the empty string.
    cont = getattr(self, '_container', None)
    if cont is None:
        return ''
    res = ''
    for child in cont:
        # str() wrapping: a missing child or missing __doc__ yields the
        # literal string 'None' instead of raising.
        descr = str(getattr(getattr(self, child, None), '__doc__', None))
        # truncate long docstrings to 100 characters
        if len(descr) > 100:
            descr = descr[:100] + '...'
        # indent docstring continuation lines under the child name
        descr = descr.replace('\n', '\n\t')
        res = res + '\n%s\n\t%s' % (child, descr)
    # drop the leading newline contributed by the first iteration
    res = res[1:]
    return res
def convert_to_seconds(column_name, row):
    """Interpret the value in row[column_name] as a number of hours and
    return it converted to seconds."""
    hours = float(row[column_name])
    return hours * 60 * 60
def setr(registers, opcodes):
    """setr (set register): return the contents of register A (opcodes[1]),
    to be stored in register C by the caller. Input B is ignored."""
    source = opcodes[1]
    return registers[source]
def LineStyleRev(argument):
    """Reverse-map a line style symbol to its descriptive name; unknown
    symbols fall back to 'Line'."""
    names = {
        '-': 'Line',
        '--': 'Dash',
        '-.': 'DashDot',
        ':': 'Dotted',
    }
    return names.get(argument, 'Line')
def grouped_list_to_list(grouped):
    """
    Flatten a list of [group, item] pairs into ['# group', 'item', ...],
    emitting a '# group' heading each time the group changes.
    """
    result = []
    current = ''
    for pair in grouped:
        if pair[0] != current:
            current = pair[0]
            result.append('# ' + current)
        result.append(pair[1])
    return result
def combination_gen(ngrams):
    """
    Return all combinations for truncated expressions: one item taken from
    each candidate list in *ngrams*, in nested-loop order.

    Generalized from the original hand-rolled 1/2/3-list implementation to
    any number of lists via itertools.product (the original silently
    returned [] for more than three lists); results for 1-3 lists are
    identical.

    :param ngrams: sequence of candidate lists
    :return: list of combinations, each a list with one item per input list
    """
    from itertools import product  # local import keeps the block self-contained
    return [list(combo) for combo in product(*ngrams)]
def sum_of_smaller_squares_5(n: int) -> int:
    """Return the sum of the squares of all positive integers smaller
    than *n*."""
    return sum(i ** 2 for i in range(1, n))
def iscsi_portal_with_port(address):
    """Append the default iSCSI port 3260 to a portal address.

    :param address: iSCSI portal without port
    :return: iSCSI portal with default port 3260
    """
    return "{}:3260".format(address)
def ipaddress_to_asn_lookup(f_global_asndb_routeviews, flow_sa):
    """Look up the origin AS number for an IP via the routeviews ASN database.

    :param f_global_asndb_routeviews: database object with a .lookup(ip)
        method returning (asn, prefix)
    :param flow_sa: source IP address to look up
    :return: origin ASN, or None when the lookup fails
    """
    try:
        prefix_lookup_result = f_global_asndb_routeviews.lookup(flow_sa)
        flow_sa_origin_asn = prefix_lookup_result[0]
    except Exception:
        # was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; any lookup failure still maps to None
        flow_sa_origin_asn = None
    return flow_sa_origin_asn
def gcd(x: int, y: int) -> int:
    """Greatest common divisor via Euclid's algorithm (recursive form).

    Parameters:
        x (int): Number
        y (int): Number

    Returns:
        int: Result
    """
    # mirrors the original `while y > 0` guard exactly
    if y <= 0:
        return x
    return gcd(y, x % y)
def getattrs(obj, attr_names=None, alias=None):
    """Get attribute values as a dict.

    obj can be any object. attr_names should be attribute names. alias is
    an attr_name:alias dict of attributes that should be renamed. Good for
    pulling initial form data out of model objects.

    Defaults are now None sentinels instead of the original mutable []/{}
    defaults (shared-across-calls anti-pattern); behaviour is unchanged.
    """
    attr_names = attr_names if attr_names is not None else ()
    alias = alias if alias is not None else {}
    return {alias.get(attr, attr): getattr(obj, attr) for attr in attr_names}
def assign_survey(night, conf):
    """
    Determine which survey a night belongs to based on the date ranges in a
    desi production configuration (yaml) dictionary, if possible.

    Args:
        night, int. The night you want to know the survey of.
        conf, dict. Dictionary returned when the configuration yaml file was
            read in; must contain a 'DateRanges' mapping of survey name ->
            [first_night, last_night] (inclusive).

    Returns:
        survey, str or None. The matching survey name, or None when no range
            contains the night.
    """
    return next(
        (survey
         for survey, (first, last) in conf['DateRanges'].items()
         if first <= night <= last),
        None,
    )
def size_from_shape(shape):
    """Return the total element count implied by a shape sequence
    (product of its dimensions; 1 for an empty shape)."""
    total = 1
    for dimension in shape:
        total *= dimension
    return total
def calc_inertialshapefactor(IA, IB, IC):
    """Inertial shape factor, a PMI-based shape descriptor: IB / (IA * IC).

    Cannot be calculated for planar surfaces.
    """
    return IB / (IA * IC)
def biground(value, base=5):
    """Round *value* to the nearest multiple of *base*, then add one more
    *base*.

    >>> biground(7)
    10
    >>> biground(11.0)
    15
    """
    # The original had an unreachable trailing `return f` (a NameError had
    # it ever executed); removed as dead code.
    return int(base * round(float(value) / base)) + base
def ignore_gt(gt_anno, index, difficulty):
    """
    Indicate whether a ground-truth sample should be ignored.

    Args:
        gt_anno [dict]: ground truth annotation with a "difficulty" sequence
        index [int]: GT sample index
        difficulty [int]: difficulty threshold

    Returns:
        ignore [bool]: True when the sample's difficulty is 0 or exceeds the
            requested difficulty.
    """
    sample_difficulty = gt_anno["difficulty"][index]
    return sample_difficulty == 0 or sample_difficulty > difficulty
def find_sqltype(val):
    """
    Find the sqlite data type matching the type of `val`.

    Parameters
    ----------
    val : any python type

    Returns
    -------
    sqltype : str
        String with the sql type usable when setting up a sqlite table.

    Raises
    ------
    Exception
        If no sqlite mapping exists for type(val).
    """
    # check order matters: NoneType first; bool instances match int here
    for typ, sqltype in [
        (type(None), 'NULL'),
        (int, 'INTEGER'),
        (float, 'REAL'),  # 'FLOAT' also works
        (str, 'TEXT'),
        (memoryview, 'BLOB'),
    ]:
        if isinstance(val, typ):
            return sqltype
    raise Exception("type '%s' unknown, cannot find mapping "
                    "to sqlite3 type" % str(type(val)))
def cnum1(s):
    """
    x[y] -> y
    z    -> z

    Extract the text between the first '[' and the first ']'; strings
    without '[' are returned unchanged.
    """
    open_at = s.find('[')
    if open_at < 0:
        return s
    close_at = s.find(']')
    return s[open_at + 1:close_at]
def from_sexagesimal(x, hours=False):
    """
    Convert a string of the form "dd:mm:ss.sss" — or "hh:mm:ss.sss" when
    hours=True — to decimal degrees. A leading '-' negates the result.
    """
    parts = x.split(':')
    parts[0] = parts[0].strip()
    sign = 1.
    if parts[0][0] == '-':
        sign = -1.
        parts[0] = parts[0][1:]
    total = 0.
    scale = 1.
    # successive division by 60 (not 60**i) kept for identical float math
    for part in parts:
        total += float(part) * scale
        scale /= 60.
    if hours:
        total *= 15.
    return total * sign
def reverseComplement(seq, alphabet='ACGT'):
    """
    Return the reverse complement of a nucleic acid sequence.

    Input is uppercased and U is treated as T; IUPAC ambiguity codes are
    supported.
    """
    pairs = dict(zip('ACGTNRYWSMKBHDV', 'TGCANYRWSKMVDHB'))
    normalized = seq.upper().replace('U', 'T')
    # reversing first then complementing equals complementing then reversing
    return ''.join(pairs[base] for base in reversed(normalized))
def ensure_list(specifier):
    """
    Wrap *specifier* in a single-element list unless it is already a list
    or tuple, in which case it is returned unchanged.
    """
    if isinstance(specifier, (list, tuple)):
        return specifier
    return [specifier]
def record_to_header(record, delimiter=None, name_only=False):
    """Build a header for a given record.

    With name_only, just the record name; with a delimiter, the field
    values joined by it; otherwise the default bracketed format.
    """
    if name_only:
        return record["name"]
    name, organism, scaffold, start, end = (
        str(record[field])
        for field in ("name", "organism", "scaffold", "start", "end")
    )
    if delimiter:
        return delimiter.join((name, organism, scaffold, start, end))
    return "{} [organism={}] [scaffold={}:{}-{}]".format(
        name, organism, scaffold, start, end)
def first_or_default(items, default=None):
    """Return the first element of *items*, or *default* when it is empty."""
    return next(iter(items), default)
def parse_instagram_video_embed(msg, width="200px", height="200px"):
    """Build an HTML5 <video> embed snippet for an Instagram message.

    need to put in proper video embed here

    :param msg: Instagram API message dict; must contain
        msg['videos']['standard_resolution']['url'] (mp4 source) and
        msg['images']['standard_resolution']['url'] (poster image)
    :param width: CSS width for the player
    :param height: CSS height for the player
    :return: HTML string with a <video> tag
    """
    video_url = msg['videos']['standard_resolution']['url']
    picture_url = msg['images']['standard_resolution']['url']
    video_str = """<video controls preload="none" width="{}" height="{}" poster="{}"> <source src="{}" type='video/mp4' /> </video>""".format(
        width, height, picture_url, video_url)
    return video_str
def get_url_from_formula(formula):
    """Return the URL embedded in a HYPERLINK spreadsheet formula.

    Args:
        formula (str): HYPERLINK formula, e.g. '=HYPERLINK("url","label")'.

    Returns:
        str: Resource URL.
    """
    # everything before the first '",' is '=HYPERLINK("<url>'; drop the
    # 12-character '=HYPERLINK("' prefix
    head = formula.split('",')[0]
    return head[12:]
def job(priority, properties):
    """Create a new job dict with the given priority and properties."""
    return dict(priority=priority, properties=properties)
def get_header(headers, header_name):
    """
    Find a header case-insensitively.

    :return: the matching header as a (name, value) tuple, or None when
        *headers* is None or no header matches.
    """
    if headers is None:
        return None
    wanted = header_name.lower()
    return next(
        ((name, value) for name, value in headers.items()
         if name.lower() == wanted),
        None,
    )
def count_frequency(word_list):
    """
    Count the frequency of each word in *word_list*, in first-appearance
    order.

    Replaces the original O(n^2) scan of already-encountered words with
    collections.Counter (O(n)); Counter is insertion-ordered, so the output
    order is unchanged.

    :return: list of [word, count] pairs
    """
    from collections import Counter
    return [[word, count] for word, count in Counter(word_list).items()]
def complement(template_strand):
    """
    Compute the complement strand of the given template strand.

    Parameters
    ----------
    template_strand : string
        The template strand we are evaluating (characters in 'ATGC').

    Returns
    -------
    complement_strand : string
        The complement of each nucleotide, in order.

    Raises
    ------
    KeyError
        If the strand contains a character other than A, T, G or C.
    """
    pairs = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
    return ''.join(pairs[base] for base in template_strand)
def get_row_col(mouse_x, mouse_y):
    """
    Convert an (x, y) screen position into a (row, col) cell value.

    Note: the top row is row=0 (bottom row=2), left col is col=0
    (right col=2).
    """
    # cell dimensions plus inter-cell padding
    cell_w = 86 + 8
    cell_h = 98 + 5
    origin_x = 50
    origin_y = 50
    return (mouse_y - origin_y) // cell_h, (mouse_x - origin_x) // cell_w
def p1evl(x, coef, N):
    """
    Evaluate a degree-N polynomial at *x* whose leading coefficient is 1.0.

    Same as polevl(), but coef[N] = 1.0 is implied and omitted from the
    array; coefficients are stored in reverse order.
    """
    acc = x + coef[0]
    for c in coef[1:N]:
        acc = acc * x + c
    return acc
def class_description(cl, maxlength=80, desc_attribute="description"):
    """
    Returns a short description of a class, based on a description class
    attribute (if present) and the class path:

    Examples (maxlength = 45)::

        "Short description (path.to.the.class)"
        "This is a longer description (path...class)"

    Parameters
    ----------
    cl : class
        The target class.
    maxlength : int
        Max length of the returned description.
    desc_attribute : str
        Name of the class attribute containing the description.
    """
    length = maxlength

    def shorten(string, maxlength):
        # Helper function that operates the truncation
        if string.startswith("<class '"):
            # Usual str(type) of the form <class 'foo.bar.baz'>:
            # Remove wrapping and truncate, giving precedence to extremes.
            words = string[8:-2].split(".")
            num_words = len(words)
            words_with_priority = [
                # from the out inwards, precedence to the left: 0 2 ... 3 1
                (words[i], min(2*i, 2*num_words - 2*i - 1))
                for i in range(num_words)
            ]
            # drop the lowest-priority (innermost) path components until
            # the dotted string fits
            for threshold in range(num_words, 0, -1):
                string = ".".join(
                    (word if priority < threshold else "")
                    for (word, priority) in words_with_priority
                )
                if len(string) <= maxlength:
                    return string
            # fallback when every dot-based truncation is too long.
            return shorten(words[0], maxlength)
        else:
            # Custom description: just truncate.
            return string if len(string) <= maxlength \
                else string[:maxlength-3]+"..."

    if getattr(cl, desc_attribute, None) is not None:
        description = shorten(getattr(cl, desc_attribute), length)
        # if there's enough room left, add the class name in brackets.
        length -= len(description) + 3
        if length >= 10:
            description += " (" + shorten(str(cl), length) + ")"
    else:
        # no description attribute: fall back to the (shortened) class path
        description = shorten(str(cl), length)
    return description
def cont_search_data(raw_data):
    """
    Function for interactive search. Returns organization number and name
    for every entry whose partial name matched.

    :param raw_data: json data filtered by first letters
    :return: list of [organisasjonsnummer, navn] pairs
    """
    return [
        [entry['organisasjonsnummer'], entry['navn']]
        for entry in raw_data['data']
    ]
def add_extension_to_file(file: str, extension: str) -> str:
    """Append an extension to a file path.

    :param file: file path to which to add an extension
    :param extension: extension to append, without the dot (e.g., mp3)
    :return: file with extension
    """
    return f"{file}.{extension}"
def flist(start, stop, step):
    """
    Build a list from *start* (ints or floats), repeatedly adding *step*
    until the last value reaches or exceeds *stop*.

    Note: like the original, the first value >= *stop* is included.
    """
    values = [start]
    while values[-1] < stop:
        values.append(values[-1] + step)
    return values
def down_diagonal_contains_only_xs(board):
    """Check whether the top-left-to-bottom-right diagonal is all "X"."""
    return all(row[i] == "X" for i, row in enumerate(board))
def fixextensions(peeps, picmap, basedir="."):
    """Replace image names with ones that actually exist in *picmap*.

    :param peeps: list of records; index 0 is a header row, and each record
        holds an image filename at index 2
    :param picmap: mapping of bare image name (no extension) -> real filename
    :param basedir: unused; kept for interface compatibility
    :return: (fixed, missing) where *fixed* is the header plus copies of the
        records whose image was found (filename replaced), and *missing* is
        the list of peeps indexes whose image was not found
    """
    fixed = [peeps[0].copy()]
    missing = []
    for i in range(1, len(peeps)):
        name, _ext = peeps[i][2].split(".", 1)
        if name in picmap:
            entry = peeps[i].copy()
            entry[2] = picmap[name]
            # bug fix: the original assigned to fixed[i][2] after appending,
            # but once any earlier record was missing, fixed[i] is a
            # different (or out-of-range) slot
            fixed.append(entry)
        else:
            missing.append(i)
    return fixed, missing
def convert(im_height, im_width, box):
    """
    Convert a box from normalized coordinates to absolute pixel values.

    Arguments:
        im_height: int
            The image height, in pixels
        im_width: int
            The image width, in pixels
        box: (int, int, int, int)
            (left, right, top, bot) coordinates, normalized (0 <= x <= 1).

    Returns:
        box: (int, int, int, int)
            (left, right, top, bot) == (ymin, xmin, ymax, xmax) coordinates,
            absolute (0 <= x <= image_dim).
    """
    left, right, top, bot = box
    return (int(right * im_width),
            int(bot * im_width),
            int(left * im_height),
            int(top * im_height))
def get_policy_length(control_str_ids, n_control_steps):
    """
    Get the length of the policy.
    ASSUMPTION - PUMP controls are binary 1 BIT, ORIFICE and WEIR are 3 BITS.

    returns: [int] the number of total control decisions in the policy
    """
    bits_per_step = {'ORIFICE': 3, 'WEIR': 3, 'PUMP': 1}
    total = 0
    for ctl_id in control_str_ids:
        ctl_type = ctl_id.split()[0]
        # unknown control types contribute nothing, as before
        total += bits_per_step.get(ctl_type, 0) * n_control_steps
    return total
def update_labels_and_tags(dataset_id,
                           existing_labels_or_tags,
                           new_labels_or_tags,
                           overwrite_ok=False):
    """
    Merge new labels or tags into a dataset's existing ones.

    :param dataset_id: string name to identify the dataset
    :param existing_labels_or_tags: labels already existing on the dataset
        = Dict[str, str]; tags already existing = Dict[str, '']
    :param new_labels_or_tags: new labels/tags to add = Dict[str, str]
    :param overwrite_ok: flag signaling whether existing values may be
        overwritten (False as default)
    :raises: RuntimeError if parameters are not specified
    :raises: RuntimeError if overwrite_ok is False and a new value for an
        existing key is provided
    :return: a dictionary of the merged labels or tags
    """
    if not dataset_id:
        raise RuntimeError("Provide a dataset_id")
    if not new_labels_or_tags:
        raise RuntimeError("Please provide a label or tag")

    # items() set-difference drops pairs that already exist with the same value
    updates = dict(new_labels_or_tags.items() - existing_labels_or_tags.items())
    conflicting = updates.keys() & existing_labels_or_tags.keys()
    if conflicting and not overwrite_ok:
        raise RuntimeError(f'Cannot update labels on dataset {dataset_id}'
                           f'without overwriting keys {conflicting}')
    return {**existing_labels_or_tags, **updates}
def find_vswitch_name_for_id(k2_id, vswitch_map):
    """
    Return the vSwitch name for a given vSwitch K2 ID.

    :param k2_id: the K2 vSwitch ID
    :param vswitch_map: the vSwitch map from the HMC Topo
    :returns: the vSwitch name, or None when the ID is unknown
    """
    entry = vswitch_map.get(k2_id)
    # truthiness check kept from the original: a falsy entry also maps to None
    if not entry:
        return None
    return entry.element.findtext('SwitchName')
def image_check(name: str):
    """
    Check whether *name* ends with a known image file extension
    (case-insensitive).

    Args:
        name (str): the string to test the suffix of

    Returns:
        bool: True when the suffix is an image extension, else False.
    """
    image_suffixes = (".jpg", ".png", ".jpeg", ".gif", ".webp")
    return name.lower().endswith(image_suffixes)
def _clamp(value, minx, maxx): """ Constrain a value between a minimum and a maximum. If the value is larger than the maximum or lower than the minimum, the maximum or minimum will be returned instead. :param int value: The value to clamp. :param int minx: The minimum the value can take. :param int maxx: The maximum the value can take. """ return max(minx, min(maxx, value))
def static_file(file_path):
    """
    Build a response dict that serves *file_path* as a static file.

    :param file_path [str]: file path to serve as a response
    """
    response = {"type": "static_file", "file_path": file_path}
    # this is a hack for now
    response["body"] = ""
    response["status_code"] = "200"
    return response
def offsetInRAM(dolOffset, sectionInfo):  # todo: write into dolInitializer method
    """
    Convert a DOL file offset to the equivalent RAM address once the DOL
    is loaded.

    sectionInfo values are (dol_start, ram_start, size) triples; returns -1
    when the offset falls inside no section.
    """
    for section in sectionInfo.values():
        dol_start, ram_start, size = section[0], section[1], section[2]
        if dol_start <= dolOffset < dol_start + size:
            # carry the intra-section offset over to the RAM base
            return ram_start + (dolOffset - dol_start)
    return -1
def read_excl_input(excl_file):
    """
    Read the exclude file.

    :param excl_file: path to the exclude file, or None
    :return: list(excluded_epitopes), one per line, or None when no file is
        given
    """
    if excl_file is None:
        return None
    # "rU" mode was removed in Python 3.11 (it raises ValueError); universal
    # newlines are the default in text mode, so plain "r" is equivalent.
    with open(excl_file, "r") as f:
        return f.read().splitlines()
def adf_factor(x):
    """Return a gradient adjustment factor from a quadratic fit to the
    Strava gradient-adjusted pace curve."""
    a, b = 0.0017002, 0.02949656
    return a * x ** 2 + b * x + 1.0
def get_info_file_path(path):
    """
    Return the ground-truth data file path for an image path.

    example: path=/000001/000001493.png returns /000001/000001493.gt_data.txt

    Bug fix: the original used a hard-coded `path[:16]` slice, which does
    not even match its own example (the stem there is 17 characters) and
    breaks for any other path length. The real extension is now stripped.
    """
    stem, dot, _ext = path.rpartition('.')
    # paths with no '.' are used unchanged as the stem
    return (stem if dot else path) + '.gt_data.txt'
def arb_gain(session, Type='Real64', RepCap='', AttrID=1250202, buffsize=0, action=None):
    """[Arbitrary Gain <real64>]
    Sets/Gets the Gain for the waveform. Allowable range of values depends
    upon connection type: Single ended passive mode = 0.170 to 0.250 (best
    signal fidelity); Single ended amplified = 0.340 to 0.500;
    Differential = .340 to 0.500. This value is unitless.
    Set: RepCap=< channel# (1-2) >
    """
    if action is None:
        # replaces the shared mutable default list ['Get', '']; the
        # effective default behaviour is unchanged
        action = ['Get', '']
    return session, Type, RepCap, AttrID, buffsize, action
def parseTheData(kubra_output_data_json2Dict):
    """Return the 'updatedAt' epoch timestamp from the provided dict, or 0
    when the key is absent."""
    return kubra_output_data_json2Dict.get('updatedAt', 0)
def _check_similarity(similarity, length): """Check if measures are similarities or dissimilarities""" if isinstance(similarity, bool): similarity_list = [similarity] * length elif isinstance(similarity, list) and all(isinstance(s, bool) for s in similarity) and len(similarity) == length: similarity_list = similarity else: raise ValueError("the similarity parameter should be a boolean or a list of booleans indicating " "if measures are similarities or dissimilarities, '{}' (type '{}') " "was passed.".format(similarity, type(similarity))) return similarity_list
def search_obj(bucketObj, searchList):
    """
    Search function for s3 objects.

    input: s3 bucket object (boto3), list of search terms
    output: list of matching s3 object keys, grouped by search term (same
        ordering as the original)

    Performance fix: the original re-listed the entire bucket once per
    search term; the listing is now fetched once and reused.
    """
    if not searchList:
        return []
    listing = [(str(candidate.key), candidate.key)
               for candidate in bucketObj.objects.all()]
    result = []
    for target in searchList:
        for key_str, key in listing:
            if key_str.find(target) > -1:  # substring match on the key
                result.append(key)
    return result
def max_key(words):
    """Return the value stored under the maximum key in *words*.

    words: dictionary
    """
    return words[max(words)]
def is_numeric(value):
    """
    Duck-typed numeric check: True when *value* supports both subtraction
    and multiplication (covers float, int, and numpy numeric types).
    """
    return all(hasattr(value, op) for op in ("__sub__", "__mul__"))
def extract_cands(mystr, candd):
    """
    Split an underscore-separated string of candidate names and increment
    the per-name counters in *candd* (mutated in place and returned).
    """
    for name in mystr.split("_"):
        candd[name] = candd.get(name, 0) + 1
    return candd
def row_to_bbox_coordinates(row):
    """
    Take a row with xMin/xMax/yMin/yMax and return center-based bounding
    box coordinates: (center_x, center_y, width, height),
    e.g. {'x': 100, 'y': 120, 'width': 80, 'height': 120}
    """
    width = row['xMax'] - row['xMin']
    height = row['yMax'] - row['yMin']
    return {'x': row['xMin'] + width / 2,
            'width': width,
            'y': row['yMin'] + height / 2,
            'height': height}
def is_iterable(x):
    """Return True when *x* is iterable, False otherwise."""
    try:
        iter(x)
    except TypeError:
        # iter() raises TypeError for non-iterables; the original bare
        # `except:` would also have swallowed KeyboardInterrupt/SystemExit
        return False
    return True
def _only_keys(dict_ins, keys): """ Filters out unwanted keys of a dict. """ return {k: dict_ins[k] for k in dict_ins if k in keys}
def flatten(contours):
    """\
    Return a single contour containing all the segments of the given
    contours, in order.
    """
    merged = []
    for contour in contours:
        merged.extend(contour)
    return merged
def binom_coeff(n):
    """
    Calculate the binomial coefficient (n, 2), i.e. the number of distinct
    pairs possible in a set of size n.

    Uses integer floor division: the original int(n * (n-1) / 2) went
    through a float and loses precision for large n.

    :param n: size of set
    :return: number of pairs
    """
    return n * (n - 1) // 2
def tf_sum(q, d, c):
    """
    ID 1 for OHSUMED: sum of document term frequencies d[w] over the query
    terms that occur in the document (*c* is unused).
    """
    shared_terms = set(q) & set(d)
    # 0.0 start value keeps the float return type of the original
    return sum((d[term] for term in shared_terms), 0.0)
def _heading_sort_order(msg_line): """ Custom first (i.e. generic False first), then by message i.e. by title """ custom = ' #### ' in msg_line[: 10] ## Custom all start with #### ... return (not custom, msg_line)
def ndistance(p1, p2):
    """
    Calculate the Euclidean distance between two points in N-dimensional
    space.

    NOTE: Both points must have the same number of dimensions (their
    length), e.g. point (2, 4) has two dimensions.

    ARGUMENTS:
    - p1  list()  Coordinates, e.g. [0.2, 4, ..., n-1, n]
    - p2  list()  Coordinates, e.g. [2, -7, ..., n-1, n]

    RETURNS:
    - float()  Euclidean distance between both points.
    """
    squared = 0
    for axis in range(len(p1)):
        squared += (p1[axis] - p2[axis]) ** 2
    return squared ** 0.5
def _get_dependencies(dependencies): """Method gets dependent modules Args: dependencies (dict): dependent modules Returns: list """ mods = [] for key, val in dependencies.items(): mods.append(val['package']) return mods
def _CheckSuccessful(response): """Checks if the request was successful. Args: response: An HTTP response that contains a mapping from 'status' to an HTTP response code integer. Returns: True if the request was succesful. """ return "status" in response and 200 <= int(response["status"]) < 300
def read_file(filepath):
    """Open a file and return its full contents.

    Parameters
    ----------
    filepath : str
        path to a file

    Returns
    -------
    str
        contents of the file

    Raises
    ------
    IOError
        if the file does not exist
    """
    with open(filepath, "r") as handle:
        return handle.read()
def expandUrlData(data):
    """Render a dict of string params as a URL query string: '?k1=v1&k2=v2'."""
    pairs = [key + "=" + data[key] for key in data]
    return "?" + "&".join(pairs)
def separe(values, sep, axis=0):
    """
    Separate values from separator or threshold.

    Items whose ``axis`` component is strictly greater than ``sep`` go to
    the first list; everything else (including ties) goes to the second.

    :param values: list of values
    :param sep: separator value
    :param axis: index of the component compared in each value
    :return: (greater values, lesser-or-equal values)
    """
    greater, lesser = [], []
    for item in values:
        # Single pass: pick the destination list, then append.
        target = greater if item[axis] > sep else lesser
        target.append(item)
    return greater, lesser
def la_hoanthien(n):
    """Check whether n is a perfect number.

    A perfect number equals the sum of its proper divisors (e.g. 6 = 1+2+3).

    Fixes: the original returned True for n == 0, because range(1, 0) is
    empty and the zero divisor sum compared equal to n. Numbers below 2
    now return False outright. Also stops shadowing the builtin ``sum``.

    :param n: integer to test
    :return: True if n is perfect, False otherwise
    """
    if n < 2:
        return False
    total = 0
    for divisor in range(1, n):
        if n % divisor == 0:
            total += divisor
    return total == n
def getTotalTime(segs, countMultipleSpkrs=True):
    """
    This function counts the overall ground truth time after removing the
    collars. An option allows overlapping speaker time to be double counted
    or not.

    Inputs:
    - segs: the list of ground truth and diarization segments after removing
      segments within the collars
    - countMultipleSpkrs: a Boolean option to double count overlapping ground
      truth speaker time (i.e. all time spoken by ground truth speakers v.
      time in which ground truth speakers are speaking), default "True"

    Outputs:
    - time: aggregate ground truth speaker time in seconds, form: "408.565"
      (multiple on) or "400.01" (multiple off)
    """
    total = 0
    for seg in segs:
        duration = seg['tend'] - seg['tbeg']
        # '== False' kept deliberately: it matches the original's exact
        # semantics (only False/0 disable speaker multiplication).
        if countMultipleSpkrs == False:
            total += duration
        else:
            total += duration * len(seg['name']['oname'])
    return total
def post_search_key(post):
    """Search key for post: its title followed by its author."""
    return f"{post['title']} {post['author']}"
def info2lists(info, in_place=False):
    """
    Return info with:
    1) `packages` dict replaced by a 'packages' list with indexes removed
    2) `releases` dict replaced by a 'releases' list with indexes removed

    info2list(info2dicts(info)) == info

    When neither key is present, ``info`` is returned untouched; otherwise
    a shallow copy is modified (or ``info`` itself when ``in_place``).
    """
    if 'packages' not in info and 'releases' not in info:
        return info
    result = info if in_place else info.copy()
    for key in ('packages', 'releases'):
        mapping = info.get(key)
        if mapping:
            result[key] = list(mapping.values())
    return result
def get_record_value(record):
    """Return a list of values for a hosted zone record.

    Alias records collapse to a single "ALIAS:<zone-id>:<dns-name>" entry;
    anything else yields the 'Value' of each resource record.
    """
    try:
        # EAFP: a missing AliasTarget (or missing sub-key) drops us to the
        # ResourceRecords branch, exactly as the original did.
        alias = record["AliasTarget"]
        return [":".join(("ALIAS", alias["HostedZoneId"], alias["DNSName"]))]
    except KeyError:
        return [rr["Value"] for rr in record["ResourceRecords"]]
def _ainvhash(anum, num_of_arrows, dim):
    """Turns an arrow hash back into the array of arrow directions.

    Decodes ``anum`` as a mixed-radix (base ``dim``) number, most
    significant digit first, into a list of arrow labels.

    Args:
        anum (int): The arrow hash number.
        num_of_arrows (int): The number of arrows in the system.
        dim (int): The number of directions the arrows can point.

    Returns:
        arrows (list): The arrow labeling.
    """
    arrows = [0] * num_of_arrows
    for position in range(num_of_arrows - 1, -1, -1):
        weight = dim ** position
        arrows[position], anum = divmod(anum, weight)
    return arrows
def set_binary_labels(input_examples, positive_label):
    """
    Replaces the class labels with "1.0" or "-1.0", depending on whether the
    class label matches 'positive_label'.

    Returns an array of tuples, where the first element is the label number
    and the second is a path to the image file.
    """
    return [
        ("1.0" if example[0] == positive_label else "-1.0", example[1])
        for example in input_examples
    ]
def drop_unadjusted_fields(mapping):
    """Drop all optional fields beyond mapping[0:12], keeping only the
    cigar ("cg:Z:") and alignment-type ("tp:A:") tags.

    Args:
        mapping (list of str): the 12 mandatory PAF-style columns followed
            by optional "xx:T:value" tag fields.

    Returns:
        list of str: the first 12 columns plus any kept tag fields, in
        their original order.
    """
    kept = mapping[:12]
    for extra in mapping[12:]:
        if extra.startswith(("cg:Z:", "tp:A:")):
            kept.append(extra)
    return kept
def num_to_int(num):
    """
    Checks that a numerical value (e.g. returned by robot) is an integer
    and not a float.

    Parameters
    ----------
    num : number to check

    Returns
    -------
    integer : num cast to an integer

    Raises
    ------
    ValueError : if num is not an integer
    """
    # Guard clause: reject anything with a fractional part.
    if num % 1 != 0:
        raise ValueError('Expecting integer. Got: "{0}" ({1})'
                         .format(num, type(num)))
    return int(num)
def get_package(version, *args) -> str:
    """Build a dotted kuber package path.

    Dots in ``version`` become underscores, and any extra path segments
    are appended: get_package("1.20", "client") -> "kuber.1_20.client".
    """
    version_segment = version.replace(".", "_")
    return ".".join(["kuber", version_segment, *args])
def bubble_sort(array):
    """Sort ``array`` in place with bubble sort and return it.

    Each pass bubbles the largest remaining item to the end, so the
    comparison bound shrinks by one per pass; the loop stops as soon as a
    full pass makes no swap (the list is then sorted).
    """
    bound = len(array)
    swapped = True
    while swapped and bound > 1:
        swapped = False
        for idx in range(bound - 1):
            if array[idx] > array[idx + 1]:
                array[idx], array[idx + 1] = array[idx + 1], array[idx]
                swapped = True
        bound -= 1
    return array
def convert_priority(priority):
    """
    Flips the priority from the interface version to the api version.

    In the user interface, 1 is most important and 4 is least important.
    In the api, 4 is most important, and 1 is least important. Any value
    outside 1..4 (or a non-integer) falls back to the least important
    api priority, 1.

    Fixes: the original indexed ``[4, 3, 2, 1][priority - 1]``, so
    priority 0 hit index -1 and wrongly returned 4 (most important), and
    other small negatives wrapped around similarly.

    Args:
        priority (int): The user inputted priority

    Returns:
        int The API version of the priority.
    """
    if isinstance(priority, int) and 1 <= priority <= 4:
        return 5 - priority
    return 1
def det_cat_fct_merge(contab_1, contab_2):
    """
    Merge two contingency table objects.

    Parameters
    ----------
    contab_1: dict
        A contingency table object initialized with
        :py:func:`pysteps.verification.detcatscores.det_cat_fct_init`
        and populated with
        :py:func:`pysteps.verification.detcatscores.det_cat_fct_accum`.
    contab_2: dict
        Another contingency table object initialized with
        :py:func:`pysteps.verification.detcatscores.det_cat_fct_init`
        and populated with
        :py:func:`pysteps.verification.detcatscores.det_cat_fct_accum`.

    Returns
    -------
    out: dict
        The merged contingency table object.
    """
    # Merging is only meaningful for tables accumulated with the same
    # threshold and axis settings.
    if contab_1["thr"] != contab_2["thr"]:
        raise ValueError(
            "cannot merge: the thresholds are not same %s!=%s"
            % (contab_1["thr"], contab_2["thr"])
        )
    if contab_1["axis"] != contab_2["axis"]:
        raise ValueError(
            "cannot merge: the axis are not same %s!=%s"
            % (contab_1["axis"], contab_2["axis"])
        )
    if contab_1["hits"] is None or contab_2["hits"] is None:
        raise ValueError("cannot merge: no data found")

    # Shallow-copy the first table and add the second table's counts.
    merged = contab_1.copy()
    for count in ("hits", "misses", "false_alarms", "correct_negatives"):
        merged[count] += contab_2[count]
    return merged