def get_command_tokens(arguments):
    """ Extract tokens from command arguments """
    rep = set()
    if type(arguments) is list:
        for arg in arguments:
            rep = rep.union(get_command_tokens(arg))
    else:
        if 'token' in arguments:
            rep.add(arguments['token'])
        if 'arguments' in arguments:
            for arg in arguments['arguments']:
                rep = rep.union(get_command_tokens(arg))
    return rep

def add_channels_to_command(command, channels):
    """ Add extra channels to a conda command by splitting the channels
    and putting "--channel" before each one. """
    if channels:
        channels = channels.strip().split()
        dashc = []
        for channel in channels:
            dashc.append('--channel')
            dashc.append(channel)
        return command[:2] + dashc + command[2:]
    else:
        return command

def wrap(raw, cutoff):
    """Return string. Soft wraps a string of text to a specified width.

    Keyword arguments:
    raw -- input string
    cutoff -- integer maximum width in characters
    """
    working = ''
    outTxt = []
    if len(raw) < cutoff:
        outTxt.append(raw)
    else:
        for i in raw.split():
            if len(working) + len(i) < cutoff:
                working += i + ' '
            else:
                outTxt.append(working.rstrip())
                working = i + ' '
        outTxt.append(working.rstrip())
    results = '\n'.join(outTxt)
    return results

def thousands_separated_int(n: str) -> str:
    """Insert thousands separators into an integer given as a string."""
    new_int = ""
    for i, y in enumerate(reversed(n), 1):
        new_int = y + new_int
        # For every third digit, insert a thousands separator.
        if i % 3 == 0 and i != len(n):
            new_int = "," + new_int
    return new_int

def int64_parity(x):
    """
    Compute the parity of x.

    Recursively divide a (64-bit) integer (x) into two equal
    halves and take their XOR until only 1 bit is left.

    Parameters
    ----------
    x : int64

    Returns
    -------
    int64
    """
    x = (x & 0x00000000FFFFFFFF) ^ (x >> 32)
    x = (x & 0x000000000000FFFF) ^ (x >> 16)
    x = (x & 0x00000000000000FF) ^ (x >> 8)
    x = (x & 0x000000000000000F) ^ (x >> 4)
    x = (x & 0x0000000000000003) ^ (x >> 2)
    x = (x & 0x0000000000000001) ^ (x >> 1)
    return x & 1

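# A minimal usage sketch (not part of the original): the parity is 1 when the
# number of set bits is odd. The inputs below are illustrative values.
assert int64_parity(0b1011) == 1   # three set bits -> odd parity
assert int64_parity(0b1001) == 0   # two set bits -> even parity
assert int64_parity(0) == 0
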
def is_streaming(pattern):
    """
    Determine whether Webdataset is being streamed in or not.
    Very simple for now and kinda hacky.

    Args:
        pattern:

    Returns:
    """
    if "pipe" in pattern:
        return True
    else:
        return False

def _get_vm_prop(vm, attributes):
    """Safely get a property or return None"""
    result = vm
    for attribute in attributes:
        try:
            result = getattr(result, attribute)
        except (AttributeError, IndexError):
            return None
    return result

def noneorboolorcomplex(s):
    """Turn empty or 'none' to None, else evaluate to a boolean or complex."""
    if s.lower() in ("", "none"):
        return None
    elif s.lower() in ("auto", "true", "t", "yes", "y"):
        return True
    elif s.lower() in ("false", "f", "no", "n"):
        return False
    else:
        return complex(eval(s, {}, {}))

def fix_style(s):
    """Minor, general style fixes for questions."""
    s = s.replace('?', '')  # Delete question marks anywhere in sentence.
    s = s.strip(' .')
    if s[0] == s[0].lower():
        s = s[0].upper() + s[1:]
    return s + '.'

def test_get_overtime(hour_out, check_out):
    """Compute an employee's overtime."""
    if check_out > hour_out:
        return check_out - hour_out
    else:
        return ' '

def unpack_response(dhash, deep=100, pid=0):
    """
    Iterates through a response which contains nested dictionaries and lists.

    dhash: dictionary which may be nested.
    deep: int indicating how deep to print out nested levels.
    pid: int
    """
    rstr = ""
    for k2 in dhash.keys():
        iii = pid
        spc = " " * iii
        rstr += spc + str(k2) + " " + str(type(dhash[k2])) + " : "
        # UNPACK DICTIONARY
        if iii < deep and isinstance(dhash[k2], dict):
            rstr += "\n"
            iii += 1
            rstr += spc
            rstr += unpack_response(dhash[k2], pid=iii)
            rstr += "\n"
        # UNPACK LIST
        elif isinstance(dhash[k2], list):
            iii += 1
            rstr += "\n---BEGIN LIST---" + str(iii) + "\n"
            for val in dhash[k2]:
                if isinstance(val, dict):
                    rstr += unpack_response(val, deep=deep, pid=iii)
                    rstr += "\n"
                else:
                    rstr += spc + "listval " + str(val) + str(type(val)) + "\n"
            rstr += "---END LIST---" + str(iii) + "\n"
        elif isinstance(dhash[k2], str):
            rstr += spc + dhash[k2] + "\n"
        elif isinstance(dhash[k2], int):
            rstr += spc + str(dhash[k2]) + "\n"
        elif isinstance(dhash[k2], float):
            rstr += spc + str(dhash[k2]) + "\n"
        else:
            rstr += "\n"
    return rstr

def remove_duplicates_sorted(A):
    """Remove all duplicates in-place from a sorted array, return the new length. [LC-0026]

    Examples:
        >>> remove_duplicates_sorted([1, 1, 2])
        2
        >>> remove_duplicates_sorted([0, 0, 1, 1, 1, 2, 2, 3, 3, 4])
        5
        >>> remove_duplicates_sorted([2, 3, 3, 3, 6, 9, 9])
        4
        >>> remove_duplicates_sorted([2, 2, 2, 11])
        2
    """
    read_head = write_head = 0
    prv = None
    while read_head < len(A):
        cur = A[read_head]
        if cur != prv:
            A[write_head] = A[read_head]
            prv = cur
            write_head += 1
        read_head += 1
    return write_head

def RunLintOverAllFiles(linter, filenames):
    """Runs linter over the contents of all files.

    Args:
      linter: subclass of BaseLint, implementing RunOnFile()
      filenames: list of all files whose contents will be linted

    Returns:
      A list of tuples with format [(filename, line number, msg), ...] with
      any violations found.
    """
    lint = []
    for filename in filenames:
        # open() raises on failure rather than returning a falsy value,
        # so guard it with try/except instead of `if not file`.
        try:
            file = open(filename, 'r')
        except IOError:
            print('Could not open %s' % filename)
            continue
        lines = file.readlines()
        file.close()
        lint.extend(linter.RunOnFile(filename, lines))
    return lint

def mangle_symbol_name(name):
    """Mangle an external symbol name for use with B."""
    return 'b.' + name

def dict_to_tuple_list(my_dict):
    """
    Given a dictionary where each k-v pair is of the form (x, [y]), convert
    the dictionary into a list of tuples.

    Example:
        >>> dict_to_tuple_list({'x': [1, 2, 3], 'y': [4, 5, 6]})
        [('x', 1), ('x', 2), ('x', 3), ('y', 4), ('y', 5), ('y', 6)]
    """
    newList = list()
    for i in my_dict.keys():
        for j in my_dict.get(i):
            newList.append((i, j))
    return newList

def parse_acs_metadata(acs_metadata, groups):
    """Returns a map of variable ids to metadata for that variable, filtered
    to specified groups.

    acs_metadata: The ACS metadata as json.
    groups: The list of group ids to include."""
    output_vars = {}
    for variable_id, metadata in acs_metadata["variables"].items():
        group = metadata.get("group")
        if group in groups and metadata["label"].startswith("Estimate!!Total"):
            output_vars[variable_id] = metadata
    return output_vars

def sanitize_zfill(value):
    """ Built-in sanitizer which replaces the original value with zeros. """
    return None if value is None else "".zfill(len(value))

def to_hyper_geography(dss_geopoint):
    """
    Format a `geo` typed value from DSS to Tableau Hyper

    :param dss_geopoint: `geo` typed value
    :return: Tableau Hyper geo value
    """
    return dss_geopoint.lower()

def _gf2mulxmod(a, m):
    """
    Computes ``a * x mod m``.

    *NOTE*: Does *not* check whether `a` is smaller in degree than `m`.

    Parameters
    ----------
    a, m : integer
        Polynomial coefficient bit vectors. Polynomial `a` should be
        smaller degree than `m`.

    Returns
    -------
    c : integer
        Polynomial coefficient bit vector of ``c = a * x mod m``.
    """
    c = a << 1
    c2 = c ^ m
    if c2 < c:
        c = c2
    return c

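# A minimal usage sketch (not from the original): multiply by x in GF(2)[x]
# modulo the AES polynomial x^8 + x^4 + x^3 + x + 1 (0x11B). The modulus and
# inputs below are illustrative assumptions.
AES_POLY = 0b100011011  # 0x11B
assert _gf2mulxmod(0b10000000, AES_POLY) == 0b00011011  # overflow, so reduce
assert _gf2mulxmod(0b00000001, AES_POLY) == 0b00000010  # no reduction needed
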
def parse_chunks(chunks):
    """Parse chunks and extract information on individual streams."""
    streams = []
    for chunk in chunks:
        if chunk["tag"] == 2:  # stream header chunk
            streams.append(dict(
                stream_id=chunk["stream_id"],
                name=chunk.get("name"),  # optional
                type=chunk.get("type"),  # optional
                source_id=chunk.get("source_id"),  # optional
                created_at=chunk.get("created_at"),  # optional
                uid=chunk.get("uid"),  # optional
                session_id=chunk.get("session_id"),  # optional
                hostname=chunk.get("hostname"),  # optional
                channel_count=int(chunk["channel_count"]),
                channel_format=chunk["channel_format"],
                nominal_srate=int(chunk["nominal_srate"])))
    return streams

def low_field_losses(x, df_low, f1, n):
    """
    Low field losses due to domain wall movement.
    Returns the frequency domain half width half maximum (HWHM).

    df_low = HWHM
    """
    f = x
    return df_low * (1 / (f - f1)) ** n

def instantiate(class_name, *args, **kwargs):
    """Helper to dynamically instantiate a class from a name."""
    split_name = class_name.split(".")
    module_name = split_name[0]
    class_name = ".".join(split_name[1:])
    module = __import__(module_name)
    class_ = getattr(module, class_name)
    return class_(*args, **kwargs)

def find_general_collateral_by_id(collateral_id: int, general_collateral):
    """Search existing list of general_collateral objects for a matching collateral id."""
    collateral = None
    if collateral_id and general_collateral:
        for g_collateral in general_collateral:
            if g_collateral.id == collateral_id and not g_collateral.registration_id_end:
                collateral = g_collateral
    return collateral

def sec_to_ts(sec):
    """
    seconds to timestamp

    :param sec: number of seconds
    :return: timestamp of the form HH:MM:SS.MIL
    """
    ms = '{:.3f}'.format(sec).split('.')[-1]
    int_time = int(sec)
    ss = int_time % 60
    int_time = int_time // 60
    mm = int_time % 60
    hh = int_time // 60
    return '{:0>2d}:{:0>2d}:{:0>2d}.{}'.format(hh, mm, ss, ms)

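# A minimal usage sketch (not from the original); the values are illustrative.
assert sec_to_ts(3661.5) == '01:01:01.500'
assert sec_to_ts(0) == '00:00:00.000'
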
def mul(a: float, b: float = 1, c: float = 1) -> float:
    """ returns the product of a, b, and c """
    print(a * b * c)
    return a * b * c

def get_next_available_port(containers_info):
    """
    Find next available port to map postgres port to host.

    :param containers_info:
    :return port:
    """
    ports = [container_info.host_port for container_info in containers_info]
    return (max(ports) + 1) if ports else 5433

def _is_single_matplotlib_color(color):
    """Returns True if color is a single (not a list) mpl color."""
    single_color = False
    if isinstance(color, str):
        single_color = True
    elif len(color) == 3 or len(color) == 4:
        single_color = True
        for e in color:
            if not (isinstance(e, float) or isinstance(e, int)):
                single_color = False
    return single_color

def _list_values(values, sep):
    """Returns list of values as '[values[0], values[1], ..., sep values[-1]]':

    $ _list_values([1, 2, 3], "and")
    "[1, 2, and 3]"
    """
    values = list(map(repr, values))
    if len(values) > 2:
        values = (", ".join(values[:-1]) + ",", values[-1])
    if len(values) == 2:
        values = (" ".join((values[0], sep, values[1])),)
    return values[0]

def getImageLink(url, img_url, caption):
    """ Converts a URL to a Markdown image link. """
    return "[![{caption}]({img_url} =16x16)]({url})".format(
        caption=caption, img_url=img_url, url=url)

def clean_meta_args(args):
    """Process metadata arguments.

    Parameters
    ----------
    args : iterable of str
        Formatted metadata arguments for 'git-annex metadata --set'.

    Returns
    -------
    A dict mapping field names to values.
    """
    results = {}
    for arg in args:
        parts = [x.strip() for x in arg.split("=", 1)]
        if len(parts) == 2:
            if not parts[0]:
                raise ValueError("Empty field name")
            field, value = parts
        else:
            raise ValueError("meta argument isn't in 'field=value' format")
        if not value:
            # The `url_file` may have an empty value.
            continue
        results[field] = value
    return results

def filter_for_initial_manifest(objects, manifest):
    """
    Find the initial manifest created at the beginning of an export request.

    :type objects: list
    :param objects: List of objects in a particular bucket.
    :type manifest: str
    :param manifest: The expected identifier for the initial manifest.
    :rtype: str
    :return: The identifier for the initial manifest object.
    :raises RuntimeError: If the initial manifest is not found.
    """
    for obj in objects:
        key = obj['Key'].casefold()
        if key == manifest.casefold():
            return key
    raise RuntimeError('Initial manifest not found.')

def is_percentage(p: str) -> bool:
    """
    Check if a string represents a percentage

    :param p: the string to check
    :return:
    """
    if isinstance(p, str) and p.endswith('%'):
        return True
    else:
        return False

def reshape_coord(center_contours):
    """
    Decomposes a list of (x, y) pairs into separate x and y lists, as follows:
    ['xi, yi', 'xi+1, yi+1', 'xn, yn'] -> [xi, xi+1, xn] & [yi, yi+1, yn]
    """
    x, y = [], []
    # Note: the slice skips the final point in the list.
    for point in center_contours[:-1]:
        x.append(point[0])
        y.append(point[1])
    return x, y

def Dot2(a, b):
    """Return the dot product of two 2d vectors, a . b."""
    return a[0] * b[0] + a[1] * b[1]

def filter_group_answers(groups, group_filter):
    """Filter answers from groups using group_filter."""
    return [group_filter(group) for group in groups]

def format_template(template, **kwargs):
    """ Replace {{keys}} elements in template with the matching value in the
    kwargs dictionary."""
    if template is None:
        return None
    formatted = template
    for key in kwargs:
        replacement = kwargs.get(key, "")
        formatted = formatted.replace("{{{{{}}}}}".format(key), str(replacement))
    return formatted

def dot_product(a, b):
    """
    Computes the dot-product of two vectors.

    Parameters
    ----------
    a : (x,y) tuple
    b : (x,y) tuple

    Returns
    -------
    float
    """
    return a[0] * b[0] + a[1] * b[1]

def get_google_drive_id(link):
    """Get a google drive file id by the file url."""
    return link.replace("https://drive.google.com/uc?export=view&id=", "")

def GetMixComponents(mix, mix_dict):
    """
    Args:
        mix: (str)
        mix_dict: (mix -> mix_info like listed in other parts of file.)
    """
    if mix not in mix_dict:
        return None
    else:
        return mix_dict[mix]

def create_masks(tokenized):
    """
    Given a list of tokenized sentences, create the corresponding attention masks.
    - If a token ID is 0, then it's padding, set the mask to 0.
    - If a token ID is > 0, then it's a real token, set the mask to 1.
    """
    attention_masks = []
    for sent in tokenized:
        att_mask = [int(token_id > 0) for token_id in sent]
        attention_masks.append(att_mask)
    return attention_masks

def extend_diff_outliers(diff_indices):
    """ Extend difference-based outlier indices `diff_indices` by pairing

    Parameters
    ----------
    diff_indices : array
        Array of indices of differences that have been detected as outliers. A
        difference index of ``i`` refers to the difference between volume ``i``
        and volume ``i + 1``.

    Returns
    -------
    extended_indices : array
        Array where each index ``j`` in `diff_indices` has been replaced by two
        indices, ``j`` and ``j + 1``, unless ``j + 1`` is present in
        ``diff_indices``. For example, if the input was ``[3, 7, 8, 12, 20]``,
        the output is ``[3, 4, 7, 8, 9, 12, 13, 20, 21]``.
    """
    extended_indices = []
    for idx in diff_indices:
        if idx not in extended_indices:
            extended_indices.append(idx)
        if idx + 1 not in extended_indices:
            extended_indices.append(idx + 1)
    return extended_indices

def has_bad_strings(text):
    """Check if the text has any of the bad strings"""
    bad_strings = ['ab', 'cd', 'pq', 'xy']
    # Return True if any of the bad strings are in text,
    # otherwise return False
    if any(string in text for string in bad_strings):
        return True
    return False

def clean_email(value):
    """Cleans the email field."""
    if value:
        email = (
            value.strip()
            .replace(" [CERN]", "@cern.ch")
            .replace("[CERN]", "@cern.ch")
        )
        return email

def clean_and_format_slashed_station_names(subject: str) -> str:
    """Cleans up slashed station names like Name / Name"""
    if "/" in subject:
        subject = " / ".join([i.strip().title() for i in subject.split("/")])
    return subject

def l2_rewrite_group_id(ne_id):
    """ L2 Rewrite Group Id """
    return 0x10000000 + (ne_id & 0x0fffffff)

def _filter_distance(barcodes, candidate, min_dist, distance):
    """Test whether {candidate} can be added to {barcodes} based on the
    minimum distance between {candidate} and all barcodes in {barcodes}.

    :arg list barcodes: List of barcodes.
    :arg str candidate: Candidate barcode.
    :arg int min_dist: Minimum distance between the barcodes.
    :arg function distance: Distance function.

    :returns bool: True if the barcode is clean, False otherwise.
    """
    for i in barcodes:
        if distance(i, candidate) < min_dist:
            return False
    return True

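# A minimal usage sketch (not from the original), using a simple character
# mismatch count as the distance function. Names and values are illustrative.
def _mismatch_count(a, b):
    return sum(x != y for x, y in zip(a, b))

assert _filter_distance(["AAAA", "CCCC"], "AATT", 2, _mismatch_count) is True
assert _filter_distance(["AAAA", "CCCC"], "AAAT", 2, _mismatch_count) is False
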
def is_roman_numeral(s):
    """Return True if s is a roman numeral.

    s -- string
    """
    if not s:
        return False
    # there is a comma in the alphabet, because some categories are
    # combined, split by comma
    alphabet = dict.fromkeys("IVXDCM,")
    for i in s:
        if i not in alphabet:
            return False
    return True

def parse_optionalString(value):
    """parse an optional string"""
    if not value:
        return None
    return value

def strict_positive_int(integer_string, cutoff=None):
    """
    Cast a string to a strictly positive integer.
    """
    ret = int(integer_string)
    if ret <= 0:
        raise ValueError()
    if cutoff:
        ret = min(ret, cutoff)
    return ret

def collisionDoor(playerX, playerY, doorX, doorY, level):
    """
    Method collisionDoor

    Inputs:  playerX, playerY, doorX, doorY, level: integers
    Outputs: success, level: integers

    This method checks for collisions with the door on the board. If there is
    a collision, 1 is returned along with the current level (0 for no
    collision).
    """
    if playerX == doorX:
        if playerY == doorY:
            print("\nYou made it out of the dungeon alive!")
            level += 1
            return 1, level
    return 0, level

def stacked_index(size, row, col):
    """
    Convert a pair of indices into a square operator of `size` into a single
    index into the column-stacked version of the operator.
    """
    return row + size * col

def double(value, name):
    """casts to a float value"""
    if isinstance(value, float):
        return value
    value = float(value)
    return value

def check_time(data):
    """Return time in seconds, if the time is in format 23:59"""
    new_data = data.split(':')
    time = int(new_data[0]) * 3600 + int(new_data[1]) * 60
    return time

def is_int_str(string):
    """
    Checks if a given str can be successfully converted to an integer value.

    :param str string: String to be evaluated.
    :return: Returns true if the string is integer convertible and false otherwise.
    :rtype: bool
    """
    try:
        int(string)
        return True
    except ValueError:
        return False

def pathify_this(key):
    """Return `True` if the value associated with this key should be pathified."""
    pathify_these = {"PATH", "FILE", "DIR"}
    return bool(key.split("_")[-1] in pathify_these)

def get_env_bool(env, name, default=False) -> bool:
    """Convert a construction variable to bool.

    If the value of `name` in `env` is 'true', 'yes', 'y', 'on' (case
    insensitive) or anything convertible to int that yields non-zero then
    return ``True``; if 'false', 'no', 'n', 'off' (case insensitive) or a
    number that converts to integer zero return ``False``. Otherwise, return
    `default`.

    Args:
        env: construction environment, or any dict-like object
        name: name of the variable
        default: value to return if `name` not in `env` or cannot be
            converted (default: False)

    Returns:
        the "truthiness" of `name`
    """
    try:
        var = env[name]
    except KeyError:
        return default
    try:
        return bool(int(var))
    except ValueError:
        if str(var).lower() in ('true', 'yes', 'y', 'on'):
            return True
        if str(var).lower() in ('false', 'no', 'n', 'off'):
            return False
        return default

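# A minimal usage sketch (not from the original), with a plain dict standing
# in for a construction environment; keys and values are illustrative.
assert get_env_bool({'FOO': 'yes'}, 'FOO') is True
assert get_env_bool({'FOO': '0'}, 'FOO') is False
assert get_env_bool({}, 'FOO', default=True) is True
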
def triangular(n):
    """Gives the n-th triangle number."""
    return n * (n + 1) / 2

def karatsuba(x, y) -> int:
    """ multiply x by y using simplified Karatsuba method """
    x, y = str(x), str(y)
    n = len(x)
    a, b, c, d = int(x[:n // 2]), int(x[n // 2:]), int(y[:n // 2]), int(y[n // 2:])
    ac, bd = a * c, b * d
    return (10 ** n) * ac + (10 ** (n // 2)) * ((a + b) * (c + d) - ac - bd) + bd

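# A minimal usage sketch (not from the original). As written, the split
# assumes x and y have the same, even number of digits.
assert karatsuba(1234, 5678) == 1234 * 5678   # 7006652
assert karatsuba(12, 34) == 12 * 34           # 408
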
def manual_classify_building(building_type):
    """
    Adopt the manual classification from the paper 'Estimation of Building
    Types on OpenStreetMap Based on Urban Morphology Analysis' and add
    further tags.
    """
    residential = ['apartments', 'aparments (s)', 'domitory', 'house', 'residential', 'retirement_home', 'terrace',
                   # self-add
                   'allotment_house', 'bungalow', 'summer_house', 'semidetached_house',
                   'terraced_house', 'dwelling_house', 'dormitory', 'family_house']
    commercial = ['bank', 'bar', 'boat_rental', 'cafe', 'club', 'dentist', 'doctors', 'fast_food', 'fuel',
                  'guest_house', 'hostel', 'hotel', 'pharmacy', 'pub', 'restaurant', 'restaurant;bierg', 'shop', 'supermarket',
                  # self-added
                  'commercial', 'retail', 'fuel_station', 'service', 'kiosk', 'nightclub']
    accessory_storage = ['carport', 'garage', 'garages', 'hut', 'roof', 'shelter',
                         # self-add
                         'barn', 'basement', 'storage_tank', 'shed', 'cabin', 'bunker', 'chimney', 'detached',
                         'parking_garage', 'container', 'hangar', 'silo']
    accessory_supply = ['car_wash', 'surveillance', 'tower', 'warehouse',
                        # self-add
                        'aviary', 'farm_auxiliary', 'farm', 'power', 'electricity',
                        'transformer_house', 'transformer_tower', 'cowshed']
    industrial = ['industrial',
                  # self-add
                  'construction', 'manufacture']
    public = ['MDF', 'attraction', 'arts_center', 'canteen', 'castle', 'hospital', 'church',
              'college', 'community_centre', 'museum', 'fire_station', 'greenhouse',
              'information', 'kindergarten', 'library', 'office', 'parking',
              'place_of_worship', 'police', 'public', 'public_building', 'school',
              'science_park', 'station', 'townhall', 'train_station', 'university',
              'youth_centre', 'theatre', 'toilets',
              # self-add
              'cathedral', 'historic', 'ambulance_station', 'bridge', 'government', 'transportation',
              'synagogue', 'sports_centre', 'ship', 'mosque', 'tech_cab', 'railway', 'gymnasium', 'religious',
              'chapel', 'civic', 'sports_hall', 'pavilion', 'bahnhof']
    not_classified = ['yes', 'YES']

    if building_type in residential:
        result = 'residential'
    elif building_type in commercial:
        result = 'commercial'
    elif building_type in accessory_storage:
        result = 'accessory_storage'
    elif building_type in accessory_supply:
        result = 'accessory_supply'
    elif building_type in industrial:
        result = 'industrial'
    elif building_type in public:
        result = 'public'
    elif building_type in not_classified:
        result = 'to_be_classified'
    else:
        result = 'other'
    return result

def parse_field(qualified_field):
    """
    Given a ${qualified_field} which describes a fully qualified path to a
    class field with a value like "xxx.xxx.xxx.Class.field", parse it into a
    tuple of package path, class name, and field name:
    (xxx.xxx.xxx, Class, field)
    """
    return tuple(qualified_field.rsplit('.', 2))

def compute_iou(box1, box2):
    """xmin, ymin, xmax, ymax"""
    A1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
    A2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
    xmin = max(box1[0], box2[0])
    ymin = max(box1[1], box2[1])
    xmax = min(box1[2], box2[2])
    ymax = min(box1[3], box2[3])
    if ymin >= ymax or xmin >= xmax:
        return 0
    # Note: this divides by the sum of the two areas rather than by the
    # union (A1 + A2 - intersection).
    return ((xmax - xmin) * (ymax - ymin)) / (A1 + A2)

def child_support_payor_b(responses, derived):
    """ Return who the payor is depending on the monthly amount from Factsheet B """
    try:
        amount_1 = float(responses.get('your_child_support_paid_b', 0))
    except ValueError:
        amount_1 = 0
    try:
        amount_2 = float(responses.get('your_spouse_child_support_paid_b', 0))
    except ValueError:
        amount_2 = 0

    if amount_1 > amount_2:
        payor = 'you'
    elif amount_1 < amount_2:
        payor = 'spouse'
    else:
        payor = 'both'
    return payor

def _invert_indices(arr, range_max):
    """Return all indices from range(range_max) that are not in arr, as a list."""
    inv = []
    for j in range(range_max):
        if j not in arr:
            inv.append(j)
    return inv

def oid_to_tuple(oid):
    """Convert an OID to a tuple of numbers"""
    return tuple([int(x) for x in oid.split('.')])

def she_says_he_says(she_says):
    """Replaces y/i, removes spaces, returns reversed

    >>> she_says_he_says('ma rymu')
    'umiram'
    """
    phonetic_she_says = she_says.replace('y', 'i')  # your solution
    compact = phonetic_she_says.replace(' ', '')    # your solution
    he_says = compact[::-1]                         # your solution
    return he_says

def weird_nodes(node_name):
    """ Returns true if the node has connections that skip tiles.

    Most nodes are made up of wires that only touch adjacent tiles.
    However some nodes fly over tiles.
    """
    # Only about 1% of all nodes in the graph behave this way, so hopefully
    # the overall tileconn effect is small!
    _, wire = node_name.split('/')
    weird_prefixes = [
        # ~400 instances
        'CLK_HROUTE',
        # ~200 instances
        'CLK_HDISTR',
        # ~500 instances
        'CLK_TEST_BUF',
        # ~300 instances
        'CLK_VDISTR',
        # ~200 instances
        'CLK_VROUTE',
        # ~1500 instances
        'HDIO_IOBPAIR',
        # 4k instances
        'HPIO_IOBPAIR',
        # ~200 instances
        'HPIO_IOBSNGL',
        # ~12k instances
        'GND_WIRE',
        # ~40k instances
        'VCC_WIRE',
    ]
    for prefix in weird_prefixes:
        if wire.startswith(prefix):
            return True
    return False

def deep_update_dict(origin_dict, override_dict):
    """ update origin dict with override dict recursively

    e.g. origin_dict = {'a': 1, 'b': {'c': 2, 'd': 4}}
         override_dict = {'b': {'c': 3}}
    return: {'a': 1, 'b': {'c': 3, 'd': 4}}
    """
    for key, val in override_dict.items():
        if isinstance(val, dict):
            tmp = deep_update_dict(origin_dict.get(key, {}), val)
            origin_dict[key] = tmp
        else:
            origin_dict[key] = override_dict[key]
    return origin_dict

def strike(text: str) -> str:
    """Make text strikethrough."""
    return f'~{text}~'

def position_to_index(x, y, num_cols):
    """
    Converts a position (row index & column index) in the grid to an index in
    the flat representation of the grid.

    Formula: (number of columns * row index) + column index

    Example:
        2D array: [[1, 2], [3, 4]]
        Flat array: [1, 2, 3, 4]
        position of 3 is (1, 0), so its index will be ((2 * 1) + 0) = 2

    :param x: row index of the grid
    :param y: column index of the grid
    :param num_cols: Number of columns in the 2D array/grid
    :return: index in the flat representation of the grid
    """
    return (num_cols * x) + y

def compare_dicts(d1, d2):
    """Use simple comparison"""
    return dict(d1) == dict(d2)

def get_skill_outputs_from_dialog(utterances, skill_name, activated=False):
    """
    Extract list of dictionaries with already formatted outputs of `skill_name` from full dialog.
    If `activated=True`, skill also should be chosen as `active_skill`; otherwise, empty list.

    Args:
        utterances: utterances, the first one is user's reply
        skill_name: name of target skill
        activated: if target skill should be chosen by response selector on previous step or not

    Returns:
        list of dictionaries with formatted outputs of skill
    """
    result = []
    skills_outputs = []
    for uttr in utterances:
        if "active_skill" in uttr:
            final_response = uttr.get("orig_text", None) if uttr.get("orig_text", None) is not None else uttr["text"]
            for skop in skills_outputs:
                # need to check text-response for skills with several hypotheses
                if skop["skill_name"] == skill_name:
                    if activated and skop["text"] in final_response and uttr["active_skill"] == skill_name:
                        # removed one condition as if skop contains skill_name and text, its len is > 0
                        result.append(skop)
                    else:
                        if not activated and skop:
                            result.append(skop)
        elif "hypotheses" in uttr:
            skills_outputs = uttr["hypotheses"]
    return result

def sock_files(dev_uids_list, is_spp_pri=False):
    """Return list of socket files on host and containers.

    The names of the socket files follow the conventional ones described in
    the DPDK docs, though you can use any name actually. For spp_primary, the
    path of the sock file is just a bit different because it is shared among
    other SPP processes.

    Here is an example of two vhost devices.
      [vhost:0, vhost:1]
      => [
       {'host': '/tmp/sock0', 'guest': '/var/run/usvhost0'},
       {'host': '/tmp/sock1', 'guest': '/var/run/usvhost1'}]
    """
    socks = {
        'vhost': {
            'host': '/tmp/sock{:s}',
            'guest': '/var/run/usvhost{:s}'},
        'memif': {
            'host': '/tmp/spp-memif.sock',
            'guest': '/var/run/spp-memif.sock'}}

    res = []
    is_memif_added = False
    for dev_uid in dev_uids_list:
        dev_uid = dev_uid.split(':')
        if (dev_uid[0] == 'memif') and (not is_memif_added):
            # Single sock file is enough for memif because it is just used for
            # negotiation between master and slaves processes.
            if is_spp_pri:
                res.append({
                    'host': socks['memif']['host'],
                    'guest': socks['memif']['host']})
            else:
                res.append({
                    'host': socks['memif']['host'],
                    'guest': socks['memif']['guest']})
            is_memif_added = True
        elif dev_uid[0] == 'vhost':
            if is_spp_pri:
                res.append({
                    'host': socks['vhost']['host'].format(dev_uid[1]),
                    'guest': socks['vhost']['host'].format(dev_uid[1])})
            else:
                res.append({
                    'host': socks['vhost']['host'].format(dev_uid[1]),
                    'guest': socks['vhost']['guest'].format(dev_uid[1])})
    return res

def is_multicast(ip):
    """ Tells whether the specified ip is a multicast address or not

    :param ip: an IPv4 address in dotted-quad string format, for example
        192.168.2.3
    """
    # IPv4 multicast addresses have a first octet of 224-239 inclusive,
    # so the range must stop at 240.
    return int(ip.split('.')[0]) in range(224, 240)

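# A minimal usage sketch (not from the original); addresses are illustrative.
assert is_multicast("224.0.0.1") is True
assert is_multicast("239.255.255.250") is True
assert is_multicast("192.168.2.3") is False
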
def check_auth(username, password):
    """This function is called to check if a username / password combination
    is valid.
    """
    return username == 'user' and password == 'THISISNOTAPASSWORD'

def calc_Debye_charging_time(eps_fluid, sigma):
    """
    The Debye charging time is the time required to charge the Debye layer.

    units: s

    Notes:
        Adjari, 2006 - "Debye time scale"
    """
    tau_debye = eps_fluid / sigma
    return tau_debye

def _merge(left, right):
    """Merger helper."""
    result = []
    while left and right:
        if left[0] <= right[0]:
            result.append(left[0])
            left = left[1:]
        else:
            result.append(right[0])
            right = right[1:]
    while left:
        result.append(left[0])
        left = left[1:]
    while right:
        result.append(right[0])
        right = right[1:]
    return result

def multiply_by_2(a_dictionary):
    """
    using dictionary comprehension for my first time in order to multiply
    the value of every key: value pair in a dictionary
    """
    return {key: value * 2 for (key, value) in a_dictionary.items()}

def is_pandigital(n: int) -> bool:
    """Determine if n is pandigital."""
    lst = set(sorted([int(i) for i in str(n)]))
    return len(lst) == 10

def b58decode(string):
    """Takes a base58-encoded number and returns it in base10.

    :param string: the number to base58_decode (as str).
    :return: the number passed as first parameter, base10 encoded.
    """
    alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
    # Populating a dictionary with base58 symbol chart
    dict = {}
    k = 0
    for i in alphabet:
        dict[i] = k
        k += 1
    n = 0  # Result
    pos = 0
    # Cf https://www.dcode.fr/conversion-base-n
    for i in string:
        for y in alphabet:
            if i == y:
                n = n * 58 + dict[i]
                pos += 1
    return n

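# A minimal usage sketch (not from the original). '1' is the zero symbol in
# this base58 alphabet, so "21" decodes to 1 * 58 + 0 = 58.
assert b58decode("2") == 1
assert b58decode("21") == 58
assert b58decode("z") == 57
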
def _node_like(test_dict: dict):
    """
    Evaluates whether a dict can be converted to a node safely.

    test_dict : dict
        Dict to check
    """
    if not isinstance(test_dict, dict):
        return False
    keys = list(test_dict.keys())
    try:
        keys.remove("ParameterName")
        keys.remove("ParameterValue")
        keys.remove("ParameterInfo")
    except ValueError:
        return False
    if keys:
        return False
    if not isinstance(test_dict["ParameterName"], str):
        return False
    if not isinstance(test_dict["ParameterInfo"], str):
        return False
    return True

def type_exact_match_string(prop_name, dual=True):
    """
    Mapping for fields that may want prefixes (based upon the default
    tokenizer which splits by -'s) or the full exact string (like domains).
    """
    if dual:
        return {
            "type": "multi_field",
            "fields": {
                prop_name: {"type": "string", "index": "analyzed"},
                "exact": {"type": "string", "index": "not_analyzed"}
            }
        }
    else:
        return dict(type="string")

def _deckhand_render_exception_msg(errors):
    """
    Helper function to create deckhand render exception msg.

    Parameters:
        errors: List of errors provided by deckhand render.

    Returns:
        string: formulated error message.
    """
    err_msg = ''
    for err in errors:
        if isinstance(err, tuple) and len(err) > 1:
            err_msg += ': '.join(err) + '\n'
        else:
            err_msg += str(err) + '\n'
    return err_msg

def iterable_to_uuids_list(iterable):
    """ takes an iterable of django objects and gets the str uuid into a list """
    result = []
    for item in iterable:
        uuid_label = str(item.uuid)
        result.append(uuid_label)
    return result

def is_item_in_list_a_in_list_b(list_a, list_b):
    """
    Check if one of the items given in list_a is present in list_b.

    :return: True will be returned if one of the items in list_a is present in list_b
    """
    for item in list_a:
        if item in list_b:
            return True
    return False

def mac_to_node_id(mac: str) -> str:
    """
    Turn mac address into a unique string to act as node id, but don't make
    it look like a mac address.

    :param mac:
    :return:
    """
    return 'node_id-' + mac.replace(':', '-')

def h_datemysqltoint(in_date):
    """2009-12-31 format in, 20091231 int returned"""
    boom = str(in_date).split("-")
    # [0] = year, [1] = month, [2] = day
    return int(boom[0] + boom[1] + boom[2])

def query_starts_with(formatted_sql, prefixes):
    """Check if the query starts with any item from *prefixes*."""
    prefixes = [prefix.lower() for prefix in prefixes]
    return bool(formatted_sql) and formatted_sql.split()[0] in prefixes

def file_index(index):
    """ formatting the index of each file as a zero-padded four-digit string """
    if len(str(index)) == 1:
        return '000' + str(index)
    elif len(str(index)) == 2:
        return '00' + str(index)
    elif len(str(index)) == 3:
        return '0' + str(index)
    elif len(str(index)) == 4:
        return str(index)

def _TestTypesMatch(types_to_run, this_tests_types):
    """types_to_run should be a set of test types to run. this_tests_types
    should be an iterable.

    Returns true if any type in the latter is also in the former, i.e., if
    the two are not disjoint.
    """
    return not types_to_run.isdisjoint(this_tests_types)

def validate_workgroup_state(workgroup_state):
    """
    Validate State for Workgroup
    Property: WorkGroup.State
    """
    VALID_WORKGROUP_STATE = ("ENABLED", "DISABLED")
    if workgroup_state not in VALID_WORKGROUP_STATE:
        raise ValueError(
            "Workgroup State must be one of: %s" % ", ".join(VALID_WORKGROUP_STATE)
        )
    return workgroup_state

def calculate(a, b, c):
    """ Calculate doc... """
    return a * 2 + b * 3 + c

def commanum(num):
    """As of Python 3.6, this can be accomplished in an f-string with the
    syntax f"{num:,}", in which the ":," adds commas at the appropriate
    places. In earlier versions we could use "{n:,}".format(n=num) or
    similar."""
    bd = str(num).split(".")
    if len(bd) == 1:
        base, dec = bd[0], ""
    else:
        base, dec = bd
    newbase = ""
    lb = len(base)
    for ii in range(1, lb):
        newbase += base[lb - ii]
        if ii % 3 == 0:
            newbase += ","
    newbase += base[0]
    if dec == "":
        return newbase[::-1]
    return newbase[::-1] + "." + dec

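# A hedged equivalence check (not from the original): for typical positive
# inputs the hand-rolled version should agree with the "," format specifier.
assert commanum(1234567) == f"{1234567:,}" == "1,234,567"
assert commanum(1234567.89) == f"{1234567.89:,}" == "1,234,567.89"
assert commanum(123) == "123"
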
def splitdrive(p):
    """Split a pathname into drive and path specifiers.
    Returns a 2-tuple "(drive, path)"; either part may be empty."""
    if p[1:2] == ':':
        return (p[0:2], p[2:])
    return ('', p)

def poly_coef_to_str(coef, degree):
    """A helper function for poly_print."""
    out = []
    if coef < 0:
        out.append('-')
    else:
        out.append('+')
    if abs(coef) != 1 or degree == 0:
        out.append(str(abs(coef)))
    if degree == 1:
        out.append('X')
    elif degree > 1:
        out.append('X^' + str(degree))
    return out

def parse_range(cpu_range):
    """Create cpu range object"""
    if '-' in cpu_range:
        [x, y] = cpu_range.split('-')  # pylint: disable=invalid-name
        cpus = range(int(x), int(y) + 1)
        if int(x) >= int(y):
            raise ValueError("incorrect cpu range: " + cpu_range)
    else:
        cpus = [int(cpu_range)]
    return cpus

def pie_chart_drawing(topic, party):
    """
    In debugging mode, the commented code works fine because it is fine to
    dynamically draw the pie charts and send them to the front end. However,
    Matplotlib forbids opening a GUI window on the server when rendering the
    figure to a png and shipping it to the user as the payload of a response.
    Thus, the solution is to draw all pie charts beforehand and save them to
    the static folder.
    """
    # fig = plt.figure()
    # df = pd.read_csv("static/datamodel/all_data.csv")
    # data_arr = []
    # count_df = df.groupby(['topic', 'subreddit']).size().reset_index(name='counts')
    # # print(count_df)
    # try:
    #     democrats_data = count_df[(count_df['topic'] == topic) & (count_df['subreddit'] == 'democrats')].counts.values[0]
    # except:
    #     democrats_data = 0
    # try:
    #     republican_data = count_df[(count_df['topic'] == topic) & (count_df['subreddit'] == 'Republican')].counts.values[0]
    # except:
    #     republican_data = 0
    # try:
    #     conservative_data = count_df[(count_df['topic'] == topic) & (count_df['subreddit'] == 'Conservative')].counts.values[0]
    # except:
    #     conservative_data = 0
    # try:
    #     liberal_data = count_df[(count_df['topic'] == topic) & (count_df['subreddit'] == 'Liberal')].counts.values[0]
    # except:
    #     liberal_data = 0
    # data_arr.append(democrats_data)
    # data_arr.append(republican_data)
    # data_arr.append(conservative_data)
    # data_arr.append(liberal_data)
    # # print(data_arr)
    # labels = ["democrats", "Republican", "Conservative", "Liberal"]
    # explode_index = labels.index(party)
    # explode = [0, 0, 0, 0]
    # explode[explode_index] = 0.2
    # plt.pie(data_arr, labels=labels, explode=explode, shadow=True, autopct='%1.1f%%')
    # plt.axis('equal')
    # plt.show()
    # fig.savefig("static/datamodel/pie_charts/" + topic + '_' + party + '.png', transparent=True)
    return_img_data = {'image_url': ["static/datamodel/pie_charts/" + topic + '_' + party + '.png']}
    return return_img_data

def parse_sexp(string):
    """
    >>> parse_sexp("(+ 5 (+ 3 5))")
    [['+', '5', ['+', '3', '5']]]
    """
    sexp = [[]]
    word = ''
    in_str = False
    for char in string:
        if char == '(' and not in_str:
            sexp.append([])
        elif char == ')' and not in_str:
            if word:
                sexp[-1].append(word)
                word = ''
            temp = sexp.pop()
            sexp[-1].append(temp)
        elif char in (' ', '\n', '\t') and not in_str:
            if word:
                sexp[-1].append(word)
                word = ''
        elif char == '\"':
            in_str = not in_str
        else:
            word += char
    return sexp[0]

def nth_triangular_number(number: int) -> int:
    """Function to compute the triangular number for a positive integer.

    :param number: The integer n to compute the triangular number.
    :return: The resulting integer triangular number.
    """
    return number * (number + 1) // 2

def validate_numeric_array(lyst):
    """ This function makes sure a list holds only numerical values """
    for num in range(len(lyst)):
        if not str(lyst[num]).isdigit():
            raise ValueError
    return True

def hamming_distance(s1, s2):
    """Return the Hamming distance between equal-length sequences"""
    if len(s1) != len(s2):
        raise ValueError("Undefined for sequences of unequal length")
    return sum(el1 != el2 for el1, el2 in zip(s1, s2))