def append_if_not_none(prefix, key, separator="."):
    """Prepend the given prefix to the key if the prefix isn't empty."""
    if prefix is not None and len(prefix) > 0:
        return prefix + separator + key
    else:
        return key
def bias_greater_than(a, b):
    """Biased greater-than comparison: `a` is considered greater as long as it is
    not smaller than `b` by more than a small relative and absolute margin."""
    bias_relative = 0.95
    bias_absolute = 0.01
    return a >= b * bias_relative + a * bias_absolute
def total_travel_distance(journey):
    """
    Return the total travel distance of your PyCon journey in kilometers
    rounded to one decimal.
    """
    return round(sum(trip.distance for trip in journey), 1)
def remove_extra_lines_from_first_page(page_text):
    """Remove the extra lines (first two and last two) from the first page."""
    lines = page_text.split("\n")
    return "\n".join(lines[2:-2])
def _iteritems(d):
    """Factor-out Py2-to-3 differences in dictionary item iterator methods"""
    try:
        return d.iteritems()
    except AttributeError:
        return d.items()
def namefromcycle(cyclenumber, version):
    """Return the dataset name corresponding to the given cycle number.

    SE 1.0 and 1.1 used dashes; SE 1.2 uses zero-padded integers with
    10-digit field width.
    """
    if version == b'1.2':
        return "cycle%010d" % cyclenumber
    else:
        return "cycle-" + str(cyclenumber)
def get_file_information(file_path: str, strip_extras: bool = False) -> str:
    """Fetch the file name or file type from a file path.

    Parameters
    ----------
    file_path*:
        Supports both relative and absolute references to the input file location.
    strip_extras:
        Return the file type instead of the file name (if set True), default: False
    (* - Required parameters)

    Returns
    -------
    File name (or file type) as a string.
    """
    base_url_idx = file_path.rindex('/') + 1
    file_name = file_path[base_url_idx:]
    # Return the file name (if that's what the function has been called for)
    if not strip_extras:
        return file_name
    # Return the file type (if the strip_extras flag is set to True)
    return file_name[17:file_name.rindex('_')]
def convert_bin_to_int(list_string_col):
    """
    Convert a list of binary strings to a list of integers.

    Args:
        list_string_col: list of strings (characters 0 or 1)
    Returns:
        list of integers
    """
    int_from_string_col = [int(x, 2) for x in list_string_col]
    return int_from_string_col
def prefixed_userid(request):
    """In Kinto, user ids are prefixed with the policy name that is
    contained in Pyramid Multiauth.

    If a custom authn policy is used, without authn_type, this method
    returns the user id without prefix.
    """
    # If pyramid_multiauth is used, an ``authn_type`` is set on request
    # when a policy successfully authenticates a user.
    # (see :func:`kinto.core.initialization.setup_authentication`)
    authn_type = getattr(request, 'authn_type', None)
    if authn_type is not None:
        return authn_type + ':' + request.selected_userid
def _sequence(one, two):
    """Return function calling two functions in order"""
    if one is None:
        return two
    elif two is None:
        return one
    return lambda *args, **kargs: (one(*args, **kargs), two(*args, **kargs))[1]
def assert_uniqueness_clause(property: str, node: str = 'node') -> str:
    """
    Returns the *part* of a statement that ensures a property of a node is unique.

    Parameters
    ----------
    property : str
        Name of the meant-to-be-unique property
    node : str, optional
        Name of the node (coming from another statement)

    Returns
    -------
    out: str
        Neo4j statement
    """
    return f"ASSERT {node}.`{property}` IS UNIQUE"
def my_float(s):
    """
    Returns a float if string 's' is a float and 'None' if not.
    's' MUST be a string.

    :param s: string with number
    :type s: str
    :rtype: float or None
    """
    try:
        return float(s)
    except ValueError:
        return None
def SUDOERSLINE_NOPASSWD(user):
    """
    returns {user} ALL=(ALL) NOPASSWD: ALL
    use : echo "{thing}" >> /etc/sudoers
    """
    return {
        'adduser': {
            "loc": "{user} ALL=(ALL) NOPASSWD: ALL".format(user=user),
            "succ": "PASS MESSAGE",
            "fail": "FAIL MESSAGE",
            "info": "INFO MESSAGE",
        }
    }
def clear_sesam_attributes(sesam_object: dict):
    """
    Return same dict but without properties starting with "_"

    :param sesam_object: input object from Sesam
    :return: object cleared from Sesam properties
    """
    return {k: v for k, v in sesam_object.items() if not k.startswith('_')}
def _glslify(r):
    """Transform a string or an n-tuple to a valid GLSL expression."""
    if isinstance(r, str):
        return r
    else:
        assert 2 <= len(r) <= 4
        return 'vec{}({})'.format(len(r), ', '.join(map(str, r)))
def to_string(text, encoding='utf8', errors='strict'):
    """Convert a string (bytestring in `encoding` or unicode), to unicode."""
    if isinstance(text, str):
        return text
    return str(text, encoding, errors=errors)
def nested_list_map(lis: list, func) -> list:
    """Nested mapping for a nested list."""
    def _nested_fn(obj):
        if isinstance(obj, list):
            return list(map(_nested_fn, obj))
        else:
            return func(obj)
    # assert isinstance(lis, list), type(lis)
    return _nested_fn(lis)
def input_fa_name(name):
    """ map event name to input fasta wdl variable name """
    return '{}_fa'.format(name)
def get_finished(data):
    """ Gets players who have finished their bugs """
    total_parts = sum(part_type.count for part_type in data["partTypes"])
    finished = []
    for player, parts in data["players"].items():
        if sum(parts) == total_parts:
            finished.append(player)
    return finished
def get_canonical_import(import_set):
    """Obtain one single import from a set of possible sources of a symbol.

    One symbol might come from multiple places as it is being imported and
    reexported. To simplify API changes, we always use the same import for the
    same module, and give preference to imports coming from main tensorflow
    code.

    Args:
        import_set: (set) Imports providing the same symbol

    Returns:
        A module name to import
    """
    # We use the fact that list sorting is stable, so first we convert the set
    # to a sorted list of the names and then we resort this list to move
    # elements not in core tensorflow to the end.
    import_list = sorted(import_set)
    import_list.sort(key=lambda x: 'lite' in x)
    return import_list[0]
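# Usage sketch for get_canonical_import (the module names below are made up for
# illustration): names containing 'lite' sort to the end, so the core path wins.
assert get_canonical_import(
    {'tensorflow.lite.python.op', 'tensorflow.python.op'}
) == 'tensorflow.python.op'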
def _get_rotx(rot_type):
    """ Return x coordinate for this rotation """
    return 1 if rot_type >= 4 else 0
def _get_intg_dict(intg_type, fields):
    """Return a dictionary-format integration.

    Fields is a multi-arg from Click with 2 elems.
    """
    data = {"type": intg_type, "fields": {}}
    for field in fields:
        data["fields"][field[0]] = field[1]
    return data
def convert2asciii(dictionary):
    """
    Changes all keys (i.e. assumes they are strings) to ASCII and values
    that are strings to ASCII. Specific to dictionaries.
    """
    return dict([(key.encode('ascii', 'ignore'), value.encode('ascii', 'ignore'))
                 if type(value) in [str, bytes]
                 else (key.encode('ascii', 'ignore'), value)
                 for key, value in dictionary.items()])
def rank_to_util(rank, num_players):
    """
    Helper method to convert a rank into a utility value
    (copied from AIUtils in the Ludii Java code)

    :param rank:
    :param num_players:
    :return:
    """
    if num_players == 1:
        return 2.0 * rank - 1.0
    else:
        return 1.0 - ((rank - 1.0) * (2.0 / (num_players - 1)))
def receptive_field_size(total_layers, num_cycles, kernel_size,
                         dilation=lambda x: 2**x):
    """Compute receptive field size.

    Args:
        total_layers (int): total layers
        num_cycles (int): cycles
        kernel_size (int): kernel size
        dilation (lambda): lambda to compute dilation factor.
            ``lambda x: 1`` to disable dilated convolution.

    Returns:
        int: receptive field size in samples
    """
    assert total_layers % num_cycles == 0
    layers_per_cycle = total_layers // num_cycles
    dilations = [dilation(i % layers_per_cycle) for i in range(total_layers)]
    return (kernel_size - 1) * sum(dilations) + 1
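# Worked example (parameter values chosen only for illustration): 24 layers in 4 cycles
# with kernel size 3 gives dilations 1,2,4,8,16,32 repeated four times, so the receptive
# field is (3-1) * 4 * 63 + 1 = 505 samples.
assert receptive_field_size(24, 4, 3) == 505
# Disabling dilation reduces it to the plain stacked-convolution case: (3-1) * 24 + 1 = 49.
assert receptive_field_size(24, 4, 3, dilation=lambda x: 1) == 49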
def escape_special_characters_shell(text, characters="'\""):
    """
    Simple function that tries to escape quotes.

    Not guaranteed to produce the correct result!! If that is needed,
    use the new `Escaper` class.
    """
    for character in characters:
        text = text.replace(character, '\\' + character)
    return text
def default_document_verifier(doc):
    """
    By default we try to index the document.

    In `production` environments you should set the `document_verifier` via
    `Solr.__init__()` in order to minimize the traffic to solr.
    """
    return {'ok': 'true'}
def read(filename):
    """Read file content."""
    with open(filename) as f:
        return f.read()
def get_named_parent(decl):
    """
    returns a reference to a named parent declaration

    :param decl: the child declaration
    :type decl: :class:`declaration_t`

    :rtype: reference to :class:`declaration_t` or None if not found
    """
    if not decl:
        return None
    parent = decl.parent
    while parent and (not parent.name or parent.name == '::'):
        parent = parent.parent
    return parent
def function(a, b=True):
    """Print "negative" when `a` is -1, then return 5.

    Args:
        a ([type]): value compared against -1
        b (bool, optional): unused. Defaults to True.

    Returns:
        [type]: the constant 5
    """
    if a == -1:
        print("negative")
    result = 5
    return result
def flag_to_list(flagval, flagtype):
    """Convert a string of comma-separated tf flags to a list of values."""
    if flagtype == 'int':
        return [int(_) for _ in flagval.split(',') if _]
    elif flagtype == 'float':
        return [float(_) for _ in flagval.split(',') if _]
    elif flagtype == 'str':
        return [_ for _ in flagval.split(',') if _]
    else:
        raise Exception("incorrect type")
def part2(entries: list) -> int:
    """part2 solver"""
    nice_strings = 0
    for string in entries:
        if (any([string.count(string[i:i+2]) >= 2 for i in range(len(string)-2)])
                and any([string[i] == string[i+2] for i in range(len(string)-2)])):
            nice_strings += 1
    return nice_strings
def combine_dicts(*dlist):
    """
    Combine a tuple of dictionaries into one.
    If there is a conflict in keys, return an empty dictionary.

    Returns:
        dict
    """
    ret = dict()
    for d in dlist:
        for k, v in d.items():
            if ret.get(k, d[k]) != d[k]:
                return dict()
            else:
                ret[k] = d[k]
    return ret
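# Usage sketch for combine_dicts: merging succeeds when shared keys agree, and any
# conflicting key collapses the result to an empty dictionary.
assert combine_dicts({'a': 1}, {'b': 2, 'a': 1}) == {'a': 1, 'b': 2}
assert combine_dicts({'a': 1}, {'a': 2}) == {}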
def downgrade_md_headings(text: str) -> str:
    """Downgrades all headings in a markdown-formatted input text to the next
    heading level (e.g. # to ##).

    Args:
        text: markdown-formatted text

    Returns:
        markdown-formatted text
    """
    return text.replace('\n#', '\n##')
def message_to_bits(message):
    """
    convert a list of numbers (plaintext) to a list of bits to encrypt

    :param message:
    :return: type list, list of bits
    """
    message_bits = []
    for number in message:
        bits = "{0:b}".format(number)
        for bit in list(bits):
            message_bits.append(int(bit))
    return message_bits
def is_char_chinese(uchar):
    """
    :param uchar: input char in unicode
    :return: whether the input char is a Chinese character.
    """
    if u'\u3400' <= uchar <= u'\u4db5':      # CJK Unified Ideographs Extension A, release 3.0
        return True
    elif u'\u4e00' <= uchar <= u'\u9fa5':    # CJK Unified Ideographs, release 1.1
        return True
    elif u'\u9fa6' <= uchar <= u'\u9fbb':    # CJK Unified Ideographs, release 4.1
        return True
    elif u'\uf900' <= uchar <= u'\ufa2d':    # CJK Compatibility Ideographs, release 1.1
        return True
    elif u'\ufa30' <= uchar <= u'\ufa6a':    # CJK Compatibility Ideographs, release 3.2
        return True
    elif u'\ufa70' <= uchar <= u'\ufad9':    # CJK Compatibility Ideographs, release 4.1
        return True
    elif u'\U00020000' <= uchar <= u'\U0002a6d6':  # CJK Unified Ideographs Extension B, release 3.1
        return True
    elif u'\U0002f800' <= uchar <= u'\U0002fa1d':  # CJK Compatibility Supplement, release 3.1
        return True
    elif u'\uff00' <= uchar <= u'\uffef':    # Full-width ASCII and punctuation, half-width Katakana, half-width Hangul
        return True
    elif u'\u2e80' <= uchar <= u'\u2eff':    # CJK Radicals Supplement
        return True
    elif u'\u3000' <= uchar <= u'\u303f':    # CJK punctuation marks
        return True
    elif u'\u31c0' <= uchar <= u'\u31ef':    # CJK strokes
        return True
    elif u'\u2f00' <= uchar <= u'\u2fdf':    # Kangxi Radicals
        return True
    elif u'\u2ff0' <= uchar <= u'\u2fff':    # Ideographic Description Characters (character structure)
        return True
    elif u'\u3100' <= uchar <= u'\u312f':    # Bopomofo phonetic symbols
        return True
    elif u'\u31a0' <= uchar <= u'\u31bf':    # Bopomofo Extended (Taiwanese and Hakka)
        return True
    elif u'\ufe10' <= uchar <= u'\ufe1f':    # Vertical Forms
        return True
    elif u'\ufe30' <= uchar <= u'\ufe4f':    # CJK Compatibility Forms
        return True
    elif u'\u2600' <= uchar <= u'\u26ff':    # Miscellaneous Symbols
        return True
    elif u'\u2700' <= uchar <= u'\u27bf':    # Dingbats
        return True
    elif u'\u3200' <= uchar <= u'\u32ff':    # Enclosed CJK Letters and Months
        return True
    elif u'\u3300' <= uchar <= u'\u33ff':    # CJK Compatibility
        return True
    return False
def expected_win_prob(pos_prob, pos_win_prob, neg_win_prob):
    """Expected value of win probability, factoring in p(success)."""
    return (pos_prob * pos_win_prob) + ((1 - pos_prob) * neg_win_prob)
def str_to_unicode(x, encoding="utf-8"):
    """Decode `x` to a unicode string if it is a byte string; return it unchanged
    otherwise. Intended for use immediately after loading text from CSV."""
    if isinstance(x, bytes):
        return x.decode(encoding)
    return x
def iterate_bus_ids(timestamp, bus_ids):
    """Iterate through bus IDs, skip 'x' entries and multiply until you get the
    closest timestamp to the original. Then subtract the two timestamps and
    multiply by the bus ID that had the closest departing timestamp.
    """
    ids = bus_ids[0].split(',')
    new_timestamps = []
    for bus_id in ids:
        if bus_id == 'x':
            continue
        else:
            new_timestamp = int(bus_id)
            while new_timestamp <= timestamp:
                new_timestamp += int(bus_id)
            new_timestamps.append([bus_id, new_timestamp])
    # Find the soonest timestamp
    closest_timestamp = min([new_timestamp[1] for new_timestamp in new_timestamps])
    # Get the index of the ID with that timestamp
    index_of_id = [new_timestamp[1] for new_timestamp in new_timestamps].index(
        closest_timestamp)
    answer = (closest_timestamp - timestamp) * int(new_timestamps[index_of_id][0])
    return answer
def pids_value_dict(profDict, value):
    """
    Creates a dictionary with the key being 'pid' and the values being the
    value in a prof entry with key 'value'.
    """
    pidsDict = {}
    for prof in profDict:
        pidsDict[prof['pid']] = prof[value]
    return pidsDict
def contains_str(cadena1, cadena2):
    """Check whether the first string is contained in the second string
    (case-insensitive).

    Arguments:
        cadena1 {[str]} -- String to look for
        cadena2 {[str]} -- Base string
    """
    cad1 = cadena1.lower()
    cad2 = cadena2.lower()
    idx = 0
    for val in cad2:
        if cad1[idx] == val:
            idx += 1
            if idx == len(cad1):
                return True
        else:
            # Restart the scan, re-checking the current character against the start
            # of the pattern (a simple scan; self-overlapping prefixes such as
            # "aab" inside "aaab" may still be missed).
            idx = 1 if cad1[0] == val else 0
    return False
def enhanceEntries(entriesList, feedId, feedName):
    """
    Add the Id of the feed to each entry so that we only need the item, which
    then contains all information that we need.

    Parameters
    ----------
    entriesList : list
        A list of RSS entries (FeedParserDicts)
    feedId : string
        The URL of the source feed
    feedName : string
        The clear text name of the source

    Returns
    -------
    entriesList : list
        The enhanced entriesList
    """
    for entry in entriesList:
        entry["source"] = feedId
        entry["feed_name"] = feedName
    return entriesList
def convertMacMessageToSognoMessage(mac_message, device_type_to_comp):
    """Convert a measurement message from the MAC interface format to the SOGNO format."""
    print("MAC interface:")
    for reading in mac_message['readings']:
        print("Received measurement value: {}, {}, {}, {}, {}".format(
            mac_message['timestamp'], mac_message['identifier'],
            reading['type'], reading['source'], reading['data']))

    # Mapping of terminology: from mac source to sogno phase
    source_to_phase = {"channel_1": "A", "channel_2": "B", "channel_3": "C"}
    # Mapping of terminology: from mac type to sogno measurand
    type_to_measurand = {"volt": "voltmagnitude",
                         "activepower": "activepower",
                         "reactivepower": "reactivepower"}

    sogno_message = {}
    sogno_message["device"] = mac_message["identifier"]
    sogno_message["timestamp"] = mac_message["timestamp"]
    sogno_message["readings"] = []

    mac_readings = mac_message["readings"]
    for mac_elem in mac_readings:
        sogno_elem = {}
        # Supplement mac reading by CIM component UUID related to device and measurand
        if (mac_message["identifier"], mac_elem["type"]) in device_type_to_comp.keys():
            sogno_elem["component"] = device_type_to_comp[(mac_message["identifier"], mac_elem["type"])]
        else:
            sogno_elem["component"] = "unspecified"
            print("Warning: mapping from ({}, {}) to CIM component not specified.".format(
                mac_message["identifier"], mac_elem["type"]))

        # Map terms for type and source if a mapping is available; otherwise skip the reading
        if mac_elem["type"] in type_to_measurand.keys():
            sogno_elem["measurand"] = type_to_measurand[mac_elem["type"]]
        else:
            continue
        if mac_elem["source"] in source_to_phase.keys():
            sogno_elem["phase"] = source_to_phase[mac_elem["source"]]
        else:
            continue

        # Actual measurement data assignment including necessary value conversion
        # from MAC to SOGNO format
        if sogno_elem["measurand"] == "activepower" or sogno_elem["measurand"] == "reactivepower":
            # convert single-phase power in kW to single-phase power in W as expected
            # by the SOGNO interface
            sogno_elem["data"] = float(mac_elem["data"]) * 1e3
        else:
            # take data as they are
            sogno_elem["data"] = float(mac_elem["data"])

        # Add element to output message in SOGNO format
        sogno_message["readings"].append(sogno_elem)
    return sogno_message
def check_parameter(data, parameter, min_length=None, max_length=None):
    """
    Check if the given parameter is in the request data. If the max or min
    parameters are present, it also checks the parameter length.

    :param data: Dict containing all the request data
    :param parameter: Key to search for
    :param min_length: Optional, min length of the parameter
    :param max_length: Optional, max length of the parameter
    :return: True if the parameter is present (and within the length bounds), False otherwise
    """
    if parameter not in data:
        return False
    if min_length is not None and len(data[parameter]) < min_length:
        return False
    if max_length is not None and len(data[parameter]) > max_length:
        return False
    return True
def base_metadata_url(alternate=None):
    """
    Return metadata URL.

    :param alternate: Alternate metadata type, e.g. for the simulator metadata. Default is None.
    :type alternate: str
    """
    baseurl = "http://downloads.blackberry.com/upr/developers/update/bbndk"
    tail = "{0}/{0}_metadata".format(alternate) if alternate is not None else "metadata"
    return "{0}/{1}".format(baseurl, tail)
def str2bool(string):
    """
    Convert string to corresponding boolean.

    - string : str
    """
    if string in ["True", "true", "1"]:
        return True
    elif string in ["False", "false", "0"]:
        return False
    else:
        return False
def mask(data, mask):
    """Applies a masking key to a byte string.

    Arguments:
        data (bytes): The data to apply the masking key to.
        mask (bytes): The masking key.
    """
    return bytes(data[i] ^ mask[i % 4] for i in range(len(data)))
def split_equal_chunks(l: list, chunk_size: int):
    """
    Ignores tail after last chunk

    >>> split_equal_chunks(list(range(10)), 3)
    [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    >>> split_equal_chunks(list(range(10)), 2)
    [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]

    :param l:
    :param chunk_size:
    :return:
    """
    return [l[i - chunk_size: i] for i in range(chunk_size, len(l) + 1, chunk_size)]
def nest_dict(d, prefixes, delim="_"):
    """Go from {prefix_key: value} to {prefix: {key: value}}."""
    nested = {}
    for k, v in d.items():
        for prefix in prefixes:
            if k.startswith(prefix + delim):
                if prefix not in nested:
                    nested[prefix] = {}
                nested[prefix][k.split(delim, 1)[1]] = v
            else:
                nested[k] = v
    return nested
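# Usage sketch for nest_dict with a single prefix (keys and values below are made up
# for illustration): keys with the prefix are grouped, others pass through flat.
expected = {'opt': {'lr': 0.1, 'decay': 0.9}, 'seed': 42}
assert nest_dict({'opt_lr': 0.1, 'opt_decay': 0.9, 'seed': 42}, prefixes=['opt']) == expected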
def isprefix(path1, path2):
    """Return true if path1 is a prefix of path2.

    :param path1: An FS path
    :param path2: An FS path

    >>> isprefix("foo/bar", "foo/bar/spam.txt")
    True
    >>> isprefix("foo/bar/", "foo/bar")
    True
    >>> isprefix("foo/barry", "foo/baz/bar")
    False
    >>> isprefix("foo/bar/baz/", "foo/baz/bar")
    False
    """
    bits1 = path1.split("/")
    bits2 = path2.split("/")
    while bits1 and bits1[-1] == "":
        bits1.pop()
    if len(bits1) > len(bits2):
        return False
    for (bit1, bit2) in zip(bits1, bits2):
        if bit1 != bit2:
            return False
    return True
def ffd_find(prgbanks, datalen, bank_factory=None):
    """Find the first unused range that will accept a given piece of data.

    prgbanks -- a list of (bytearray, slice list) tuples
    datalen -- the length of a byte string to insert in an unused area
    bank_factory -- a function returning (bytearray, slice list), called when
        data doesn't fit, or None to instead throw ValueError

    We use the First Fit Decreasing algorithm, which has been proven no more
    than 22% inefficient (Yue 1991). Because we don't plan to insert more than
    about 100 things into a ROM at once, we can deal with O(n^2) time
    complexity and don't need the fancy data structures that O(n log n)
    requires. Yet.

    Return a (bank, address) tuple denoting where it would be inserted.
    """
    for (bank, (prgdata, unused_ranges)) in enumerate(prgbanks):
        for (start, end) in unused_ranges:
            if start + datalen <= end:
                return (bank, start)
    # At this point we need to add another PRG bank. Create a PRG
    # bank that has the reset patch built into it.
    if not bank_factory:
        raise ValueError("could not add bank")
    prgbanks.append(bank_factory())
    last_bank_ranges = prgbanks[-1][1]
    if datalen > last_bank_ranges[0][1] - last_bank_ranges[0][0]:
        raise ValueError("string too long")
    return (len(prgbanks) - 1, 0x8000)
def clean_post_type(post):
    """
    Cleans Facebook post type.

    :param post: Facebook post in the format returned by Facebook's API.
    :type post: dict
    :return: "photo" | "video" | None
    :rtype: str | None
    """
    post_type = None
    for attachment in post["attachments"]["data"]:
        assert attachment["type"] in {"video_inline", "video_direct_response", "photo"}, post
        if attachment["type"] in {"video_inline", "video_direct_response"}:
            assert post_type in {"video", None}, post
            post_type = "video"
        elif attachment["type"] == "photo":
            assert post_type in {"photo", None}, post
            post_type = "photo"
    return post_type
def is_palindrom(x: str) -> bool:
    """
    A palindrome is a word that reads the same forwards and backwards.
    This function checks whether `x` is a palindrome, returning `True`
    if it is and `False` otherwise.

    >>> is_palindrom("katak")
    True
    >>> is_palindrom("label")
    False
    >>> is_palindrom("Kasur ini rusak")
    True
    """
    # Lowercase `x` and remove spaces
    normalized = x.lower().replace(" ", "")
    # Compare `normalized` with its reverse
    return normalized == normalized[::-1]
def sparse(x0, rho, gamma):
    """
    Proximal operator for the l1 norm (induces sparsity).

    Parameters
    ----------
    x0 : array_like
        The starting or initial point used in the proximal update step
    rho : float
        Momentum parameter for the proximal step (larger value -> stays closer to x0)
    gamma : float
        A constant that weights how strongly to enforce the constraint

    Returns
    -------
    theta : array_like
        The parameter vector found after running the proximal update step
    """
    lmbda = float(gamma) / rho
    return (x0 - lmbda) * (x0 >= lmbda) + (x0 + lmbda) * (x0 <= -lmbda)
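# Sketch of the soft-thresholding behaviour (assumes x0 is a numpy array; the threshold
# gamma/rho is 1.0 here): values inside [-1, 1] collapse to 0, values outside shrink
# toward 0 by 1.
import numpy as np
x0 = np.array([-3.0, -0.5, 0.0, 0.5, 2.0])
print(sparse(x0, rho=1.0, gamma=1.0))   # expected: [-2.  0.  0.  0.  1.]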
def _parent_name(target):
    """Split 'foo.bar' into ('foo', 'bar'); return ('', name) when there is no dot."""
    r = target.rsplit('.', 1)
    if len(r) == 1:
        return '', r[0]
    else:
        return r[0], r[1]
def compact_dict(dct):
    """
    Compacts a dict by removing pairs with a falsy value, meaning 0, None, [], {}, False, etc.

    :param dct:
    :return: The filtered dict
    """
    return dict(filter(lambda key_value: key_value[1], dct.items()))
def bits(data, size=8):
    """A tool for inspecting binary output; returns a list containing each byte's bits as a string."""
    out = []
    fmt = "".join(['{0:0', str(size), 'b}'])
    try:
        for byte in data:
            out.append(fmt.format(byte if isinstance(byte, int) else ord(byte)))
    except TypeError:
        out.append(fmt.format(data if isinstance(data, int) else ord(data)))
    return out
def getExplicitSolution(n):
    """Calculating a single explicit solution for a given n using guarantees provided by:
    - Hoffman, E., Loessi, J., & Moore, R. (1969). Constructions for the Solution of the
      m Queens Problem. Mathematics Magazine, 42(2), 66-72. doi:10.2307/2689192
    - Bernhardsson, B. (1991). Explicit solutions to the N-queens problem for all N. SGAR.

    Keyword arguments:
    n -- Required matrix size (n*n) / number of queens
    """
    queens = [(0, 0)] * n
    mid_n = n // 2
    if n % 6 != 2:
        for i in range(1, mid_n + 1):
            queens[i - 1] = (i, 2 * i)
            queens[i - 1 + mid_n] = (mid_n + i, 2 * i - 1)
    else:
        for i in range(1, mid_n + 1):
            queens[i - 1] = (i, 1 + ((2 * (i - 1) + mid_n - 1) % n))
            queens[i - 1 + mid_n] = (n + 1 - i, n - ((2 * (i - 1) + mid_n - 1) % n))
    return queens
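# Usage sketch: for n = 4 the construction yields one valid placement as 1-indexed
# (row, column) pairs; no two queens share a row, column, or diagonal.
print(getExplicitSolution(4))   # expected: [(1, 2), (2, 4), (3, 1), (4, 3)]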
def get_ion_state(line):
    """
    Get the ionization state of a `VoigtFit.Line` instance or of a `line_tag` string:
    ex: Line<'FeII_2374'> --> II
    ex: Line<'CIa_1656'>  --> I
    ex: 'CIV_1550'        --> IV
    """
    if isinstance(line, str):
        ion = line.split('_')[0]
    else:
        ion = line.ion

    if 'H2' in ion:
        return ''
    elif 'CO' in ion:
        return ''

    element = ion[:2] if ion[1].islower() else ion[0]
    length = len(element)
    ion_state = ion[length:]
    if ion_state[-1].islower():
        ion_state = ion_state[:-1]
    return ion_state
def encode_label(input_lines, label_dict):
    """ encode list of strings into word-level representation: number """
    lines = list(map(lambda t: list(map(lambda m: label_dict[m], t)), input_lines))
    return lines
def get_total_builds(builds):
    """Return all builds that have finished (i.e. have a non-None result)."""
    return [b for b in builds if b["result"] is not None]
def f(u_cart):
    """A non-linear function of the 6 components u_cart."""
    result = 0
    u = u_cart[0]
    for i in range(1, 6):
        if i % 2:
            v = u * u
        else:
            v = u * u * u
        result += v * u_cart[i]
        u = u_cart[i]
    result += u * u * u * u_cart[0]
    return result
def is_stressful(subj):
    """Recognize a stressful subject: all caps, ends with '!!!', or contains a
    red-flag word ('help', 'asap', 'urgent') even when its letters are repeated
    or separated by punctuation."""
    if subj[-3:] == '!!!' or subj.isupper():
        return True
    word = ' '
    for l in subj.lower():
        if l.isalpha():
            if word[-1] != l:
                word += l
    red_words = ['help', 'asap', 'urgent']
    for red in red_words:
        if red in word:
            return True
    return False
def rev_int(n):
    """Reverse the digits of an integer, preserving its sign (uses a list comprehension)."""
    sign = -1 if n < 0 else 1
    rev = [char for char in str(n) if char.isnumeric()]
    rev.reverse()
    return int(''.join(rev)) * sign
def prepare_params(params):
    """
    Prepare params before the http request is dispatched.

    The return value must be a list of doubles (tuples of length 2). By now,
    the preparation step basically transforms `params` to the right return
    type sorted by keys. In cases where `params` is None, None must be returned.

    :param params: Is key/value pair or `None`.
    """
    if params is None:
        return None
    if hasattr(params, 'items'):
        params = params.items()
    return sorted(params)
def parse_midi_input(midi_key_presses):
    """Takes in a list of MidiKeyPress objects, returns the notes, ordered by time pressed."""
    midi_key_presses.sort(key=lambda x: x.time)
    return [x.note for x in midi_key_presses if x.velocity > 0]
def bubble_sort(arr: list) -> list:
    """
    Sort a list to non-decreasing order using bubble sort.

    :param arr: list
    :return: sorted list
    """
    n = len(arr)
    for i in range(n - 1):
        for j in range(n - 1 - i):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr
def match3(g1, s, guesses_number):
    """Return the number of guesses the user has left after guessing `g1` against the word `s`."""
    if g1 in s:
        print("You have " + str(guesses_number) + " guesses left.")
    else:
        guesses_number = guesses_number - 1
        if guesses_number == 0:
            pass
        else:
            # print("There is no " + str(g1) + "'s in the word.")
            print("You have " + str(guesses_number) + " guesses left.")
    return guesses_number
def _valid_number(data: str) -> bool:
    """ Checks if a data is valid integer or float number. """
    try:
        if data.isdigit():
            return isinstance(int(data), int)
        else:
            return isinstance(float(data), float)
    except ValueError:
        return False
def merge(line):
    """Helper function that merges a single row or column in 2048."""
    r1 = [0] * len(line)   # use the row length rather than a hard-coded 4
    r2 = []
    pos1, pos2 = 0, 0
    score = 0
    for num in line:
        if num != 0:
            r1[pos1] = num
            pos1 += 1
    while len(r1) > 1:
        if r1[pos2] == r1[pos2 + 1]:
            r2.append(r1[pos2] * 2)
            score += r1[pos2]
            del r1[pos2:pos2 + 2]
        else:
            r2.append(r1[pos2])
            del r1[pos2]
    r2 += r1
    if len(r2) < len(line):
        r2.extend([0] * (len(line) - len(r2)))
    r2.append(score)
    return r2
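# Usage sketch: the returned list is the merged row followed by the score this
# implementation tracks (here the pre-merge tile value of each merge).
print(merge([2, 2, 4, 0]))   # expected: [4, 4, 0, 0, 2]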
def bs_contains(ordered, target):
    """Return index of target in ordered, or -(p+1) where p is the index at which to insert it."""
    low = 0
    high = len(ordered) - 1
    while low <= high:
        mid = (low + high) // 2
        if target < ordered[mid]:
            high = mid - 1
        elif target > ordered[mid]:
            low = mid + 1
        else:
            return mid
    return -(low + 1)
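# Usage sketch: a hit returns the index; a miss returns -(insertion_point + 1),
# mirroring the Java Arrays.binarySearch convention.
ordered = [10, 20, 30, 40]
assert bs_contains(ordered, 30) == 2
assert bs_contains(ordered, 25) == -3   # 25 would be inserted at index 2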
def make_filename(params, run):
    """Generate a filename based on params and run.

    params: (net, seed, freq, weight)
    """
    nets = ['net_ppdynamicstuned', 'net_ppdynamicsoff']
    return (f"time-stamps_{nets[int(params[0])]}.TunedNetwork_{params[2]}_"
            f"{str(run).zfill(3)}_00.00025_00.00050_{params[1]}_0100_.npz")
def PS_custom_set_process(v):
    """Convert float to proper int value, for working with the PS 120-10."""
    return int(v * 1e2)
def heat_equation(Cu, Cth, Ck, density=2700, c1=9.52, c2=2.56, c3=3.48):
    """
    Heat production equation from Beamish and Busby (2016).

    density: rock density in kg/m3
    Cu: weight of uranium in ppm
    Cth: weight of thorium in ppm
    Ck: weight of potassium in %

    Returns:
        Radioactive heat production in W/m3
    """
    return (10e-5) * density * (c1 * Cu + c2 * Cth + c3 * Ck)
def roll(I_x, S_gross_w, density, velocity, span, Cl_p):
    """
    This calculates the approximate time constant for the roll mode.

    Assumptions:
        Only the rolling moment equation is needed from the Lateral-Directional equations
        Sideslip and yaw angle are being neglected and thus set to be zero. delta_r = 0
        X-Z axis is plane of symmetry
        Constant mass of aircraft
        Origin of axis system at c.g. of aircraft
        Aircraft is a rigid body
        Earth is inertial reference frame
        Perturbations from equilibrium are small
        Flow is Quasisteady

    Source:
        J.H. Blakelock, "Automatic Control of Aircraft and Missiles",
        Wiley & Sons, Inc. New York, 1991, p 134-135.

    Inputs:
        I_x - moment of inertia about the body x axis                  [kg * meters**2]
        S_gross_w - area of the wing                                   [meters**2]
        density - flight density at condition being considered        [kg/meters**3]
        span - wing span of the aircraft                               [meters]
        velocity - flight velocity at the condition being considered  [meters/seconds]
        Cl_p - change in rolling moment due to the rolling velocity   [dimensionless]

    Outputs:
        roll_tau - approximation of the time constant of the roll mode of an
                   aircraft (positive values are bad)                 [seconds]

    Properties Used:
        N/A
    """
    # process
    roll_tau = 4. * I_x / (S_gross_w * density * velocity * span ** 2. * Cl_p)
    return roll_tau
def _check_type_and_items(_converted_item, _control_item, _new_type):
    """This function facilitates testing the :py:func:`khoros.utils.core_utils.convert_set` function."""
    _correct_type = isinstance(_converted_item, _new_type)
    _items_present = True
    for _item in _control_item:
        if _item not in _converted_item:
            _items_present = False
    return all((_correct_type, _items_present))
def is_increasing(channel_indices):
    """Check if a list of indices is sorted in ascending order.

    If not, we will have to convert it to a numpy array before slicing,
    which is a rather expensive operation.

    Returns:
        bool
    """
    last = channel_indices[0]
    for i in range(1, len(channel_indices)):
        if channel_indices[i] < last:
            return False
        last = channel_indices[i]
    return True
def intersecting_bounds(a1, a2, b1, b2):
    """
    Check whether two ranges intersect.

    Specifically: a1 lies within [b1, b2], or b1 lies within [a1, a2].
    """
    cond1 = ((a1 >= b1) & (a1 <= b2)) | \
            ((b1 >= a1) & (b1 <= a2))
    return cond1
def to_int(time_float):
    """Frontend JS sometimes doesn't handle milliseconds, so multiply by 1000 in these cases."""
    if not time_float:
        return 0
    return int(time_float * 1000)
def unquote_value(value):
    """Unquote a configuration value."""
    if not value:
        return ''
    if value[0] in '"\'' and value[0] == value[-1]:
        value = value[1:-1].decode('string-escape')
    return value.decode('utf-8')
def get_nodata_value(scene_name):
    """ Get nodata value based on dataset scene name. """
    if 'urbanscape' in scene_name.lower() or 'naturescape' in scene_name.lower():
        nodata_value = -1
    else:
        raise NotImplementedError
    return nodata_value
def reformat_time(mmmyy):
    """ From MMM-YY to %Y-%m """
    MONTHS = {
        "Jan": 1, "Feb": 2, "Mar": 3, "Apr": 4, "May": 5, "Jun": 6,
        "Jul": 7, "Aug": 8, "Sep": 9, "Oct": 10, "Nov": 11, "Dec": 12,
    }
    mmm, yy = mmmyy.split("-")
    Y = int(yy) + 2000
    m = MONTHS.get(mmm)
    return "%i-%02i" % (Y, m)
def ip4_to_int(ip):
    """
    Convert a string containing a valid IPv4 address to a 32-bit integer.

    >>> ip4_to_int("192.168.0.1")
    3232235521
    """
    ip = ip.rstrip().split('.')
    ipn = 0
    while ip:
        ipn = (ipn << 8) + int(ip.pop(0))
    return ipn
def str2bool(val):
    """Convert the strings "True" and "False" to the booleans True and False."""
    if val == "True":
        return True
    elif val == "False":
        return False
    else:
        raise Exception("unknown string for bool '%s'" % val)
def to_jaden_case(string):
    """
    Your task is to convert strings to how they would be written by Jaden Smith.
    The strings are actual quotes from Jaden Smith, but they are not capitalized
    in the same way he originally typed them.

    :param string: A string value input.
    :return: A new string with each word in the sentence capitalized.
    """
    return " ".join(x.capitalize() for x in string.split())
def bd_makeDateUri(x):
    """Return a string for a date that can be used as a URI.

    InferLink provides the dates in the format 'Mon, Mar 31, 2014, 12:38:14'
    without a timezone. We need a robust date parser. The current code just
    makes something that's unique even though it does not look like a date.
    """
    import re
    x = re.sub('[^A-Za-z0-9]+', '', x)
    x = x.lower()
    return x
def collapse_into_single_dates(x, y):
    """
    Function used for a time plot to convert multiple values into one value,
    while retaining enough information to perform a moving average over time.

    :param x: a list of dates in ascending order
    :param y: a list of values that support the '+' operator, as a function of date
    :return: a unique list of dates, sum of y for that date, and number of original
        points for that date
    :rtype: dict
    """
    # average daily data and keep track of points per day
    x_collapsed = [x[0]]
    y_collapsed = [y[0]]
    w_collapsed = [1]
    for n in range(1, len(x)):
        if x[n] == x_collapsed[-1]:
            y_collapsed[-1] = y_collapsed[-1] + y[n]
            w_collapsed[-1] += 1
        else:
            x_collapsed.append(x[n])
            y_collapsed.append(y[n])
            w_collapsed.append(1)
    return {'x': x_collapsed, 'y': y_collapsed, 'w': w_collapsed}
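# Usage sketch (the date strings are just illustrative labels): repeated dates are summed
# and their point counts tracked, so a later moving average can weight each day correctly.
result = collapse_into_single_dates(['2024-01-01', '2024-01-01', '2024-01-02'], [1, 2, 5])
print(result)   # expected: {'x': ['2024-01-01', '2024-01-02'], 'y': [3, 5], 'w': [2, 1]}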
def adjust_cruise_no(cruise, old, shipc):
    """If shipc should be mapped we map cruise_no as well."""
    return cruise.replace(old, shipc)
    # return ''.join((cruise[:4], shipc, cruise[8:]))
def error_checks(fn_handles, ref_map, warn):
    """
    Error checks footnote references and the link def reference list...

    - ensure every footnote references an existing item in the ref list
    - ensure every ref list item is referenced by at least one footnote
    """
    ref_handles = set(ref_map.keys())

    missing_refs = fn_handles - ref_handles
    if missing_refs:
        print("Some footnotes never appear in the references...")
        print([x for x in missing_refs])
        if not warn:
            print("Correct above issues and re-try...")
            exit(1)

    missing_fns = ref_handles - fn_handles
    if missing_fns:
        print("Some references never appear in a footnote...")
        print([x for x in missing_fns])
        if not warn:
            print("Correct above issues and re-try...")
            exit(1)

    return missing_fns
def diamond(input_list):
    """Return the number of diamonds (a '<' closed by a matching '>') found."""
    diamonds = 0
    open_diamonds = 0
    for i in input_list:
        if i == '<':
            open_diamonds += 1
        elif i == '>':
            if open_diamonds > 0:
                open_diamonds -= 1
                diamonds += 1
    return diamonds
def _get_timeunit_scaling(time_unit):
    """MNE expects time in seconds, return required scaling."""
    scalings = {'ms': 1000, 's': 1, 'unknown': 1}
    if time_unit in scalings:
        return scalings[time_unit]
    else:
        raise RuntimeError(f'The time unit {time_unit} is not supported by '
                           'MNE. Please report this error as a GitHub '
                           'issue to inform the developers.')
def outputs_file(job_name: str) -> str:
    """Return the filename for the job outputs"""
    return f"__cixx_outputs_{job_name}.json"
def get_char_ranks(char_list):
    """Given a list of characters, assigns each a unique integer and returns two mapping dictionaries."""
    char_ranks = {char: rank for rank, char in enumerate(char_list)}
    inverse_char_ranks = {rank: char for rank, char in enumerate(char_list)}
    # char_ranks[''] = 0
    # inverse_char_ranks[0] = ''
    return char_ranks, inverse_char_ranks
def change_field(
    array,
    start: int,
    stop: int,
    content: list,
    filler: bytes = b'\x00',
):
    """
    Change the content of a .eeg file field in memory.

    Args:
        array: is the header of the .eeg file.
        start: is the starting index of where to change the field.
        stop: is the stopping index of where to change the field.
        content: is the content to write in the field.
        filler: is the filling character used in the field.
    """
    for index in range(start, stop):
        if index - start < len(content):
            array[index] = content[index - start]
        else:
            array[index] = filler
    return stop - start >= len(content)
def jaccard_similarity(x, y):
    """ returns the jaccard similarity between two lists """
    intersection_cardinality = len(set.intersection(*[set(x), set(y)]))
    union_cardinality = len(set.union(*[set(x), set(y)]))
    return intersection_cardinality / float(union_cardinality)
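# Usage sketch: the lists are deduplicated via set(); two of the three distinct
# elements are shared, so the similarity is 2/3.
assert abs(jaccard_similarity([1, 2, 2], [2, 1, 3]) - 2 / 3) < 1e-9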
def create_json(sensor):
    """Simple function that creates a json object to return for each sensor.

    Args as data:
        sensor object retrieved from MongoDB
    Returns:
        { Formatted sensor object as below }
    """
    json_object = {'building': sensor.get('building'),
                   'name': sensor.get('name'),
                   'tags': sensor.get('tags'),
                   'metadata': sensor.get('metadata'),
                   'source_identifier': sensor.get('source_identifier'),
                   'source_name': sensor.get('source_name')}
    return json_object
def largest_word(sentence):
    """Determines the largest (longest) word in a sentence."""
    word = ''
    for w in sentence.split():
        if len(w) > len(word):
            word = w
    return word
def generate_uniform_weights(random_workers):
    """
    This function generates uniform weights for each stratum in random_workers.

    :param random_workers:
    :return:
    """
    strata_weights = dict()
    weight = 1.0 / len(list(random_workers.keys()))
    for stratum in random_workers:
        strata_weights[stratum] = weight
    return strata_weights
def bin_pack(items, bin_size, bins=None):
    """Pack items into bins with size bin_size."""
    bins = [] if bins is None else bins
    if not items:
        return bins
    item = items[0]
    solutions = []
    for i, bin in enumerate(bins):
        if sum(bin) + item > bin_size:
            # Can't add to this bin
            continue
        # Copy the bins including their inner lists so recursive branches don't share state
        sbins = [b[:] for b in bins]
        sbins[i].append(item)  # Add item to bin i
        solutions.append(bin_pack(items[1:], bin_size, sbins))
    # Open a new bin
    solutions.append(bin_pack(items[1:], bin_size, bins + [[item]]))
    return min(solutions, key=len)
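# Usage sketch (with the per-bin copy fix above): items that would overflow a bin spill
# into a new one, and the recursion returns the packing with the fewest bins it found.
print(bin_pack([4, 3, 3, 2], bin_size=6))   # expected: [[4, 2], [3, 3]]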
def oct_add(x1, x2):
    """Add two values that may each be a 2-tuple or a scalar."""
    if type(x1) is tuple:
        if type(x2) is tuple:
            return (x1[0] + x2[0], x1[1] + x2[1])
        else:
            return (x1[0] + x2, x1[1])
    else:
        if type(x2) is tuple:
            return (x1 + x2[0], x2[1])
        else:
            return x1 + x2