content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def unpack_tuple(f):
    """Wrap *f* so it can be called with a single iterable of arguments.

    The returned unary function invokes ``f(*packed)`` on its argument.
    """
    def _unary(packed):
        return f(*packed)
    return _unary
d3de9361020567d62b555d05f987741a27a92272
238,610
def time_to_mss(sectime):
    """Convert a number of seconds into ``m:ss`` notation.

    :param sectime: int, a number of seconds
    :return: str, sectime formatted as m:ss (seconds zero-padded to 2 digits)
    """
    # Same arithmetic as before; int() truncation keeps float inputs working.
    minutes = int(sectime / 60)
    seconds = int(sectime % 60)
    return '{0:d}:{1:02d}'.format(minutes, seconds)
86d86ef386c4f1736ec5fd3d1711155b4c613bc0
589,991
import pickle


def read_sphinx_environment(pth):
    """Load and return the sphinx ``environment.pickle`` object stored at *pth*."""
    with open(pth, 'rb') as handle:
        return pickle.load(handle)
bf892fb7cc644854084f4a1d63ae052dc55d9bc5
435,046
def DeltaAngle(a, b):
    """
    Calculates the shortest difference between two given angles given in degrees.

    Parameters
    ----------
    a : float
        Input a
    b : float
        Input b

    Returns
    -------
    float
        Shortest angular distance in degrees, in [0, 180].
    """
    # abs(a - b) % 360 alone is NOT the shortest difference: for a=0, b=350
    # it gives 350 while the shortest path is 10 degrees. Take the smaller
    # of the two ways around the circle.
    d = abs(a - b) % 360
    return min(d, 360 - d)
ac63837ad8150bc4c2aa92e03b0f301b903517f3
312,919
import math


def generate_sin_wave(sample_rate, frequency, duration, amplitude):
    """Generate a sine wave as a list of signed 16-bit sample values.

    `frequency` in Hertz, `duration` in seconds; `amplitude` must be in
    the range [0..1] (it scales the 32767 full-scale value).
    """
    total_samples = int(duration * sample_rate)
    peak = amplitude * 32767
    return [int(math.sin(2 * math.pi * n * frequency / sample_rate) * peak)
            for n in range(total_samples)]
550f6514bbc6b0c1c3b754ad0d94ef05ef3060f3
182,533
def GetFunctionImageNames(baseName, funcNameList):
    """Generate a list of FITS filenames as would be created by makeimage
    in "--output-functions" mode.

    Names follow the pattern ``<baseName><index>_<funcName>.fits`` with
    1-based indices.
    """
    return ["%s%d_%s.fits" % (baseName, idx, funcName)
            for idx, funcName in enumerate(funcNameList, start=1)]
69da24936174b029a48307e9f933e1e1d5119aa9
429,961
def iroot(a, b):
    """Function to calculate a-th integer root from b, i.e. floor(b ** (1/a)).

    Uses integer-only Newton iteration, so it is exact for arbitrarily
    large ints (no float precision loss).

    Example: iroot(2, 4) == 2

    Parameters:
        a: int
            Root power
        b: int
            Number to calculate root from

    Returns:
        result: int
            Integer a-th root of b
    """
    # 0 and 1 are their own a-th roots for any power.
    if b < 2:
        return b
    a1 = a - 1
    # Newton step for x**a - b: x' = (a1*x + b // x**a1) // a.
    # Track three consecutive iterates; the sequence eventually cycles
    # between the two values bracketing the true root.
    c = 1
    d = (a1 * c + b // (c ** a1)) // a
    e = (a1 * d + b // (d ** a1)) // a
    while c not in (d, e):
        c, d, e = d, e, (a1 * e + b // (e ** a1)) // a
    # The smaller of the final pair is the floor of the root.
    return min(d, e)
6704b7d0b663008c5a4e9d235d90aabcc9ca063d
666,120
def get_client_browser(request):
    """Return the client's user-agent string from the request metadata.

    Returns None when the HTTP_USER_AGENT header is absent.
    """
    meta = request.META
    return meta.get('HTTP_USER_AGENT')
9142f2a3331a8b6b8691aa932ff0d3631db00fcc
590,030
def get_volume_id_to_instance_id_map(volume_info):
    """Generate volume id to instance id map.

    Unattached volumes (empty or missing "Attachments") are ignored; the
    instance id of the first attachment is used.
    """
    volume_to_instance = {}
    for volume_id in volume_info:
        attachments = volume_info.get(volume_id).get("Attachments")
        if attachments:
            volume_to_instance[volume_id] = attachments[0].get("InstanceId")
    return volume_to_instance
a0c1e85739a154205a85c51f5eff8602178ebd57
401,418
def setDefaultVal(myDict, name, value):
    """
    Return ``myDict[name]`` if present; otherwise store *value* under
    *name* and return it.

    :param myDict: a dictionary we want to update
    :param name: the name of the property
    :param value: the default value if it is not already set
    :return: the value, either the existing one or the supplied default
    """
    # This is exactly dict.setdefault: one lookup instead of the manual
    # membership test + assignment.
    return myDict.setdefault(name, value)
531067276170d28ba1a3496ab9d955a3d9994b60
216,444
def daemon_launch_lock_path(root_path):
    """
    Path to the lock file that exists while a daemon is launching but not
    yet started, preventing multiple instances from launching.
    """
    run_dir = root_path / "run"
    return run_dir / "start-daemon.launching"
dec00a8a358132dc5e8dbb5084dbf118e9229ad0
52,872
def allowed_to_preview(user):
    """
    Is the user allowed to view the preview?

    Users are only allowed to view the preview if they are authenticated,
    active and staff.

    :param user: A User object instance.
    :return: Boolean.
    """
    # Return the condition directly; bool() guarantees a real True/False
    # even if the user attributes are truthy non-booleans.
    return bool(user.is_authenticated and user.is_active and user.is_staff)
3f628939f79ffcb7f0802b71a9433eaa58c48946
670,251
def keyword(name=None):
    """Decorator that attaches a ``robot_name`` attribute to a function.

    Robot Framework reads ``robot_name`` when resolving a keyword's
    name::

        @keyword(name='Login Via User Panel')
        def login(username, password):
            ...

    Used without parentheses (``@keyword``), the decorated function keeps
    its own name but still gains the ``robot_name`` attribute (set to
    None), which is useful for marking methods as keywords in a dynamic
    library.
    """
    # Bare usage: @keyword applied directly to the function.
    if callable(name):
        return keyword()(name)

    def decorate(func):
        func.robot_name = name
        return func

    return decorate
c6362611589a584bbb720693e54a969a863d43f0
572,320
from collections import OrderedDict


def dict_to_str(input_dict, indent=""):
    """Turn a dictionary of attributes/status values into a pretty-printed
    string for use on the command line. Recursively pretty-prints nested
    dictionaries.

    This function is most useful with OrderedDicts as it keeps the same
    printing order.
    """
    # Find minimum width that fits all names, so '=' signs line up.
    min_width = 0
    for n in input_dict:
        min_width = max(min_width, len(str(n)))
    # Build string
    ret = ""
    for n in input_dict:
        # isinstance (not an exact type check against the typing alias —
        # `type(x) in (dict, typing.OrderedDict)` never matched a real
        # OrderedDict) so dict subclasses recurse correctly.
        if isinstance(input_dict[n], dict):
            ret += indent + str(n) + ' = '
            ret += '\n' + dict_to_str(input_dict[n], indent + "  ")
        else:
            ret += indent + str(n).ljust(min_width) + ' = '
            ret += str(input_dict[n]) + '\n'
    return ret
8ce711ecbaed54d618c3ca621229be789ef9b257
537,817
def find_index(token, low, high, features):
    """Binary search for *token* in the sorted sequence *features*.

    Searches the inclusive index range [low, high] in O(log n) time
    (versus O(n) for np.where()). Returns the index of *token*, or -1
    when it is absent.
    """
    while high >= low:
        mid = int((high + low) / 2)
        probe = features[mid]
        if probe == token:
            return mid
        if probe > token:
            high = mid - 1
        else:
            low = mid + 1
    return -1
181443fa1a95419c28e357b646ecf98e00cdeeaf
687,875
def _find_or_create_output_view(window, target):
    """
    Find or create a view in the current window for putting the lint output
    in. This is used when lint_output_to_view is set to True to create the
    view for the lint results to be displayed in.
    """
    # Caption depends on whether a whole package or a single file was
    # linted; unknown target types fall back to "???".
    caption = {
        "package": "HyperHelpAuthor Lint: {pkg}",
        "single": "HyperHelpAuthor Lint: {target} ({pkg})"
    }.get(target.target_type, "???").format(
        target=target.files[0],
        pkg=target.pkg_info.package)

    # Reuse an existing lint view if one is open: retitle it, make it
    # writable again, and erase its previous contents (select-all + delete).
    for view in window.views():
        if view.name().startswith("HyperHelpAuthor Lint"):
            view.set_name(caption)
            view.set_read_only(False)
            view.run_command("select_all")
            view.run_command("left_delete")
            return view

    # Otherwise open a fresh scratch view (no save prompt on close).
    view = window.new_file()
    view.set_name(caption)
    view.set_scratch(True)
    return view
a7072c7d646df20b65f2acbe190693eba66721c9
534,662
def get_pkg_version(pkg_name):
    """ Return package version for `pkg_name` if installed

    Returns
    -------
    pkg_version : str or None
        Return None if package not importable. Return 'unknown' if standard
        ``__version__`` string not present. Otherwise return version string.
    """
    try:
        module = __import__(pkg_name)
    except ImportError:
        return None
    # getattr with a default replaces the try/except AttributeError.
    return getattr(module, '__version__', 'unknown')
255393bba3982db044c199027da6966d4590221c
325,597
def str_location(loc):
    """
    Render a clingo AST location in a compact readable form.

    Always shows the begin position as ``file:line:column``; components of
    the end position are appended only from the first one that differs,
    with '-' before the first differing component and ':' between the rest.
    """
    start, stop = loc.begin, loc.end
    ret = "{}:{}:{}".format(start.filename, start.line, start.column)
    sep = "-"  # switches to ':' once the first difference is printed
    same = start.filename == stop.filename
    if not same:
        ret += sep + str(stop.filename)
        sep = ":"
    same = same and start.line == stop.line
    if not same:
        ret += sep + str(stop.line)
        sep = ":"
    same = same and start.column == stop.column
    if not same:
        ret += sep + str(stop.column)
    return ret
a17d9e983ac60ee2382693c6ee2b0ef02c3b83b9
387,482
def read_results(results_filename):
    """Loads a GrowthRates results file.

    Skips everything up to and including the first line that starts with
    '*' (the header separator), then returns the remaining lines.

    Parameters
    ----------
    results_filename : str
        path to results file.

    Returns
    -------
    list of str
        Stripped result lines following the header separator, excluding
        any further 57-asterisk separator lines.
    """
    with open(results_filename) as f:
        # Consume lines until the '*'-prefixed separator is found.
        for line in f:
            if line.startswith('*'):
                break
        # Continue from where the first loop stopped; `line != '*' * 57`
        # drops a trailing separator row (NOTE(review): only matches if the
        # line is exactly 57 asterisks with no newline — confirm format).
        return [line.strip() for line in f if line and line != '*' * 57]
bc82a0fce99a7a72ff461e765547fd9483cc8214
447,165
def has_passed_all_test_cases(report):
    """Check that all test cases passed.

    :param report: iterable of test-case dicts with a "result" key.
    :return: True when no case has result "FAILED" (vacuously True when empty).
    """
    # all() with a generator short-circuits on the first failure, same as
    # the explicit loop did.
    return all(case["result"] != "FAILED" for case in report)
cc5e81cce2ffe6d57f77fe59afecbd6e1477fb45
313,593
import re


def get_version(string):
    """Retrieve the ``__version__`` attribute for Layabout.

    Raises RuntimeError when no version assignment is found.
    """
    # re.S + leading greedy '.*' means the LAST assignment in the source
    # wins, matching the original behavior.
    found = re.match(r".*__version__ = '(.*?)'", string, flags=re.S)
    if found is None:
        raise RuntimeError('No version string could be matched')
    return found.group(1)
36aa18a6750fc5d17e1da13ba6768afe7b52960d
636,376
def get_cv(word, vowels, sep=None):
    """
    Calculate the consonant ("C") and vowel ("V") structure of the given
    word. Returns a string of "C"/"V" characters corresponding to the
    segments of the word.

    *vowels* -- A list of the characters representing vowels.

    *sep* -- String used to separate phonemes (if the words are
    phonological forms). To separate into individual characters, set to
    `None` (default).
    """
    segments = word.split(sep) if sep else list(word)
    return "".join("V" if seg in vowels else "C" for seg in segments)
fbfe1c11e2b21f51fcd95bff454edd295e502314
63,694
import decimal


def euro_to_cent(amount):
    """Convert a Euro amount to cents, rounding with ROUND_HALF_DOWN.

    (The original German docstring stated the opposite direction; the code
    clearly multiplies by 100, i.e. Euro -> Cent.)

    >>> euro_to_cent(1)
    Decimal('100')
    >>> euro_to_cent('0.5')
    Decimal('50')
    >>> euro_to_cent('0.01')
    Decimal('1')
    >>> euro_to_cent('0.001')
    Decimal('0')
    """
    # Strings are accepted too: Decimal('0.5') avoids float inaccuracies.
    value = decimal.Decimal(amount) * 100
    # Quantize to whole cents; ties round towards zero (HALF_DOWN).
    return value.quantize(decimal.Decimal('1'), rounding=decimal.ROUND_HALF_DOWN)
c888cf7cf9b8bc827f3bdeb069d1e99aa2d16a9b
558,424
def makeMove(column, board, myTurn): """ Input: column (int), whose turn (boolean), and the board (matrix) What: insert your coin in the column you chose How: find the first empty (0) spot in the column and replace with your number Output: the new board >>> makeMove(1, board, True) [[0, 2, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]] """ # adapt the number of rows to the game rows=board.shape[0] # check if it is my turn if myTurn == True: #bot = player 2 coin = 2 else: #human = player 1 coin = 1 board_temp = board.copy() for row in reversed(range(rows)): if board_temp[row, column] == 0: board_temp[row, column] = coin return board_temp
b6a520067d9e276111abd0cec9715adfba0fd173
495,243
def get_variables(phrase):
    """Return the distinct variable names occurring in a boolean phrase.

    Logical operator characters are treated as separators and the special
    token '!allnotes!' is excluded. Order of the result is unspecified.
    """
    for symbol in '()&~|<>':
        phrase = phrase.replace(symbol, ' ')
    tokens = set(phrase.split(' '))
    tokens.discard('')
    tokens.discard('!allnotes!')
    return list(tokens)
fdb37313f4a5e2ca5d91ff2f4c6310f97f0548dc
590,010
def get_polysemy_class(num_senses):
    """
    Convert the number of senses (integer) into a polysemy range label.
    """
    if num_senses == 0:
        return '0'
    if num_senses == 1:
        return 'mono'
    # Ascending upper bounds (exclusive) with their labels.
    for upper_bound, label in ((5, 'low_polysemy'),
                               (15, 'medium_polysemy'),
                               (30, 'high_polysemy')):
        if num_senses < upper_bound:
            return label
    return 'very_high_polysemy'
53f12a5054f8b37c0846332fd1f7f158ef030e5e
458,099
def count_measurements(report_uuid: str, database) -> int:
    """Return the number of measurements stored for the given report."""
    query = {"report_uuid": report_uuid}
    return database.measurements.count_documents(filter=query)
c9f20149ec975134ad7f0abab28a2c632f5295a1
697,115
def euler_bernoulli_beam(x, y, EI, f):
    """Euler-Bernoulli Beam Theory defining y'''' for Lu=f.

    This first-order-system form is what Scipy's solve_ivp expects.

    Keyword arguments:
    x -- independent variable (present for the solve_ivp signature)
    y -- state vector [y, y', y'', y''']
    EI -- EI(x) parameter (flexural rigidity)
    f -- forcing function value f(x)
    """
    first, second, third = y[1], y[2], y[3]
    return [first, second, third, -f / EI]
461abaf13705ca835f036648d9252827401ffd12
155,262
def _pop(items: list[str], from_: dict) -> tuple: """Pop a value(s) off a dictionary and return them as a tuple. For example the following pop two of the values from a dictionary, key'd 'key1' and 'key2': >>> key1, key2 = _pop( items=['key1', 'key2'], from_={'key1': 'value1', 'key2': 'value2', 'key3': 'value3'} ) >>> ('value1', 'value2') :param items: A list of keys to pop off `from_`. :type items: list[str] :param from_: The dictionary to pop items off. :type from_: dict :return: A tuple of the values of the popped dictionary keys. :rtype: tuple """ return tuple(from_.pop(key) for key in items)
b0fed3dde64006c6b39668c23f41d9d7a50bb9a4
183,237
import binascii


def bin2base64(bin_str):
    """Base64-encode *bin_str* (bytes); the result includes a trailing newline."""
    encoded = binascii.b2a_base64(bin_str)
    return encoded
0482412894d339b97517bf100dc159084f482238
63,774
def get_all_layers(layer):
    """
    This function gathers all layers below one or more given :class:`Layer`
    instances, including the given layer(s). Its main use is to collect all
    layers of a network just given the output layer(s).

    :usage:
        >>> from lasagne.layers import InputLayer, DenseLayer
        >>> l_in = InputLayer((100, 20))
        >>> l1 = DenseLayer(l_in, num_units=50)
        >>> all_layers = get_all_layers(l1)
        >>> all_layers == [l1, l_in]
        True
        >>> l2 = DenseLayer(l_in, num_units=10)
        >>> all_layers = get_all_layers([l2, l1])
        >>> all_layers == [l2, l1, l_in]
        True

    :parameters:
        - layer : Layer
            the :class:`Layer` instance for which to gather all layers
            feeding into it, or a list of :class:`Layer` instances.

    :returns:
        - layers : list
            a list of :class:`Layer` instances feeding into the given
            instance(s) either directly or indirectly, and the given
            instance(s) themselves.
    """
    # Normalize to a list; this list doubles as the "visited" set and as
    # the result (breadth-first order).
    if isinstance(layer, (list, tuple)):
        layers = list(layer)
    else:
        layers = [layer]
    # BFS frontier; pop(0) makes this FIFO.
    layers_to_expand = list(layers)
    while len(layers_to_expand) > 0:
        current_layer = layers_to_expand.pop(0)
        children = []
        # Multi-input layers expose 'input_layers'; single-input layers
        # expose 'input_layer'; input layers expose neither.
        if hasattr(current_layer, 'input_layers'):
            children = current_layer.input_layers
        elif hasattr(current_layer, 'input_layer'):
            children = [current_layer.input_layer]
        # filter the layers that have already been visited, and remove None
        # elements (for layers without incoming layers)
        children = [child for child in children
                    if child not in layers and child is not None]
        layers_to_expand.extend(children)
        layers.extend(children)
    return layers
5864720fdd6131e0206d03b0b54a57732dfa175c
118,540
import json


def to_javascript(obj):
    """For when you want to inject an object into a <script> tag.

    Escapes '</' so a string value cannot prematurely close the tag.
    """
    serialized = json.dumps(obj)
    return serialized.replace('</', '<\\/')
2fba6a30eb19fd0b8fcc4295c3994ad6fa82b02f
25,324
from typing import Union
import shlex


def parse_code_package_config(config) -> Union[dict, bool]:
    """
    Parse the code package options from an AWSBatchExecutor config.

    Returns False when code packaging is disabled, otherwise a dict with
    shell-split "includes" and "excludes" pattern lists.
    """
    if not config.getboolean("code_package", fallback=True):
        return False
    includes = shlex.split(config.get("code_includes", "**/*.py"))
    excludes = shlex.split(config.get("code_excludes", ""))
    return {"includes": includes, "excludes": excludes}
edfd952f9341d8a31e401559185f4c7876aa76e0
476,233
def reactions_to_user_lists(reaction_list):
    """Convert a list of GitHub API reactions into sorted username lists
    for upvotes (+1) and downvotes (-1).

    Reactions from non-"User" accounts and other reaction types are
    ignored.
    """
    upvotes = []
    downvotes = []
    buckets = {"+1": upvotes, "-1": downvotes}
    for reaction in reaction_list:
        content = reaction["content"]
        if content in buckets and reaction["user"]["type"] == "User":
            buckets[content].append(reaction["user"]["login"])
    upvotes.sort()
    downvotes.sort()
    return upvotes, downvotes
6f3355b92ddb230028e5b81a8c3dd4987ff4f235
138,725
def sort_uniq(l: list) -> list:
    """ 1.1 Remove the duplicates in a sorted list

    Assumes the input is sorted so equal elements are adjacent.
    """
    if not l:
        return l
    deduped = [l[0]]
    # Compare each element with its predecessor; keep only transitions.
    for previous, current in zip(l, l[1:]):
        if previous != current:
            deduped.append(current)
    return deduped
7daacf3544346c9dd0d473db4f10511d1bce1066
197,939
def check_filters(filters):
    """Check that the filters are valid and return their normalized form.

    :param filters: A string of '&'-joined filters such as
        ``"name__contains=bob&age__gt=3"``.
    :returns: The validated filter string with spaces replaced by ``%20``.
    :raises TypeError: If ``filters`` is not a string.
    :raises ValueError: If a filter lacks ``=`` or ``__``, or names an
        unknown relationship.

    This function conducts minimal parsing, to make sure that the
    relationship exists and that each filter is generally well formed.
    """
    if not isinstance(filters, str):
        raise TypeError("filters must be a string")
    # Strings are immutable: the previous code called filters.replace(...)
    # and discarded the result, so spaces were never actually encoded.
    filters = filters.replace(' ', '%20')
    # Valid relationship suffixes.
    REL = ['startswith', 'endswith', 'exact', 'contains', 'range',
           'gt', 'lt', 'gte', 'lte', 'in']
    parts = filters.split('&')
    for fi in parts:
        if '=' not in fi:
            raise ValueError("Filter " + fi + " is invalid (no =)")
        if '__' not in fi:
            raise ValueError("Filter " + fi + " is invalid (no __)")
        match = fi.split('=')[0]
        relationship = match.split('__')[-1]
        if relationship not in REL:
            raise ValueError("Filter " + fi + " is invalid (" + relationship +
                             " is not a valid relationship)")
    # Always return a string (the previous code returned a one-element
    # LIST when a single filter was supplied, a str otherwise).
    return '&'.join(parts)
724bfbce98c734fe253bb2e77100fb8588f6a3da
667,304
import re def _get_summary(gh_issue): """ Return the JIRA summary corresponding to a given GitHub issue Format is: GH/PR #<gh issue number>: <github title without any JIRA slug> """ is_pr = "pull_request" in gh_issue result = "%s #%d: %s" % ("PR" if is_pr else "GH", gh_issue["number"], gh_issue["title"]) # don't mirror any existing JIRA slug-like pattern from GH title to JIRA summary # (note we don't look for a particular pattern as the JIRA issue may have moved) result = re.sub(r" \([\w]+-[\d]+\)", "", result) return result
0f0bd2286cfb9f982505157ea34e87fed01efee1
461,047
import requests
import re


def check_tfchain_synced(prefab, height_threeshold=10):
    """
    Check if the tfchain daemon is synced with the official testnet

    @param prefab: prefab of the TFT node
    @param height_threeshold: The max difference between the testnet
        explorer and the local node to be considered in sync

    Returns True only when the explorer is reachable, the local daemon
    reports "Synced: Yes", and the local height is within the threshold
    of the explorer height; False in every other case.

    NOTE(review): the parameter keeps the original 'threeshold' spelling
    for caller compatibility.
    """
    testnet_explorer = 'https://explorer.testnet.threefoldtoken.com/explorer'
    res = requests.get(testnet_explorer)
    if res.status_code == 200:
        expected_height = res.json()['height']
        # Ask the local daemon for its status; stderr is folded in because
        # tfchainc may print to either stream.
        _, out, err = prefab.core.run(cmd='tfchainc', showout=False)
        out = '{}\n{}'.format(out, err)
        # Extract the sync flag and the reported block height.
        match = re.search('^Synced:\s+(?P<synced>\w+)\n*.*\n*Height:\s*(?P<height>\d+)', out)
        if match:
            match_info = match.groupdict()
            if match_info['synced'] == 'Yes' and expected_height - int(match_info['height']) <= height_threeshold:
                return True
    return False
64171c78e2a6dff99de691a37635cbf1db5b2fa7
378,550
def calc_death_rate(confirmed, deaths):
    """
    Calculates the daily death rate in confirmed cases.

    :param confirmed: DataFrame of confirmed cases
    :param deaths: DataFrame of deaths
    :return: DataFrame of daily death rate (percent); cells where both
        inputs are zero (NaN after division) become 0.0
    """
    percentage = deaths / confirmed * 100
    return percentage.fillna(0.0)
61f6e445d52b4495c3d07cf115692620c047fc75
659,405
def _(arg): """Converts numpy floats to python floats""" return float(arg)
2da3c058bda920529ba14f2ad729388f95b90216
134,749
from typing import Dict
from typing import Any
from typing import Union


def get_mineral_name(mineral_dict: Dict[str, Any]) -> Union[str, None]:
    """Get the mineral name from a mineral dictionary.

    Args:
        mineral_dict: The mineral dictionary from the condensed description.

    Returns:
        If ``mineral_dict["type"]`` is set, the mineral name will be returned
        as a string (possibly with a "-derived" or "-like" suffix), else
        ``None`` will be returned.
    """
    mineral_type = mineral_dict['type']
    if not mineral_type:
        return None
    if not mineral_dict['n_species_type_match']:
        suffix = "-derived"
    elif mineral_dict['distance'] >= 0:
        suffix = "-like"
    else:
        suffix = ""
    return "{}{}".format(mineral_type, suffix)
98ebf7efa162bc68867c40483e01ad1eb8793715
352,939
def make_review(restaurant_name, rating):
    """Return a review data abstraction: a two-element [name, rating] list."""
    review = [restaurant_name, rating]
    return review
b1807917cf3ea790fb49617a5dd7a968353c22e2
500,486
import pickle


def load(filename):
    """
    Load and return the object pickled in *filename*.

    :param filename: path to a pickle file.
    :returns: the unpickled object.
    :raises Exception: with a descriptive message if the file cannot be
        read or unpickled; the underlying IOError/PickleError is attached
        as ``__cause__`` via exception chaining so it is not lost.
    """
    try:
        with open(filename, 'rb') as reader:
            return pickle.load(reader)
    except IOError as err:
        # Chain the original error so callers/logs see the real cause.
        raise Exception("Exception while reading the file %s." % filename) from err
    except pickle.PickleError as err:
        raise Exception("Exception while loading pickle.") from err
ce44596732ff73d024f80694e9e5b21ac9f278f0
552,116
def is_custom_session(session):
    """Return whether a ClientSession was created by pyatv.

    pyatv tags its own sessions with a private ``_pyatv`` attribute; its
    mere presence (regardless of value) is the marker.
    """
    marker = '_pyatv'
    return hasattr(session, marker)
741057221f80f0285b8b744e635d419dd9a0a38b
666,663
import gzip


def open_file_for_reading(filename):
    """Open *filename* for binary reading, gunzipping it if it ends with .gz
    (case-insensitive)."""
    opener = gzip.open if filename.lower().endswith(".gz") else open
    return opener(filename, "rb")
55036422c55de278bf94fea13074bbe336f96489
118,686
def select_and_rename(self, name_dict=None, **names):
    """
    Select and rename variables from this Dataset

    Parameters
    ----------
    name_dict, **names: dict
        The keys or keyword arguments give the current names of the
        variables that will be selected out of this Dataset. The values
        give the new names of the same variables in the resulting Dataset.

    Returns
    -------
    Dataset

    Notes
    -----
    The caller's ``name_dict`` is no longer mutated when keyword names are
    also supplied (the previous implementation called
    ``name_dict.update(names)`` in place).
    """
    if name_dict is None:
        mapping = dict(names)
    else:
        # Merge into a fresh dict so the caller's argument is untouched;
        # keyword names still win on key collisions, as before.
        mapping = {**name_dict, **names}
    return self[list(mapping.keys())].rename(mapping)
c7f33b1613b5037524aae0faf1a0af764e05a89a
558,515
def sklearn_model_2_file_name(result_dir):
    """Model file name for trained sklearn model (model #2) inside *result_dir*."""
    return f"{result_dir}/model_2.pkl"
4b397ef9d0169d4eb818e3a214849e10821fbcf6
334,315
import json
import six


def _get_manifest_body(context, prefix, path2info, put_headers):
    """
    Returns body for manifest file and modifies put_headers (and, for
    static segments, context.query) in place.

    path2info is a dict like {"path": (size, etag)}
    """
    if context.static_segments:
        # Static large object: the manifest body is a JSON listing of all
        # segments, sorted by path for a deterministic order.
        body = json.dumps([
            {'path': '/' + p, 'size_bytes': s, 'etag': e}
            for p, (s, e) in sorted(six.iteritems(path2info))
        ])
        put_headers['content-length'] = str(len(body))
        # Tell the server this PUT is an SLO manifest upload.
        context.query['multipart-manifest'] = 'put'
    else:
        # Dynamic large object: empty body; the x-object-manifest header
        # points at the segment prefix instead.
        body = ''
        put_headers['content-length'] = '0'
        put_headers['x-object-manifest'] = prefix
    return body
20e9a3d038d135e59d2bfa6767f3260e81c0a534
383,046
def flatten_results(results: dict, derivation_config: dict) -> dict:
    """Flatten and simplify the results dict into <metric>:<result> format.

    Args:
        results (dict): The benchmark results dict, containing all info
            such as reduction types too
        derivation_config (dict): The configuration defining how metrics
            are derived from logs; the "reduction_type" entry (default
            "mean") selects which reduced value to keep

    Returns:
        flat_results (dict): The flattened dict of all results in
            <metric>:<result> format
    """
    return {
        metric: reductions[derivation_config.get(metric, {}).get("reduction_type", "mean")]
        for metric, reductions in results.items()
    }
045c1e1d856c20b37d2d78d8dd6c3fd76cb91777
20,862
def comma_code(collection):
    """
    Take a sequence and return it as a string formatted using the Oxford
    comma (including the two-element case, e.g. "a, and b").

    :param collection: sequence of values to convert to a readable string
    """
    count = len(collection)
    if count == 0:
        return ''
    if count == 1:
        return str(collection[0])
    # str() each leading item explicitly so mixed-type sequences work.
    head = ', '.join(str(item) for item in collection[:-1])
    return f"{head}, and {collection[-1]}"
5953b6dbe09be8a5b9cb8f3d4e288f9160fdecb5
464,974
import re


def get_title(passenger: str) -> str:
    """Extracts the title (Mrs, Mr, Miss, Master) from the name variable.

    Returns "Other" when none of the known titles occurs in the name.
    """
    # Order matters: "Mrs" must be tested before "Mr" since the latter is
    # a substring of the former. Plain substring tests are equivalent to
    # re.search on these literal patterns.
    for title in ("Mrs", "Mr", "Miss", "Master"):
        if title in passenger:
            return title
    return "Other"
8928d546e8cdc01a8a9330435c4e22ac0ecb8970
574,861
def population_filter(neighbors, min_population):
    """Drop key-index pairs with insufficient populations.

    Parameters
    ----------
    neighbors : dict
        Key-index mappings.
    min_population : int
        Minimum number of items to keep, inclusive.

    Returns
    -------
    filt_neighbors : dict
        Population-filtered key-index mappings.
    """
    return {key: members
            for key, members in neighbors.items()
            if len(members) >= min_population}
b03803124164b2b65248c21e6426c0be4bfee7d4
287,393
import string
import random


def random_id(length=4):
    """Generate a case-sensitive random alphanumeric string of *length* chars.

    Not cryptographically secure (uses `random`, not `secrets`).
    """
    alphabet = string.ascii_letters + string.digits
    picks = (random.choice(alphabet) for _ in range(length))
    return ''.join(picks)
83566515661e768087e74c75fd7e5954d9b7c023
396,134
def uuid_prefix_len(uuids, step=4, maxlen=32):
    """Get smallest multiple of `step` len prefix that gives unique values.

    The algorithm is not fancy, but good enough: build *sets* of the ids at
    increasing prefix lengths until the set has all ids (no duplicates).
    Experimentally this takes ~.1ms for 1000 duplicate ids (the worst case).
    """
    distinct = len(set(uuids))
    for width in range(step, maxlen, step):
        if len({u[:width] for u in uuids}) == distinct:
            return width
    return maxlen
bea7e69cbd8cfa8b900df4475b0c60dc3874024b
378,562
import yaml


def extract_dict(yaml_filename, validate_only=False):
    """
    Args:
        yaml_filename: A YAML file.
        validate_only: If True, returns only a boolean validation flag.

    Returns:
        The nested dictionary represented by the YAML source, or None if
        parsing failed. When validate_only is True, a bool is always
        returned (True only for a parseable, non-empty document).
    """
    data = None
    with open(yaml_filename, 'rt') as file:
        try:
            data = yaml.load(file, Loader=yaml.FullLoader)
        # Broad on purpose: any parser failure means "invalid" here.
        except Exception:
            if validate_only:
                return False
    if validate_only:
        # Previously an empty/None document leaked None instead of a bool.
        return data is not None
    return data
c435df7f826b96fd29407441e8a84cb0eb76f8e5
425,661
import math


def munsell_value_mccamy1987(Y):
    """
    Returns the *Munsell* value :math:`V` of given *luminance* :math:`Y`
    using *McCamy (1987)* method.

    Parameters
    ----------
    Y : numeric
        *luminance* :math:`Y`.

    Returns
    -------
    numeric
        *Munsell* value :math:`V`.

    Notes
    -----
    -   Input *Y* is in domain [0, 100].
    -   Output *V* is in range [0, 10].

    References
    ----------
    .. [8] `Standard Test Method for Specifying Color by the Munsell System -
            ASTM-D1535-1989
            <https://law.resource.org/pub/us/cfr/ibr/003/astm.d1535.1989.pdf>`_,  # noqa
            DOI: http://dx.doi.org/10.1520/D1535-13

    Examples
    --------
    >>> munsell_value_mccamy1987(10.08)  # doctest: +ELLIPSIS
    3.7347235...
    """
    # Dark colours use a simple power law; brighter colours use McCamy's
    # empirical polynomial-plus-sinusoid correction terms. The constants
    # are taken verbatim from the ASTM D1535 fit — do not "simplify".
    if Y <= 0.9:
        V = 0.87445 * (Y ** 0.9967)
    else:
        V = (2.49268 * (Y ** (1 / 3)) - 1.5614 -
             (0.985 / (((0.1073 * Y - 3.084) ** 2) + 7.54)) +
             (0.0133 / (Y ** 2.3)) +
             0.0084 * math.sin(4.1 * (Y ** (1 / 3)) + 1) +
             (0.0221 / Y) * math.sin(0.39 * (Y - 2)) -
             (0.0037 / (0.44 * Y)) * math.sin(1.28 * (Y - 0.53)))
    return V
233e6e9a70699e40e23697457803f6af5c168b83
403,063
def _create_json(name, description, type, searchBaseDN, searchFilter, searchTimeout, serverConnection, cacheSize, cacheLifetime, attributeName, attributeSelector): """ Create a JSON to be used for the REST API call """ json = { "name": name, "description": description, "type": "LDAP", "properties": [ {"key": "searchBaseDN", "value": searchBaseDN, "datatype": "String", "sensitive": False, "readOnly": False}, {"key": "searchFilter", "value": searchFilter, "datatype": "String", "sensitive": False, "readOnly": False}, {"key": "searchTimeout", "value": searchTimeout, "datatype": "Integer", "sensitive": False, "readOnly": False}, {"key": "dataSource", "value": serverConnection, "datatype": "String", "sensitive": False, "readOnly": False}, {"key": "cacheSize", "value": cacheSize, "datatype": "Integer", "sensitive": False, "readOnly": False}, {"key": "cacheLifetime", "value": cacheLifetime, "datatype": "Integer", "sensitive": False, "readOnly": False} ], "attributes": [ {"name": attributeName, "selector": attributeSelector} ] } return json
3474567527055afc6863cfc335e221e0fb103e37
330,463
import logging


def add_file_log_handler(formatter, log_file, level=logging.INFO):
    """Create and return a FileHandler that writes to *log_file* (truncating
    it) at the given logging level, using *formatter*."""
    handler = logging.FileHandler(log_file, mode='w')
    handler.setLevel(level)
    handler.setFormatter(formatter)
    return handler
a1718809800a25705a0b36faa05a91454dbb10a3
502,067
def pump(type_):
    """
    Feed the decorated generator function into *type_* until exhaustion.

    Can be overridden per call by passing ``exhaust=False``, which returns
    the raw generator instead.
    """
    def decorate(gen_func):
        def wrapper(*args, **kwargs):
            exhaust = kwargs.pop("exhaust", True)
            produced = gen_func(*args, **kwargs)
            return type_(produced) if exhaust else produced
        return wrapper
    return decorate
07a0247c49ace070106395a96b90870f9957275e
524,634
def make_index(it):
    """Create a dictionary mapping each element of an iterable to its index
    position (later duplicates overwrite earlier ones)."""
    return {element: position for position, element in enumerate(it)}
38efe39f69cc026aafa40107deff944c33afb8e1
141,543
import re


def pad_numbers(string):
    """Modify a string to make its numbers suitable for natural sorting by
    left-padding every digit run to 16 characters with zeros."""
    def _pad(match):
        # zfill == rjust(16, '0') here since the match is digits only.
        return match.group(0).zfill(16)
    return re.sub(r'\d+', _pad, string)
6c19c9d2691de87600d334bb42d39f95fbcf8669
320,920
from typing import List
from typing import Any


def read_keyword_list(keywords: List[Any], list_length: int) -> List[Any]:
    """Remove the first *list_length* tokens from *keywords* and return them
    as a separate list.

    The tokens are popped one at a time (not sliced) so that the caller's
    original list object is mutated in place.
    """
    return [keywords.pop(0) for _ in range(list_length)]
d9250fc3960cce0491a7b3d1861d0d116ec31ceb
642,690
def update_smag_metadata(col_name):
    """Update SuperMAG metadata

    Parameters
    -----------
    col_name : (str)
        Data column name

    Returns
    --------
    col_dict : (dict)
        Dictionary of strings detailing the units and long-form name of
        the data, plus the SuperMAG acknowledgement text

    Raises
    -------
    KeyError
        If *col_name* is not a known SuperMAG column
    """
    smag_units = {'IAGA': 'none', 'N': 'nT', 'E': 'nT', 'Z': 'nT',
                  'MLT': 'hours', 'MLAT': 'degrees', 'SZA': 'degrees',
                  'IGRF_DECL': 'degrees', 'SMU': 'none', 'SML': 'none',
                  'datetime': 'YYYY-MM-DD HH:MM:SS', 'GEOLON': 'degrees',
                  'GEOLAT': 'degrees', 'AACGMLON': 'degrees',
                  'AACGMLAT': 'degrees', 'STATION_NAME': 'none',
                  'OPERATOR_NUM': 'none', 'OPERATORS': 'none'}
    smag_name = {'IAGA': 'Station Code',
                 'N': 'B along local magnetic North',
                 'E': 'B along local magnetic East',
                 'Z': 'B vertically downward',
                 'MLT': 'Magnetic Local Time',
                 'MLAT': 'Magnetic Latitude',
                 'SZA': 'Solar Zenith Angle',
                 'IGRF_DECL': 'IGRF magnetic declination',
                 'SMU': ' '.join(['Maximum eastward auroral electrojets',
                                  'strength.\nUpper envelope of N-component',
                                  # BUG FIX: the original was missing the comma
                                  # after 'degrees', so implicit concatenation
                                  # produced "degreesmagnetic north."
                                  'for stations between 40 and 80 degrees',
                                  'magnetic north.']),
                 'SML': ' '.join(['Maximum westward auroral electrojets',
                                  'strength.\nLower envelope of N-component',
                                  'for stations between 40 and 80 degrees',
                                  'magnetic north.']),
                 'datetime': 'UT date and time',
                 'GEOLON': 'geographic longitude',
                 'GEOLAT': 'geographic latitude',
                 'AACGMLON': ' '.join(['Altitude-Adjusted Corrected',
                                       'Geomagnetic longitude']),
                 'AACGMLAT': ' '.join(['Altitude-Adjusted Corrected',
                                       'Geomagnetic latitude']),
                 'STATION_NAME': 'Long form station name',
                 'OPERATOR_NUM': 'Number of station operators',
                 'OPERATORS': 'Station operator name(s)', }

    ackn = "When using this data please include the following reference:\n"
    ackn += "Gjerloev, J. W., The SuperMAG data processing technique, "
    ackn += "Geophys. Res., 117, A09213, doi:10.1029/2012JA017683, 2012\n\n"
    ackn += "For publications and presentations, please include the following"
    ackn += "acknowledgement:\nFor the ground magnetometer data we gratefully "
    ackn += "acknowledge: Intermagnet; USGS, Jeffrey J. Love; CARISMA, PI Ian "
    ackn += "Mann; CANMOS; The S-RAMP Database, PI K. Yumoto and Dr. K. "
    ackn += "Shiokawa; The SPIDR database; AARI, PI Oleg Troshichev; The "
    ackn += "MACCS program, PI M. Engebretson, Geomagnetism Unit of the "
    ackn += "Geological Survey of Canada; GIMA; MEASURE, UCLA IGPP and Florida"
    ackn += " Institute of Technology; SAMBA, PI Eftyhia Zesta; 210 Chain, PI "
    ackn += "K. Yumoto; SAMNET, PI Farideh Honary; The institutes who maintain"
    ackn += " the IMAGE magnetometer array, PI Eija Tanskanen; PENGUIN; "
    ackn += "AUTUMN, PI Martin Connors; DTU Space, PI Dr. Rico Behlke; South "
    ackn += "Pole and McMurdo Magnetometer, PI's Louis J. Lanzarotti and Alan "
    ackn += "T. Weatherwax; ICESTAR; RAPIDMAG; PENGUIn; British Artarctic "
    ackn += "Survey; McMac, PI Dr. Peter Chi; BGS, PI Dr. Susan Macmillan; "
    ackn += "Pushkov Institute of Terrestrial Magnetism, Ionosphere and Radio "
    ackn += "Wave Propagation (IZMIRAN); GFZ, PI Dr. Juergen Matzka; MFGI, PI "
    ackn += "B. Heilig; IGFPAS, PI J. Reda; University of L’Aquila, PI M. "
    ackn += "Vellante; BCMT, V. Lesur and A. Chambodut; Data obtained in "
    ackn += "cooperation with Geoscience Australia, PI Marina Costelloe; "
    ackn += "SuperMAG, PI Jesper W. Gjerloev."

    col_dict = {'units': smag_units[col_name],
                'long_name': smag_name[col_name],
                'acknowledgements': ackn}

    return col_dict
8d39c0e1f9a719b4e80d72a612fc351be7cfe4d4
355,927
def transpose_pairs(tuple_list):
    """Swap each pair in the list: (a, b) -> (b, a).

    :param tuple_list: iterable of 2-tuples (or pairs)
    :return: list of swapped tuples; [] for empty input (the original
        zip/unpack implementation raised ValueError on an empty list)
    """
    return [(b, a) for a, b in tuple_list]
e7dd1dbfa8e1c73e84a88dbec44b08e86ff6c538
590,369
from typing import Union


def val_mb(valstr: Union[int, str]) -> str:
    """Convert a byte count (int or numeric string) to megabytes.

    Returns the value formatted to three decimal places, or '?' when the
    input cannot be interpreted as an integer.
    """
    try:
        megabytes = int(valstr) / (1024 * 1024)
    except (TypeError, ValueError):
        return '?'
    return "{:.3f}".format(megabytes)
b4dfd67876e696e46727d1b66ca6f41cf9bf3829
231,738
def alpha_to_index(char):
    """Map a single letter to its 0-based alphabet position (A=0 ... Z=25).

    Case-insensitive; raises KeyError for non-letter input, matching the
    original lookup-table behaviour.
    """
    table = {letter: index
             for index, letter in enumerate("ABCDEFGHIJKLMNOPQRSTUVWXYZ")}
    return table[char.upper()]
83bcc8f6ed93538ad90fe72cdf51e462f6b3c77e
393,808
from typing import Union from typing import List from typing import Dict import time import json import requests def download(url: str) -> Union[List, Dict]: """ Download data from SRC API, with throttling """ print('[tools.py::download] Fetching', url) time.sleep(1) headers = {'user-agent': 'akleemans-gameboy-wr-bot/2.0'} content = json.loads(requests.get(url, headers=headers).text) data = content['data'] return data
c2ec21c2cd3e9f33ab113c9083ba394b4f018eb7
94,298
def get_max_sheet(book):
    """Return the sheet in an xlrd workbook with the most data rows.

    :param book: an xlrd book object
    :return: the xlrd sheet object with the greatest ``nrows``
    """
    row_counts = [sheet.nrows for sheet in book.sheets()]
    # index() returns the first maximum, so ties resolve to the earliest sheet.
    winner = row_counts.index(max(row_counts))
    return book.sheet_by_index(winner)
ceb6a7b5b4a8592a22f9a707e1f098504069f98f
214,150
import torch


def pr(x, y):
    """Confusion-matrix metrics (https://en.wikipedia.org/wiki/Confusion_matrix).

    *x* is the prediction tensor, *y* the target tensor, with classes 0/1.
    Returns (precision, recall, specificity, f1) as float32 tensors.
    """
    correct = x == y
    predicted_pos = x == 1
    predicted_neg = x == 0
    tp = (correct & predicted_pos).sum().to(torch.float32)
    tn = (correct & predicted_neg).sum().to(torch.float32)
    fp = (~correct & predicted_pos).sum().to(torch.float32)
    fn = (~correct & predicted_neg).sum().to(torch.float32)
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    specificity = tn / (tn + fp)
    f1 = (2 * tp) / (2 * tp + fp + fn)
    return precision, recall, specificity, f1
b2095585e3283b8c301c992ea241158c612b4d3b
27,719
def load_statistics(log_dir, statistics_file_name):
    """
    Loads the statistics in a dictionary.

    :param log_dir: The directory in which the log is saved
    :param statistics_file_name: The name of the statistics file
    :return: A dict with the statistics
    """
    data_dict = dict()
    with open("{}/{}.csv".format(log_dir, statistics_file_name), 'r') as f:
        lines = f.readlines()
        # First line is the CSV header: strip newline characters and split
        # into the column labels that become the dict keys.
        data_labels = lines[0].replace("\n", "").replace("\r", "").split(",")
        del lines[0]
        for label in data_labels:
            data_dict[label] = []
        for line in lines:
            data = line.replace("\n", "").replace("\r", "").split(",")
            for key, item in zip(data_labels, data):
                # NOTE(review): any value equal to one of the header labels is
                # silently skipped — presumably to drop repeated header rows
                # mid-file, but it also drops legitimate data that happens to
                # match a label name. Confirm intent before changing.
                if item not in data_labels:
                    data_dict[key].append(item)
    return data_dict
df1a51db9112d0aa65ff9a2b41c2d2420bb2b353
300,291
def vertical_flip(img):
    """Flip the image by reversing its third axis.

    NOTE(review): for a channels-first (C, H, W) array this reverses the
    width axis; the "vertical" naming assumes a layout where axis 2 is
    vertical — confirm against callers.
    """
    flipped = img[:, :, ::-1]
    return flipped
d04f2175b106cd036cf5eb6701779782de8f1c8b
431,000
def query_dict(data, query_string, ret_default=None):
    """
    Drill down into a nested dict using dot notation, e.g.
    query_dict({'this': {'that': 'other'}}, 'this.that') -> 'other'.

    Fixes over the original: no pointless shallow copy of *data*, and a
    path that hits a non-dict intermediate value now returns *ret_default*
    instead of raising AttributeError.

    :param dict data: dictionary to search (never mutated)
    :param str query_string: dot-separated key path
    :param ret_default: value returned when the path is missing
    """
    current = data
    for key in query_string.split('.'):
        # Guard: drilling into a leaf (non-mapping) means the path is invalid.
        if not isinstance(current, dict):
            return ret_default
        value = current.get(key)
        if value is None:
            return ret_default
        current = value
    return current
d4408c22e62372f0203579b07890f980b2bbe880
550,927
def raman_and_roa(values, *args, **kwargs):
    """Identity intensity function: return *values* unchanged.

    Extra positional/keyword arguments are accepted and ignored so the
    signature stays consistent with other intensity calculators.

    Parameters
    ----------
    values: numpy.ndarray

    Returns
    -------
    numpy.ndarray
        The *values* argument, unmodified.
    """
    del args, kwargs  # intentionally unused
    return values
252bdce9e302e1d6c61575e74839867191c77db1
393,157
def hash_matches(doc, hash):
    """Return True when *doc*'s stored '_input_hash' equals *hash*.

    (The parameter name ``hash`` shadows the builtin but is kept for
    caller compatibility.)
    """
    stored = doc["_input_hash"]
    return stored == hash
f8068a06d1e4162bb07a19e6e8d6276420c74d28
413,352
import copy


def poly_diff(poly):
    """Differentiate a polynomial given as [[coefficient, exponent], ...].

    The input list is left untouched; a deep copy is mutated term-by-term
    (coefficient *= exponent, exponent -= 1) and returned.
    """
    result = copy.deepcopy(poly)
    for coeff_exp in result:
        coeff_exp[0] = coeff_exp[0] * coeff_exp[1]
        coeff_exp[1] = coeff_exp[1] - 1
    return result
79b0319edb1fda28110b643bc79d4772740371f5
467,386
def _nested_multiquestion_required(question):
    """Returns a list of required nested properties for the multiquestion.

    `.required_form_fields` returns the top-level schema property name for the
    nested multiquestion, since that's the part that's influenced by the
    question's `optional` flag and it returns the key we need to add to the
    top-level schema's 'required' list.

    Nested properties are always required if the nested question is mandatory,
    even when multiquestion itself is optional, since they're part of the
    nested schema.
    """
    required = []
    followups = []
    # Followup questions don't need to be in the 'required' properties list since even
    # non-optional questions don't always have to be present if they're a followup.
    # Both the question itself and the followups are covered by the oneOf subschemas.
    for nested_question in question['questions']:
        # NOTE(review): nested_question supports both attribute access
        # (.required_form_fields, .get) and item access (['followup']) —
        # presumably a dict-like question class defined elsewhere; confirm.
        required.extend(nested_question.required_form_fields)
        if nested_question.get('followup'):
            followups.extend(nested_question['followup'].keys())
    # Deduplicate and drop followup keys; sorted for deterministic schema output.
    return sorted(set(required) - set(followups))
3e128c8637e6514e500d94794e2c76cd9068cba9
195,955
def run_program(program: list) -> int:
    """Execute an Intcode program (opcodes: 1=add, 2=multiply, 99=halt).

    The program is mutated in place; instructions are four ints wide:
    opcode, two operand positions, and a destination position.

    :param program: Intcode program as a flat list of ints
    :return: the value left at position 0 after halting
    :raises ValueError: on any opcode other than 1, 2 or 99
    """
    ip = 0  # instruction pointer, advances 4 per instruction
    while True:
        opcode = program[ip]
        if opcode == 99:
            break
        src_a, src_b, dest = program[ip + 1:ip + 4]
        if opcode == 1:
            program[dest] = program[src_a] + program[src_b]
        elif opcode == 2:
            program[dest] = program[src_a] * program[src_b]
        else:
            raise ValueError('Invalid opcode!')
        ip += 4
    return program[0]
7aa950fcecf6cac1115092c8c2e74888cd711108
559,938
import io


def image_to_bytes(image):
    """Serialize a PIL image to PNG-encoded bytes.

    :param image: PIL.Image
    :return: bytes of the PNG encoding
    """
    buffer = io.BytesIO()
    image.save(buffer, format='PNG')
    return buffer.getvalue()
b142fa512b3e7b7719772553b79eee01b33f40d1
72,706
def PyDateTime_TIME_GET_FOLD(space, w_obj):
    """Return the 'fold' attribute (0 or 1) of a wrapped time object.

    Reads the attribute through the object-space API and unwraps it to a
    plain int.
    """
    w_fold = space.getattr(w_obj, space.newtext("fold"))
    return space.int_w(w_fold)
aa46ad9e6163d7ade3da3269913b34fc0e419962
593,545
def update_dict_params_for_calibration(params):
    """
    Copy flat calibration parameters back into their nested locations.

    During calibration some nested values are exposed as flat keys (e.g.
    'compartment_periods_incubation'); this writes each flat value that is
    present back into the nested structure it belongs to.

    :param params: dict containing the model parameters (mutated in place)
    :return: the same, updated dictionary
    """
    if "n_imported_cases_final" in params:
        params["data"]["n_imported_cases"][-1] = params["n_imported_cases_final"]

    for location in ("school", "work", "home", "other_locations"):
        flat_key = "npi_effectiveness_" + location
        if flat_key in params:
            params["npi_effectiveness"][location] = params[flat_key]

    comp_types = (
        "incubation", "infectious", "late", "hospital_early",
        "hospital_late", "icu_early", "icu_late",
    )
    for comp_type in comp_types:
        flat_key = "compartment_periods_" + comp_type
        if flat_key in params:
            params["compartment_periods"][comp_type] = params[flat_key]

    return params
8aaf9cb030076adfddb7c8d5740a2c8cc5c21c06
8,851
def choices_help_text(choices):
    """Render choice pairs as help text, one 'value: label' entry per line.

    Args:
        choices: list of tuples (value, label)

    Returns:
        newline-joined text in the form:
            enum1: label1
            enum2: label2
    """
    lines = ['{}: {}'.format(choice[0], choice[1]) for choice in choices]
    return '\n'.join(lines)
67fd67f9e2af7d9ad96de4cea057ac2f992539b7
638,602
def ToHex(val):
    """Format an integer as a '0x'-prefixed hex string; None becomes 'None'."""
    if val is None:
        return 'None'
    return '%#x' % val
c85c90f8d3b67e06acc54681dc74bf6240682c91
273,863
def load_vocabulary(fn):
    """Read file *fn* and return the set of its whitespace-stripped lines."""
    with open(fn) as f:
        return {line.strip() for line in f}
45d7938177dbb3d0770e0d494cf5224a15f74c64
96,521
def inc(x):
    """Return *x* incremented by one."""
    result = x + 1
    return result
23b0c854dc3307fe6411174e9b9da39b0f0988d3
58,440
def _dfs_in(atom): """Traverse an Atom's incoming neighborhood iteratively with a depth-first search.""" atoms = [atom] stack = list(atom.incoming) while stack: atom = stack.pop(0) atoms.append(atom) stack[0:0] = atom.incoming return atoms
8e3557def098bdedea2507f298d781a3b51f7d3b
98,031
def get_camelcase_name_chunks(name):
    """
    Split a camelCase name into its lowercase parts.
    E.g: maxCount -> ["max", "count"]
    """
    chunks = [""]
    for ch in name:
        if ch.isupper():
            # An uppercase letter starts a new chunk, unless the current
            # chunk is still empty (e.g. a leading capital).
            if chunks[-1]:
                chunks.append(ch.lower())
            else:
                chunks[-1] = ch.lower()
        else:
            chunks[-1] += ch
    return chunks
134a8b1d98af35f185b37c999fbf499d18bf76c5
709,760
def globs(test):
    """
    Return the globals for *test*: its ``globs`` attribute when it is a
    `doctest`, falling back to ``__dict__`` for a regular
    `unittest.TestCase`.
    """
    if hasattr(test, "globs"):
        return test.globs
    return test.__dict__
ada66843770fe53f3a6880440b39121bf93f437a
505,272
from typing import Any


def overlaps(lhs: Any, rhs: Any) -> bool:
    """True when the [start, end) spans of *lhs* and *rhs* intersect.

    Touching spans (one's end equal to the other's start) do not overlap.
    """
    disjoint = lhs.end <= rhs.start or rhs.end <= lhs.start
    return not disjoint
a2b8e2ee71794c6627305118c65fe2de4dc141d1
130,783
from typing import List


def preprocess_list_query(queries: List[str]):
    """Join stripped query strings into the comma-separated form the REST API expects."""
    return ",".join(query.strip() for query in queries)
f80b86fd6c046f2557fffc859e81d1ea869c5bb5
116,702
def get_name(s, isconj=False):
    """Parse variable names of the form 'var_' as 'var' plus a conjugation flag.

    A trailing underscore marks conjugation. With isconj=True the return is
    a (name, is_conjugated) tuple; otherwise just the name. Non-string
    inputs are stringified and never considered conjugated.
    """
    if isinstance(s, str):
        base, conj = s.rstrip("_"), s.endswith("_")
    else:
        base, conj = str(s), False
    return (base, conj) if isconj else base
9648a8b206a107f91784bd2caea1f7b9d312c5e3
668,400
def as_path(*args: str) -> str:
    """Join the arguments into a lowercase '/'-separated path-like string.

    None arguments are skipped; all others are stringified, stripped and
    lowercased.

    :param args: variable-length sequence of path segments
    :return: joined path string
    """
    segments = [str(arg).strip().lower() for arg in args if arg is not None]
    return "/".join(segments)
b58d0b06f57ec93549431c23be00498aaf3bf5e4
614,334
import html


def unescape_html(s: str):
    """Decode HTML character entities in *s*.

    >>> unescape_html("&lt")
    '<'
    """
    decoded = html.unescape(s)
    return decoded
9fcdcf54ad09674ae482c081ab65a74dade1864d
237,318
def distance(x1, x2):
    """Euclidean distance between two 3D points.

    Kept free of imports (no math module) so it stays numba-friendly,
    like the original.

    Parameters
    ----------
    x1 : float
      3D position of first point of the form [x,y,z]
    x2 : float
      3D position of first point of the form [x,y,z]

    Returns
    -------
    distance : float
      distance between the two points
    """
    total = 0.0
    for axis in range(3):
        delta = x2[axis] - x1[axis]
        total += delta * delta
    return total ** 0.5
1596319d6929cd76e805c9fcd73dbaf26d35f763
445,155
def is_lambda(function):
    """Return True when *function* is a lambda function.

    Checks both the runtime type and the telltale ``<lambda>`` name, so a
    plain ``def`` function (same type under the hood) is rejected.

    >>> is_lambda(lambda value: value**2)
    True
    >>> is_lambda("hello world")
    False
    """
    reference = lambda: None
    if not isinstance(function, type(reference)):
        return False
    return function.__name__ == "<lambda>"
171d337c4827111a8d2ef4c678ae96ccf9b6360b
461,697
def links_for_distillations(num_distillations):
    """Number of entangled links needed for a run of distillation operations.

    Each distillation consumes one additional link, so *n* distillations
    need n + 1 links.

    :param num_distillations: int, number of distillations to perform
    :return: int, number of links required
    """
    return 1 + num_distillations
fd219d67cc9a2f7df5869c90ef8f7dc334accd69
324,647
def cast_contours(contours, x, y):
    """Shift OpenCV-style contour coordinates by (x, y), in place.

    Parameters
    ----------
    contours : list or ndarray
        Contour(s) of shape (N, 1, 2) — column 0 holds x, column 1 holds y.
    x : int
        Offset added to every x coordinate.
    y : int
        Offset added to every y coordinate.

    Returns
    -------
    contours : list of ndarray
        The (mutated) input, shifted to the new coordinates.
    """
    def _shift(contour):
        contour[:, 0, 0] += x
        contour[:, 0, 1] += y
        return contour

    if isinstance(contours, list):
        for index, contour in enumerate(contours):
            contours[index] = _shift(contour)
    else:
        _shift(contours)
    return contours
cba16109552fb857211671f52908305c93bc8667
242,048
import requests


def exec_request(url, timeout):
    """GET *url* with the given *timeout* and return the decoded JSON body.

    Non-2xx responses are surfaced as requests.HTTPError via
    raise_for_status().
    """
    response = requests.get(url, timeout=timeout)
    response.raise_for_status()  # raise on failed status instead of returning junk
    return response.json()
63522420fe36e0e38c41c42b43a65208354bee1e
671,270
from typing import Dict


def prepare_top_level_args(d: Dict) -> Dict:
    """
    Parse top-level configuration.

    Parameters
    ----------
    d: Dict
        The configuration section from the transform/merge YAML

    Returns
    -------
    Dict
        A parsed dictionary with parameters from configuration
    """
    args = {}
    # Keys below deliberately use two different presence tests, matching the
    # original: some fall back on any falsy value, others only on None.
    checkpoint = d.get('checkpoint')
    args['checkpoint'] = checkpoint if checkpoint is not None else False

    args['node_property_predicates'] = (
        set(d['node_property_predicates'])
        if d.get('node_property_predicates') else set()
    )
    args['predicate_mappings'] = (
        d['predicate_mappings'] if d.get('predicate_mappings') else {}
    )
    args['prefix_map'] = d['prefix_map'] if d.get('prefix_map') else {}

    reverse_prefix_map = d.get('reverse_prefix_map')
    args['reverse_prefix_map'] = (
        reverse_prefix_map if reverse_prefix_map is not None else {}
    )
    reverse_predicate_mappings = d.get('reverse_predicate_mappings')
    args['reverse_predicate_mappings'] = (
        reverse_predicate_mappings
        if reverse_predicate_mappings is not None else {}
    )

    args['property_types'] = d['property_types'] if d.get('property_types') else {}
    return args
210540fe5200539fdccc3fea1c9cb4ac37beaa88
497,097
def freq_str_and_bar_count(history_spec):
    """Extract (frequency string, bar count) from a history spec."""
    freq_str = history_spec.frequency.freq_str
    bar_count = history_spec.bar_count
    return freq_str, bar_count
39476d54a4828a09a88707ef9128b52ff9ed34cf
538,050
def _find_comment_index(line):
    """ Finds the index of a ; denoting a comment. Ignores escaped semicolons and semicolons inside quotes

    Returns the index of the first unquoted, unescaped ';' in *line*,
    or -1 when there is none.

    State machine notes:
      - `escape` is set by a backslash and consumed by the next '"' or ';'
        (any other character clears it).
      - `quote` toggles on each unescaped '"'; an escaped quote leaves the
        quote state unchanged.
      - NOTE(review): a backslash always sets `escape`, even inside quotes,
        and two consecutive backslashes leave `escape` True — so r'\\;' is
        treated as an escaped semicolon. Confirm that is intended.
    """
    escape = False
    quote = False
    for i, char in enumerate(line):
        if char == '\\':
            escape = True
            continue
        elif char == '"':
            if escape:
                # Escaped quote: consume the escape, keep quote state as-is.
                escape = False
                continue
            else:
                quote = not quote
        elif char == ';':
            if quote:
                # Semicolon inside a quoted section: not a comment.
                escape = False
                continue
            elif escape:
                # Escaped semicolon: not a comment.
                escape = False
                continue
            else:
                # comment denoting semicolon found
                return i
        else:
            escape = False
            continue
    # no unquoted, unescaped ; found
    return -1
fe09d041f67135304601d3c87a0926cfa4cf14dc
400,878