Columns: content (string, length 39–9.28k), sha1 (string, length 40), id (int64, 8–710k)
def print_args(args):
    """Convenience function for printing the current value of the input arguments
    to the command line.
    ----------------------------------------------------------------------------
    Args:
        args: argparse object returned by ArgumentParser.parse_args()

    Returns:
        None
    """
    print("Current input flags...")
    for arg in vars(args):
        print("\t%s : %s" % (arg, getattr(args, arg)))
    return None
18548ce7ad9f8683cbebf7bbda5198c97ea5dfd9
694,184
def find_col_index_with_name(name, trained_schema):
    """
    Finds the index of the column with name 'name' and returns the index.

    :param name: name of the column to look for
    :type name: str
    :param trained_schema:
    :type trained_schema: List(dict)
    :return: index of the element in trained_schema that has the given name
    :rtype: int
    """
    for i in range(len(trained_schema)):
        if trained_schema[i]['name'] == name:
            return i
    return None
66bc4c9d3f78ba6bf16a03854909787eaad21171
669,561
def always_true(*args, **kwargs):  # pylint: disable=unused-argument
    """ Returns ``True`` whatever the arguments are. """
    return True
6f755e48a482dba4a3cccc8dad92cb6fbb610a1b
19,616
def load_file(filename: str) -> list:
    """Load the submarine commands from a file

    :param filename: Location of the input file
    :return: List of commands
    """
    with open(filename) as f:
        commands = f.readlines()
    commands = [c.split() for c in commands]
    return commands
33d853a5950816ef98b5d57cbd7b9de84aed1cfa
305,610
def isValidArch(sArch):
    """ Validates the CPU architecture name. """
    if sArch in ('x86', 'amd64', 'sparc32', 'sparc64', 's390', 's390x', 'ppc32', 'ppc64',
                 'mips32', 'mips64', 'ia64', 'hppa32', 'hppa64', 'arm', 'alpha'):
        return True
    return False
52c16150cff896902462bef19384909f341ab784
450,072
def int32_unpack(var, buff):
    """
    Unpack int32.

    :param var: variable name, ``str``
    :returns: struct unpacking code for an int32
    """
    return '(%s,) = _struct_I.unpack(%s)' % (var, buff)
e88935db559ffc139ad72fa77902d4c3c29d584d
427,820
def actual_svg(pathname: str) -> str:
    """Read SVG image from disk."""
    with open(pathname, "r") as file:
        svg = file.read()
    return svg
ba7ae52d3bdbae1d3112a183de2f484f4bcc066d
20,073
def f1_score(tp, fp, tn, fn):
    """
    Computes F1-score, see http://en.wikipedia.org/wiki/F1_score

    :param tp: True positives (TP)
    :param fp: False positives (FP)
    :param tn: True negatives (TN)
    :param fn: False negatives (FN)
    :return: F1-score in [0,1]
    """
    return 2*tp/float(2*tp + fn + fp)
ad4716db78c16479ed2df17ce5bf07dc76b1b50b
91,409
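A quick illustrative check of f1_score above; the counts are made up for the example (the true negatives do not enter the formula):

# Hypothetical counts: 8 TP, 2 FP, 2 FN
print(f1_score(tp=8, fp=2, tn=5, fn=2))  # 2*8 / (2*8 + 2 + 2) = 0.8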
def unique_word_count(string):
    """ Returns the count of unique words in a string. """
    words = string.split(" ")
    unique_words = set(words)
    return len(unique_words)
dad9ef98a60702b695b5a0b9674cd2fc5f6edd53
174,169
import re


def contentfilter(fsname, pattern):
    """
    Filter files which contain the given expression

    :arg fsname: Filename to scan for lines matching a pattern
    :arg pattern: Pattern to look for inside of line
    :rtype: bool
    :returns: True if one of the lines in fsname matches the pattern. Otherwise False
    """
    if pattern is None:
        return True
    prog = re.compile(pattern)
    try:
        with open(fsname) as f:
            for line in f:
                if prog.match(line):
                    return True
    except Exception:
        pass
    return False
ea30d6af9df3adc0986d9eaed9913f1160bd24d3
26,094
def dictionary2list(element, dictionary):
    """ Converts dictionary to list, prepending element """
    # dict.items() replaces the Python 2-only iteritems() call
    return [(element, i[0], i[1]) for i in dictionary.items()]
0c901100583b8a4990c31d9321dc71632392feb3
607,496
from typing import Dict


def merge_hooks(hooks1: Dict[str, list], hooks2: Dict[str, list]) -> Dict[str, list]:
    """
    Overview:
        merge two hooks, which have the same keys; each value is sorted by hook priority
        with a stable method
    Arguments:
        - hooks1 (:obj:`dict`): hooks1 to be merged
        - hooks2 (:obj:`dict`): hooks2 to be merged
    Returns:
        - new_hooks (:obj:`dict`): merged new hooks

    .. note::
        This merge function uses a stable sort method without disturbing hooks of the same priority
    """
    assert set(hooks1.keys()) == set(hooks2.keys())
    new_hooks = {}
    for k in hooks1.keys():
        new_hooks[k] = sorted(hooks1[k] + hooks2[k], key=lambda x: x.priority)
    return new_hooks
add5ae72917ca9aff109e8ac86a4d6902c14b298
5,614
def get_job_tasks(rank, ranks, tasks_tot):
    """
    Return a tuple of job task indices for a particular rank.

    This function distributes the job tasks in tasks_tot over all the ranks.

    Note
    ----
    This is primarily an MPI helper function.

    Parameters
    ----------
    rank : int
        Current MPI rank/worker.
    ranks : int
        Number of MPI ranks/workers in total.
    tasks_tot : list
        List of task indices. Length is the total number of job tasks.
    """
    n_tot = len(tasks_tot)
    nj = n_tot // ranks
    rest = n_tot % ranks
    tasks = [tasks_tot[i] for i in range(nj * rank, nj * rank + nj)]
    if rank < rest:
        tasks.append(tasks_tot[n_tot - rest + rank])
    return tuple(tasks)
d61a77d737bc198ac13487999699d4434fd5b290
615,920
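A small illustrative sketch of how get_job_tasks above splits work; the task ids and rank count are made up for the example:

# Hypothetical example: 7 task ids distributed over 3 ranks
tasks_tot = [10, 11, 12, 13, 14, 15, 16]
for rank in range(3):
    print(rank, get_job_tasks(rank, 3, tasks_tot))
# 0 (10, 11, 16)   <- rank 0 also gets one of the leftover tasks
# 1 (12, 13)
# 2 (14, 15)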
def get_number_rows(ai_settings, defender_width):
    """Determine the number of rows of the defense."""
    # random_rows = randint(3, 6)
    # available_space_x = (ai_settings.screen_width - (random_rows * defender_width))
    number_rows = 4  # int(available_space_x / (3 * defender_width))
    return number_rows
100109eaf0364ec3165e397ddc5fbe63cead7769
482,010
def has_block_sibling(item):
    """
    Test if passed node has block-level sibling element
    @type item: ZenNode
    @return: bool
    """
    return item.parent and item.parent.has_block_children()
f06849f71c6a99e8f4004a038ae424106845c5ff
58,912
import json


def load_json(filename: str) -> dict:
    """Load a json file

    Will simply use the `json library <https://docs.python.org/3/library/json.html>`
    and return the loaded dictionary.

    Args:
        filename (str): The file to load

    Returns:
        dict: The loaded json file contents in a dictionary form.
    """
    with open(filename) as f:
        j = json.load(f)
    return j
ba2bc0c725c80d1fbb0173f5d9f807ad365b25c7
212,787
def mermin_klyshko_quantum_bound(n):
    """The quantum bound for the Mermin-Klyshko inequality is :math:`2^{3(n-1)/2}`.

    :param n: The number of measurement nodes.
    :type n: Int

    :returns: The quantum bound.
    :rtype: Float
    """
    return 2 ** (3 * (n - 1) / 2)
721ca41b19ef72cae77baf1ad6dea5377b6eb67d
4,740
import random
import time


def retry(initial_delay, max_delay, factor=2.0, jitter=0.25, is_retriable=None):
    """Simple decorator for wrapping retriable functions.

    Args:
      initial_delay: the initial delay.
      factor: each subsequent retry, the delay is multiplied by this value.
          (must be >= 1).
      jitter: to avoid lockstep, the returned delay is multiplied by a random
          number between (1-jitter) and (1+jitter). To add a 20% jitter, set
          jitter = 0.2. Must be < 1.
      max_delay: the maximum delay allowed (actual max is
          max_delay * (1 + jitter)).
      is_retriable: (optional) a function that takes an Exception as an argument
          and returns true if retry should be applied.
    """
    if factor < 1:
        raise ValueError('factor must be >= 1; was %f' % (factor,))
    if jitter >= 1:
        raise ValueError('jitter must be < 1; was %f' % (jitter,))

    # Generator to compute the individual delays
    def delays():
        delay = initial_delay
        while delay <= max_delay:
            yield delay * random.uniform(1 - jitter, 1 + jitter)
            delay *= factor

    def wrap(fn):
        """Wrapper function factory invoked by decorator magic."""

        def wrapped_fn(*args, **kwargs):
            """The actual wrapper function that applies the retry logic."""
            for delay in delays():
                try:
                    return fn(*args, **kwargs)
                except Exception as e:  # pylint: disable=broad-except
                    if is_retriable is None:
                        continue
                    if is_retriable(e):
                        time.sleep(delay)
                    else:
                        raise
            return fn(*args, **kwargs)

        return wrapped_fn

    return wrap
4c62282671d46e1eb1d1720a84f6792380c21995
680,376
import re


def calc_word_frequency(my_string, my_word):
    """Calculate the number of occurrences of a given word in a given string.

    Args:
        my_string (str): String to search
        my_word (str): The word to search for

    Returns:
        int: The number of occurrences of the given word in the given string.
    """
    # Remove all non alphanumeric characters from the string
    filtered_string = re.sub(r'[^A-Za-z0-9 ]+', '', my_string)
    # Return the number of occurrences of my_word in the filtered string
    return filtered_string.split().count(my_word)
15ff723dd2ff089fb12cccb38283f1f75e37079d
1,160
def css_class(field):
    """
    Returns the widget's class name in lowercase
    """
    return field.field.widget.__class__.__name__.lower()
e6a555199c9b6762758837e0f7794bd69dc7fe09
681,578
import itertools
import random
import bisect


def weighted_choice(distribution):
    """Choose an element from a weighted distribution.

    Algorithm is taken from the documentation for the Python random module.

    distribution
        A sequence of (element, weight) tuples."""
    choices, weights = zip(*distribution)
    cdist = list(itertools.accumulate(weights))
    x = random.uniform(0, cdist[-1])
    return choices[bisect.bisect(cdist, x)]
0f7f04916904f2164d5bcabe7b27c66c29e41865
138,409
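A minimal usage sketch of weighted_choice above; the elements and weights are illustrative:

# 'b' is drawn roughly three times as often as 'a'
dist = [("a", 1.0), ("b", 3.0)]
sample = [weighted_choice(dist) for _ in range(10)]
print(sample)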
import io
import csv


def ids_from_csv_file_str(csv_file_str, fieldnames, fieldname):
    """return a set of ids from a string that is the contents of a file string
    fieldnames is all the field names in the file
    fieldname is one of the fieldnames that is the id"""
    infile = io.StringIO(csv_file_str)
    reader = csv.DictReader(infile, fieldnames, dialect='editor')
    ret = []
    for row in reader:
        if set(row.values()) == set(fieldnames):
            continue  # first row might be the title
        ret.append(row[fieldname])
    return ret
24d2277bcc1036730d23fd2516642cec2e89926e
530,344
def pv_fullname(name):
    """ make sure an Epics PV name ends with .VAL or .SOMETHING!

    Parameters
    ----------
    pvname: name of PV

    Returns
    -------
    string with full PV name
    """
    name = str(name)
    if '.' not in name:
        name = "%s.VAL" % name
    return name
5ad60054057a5668ac9cdaab2384d1f65f14f1c7
612,747
def _gaspr_input(endfin, pendfin, pendfout, **kwargs):
    """
    Write gaspr input.

    Parameters
    ----------
    endfin : `int`
        tape number for input ENDF-6 file
    pendfin : `int`
        tape number for input PENDF file
    pendfout : `int`
        tape number for output PENDF file

    Returns
    -------
    `str`
        gaspr input text
    """
    text = ["gaspr"]
    text += [f"{endfin:d} {pendfin:d} {pendfout:d} /"]
    return "\n".join(text) + "\n"
70f7208816b37469eb59346e98c43339e5a443c9
579,453
def shorten(text, length):
    """Truncate the text if it is too long."""
    if len(text) > length:
        pos = text.rfind(' ', 0, length - 2)
        if pos == -1:
            pos = length - 3
        return text[:pos] + "..."
    else:
        return text
5ffd470de78534de0b95a999d55ed10edda043ed
196,944
import re


def last_name_first_author(authors):
    """Return displays of info based on the authors field

    Selects the last name of the first author

    Doctest:

    .. doctest::

        >>> last_name_first_author('Pimentel, Joao')
        'pimentel'
        >>> last_name_first_author('Pimentel, Joao and Braganholo, Vanessa')
        'pimentel'
        >>> last_name_first_author('Joao Pimentel')
        'pimentel'
        >>> last_name_first_author('Joao Pimentel and Vanessa Braganholo')
        'pimentel'
        >>> last_name_first_author('Joao Pimentel, Vanessa Braganholo')
        'pimentel'
    """
    if " and " in authors:
        authors = authors.split(" and ")[0]
    if "," not in authors:
        last = authors.split()[-1]
    else:
        last = re.findall(r'(\w*)[`\-=~!@#$%^&*()_+\[\]{};\'\\:"|<,/<>?]', authors)[0]
    return last.lower()
609e9e72e821dde8360dae645ec2d7a915e67ab5
425,119
def source_information_from_method(source_method):
    """Obtain source information from a method of a source object.

    :param source_method: Source method that is used
    :type source_method: method
    :returns: string with source information identifying the object that
        the method belongs to
    :rtype: str
    """
    source = source_method.__self__
    info_str = f"source {source.name} of type {source.driver} using method "
    return info_str + f"{source_method.__name__}"
8f6cd465eb4b0979089753ce6cce4cdb96ab8283
39,463
import functools


def map_wrap(f):
    """Wrap standard function to easily pass into 'map' processing.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    return wrapper
83e2d9cd2bd36e993dd168a64236095db3608acc
65,960
def get_word_freq_in_sentences(word, sentences):
    """
    :param word: the word whose frequency we calculate
    :param sentences: a list of the sentences, representing the document / search space
    :return: the number of occurrences of the given word in the search space.
        Letter case is ignored
    """
    freq = 0
    for sentence in sentences:
        for w in sentence:
            if str(word).lower() == str(w).lower():
                freq += 1
    return freq
beeed940dad86259c208ba893b4ae9354b01a115
423,060
def _get_subclasses(classname):
    """Recursively obtains all subclasses of the given class."""
    subclasses = []
    for p in classname.__subclasses__():
        subclasses.append(p)
        subclasses.extend(_get_subclasses(p))
    return subclasses
a5edde240c97d8c21109a055593ca488552affa3
616,501
import ast


def _not_expr(expr: ast.expr) -> ast.expr:
    """Generates AST node for `not expr`."""
    return ast.UnaryOp(op=ast.Not(), operand=expr)
2a0c2cdfc0fe07857661c568a1f45719e76b41e7
355,133
def _make_cache_key(times, targets):
    """
    Make a unique key to reference this combination of ``times`` and ``targets``.

    Often, we wish to store expensive calculations for a combination of
    ``targets`` and ``times`` in a cache on an ``observer`` object. This
    routine will provide an appropriate, hashable, key to store these
    calculations in a dictionary.

    Parameters
    ----------
    times : `~astropy.time.Time`
        Array of times on which to test the constraint.
    targets : `~astropy.coordinates.SkyCoord`
        Target or list of targets.

    Returns
    -------
    cache_key : tuple
        A hashable tuple for use as a cache key
    """
    # make a tuple from times
    try:
        timekey = tuple(times.jd) + times.shape
    except BaseException:  # must be scalar
        timekey = (times.jd,)
    # make hashable thing from targets coords
    try:
        if hasattr(targets, 'frame'):
            # treat as a SkyCoord object. Accessing the longitude
            # attribute of the frame data should be unique and is
            # quicker than accessing the ra attribute.
            targkey = tuple(targets.frame.data.lon.value.ravel()) + targets.shape
        else:
            # assume targets is a string.
            targkey = (targets,)
    except BaseException:
        targkey = (targets.frame.data.lon,)
    return timekey + targkey
602cfb9aab0d358d0ca22de82cecc79b084dd590
382,583
def len_selfie(molecule):
    """Returns the length of selfies <molecule>, in other words, the number
    of characters in the sequence."""
    return molecule.count('[') + molecule.count('.')
2eb96c839c94778a9f54c0f89633dea0edcbba46
232,902
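An illustrative call to len_selfie above using a made-up SELFIES string; each bracketed symbol (and each '.') counts as one character of the sequence:

print(len_selfie('[C][=C][F]'))  # 3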
def crop_roi(im, bbox):
    """Crops ROI

    :param im: (np.ndarray) image BGR
    :param bbox: (BBox)
    :returns (np.ndarray) BGR image ROI
    """
    dim = im.shape[:2][::-1]
    x1, y1, x2, y2 = bbox.xyxy_int
    im_roi = im[y1:y2, x1:x2]
    return im_roi
5952d6617e3cf4e24e5aff2fb8527d5e7daba8b0
220,583
def _create_ip_range(range_start, range_end):
    """
    Given a start ip, eg 192.168.1.1, and an end ip, eg 192.168.1.254,
    generate a list of all of the ips within that range, including the
    start and end ips.
    """
    ip_range = []
    start = int(range_start[range_start.rfind('.') + 1:])
    end = int(range_end[range_end.rfind('.') + 1:])
    for i in range(start, end + 1):
        ip = range_start[:range_start.rfind('.') + 1] + str(i)
        ip_range.append(ip)
    return ip_range
af3d96862c0522d3737c2587ff0c57477fc4cad3
618,427
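A quick illustrative call to _create_ip_range above; note that it only varies the last octet:

print(_create_ip_range('192.168.1.1', '192.168.1.4'))
# ['192.168.1.1', '192.168.1.2', '192.168.1.3', '192.168.1.4']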
def absolute(value):
    """
    Get the absolute value for "value".

    This template tag is a wrapper for python's "abs(...)" method.

    Usage:
        >>> absolute(-5)
        5
    """
    try:
        return abs(value)
    except Exception:
        return value
be4e5a28b1983a83f83f89bb483fcc1797be06b8
298,548
def lcsplit(lcstate):
    """Splits a composite lcstate and availability string into a tuple"""
    return lcstate.split('_', 1)
e6f01568317c07643dc63357adee34732ad0b6e6
490,548
def exclusion_payload(passed_keywords: dict) -> dict:
    """Create a properly formatted exclusion payload.

    {
        "comment": "string",
        "groups": [
            "string"
        ],
        "value": "string"
    }
    """
    returned_payload = {}
    if passed_keywords.get("comment", None):
        returned_payload["comment"] = passed_keywords.get("comment", None)
    if passed_keywords.get("groups", None):
        returned_payload["groups"] = passed_keywords.get("groups", None)
    if passed_keywords.get("value", None):
        returned_payload["value"] = passed_keywords.get("value", None)
    return returned_payload
d9578ae0b6a84b4c0a7d73a2d05e8fa4e5d5af3c
103,276
import re


def _remove_extra_spaces_from_text(text: str) -> str:
    """ Replace multiple spaces with a single space """
    return re.sub(r'\s+', ' ', text)
b2bb7977190145cdf038644830decac7d4743f9d
455,185
def escape_markdown(content):
    """
    Escapes markdown from the given content.

    Parameters
    ----------
    content : `None` or `str`
        The content to sanitize.

    Returns
    -------
    content : `None` or `str`
    """
    if (content is None):
        return

    content = content.replace('\\', '\\\\')
    content = content.replace('_', '\\_')
    content = content.replace('*', '\\*')
    content = content.replace('|', '\\|')
    content = content.replace('~', '\\~')
    content = content.replace('`', '\\`')
    content = content.replace('>', '\\>')
    content = content.replace(':', '\\:')
    return content
4e8cc969aead0232a712b87fc4c5a8f78bda1037
508,791
import importlib


def import_cpp(name):
    """ Import jet generated cpp as module

    Args:
        name (str): The name (corresponding to the compile_cpp name given)

    Returns:
        Compiled and imported Python module
    """
    try:
        return importlib.import_module('jet_generated.' + name)
    except ImportError:
        raise ImportError("The module %s doesn't exist. Has it been built?" % name)
5fdf871283cc207efe0360d5dbb4dfbfb10e768d
289,123
def mag_squared(x):
    """
    Return the squared magnitude of ``x``.

    .. note::
        If ``x`` is an uncertain number, the magnitude squared is returned
        as an uncertain real number, otherwise ``abs(x)**2`` is returned.
    """
    try:
        return x._mag_squared()
    except AttributeError:
        return abs(x)**2
2fdc8527242b00549201efd79d86089ccdb0e10c
261,108
def bisect_right(arr, x):
    """Binary search: return the insertion point to the right of any
    existing entries equal to x."""
    lo, hi = 0, len(arr)
    while lo < hi:
        mid = lo + hi >> 1
        if arr[mid] <= x:
            lo = mid + 1
        else:
            hi = mid
    return lo
05985287fa1ea7fbe1ef3d3105f95460692a4afd
603,979
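An illustrative call to bisect_right above; with duplicates, the returned index sits just past the last occurrence of x:

arr = [1, 2, 2, 2, 5]
print(bisect_right(arr, 2))  # 4 (insertion point after the last 2)
print(bisect_right(arr, 3))  # 4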
def parse_copy_availability(line):
    """Parses 'Status-'line for availability.

    :param line: Line string.
    :type line: str
    :return: Availability.
    :rtype: bool
    """
    if 'Entliehen' in line:
        return False
    elif 'Verfügbar' in line:
        return True
    else:
        return False
c027bc6ee9f3a33c18f4eece47fa9e409354ba8d
677,511
def search_for_attr_value(obj_list, attr, value):
    """
    Finds the first (not necessarily the only) object in a list whose
    attribute 'attr' is equal to 'value'; returns None if none is found.
    """
    return next((obj for obj in obj_list if getattr(obj, attr, None) == value), None)
2e92b69d253158747194cbd9d9415df9078ffc4c
601,853
import requests


def zenodo_file_download_helper(auth_parameter, is_record, project_name, metadata_helper, files):
    """
    Downloads a single file from Zenodo and returns the expected dictionary.

    Parameters
    ----------
    auth_parameter : dict
        The Authentication parameter expected by Zenodo.
    is_record : bool
        Flag for if the resource is a published record
    project_name : str
        The name of the project.
    metadata_helper : dict
        JSON payload from Zenodo API
    files: list
        The list to append the file to.

    Returns
    -------
    The list of files.
    """
    if is_record is True:
        file_contents = requests.get(
            metadata_helper['contents'][0]['links']['self'],
            params=auth_parameter).content
        hashes = {'md5': metadata_helper['contents'][0]['checksum'].partition(':')[2]}
        title = metadata_helper['contents'][0]['key']
        path = '/{}'.format(title)
        # No way of getting project title if passed a file id.
        source_path = '/{}'.format(title)
    else:
        file_contents = requests.get(
            metadata_helper['links']['download'],
            params=auth_parameter).content
        hashes = {'md5': metadata_helper['checksum']}
        title = metadata_helper['filename']
        path = '/{}'.format(metadata_helper['filename'])
        source_path = "/{}/{}".format(project_name, metadata_helper['filename'])

    files.append({
        'file': file_contents,
        'hashes': hashes,
        'title': title,
        # If the file is the only resource we are downloading then we don't need its full path.
        'path': path,
        'source_path': source_path,
        'extra_metadata': {}})

    return files
f14d8ee8be9e257331f4768c9cb64f29c358031f
422,945
def test(env, *args, **kwargs):
    """build Program with fixed RPATH"""
    denv = env.Clone()
    denv.AppendUnique(RPATH=["$PREFIX/lib"])
    return denv.Program(*args, **kwargs)
c7121ce1551a5130d1b6e031486b87a4edad4ce9
607,525
def _entities_years_list_to_dict(l):
    """
    Create a dict of entities by year

    :param l: list in which each element is a size-two tuple (year, entity_id)
    :return: a dict in which each key is a year and each value is a list of entities
    :rtype: dict<int,list>
    """
    d = {}
    for y, eid in l:
        y = str(y)  # for easier mongo insert, key needs to be str
        if y not in d:
            d[y] = []
        d[y].append(eid)
    return d
6a82707aeb7a9663297970d5bda03c3bd1041a15
335,705
from pathlib import Path


def maybe_start_with_home_prefix(p: Path) -> Path:
    """
    If the input path starts with the home directory path string, then return
    a path that starts with the home directory and points to the same location.
    Otherwise, return the path unchanged.
    """
    try:
        return Path("~", p.relative_to(Path.home()))
    except ValueError:
        return p
6ee4e49e8dfb9bc68a1c10f5ea792715fb5d5336
4,531
def to_world_canvas(world_point, canvas_extents, world_extents):
    """Transforms a point from world coord system to world canvas coord system."""
    x = int(world_point[0] / world_extents[0] * canvas_extents[0])
    y = int(canvas_extents[1] - 1 - world_point[1] / world_extents[1] * canvas_extents[1])
    return (x, y)
5dec7f87fae35542b5798f88b0353c9b593e88fb
49,369
from typing import Iterable
import functools
from operator import getitem


def get_in(keys: Iterable):
    """Creates a function that returns coll[i0][i1]...[iX] where [i0, i1, ..., iX]==keys.

    >>> get_in(["a", "b", 1])({"a": {"b": [0, 1, 2]}})
    1
    """
    def get_in(coll):
        return functools.reduce(getitem, keys, coll)
    return get_in
b04c67beb2a9bf6f969b0f4193e5dc49f4d9bbf4
658,263
def get_high_lows_lookback(high, low, lookback_days):
    """
    Get the highs and lows in a lookback window.

    Parameters
    ----------
    high : DataFrame
        High price for each ticker and date
    low : DataFrame
        Low price for each ticker and date
    lookback_days : int
        The number of days to look back

    Returns
    -------
    lookback_high : DataFrame
        Lookback high price for each ticker and date
    lookback_low : DataFrame
        Lookback low price for each ticker and date
    """
    # getting max price for high prices excluding present day
    lookback_high = high.rolling(window=lookback_days).max().shift()
    # getting min price for low prices excluding present day
    lookback_low = low.rolling(window=lookback_days).min().shift()
    return lookback_high, lookback_low
d24905db2ae2425f7d57e3af503802c597d0c212
30,698
import torch


def optim_method(name):
    """Get optimizer."""
    method = {"sgd": torch.optim.SGD, "adam": torch.optim.Adam}
    return method[name]
69930ad96d2977592809a72511381462b6450f7f
354,952
import json


def load_concepts(concept_fpath):
    """
    Load stored concepts

    Params:
        concept_fpath (str): file-path to stored concepts

    Returns:
        the dict containing the report (stored) concepts
    """
    with open(concept_fpath, 'r') as f:
        concepts = json.load(f)
    return concepts
bd75cab139556cdc95446c2c5505f56014c1ae77
236,987
def solution(A):
    """
    On the face of it this is quite simple, until you factor in negative numbers.

    Approach is very simple:
    1. Sort array in descending order (or ascending if you like)
    2. Take the max value between the product of the first three elements and
       the product of the first element combined with the last two elements
       (two negatives)
    """
    # sort array in descending order
    A = sorted(A, reverse=True)
    # compare the product of the top three against the top value times the two most negative values
    return max(A[0] * A[1] * A[2], A[0] * A[-2] * A[-1])
ad8b58e8bb9fe9959afed7b95f7abdbe05742a11
606,258
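A worked, illustrative example for solution above: when the array contains two large negatives, the best triple pairs them with the largest positive.

print(solution([-10, -10, 1, 2, 3]))  # max(3*2*1, 3*(-10)*(-10)) = 300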
import textwrap


def _textwrap_wrap(string, justify):
    """
    Custom instantiation of textwrap.wrap() to account for new lines in string.
    """
    output = []
    for line in string.splitlines():
        justified = textwrap.wrap(
            line, justify, break_long_words=False, replace_whitespace=False
        )
        output.extend(justified)
    return output
b16e0865919dcfc688ea990928dde4a7fe79f56b
232,806
import functools


def memoize(func, cache, num_args):
    """
    Wrap a function so that results for any argument tuple are stored in
    'cache'. Note that the args to the function must be usable as dictionary
    keys. Only the first num_args are considered when creating the key.
    """
    @functools.wraps(func)
    def wrapper(*args):
        mem_args = args[:num_args]
        if mem_args in cache:
            return cache[mem_args]
        result = func(*args)
        cache[mem_args] = result
        return result
    return wrapper
b12ff23b822d18a3a0e6a5f6db497d8e67114369
84,932
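A minimal usage sketch of memoize above; the wrapped function and values are hypothetical, and the cache dict is owned by the caller:

# Cache results keyed on the first argument only
cache = {}

def slow_add(a, b):
    return a + b

fast_add = memoize(slow_add, cache, num_args=1)
print(fast_add(2, 3))   # 5, computed and stored under key (2,)
print(fast_add(2, 99))  # 5 again: only the first argument forms the cache key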
from typing import Union
from typing import List


def read_file(file_path: str, split: bool = False) -> Union[str, List[str]]:
    """
    Reads a text file.

    >>> from snakypy import helpers
    >>> file = '/tmp/my_file.txt'
    >>> helpers.files.create_file('My content file', file, force=True)
    True
    >>> helpers.files.read_file(file)
    'My content file'

    Args:
        file_path (str): You must receive the full/absolute file path.
        split (bool): If this option is True, a list will be returned where
            the breaks will be made using line skips. (default: {False})

    Returns:
        [str|list]: By default it returns a string. If the option split=True,
            a list of line breaks will be returned.
    """
    try:
        with open(file_path) as f:
            if split:
                return f.read().split("\n")
            return f.read()
    except FileNotFoundError as err:
        raise FileNotFoundError(f'>>> File "{file_path}" does not exist. {err}')
af2429f9d696a693b89c0fa33200e453906ee0c8
29,791
def select_value_from_list(text, value_list):
    """
    :param text: value (case insensitive) or index of value list or None
    :param value_list: list of strings to choose from
    :return: chosen value, or if text is None, None
    :raises ValueError: if the string is not a valid value and not an index
    :raises IndexError: if given index is invalid
    """
    if text is None:
        return None
    if text in [p.lower() for p in value_list]:
        return value_list[[p.lower() for p in value_list].index(text)]
    else:
        return value_list[int(text)]
be1b316da9a68cb215187449bba33c1a2fc0ab71
246,456
def sumatorio_tope(tope):
    """
    Keeps adding natural numbers until the running sum would exceed the cap (tope).

    Examples
    --------
    >>> sumatorio_tope(9)
    6   # 1 + 2 + 3
    >>> sumatorio_tope(10)
    10  # 1 + 2 + 3 + 4
    >>> sumatorio_tope(12)
    10  # 1 + 2 + 3 + 4
    >>> sumatorio_tope(16)
    15  # 1 + 2 + 3 + 4 + 5
    """
    s = 0
    n = 1
    while s + n <= tope:
        s += n
        n += 1
    return s
c8a0307f5876b2eb641edc10a4dc93ad38270e95
647,167
def pow2(x):
    """Return the square of x

    :param float x: input value
    :rtype: float
    """
    return x*x
955e83c526430582a542eb6c3b1d2ab92d7bff61
674,621
def epc_calc_bin_mode(reg_dict):
    """
    Get the current binning modes

    Parameters
    ----------
    reg_dict : dict
        The dictionary that contains all the register information

    Returns
    -------
    bool
        Row binning mode
    bool
        Column binning mode
    """
    bin_mode = reg_dict["bin_mode"][2]
    col_bin = bool(bin_mode & 0x01)
    row_bin = bool(bin_mode & 0x02)
    return row_bin, col_bin
551a36dc130c3c5b4048acadf50d108dc9100390
102,383
def is_absolute_url(parsed_url):
    """ check if it is an absolute url """
    return all([parsed_url.scheme, parsed_url.netloc])
578c1443ec18f9b741cd205763604cba2242ac48
5,952
import math


def regular_poly_side_length_to_apothem(n_sides, side_length):
    """Compute apothem for regular polygon with given side length."""
    return side_length / (2 * math.tan(math.pi / n_sides))
38969b7d33086b9b9fd39f8b49b25001da595c97
525,077
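A quick illustrative check of regular_poly_side_length_to_apothem above: a square with side 2 has apothem 1.

print(regular_poly_side_length_to_apothem(4, 2))  # 2 / (2 * tan(pi/4)) ≈ 1.0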
from typing import Any
from typing import Callable


def ask_for_input(
    question: str, default: Any = None, callback: Callable[[Any], Any] = str
) -> Any:
    """
    Ask for input from the user at the command line.

    Parameters
    ----------
    question
        A string to display as a prompt for the user.
    default
        The default answer to the question.
    callback
        The return value of this callback is what is returned from this
        function. Useful for simple conversions, like receiving an integer
        instead of a raw string.

    Returns
    -------
    answer
        The input, passed through ``callback``.
    """
    while True:
        input_str = input(question + " [Default: {}] > ".format(default))
        trimmed = input_str.replace(" ", "")
        if trimmed == "":
            return default
        try:
            return callback(trimmed)
        except Exception as e:
            print(e)
d43b2b9d954b8e0f3984b5937156e886a2f54c59
367,126
from typing import Any


def _convert_sql_format(value: Any) -> str:
    """
    Given a Python value, convert to string representation of the equivalent SQL datatype.

    :param value: A value, ie: a literal, a variable etc.
    :return: The string representation of the SQL equivalent.

    >>> _convert_sql_format(1)
    "1"
    >>> _convert_sql_format("John Smith")
    '"John Smith"'
    """
    if value is None:
        return "NULL"
    elif isinstance(value, str):
        return f'"{value}"'
    elif isinstance(value, bytes):
        return '"' + str(value).replace("b'", "")[:-1] + '"'
    else:
        return str(value)
7e1728c19fb8698694ac194e60d76b2bddaf9c41
45,036
import json


def CheckJsonParses(input_api, output_api):
    """Verifies that all JSON files at least parse as valid JSON."""
    affected_files = input_api.AffectedFiles(
        include_deletes=False,
        file_filter=lambda x: x.LocalPath().endswith('.json'))
    warnings = []
    for f in affected_files:
        with open(f.AbsoluteLocalPath()) as j:
            try:
                json.load(j)
            except ValueError:
                # Just a warning for now, in case people are using JSON5 somewhere.
                warnings.append(output_api.PresubmitPromptWarning(
                    '%s does not appear to be valid JSON.' % f.LocalPath()))
    return warnings
23723bca79f110e5c161509127241aa901c8a056
457,468
from pathlib import Path
import gzip


def as_file_handle(filepath, mode):
    """Return a file-handle from a given file path.

    Infers if file is compressed or not based on extension.

    Parameters
    ----------
    filepath : str or pathlib.Path
        Path to file to open. If file name ends in 'gz', will uncompress the
        file on the fly.
    mode : any supported mode for file or GzipFile

    Returns
    -------
    file
        an open file handle (text)
    """
    filepath = Path(filepath)
    if filepath.suffix == ".gz":
        return gzip.open(filepath, mode)
    return filepath.open(mode)
f49452b2f22f2501edf139259cf0e6b3e4b83368
175,048
def format_message(date_str, node, msg):
    """Format log message"""
    message = f"{date_str}: {node.site_name}: {node.location or node.model} ({node.serial}) {msg}"
    return message
53dc7e2716f935a083c36e40ad4887cfe23c0aad
14,089
def __getmappingcases__(mapping, value):
    """
    Gets index of case value in mapping.

    :param mapping: mapping dictionary
    :param value: value for which the mapping is searched for
    :return: index of value in mapping
    """
    if value in mapping["cases"]:
        return mapping["cases"].index(value)
    else:
        return len(mapping["cases"]) + 1
53fde9936da9e8b7a099e209ac824d370f225bfb
149,681
def can_ship(weight, distance):
    """
    Determines if we can ship a package based on its weight, and distance traveled.
    """
    if weight * distance < 1000:
        return True
    else:
        return False
63373b6fb11caf772180266e60ba3011ae87b625
424,257
def mel2hz(mel, formula="htk"):
    """
    Convert the mel-scale representation of a signal into Hz

    Parameters
    ----------
    mel : :py:class:`ndarray <numpy.ndarray>` of shape `(N, \\*)`
        An array of mel frequencies to convert
    formula : {"htk", "slaney"}
        The Mel formula to use. "htk" uses the formula used by the Hidden
        Markov Model Toolkit, and described in O'Shaughnessy (1987). "slaney"
        uses the formula used in the MATLAB auditory toolbox (Slaney, 1998).
        Default is 'htk'

    Returns
    -------
    hz : :py:class:`ndarray <numpy.ndarray>` of shape `(N, \\*)`
        The frequencies of the items in `mel`, in Hz
    """
    fstr = "formula must be either 'htk' or 'slaney' but got '{}'"
    assert formula in ["htk", "slaney"], fstr.format(formula)
    if formula == "htk":
        return 700 * (10 ** (mel / 2595) - 1)
    raise NotImplementedError("slaney")
67dab362223ceda0d211aa22b2ee20805fdde48f
316,999
def dict_map(f, d):
    """Apply function f to all terminal elements of dict d."""
    if isinstance(d, dict):
        return {k: dict_map(f, v) for k, v in d.items()}
    elif isinstance(d, list):
        return [dict_map(f, x) for x in d]
    else:
        return f(d)
8abcb5df97ae6ff5c28361fdcf1a723dfe5b5640
139,166
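An illustrative call to dict_map above, applying a doubling function to every leaf of a nested structure:

data = {"a": 1, "b": {"c": [2, 3]}}
print(dict_map(lambda v: v * 2, data))  # {'a': 2, 'b': {'c': [4, 6]}}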
def calculate_gruneisen_parameter_from_temperature(temperature_in_celcius):
    """
    This function returns the dimensionless gruneisen parameter based on a heuristic formula
    that was determined experimentally::

        @book{wang2012biomedical,
            title={Biomedical optics: principles and imaging},
            author={Wang, Lihong V and Wu, Hsin-i},
            year={2012},
            publisher={John Wiley & Sons}
        }

    :param temperature_in_celcius: the temperature in degrees celcius
    :return: a floating point number, if temperature_in_celcius is a number,
        or a float array, if temperature_in_celcius is an array
    """
    return 0.0043 + 0.0053 * temperature_in_celcius
248214c393e52e24d301e3d79159ab9dfc9b0c6c
211,200
def find_correspondance_date(index, csv_file):
    """
    Returns the dates reported in the csv_file for the i-th subject.

    :param index: index corresponding to the subject analysed
    :param csv_file: csv file where all the information is listed
    :return: date
    """
    return csv_file.EXAMDATE[index]
915b9a493247f04fc1f62e614bc26b6c342783c8
706,074
def tag_filter(tag_list, base_df):
    """Search with tags.

    Args:
        base_df (DataFrame): Search target.
        tag_list (list): List of search tags.

    Returns:
        (DataFrame): Searched DataFrame.
    """
    result_df = base_df
    for tag in tag_list:
        mask = result_df["tag"].apply(lambda x: tag in x)
        result_df = result_df[mask]
    return result_df
fe6a9f075d859167d3ec160b96c7f839db394154
663,872
def parse_authmap(fp):
    """
    Low level parser for authmap files.

    Takes a file pointer to an open authmap file and parses it into a dict.
    The returned dict will be composed as follows:

    - The keys are the names of local entities (users or groups)
    - The values are lists of authorized LDAP entities, represented as strings.
    """
    authmap = {}
    while True:
        line = fp.readline()
        if line == '':
            break
        line = line.strip()
        if line == '' or line[0] == '#' or line.rstrip() == '':
            continue
        localuser, entities = line.split(':')
        if localuser not in authmap:
            authmap[localuser] = []
        for entity in entities.split(','):
            authmap[localuser].append(entity.strip())
    return authmap
cfe4e5d53df4b208c14b76b02001b77c54d4b5c4
564,098
def add_inputs(input_1: float, input_2: float, *extra) -> float:
    """
    Adds 2 or more numbers together.

    :param input_1: first number
    :type input_1: float
    :param input_2: second number
    :type input_2: float
    :return: all the inputs added together
    :rtype: float
    """
    total = input_1 + input_2
    for curr_num in extra:
        total += curr_num
    return total
a28e92787f51a2b4525562dcfb74ffd9a3c61d3b
639,442
import math


def angle_vector_2D(v1, v2):
    """
    Return angle between two 2D vectors (i.e. use only x and y coordinate values)

    Angle (from v1 to v2, positive angle of rotation) is in the range -pi < x < pi.
    """
    dot = v1[0] * v2[0] + v1[1] * v2[1]  # dot product
    det = v1[0] * v2[1] - v1[1] * v2[0]  # determinant
    angl = math.atan2(det, dot)
    return angl
33addb9c0cacbc0a391e4c6267af9dce486c46be
588,839
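An illustrative call to angle_vector_2D above: rotating from the x-axis to the y-axis gives +pi/2.

print(angle_vector_2D((1, 0), (0, 1)))  # pi/2 ≈ 1.5708 (counter-clockwise rotation)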
import re


def is_qt_example(example):
    """
    Is the example a Qt example?

    :param example: The example.
    :return: True if it is a Qt example.
    """
    reg = re.compile(r".*Qt.*", re.DOTALL)
    return reg.match(example)
28ac55ac65be28202e1fc01d78406909a0e5d5d7
338,701
def python2iraf(x1, x2, y1, y2):
    """
    Convert from Pythonic indexes to IRAFonic indexes.

    Args:
        x1: int
        x2: int
        y1: int
        y2: int

    Returns:
        section : str
    """
    s = '[{:1d}:{:1d}, {:1d}:{:1d}]'.format(x1 + 1, x2, y1 + 1, y2)
    return s
8035656637430c9b6ecf67cf9a9c14b8df68d87b
415,306
def identifyCategoricalFeatures(x_data, categorical_cutoff):
    """
    Takes a dataframe (of independent variables) with column labels and returns a list of
    column names identified as being categorical based on user defined cutoff.
    """
    categorical_variables = []
    for each in x_data:
        if x_data[each].nunique() <= categorical_cutoff:
            categorical_variables.append(each)
    return categorical_variables
531eb49871155b5042de08b0c7b3189a15e55ca2
652,363
def batch_eye_like(tensor):
    """
    Creates a sequence of identity tensors, indexed by the batch size, with the shape of
    the input tensor

    Parameters
    ----------
    tensor: Tensor (b, n, ..., n)
        b tensors with the same shape

    Returns
    -------
    Tensor (b, n, ..., n)
        b identity tensors
    """
    return tensor.new_ones(tensor.size(-1)).diag().expand_as(tensor)
ef86884a79ec3ff62c02eb3aea15ea7373a766ef
405,047
def cli(ctx, toolShed_id):
    """Get details of a given Tool Shed repository as it is installed on this Galaxy instance."""
    return ctx.gi.toolshed.show_repository(toolShed_id)
c69c0711c68ccd8a55ec5a95e2bc56f7e7246b29
130,006
def check_who_queued(user):
    """
    Returns a function that checks if the song was requested by user
    """
    def pred(song):
        if song.requested_by and song.requested_by.id == user.id:
            return True
        return False
    return pred
e53a1434077ec7b97e237d1ff8bcc8c2454c4015
702,539
def _t(a):
    """transpose the last two axes of a three axis array"""
    return a.transpose((0, 2, 1))
91911a8863136538f3f419e20e4515f28d85d859
501,211
from collections import OrderedDict


def sorted_dict(d: dict):
    """
    Returns OrderedDict sorted by ascending key

    :param d: dict
    :return: OrderedDict
    """
    # collections.OrderedDict is instantiable; the typing alias is not
    return OrderedDict(sorted(d.items()))
46e550f428841d66601a4e36c654dd450f0231ed
473,221
def my_vtk_grid_props(vtk_reader):
    """
    Get grid properties from vtk_reader instance.

    Parameters
    ----------
    vtk_reader: vtk Reader instance
        vtk Reader containing information about a vtk-file.

    Returns
    -------
    step_x : float
        For regular grid, stepsize in x-direction.
    step_y : float
        For regular grid, stepsize in y-direction.
    npts_x : float
        Number of cells in x-direction.
    npts_y : float
        Number of cells in y-direction.
    low_m_x : float
        Middle of first x cell
    high_m_x : float
        Middle of last x cell
    low_m_y : float
        Middle of first y cell
    high_m_y : float
        Middle of last y cell
    low_x : float
        Edge of first x cell
    high_x : float
        Edge of last x cell
    low_y : float
        Edge of first y cell
    high_y : float
        Edge of last y cell

    Notes
    -----
    0: step_x
    1: step_y
    2: npts_x
    3: npts_y
    4: low_m_x - Middle of cells: first x cell
    5: high_m_x - Middle of cells: last x cell
    6: low_m_y - Middle of cells: first y cell
    7: high_m_y - Middle of cells: last y cell
    8: low_x - Edge of cells: first x cell
    9: high_x - Edge of cells: last x cell
    10: low_y - Edge of cells: first y cell
    11: high_y - Edge of cells: last y cell
    """
    vtk_output = vtk_reader.GetOutput()

    # Read attributes of the vtk-Array
    # num_cells = vtk_output.GetNumberOfCells()
    # num_points = vtk_output.GetNumberOfPoints()
    # whole_extent = vtk_output.GetExtent()
    grid_bounds = vtk_output.GetBounds()
    grid_dims = vtk_output.GetDimensions()

    # Grid information
    step_x = (grid_bounds[1] - grid_bounds[0]) / (grid_dims[0] - 1)
    step_y = (grid_bounds[3] - grid_bounds[2]) / (grid_dims[1] - 1)

    if grid_bounds[0] == 0.0:  # CELLS
        npts_x = grid_dims[0] - 1
        npts_y = grid_dims[1] - 1

        low_m_x = grid_bounds[0] + 0.5 * step_x
        high_m_x = grid_bounds[1] - 0.5 * step_x
        low_m_y = grid_bounds[2] + 0.5 * step_y
        high_m_y = grid_bounds[3] - 0.5 * step_y

        low_x = grid_bounds[0]
        high_x = grid_bounds[1]
        low_y = grid_bounds[2]
        high_y = grid_bounds[3]
    else:  # POINTS
        npts_x = grid_dims[0]
        npts_y = grid_dims[1]

        low_m_x = grid_bounds[0]
        high_m_x = grid_bounds[1]
        low_m_y = grid_bounds[2]
        high_m_y = grid_bounds[3]

        low_x = grid_bounds[0] - 0.5 * step_x
        high_x = grid_bounds[1] + 0.5 * step_x
        low_y = grid_bounds[2] - 0.5 * step_y
        high_y = grid_bounds[3] + 0.5 * step_y

    return step_x, step_y, \
        npts_x, npts_y, \
        low_m_x, high_m_x, low_m_y, high_m_y, \
        low_x, high_x, low_y, high_y
26ef8a51648ea487372ae06b54c8ccf953aeb414
3,408
from typing import Dict


def failed_chunk(event: Dict, _c: Dict) -> Dict:
    """
    Lambda function to handle a failed chunk to allow processing to continue.

    :param event: lambda expected event object
    :param _c: lambda expected context object (unused)
    :returns: remaining records to be processed
    """
    print(f'These failed to process:\n{event.get("chunkProcessed")}')
    # you would want to do something here with the `chunkProcessed`
    # like send to DynamoDB for offline processing or retry later, etc.
    del event['chunkResponse']
    del event['chunkProcessed']
    return event
75ed7fc0cb410099d111fd9bf4e03578e654c0ad
550,624
def index(seq, i):
    """
    Returns the ith element in the sequence `seq`.
    """
    return seq[i]
3ff17125a70154daece8df742109ae1c7e78878c
584,934
def poly_points(P, T):
    """
    Returns the ordered points from the given polygons

    Parameters
    ----------
    P : Tensor
        a (N, D,) points set tensor
    T : LongTensor
        a (M, T,) topology tensor

    Returns
    -------
    tuple
        a tuple containing the points of the given polygons
    """
    return tuple(P[T])
2fed449c71766d20e3cb48c518eca8a6b725eb1f
453,218
def rotl(num, shift):
    """Rotate left"""
    return ((num << shift) | (num >> (32 - shift))) & 0xFFFFFFFF
9f4f95fc5423366ca777d3876fe2a75eaaf65c12
284,847
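A quick illustrative check of rotl above on a 32-bit value:

print(hex(rotl(0x80000001, 1)))  # 0x3: the top bit wraps around to bit 0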
def init_feed(feed):
    """
    Initialise a feed

    :param feed: Feed
    :return: tf.data.Iterator.initializer
    :return: tensor slices
    """
    feed = feed.feed
    iterator_tensor = feed.make_initializable_iterator()
    return iterator_tensor.initializer, iterator_tensor.get_next()
3d9148a4bc064883aad22c771fe68cf5551dd60f
51,564
def _pandas_in_schemas(schemas):
    """
    Check if any schema contains pandas metadata
    """
    has_pandas = False
    for schema in schemas:
        if schema.metadata and b"pandas" in schema.metadata:
            has_pandas = True
    return has_pandas
ec5bdb8f3431883f7f5bace86ebbb3a0d46e1c91
329,856
import string


def find_words(text):
    """
    text: string
    Returns a list of words from input text
    """
    text = text.replace("\n", " ")
    for char in string.punctuation:
        text = text.replace(char, "")
    words = text.split(" ")
    return words
86c2691b9f0e57e758f7bc2d5b8fa51adcc1ebc0
661,537
def format_attr_value(entry, attr):
    """Helper that formats attribute to be presented in the table output.

    Args:
        entry (Dict[str, str]): CONFIG DB entry configuration.
        attr (Dict): Attribute metadata.

    Returns:
        str: formatted attribute value.
    """
    if attr["is-leaf-list"]:
        return "\n".join(entry.get(attr["name"], []))
    return entry.get(attr["name"], "N/A")
30de403f26a5fd1ae12758c461e94beda79fccd1
489,382
def hard_mish(x, inplace: bool = False):
    """Implements the HardMish activation function

    Args:
        x: input tensor

    Returns:
        output tensor
    """
    if inplace:
        return x.mul_(0.5 * (x + 2).clamp(min=0, max=2))
    else:
        return 0.5 * x * (x + 2).clamp(min=0, max=2)
f2ad1e6604596e0bab73564ca67a2c4e2a7f1141
236,895
def misclassification_rate(preds, alt_preds, k=1):
    """
    Computes the misclassification rate for a group of base and alternative predictions.

    For details, check: Narodytska, Nina, and Shiva Prasad Kasiviswanathan.
    "Simple black-box adversarial perturbations for deep networks."
    arXiv preprint arXiv:1612.06299 (2016).

    :param preds: The list of base predictions
    :param alt_preds: The list of alternative predictions
    :param k: The number of misclassified predictions to trigger a misclassification
    :returns: The misclassification rate
    """
    n_preds = len(preds)
    n_misclassification = 0
    for i in range(len(preds)):
        if len(preds[i]) == 0:
            n_preds -= 1
            continue
        elif preds[i][0] not in alt_preds[i][:k]:
            n_misclassification += 1
    rate = round(n_misclassification / n_preds, 2)
    return rate
37323d9036e06d5db4cc6e8873c45bf4242183ab
458,987
def _concat_lists_safe(A, B):
    """ Safely concatenate two lists if one or both may be None """
    if A is None and B is None:
        return None
    elif isinstance(A, list) and B is None:
        return A
    elif isinstance(B, list) and A is None:
        return B
    elif isinstance(A, list) and isinstance(B, list):
        return A + B
    else:
        raise TypeError("A and B must be lists or None")
8254127958320aceea58c1e82c321a5c7fbdf493
604,834
def getIntUserResponse(uio, prompt, allowQuit=True):
    """Get int response from user.
       If allowQuit is True and the user enters q then None is returned to
       indicate that the user selected quit."""
    while True:
        response = uio.getInput(prompt=prompt)
        try:
            return int(response)
        except ValueError:
            uio.info("%s is not a valid integer value." % (response))
            if allowQuit and response.lower() == 'q':
                return None
2bf06c8d8dc3af4955d743a94e2d640793a233c0
184,066