Columns: content (string, length 39 to 9.28k), sha1 (string, length 40), id (int64, 8 to 710k)
def chrange(start, stop):
    """
    Construct an iterable of length-1 strings beginning with `start` and ending with `stop`.

    Parameters
    ----------
    start : str
        The first character.
    stop : str
        The last character.

    Returns
    -------
    chars : iterable[str]
        Iterable of strings beginning with start and ending with stop.

    Examples
    --------
    >>> chrange('A', 'C')
    ['A', 'B', 'C']
    """
    return list(map(chr, range(ord(start), ord(stop) + 1)))
7d31406bd0920e2abe031f19f95dc728d7c64aba
569,127
def format_name(name: str, depth: int = 0) -> str: """Format a string for nested data printing Args: name: input string depth: depth of the nested information Returns: formatted string """ if depth == 0: return name elif depth == 1: return f"├─{name}" else: return f"{'| ' * (depth - 1)}└─{name}"
4787a36ac8ce92bce86797b9a318bafdef7ef64b
357,110
def byte_str(s='', encoding='utf-8', input_encoding='utf-8', errors='strict'):
    """
    Returns a bytestring version of 's', encoded as specified in 'encoding'.
    Accepts str & unicode objects, interpreting non-unicode strings as byte
    strings encoded using the given input encoding.
    """
    assert isinstance(s, str)
    if isinstance(s, str):
        return s.encode(encoding, errors)
    # Unreachable on Python 3: the assert above guarantees `s` is a str,
    # so the byte-string handling below is legacy code.
    if s and encoding != input_encoding:
        return s.decode(input_encoding, errors).encode(encoding, errors)
    return s
f2fe37219d1cc3297bf107774775e6604406cb01
239,579
import string def is_south(band): """ Return if band is in southern hemisphere """ alpha = {c: num for num, c in enumerate(string.ascii_uppercase)} return alpha[band.upper()] < 12
8be5096b0f9d9fb69fbaa60b2205e5a0ce868777
549,665
import itertools


def pairwise(iterable):
    """
    Iterate pairwise through an iterable.

    pairwise([1,2,3,4]) -> (1,2),(2,3),(3,4)
    """
    val, nextVal = itertools.tee(iterable)
    next(nextVal, None)
    return zip(val, nextVal)
495fedbaf2046d66bd791dc78dea8525761e01b1
10,905
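A minimal usage sketch for the `pairwise` record above (illustrative only, not part of the dataset):

# Consecutive elements are paired; the result is one element shorter than the input.
list(pairwise([1, 2, 3, 4]))  # -> [(1, 2), (2, 3), (3, 4)]
list(pairwise("abc"))         # -> [('a', 'b'), ('b', 'c')]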
import re


def reorder(text):
    """reorder strings that are in format [title, The] to [The title]"""
    m = re.match("(.+)(?:,|;) The(.*)", text)
    return "The " + m.group(1) + m.group(2) if m else text
fe9ebd5fe657f0d55c73832b19fa40a7060202d2
263,762
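Illustrative calls for the `reorder` record above (not part of the dataset):

reorder("Matrix, The")  # -> 'The Matrix'
reorder("Inception")    # -> 'Inception' (no ", The"/"; The" suffix, returned unchanged)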
def jetCollectionString(prefix='', algo='', type=''):
    """
    ------------------------------------------------------------------
    return the string of the jet collection module depending on the
    input values. The default return value will be 'patAK5CaloJets'.

    algo   : indicating the algorithm type of the jet [expected are
             'AK5', 'IC5', 'SC7', ...]
    type   : indicating the type of constituents of the jet [expected
             are 'Calo', 'PFlow', 'JPT', ...]
    prefix : prefix indicating the type of pat collection module
             (expected are '', 'selected', 'clean').
    ------------------------------------------------------------------
    """
    if (prefix == ''):
        jetCollectionString = 'pat'
    else:
        jetCollectionString = prefix
        jetCollectionString += 'Pat'
    jetCollectionString += 'Jets'
    jetCollectionString += algo
    jetCollectionString += type
    return jetCollectionString
c77c2fd3afcccb1e62e1d92103ce677775fae0e4
696,952
def name_sort_key(name):
    """
    Gets sort key for the given name, when sorting attribute, method, or such names.

    Parameters
    ----------
    name : `str`
        Name of an attribute, method or class.

    Returns
    -------
    key : `tuple` (`int`, `str`)
        The generated key.
    """
    if name.startswith('__'):
        if name == '__new__':
            prefix_count = 0
        elif name == '__init__':
            prefix_count = 1
        elif name == '__call__':
            prefix_count = 2
        else:
            prefix_count = 102
    elif name.startswith('_'):
        prefix_count = 101
    else:
        prefix_count = 100
    return prefix_count, name
a72699cff7f8f70fcc750289fea3951f733f0fd2
177,360
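Usage sketch for the `name_sort_key` record above (illustrative only):

# __new__/__init__/__call__ sort first, then public names, then _private, then other dunders.
names = ['helper', '_private', '__str__', '__init__', '__new__']
sorted(names, key=name_sort_key)
# -> ['__new__', '__init__', 'helper', '_private', '__str__']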
def Large(rect): """ Inflate a rect by a standard amount """ return rect.Inflate(5, 5)
34ed666edb53bf573916639736507664ceb28c63
295,750
def _n(value): """ Convert between an empty string and a None This function is translates django's empty elements, which are stored as empty strings into pyxb empty elements, which are stored as None. """ return None if value == '' else value
6928a8f6b81051b80236475d028b1ece8624e7d0
348,770
def ixps(value, other): """ Returns all IXPs on which both AS are peering together. """ return value.get_internet_exchange_points(other)
9e3e5f424cdee4ade40b9a8ff254a18e3237a9e5
505,074
def y(filen): """ Returns the integer in the filename 'filen'. For example, for 'image_13.npy', this function returns 13 as an integer """ cname = '' for c in filen: if c.isdigit(): cname += c return int(cname)
61809836717846f82ac36256bd4816691f56989f
468,915
def gen_anytext(*args): """ Convenience function to create bag of words for anytext property """ bag = [] for term in args: if term is not None: if isinstance(term, list): for term2 in term: if term2 is not None: bag.append(term2) else: bag.append(term) return ' '.join(bag)
54749b40142465c03a8ac19cb7a1175b7e7ee0e7
679,704
import math


def convert_local_to_global_vector(v: list, yaw: float):
    """
    Converts the given vector in vehicle coordinate system to the global one
    under the given vehicle yaw.
    """
    vx = math.cos(math.radians(yaw)) * v[0] - math.sin(math.radians(yaw)) * v[1]
    vy = math.sin(math.radians(yaw)) * v[0] + math.cos(math.radians(yaw)) * v[1]
    return [vx, vy]
62f96a22c85f22125165e387bfeea76d78e5c519
10,958
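Usage sketch for the `convert_local_to_global_vector` record above (illustrative only):

# A forward-pointing unit vector in the vehicle frame, with the vehicle yawed 90 degrees,
# rotates onto the global y axis (up to floating-point rounding).
convert_local_to_global_vector([1.0, 0.0], 90.0)  # -> approximately [0.0, 1.0]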
def peek(c): """PEEK / [SP]""" v = "c.m[0x%04X]" % c.sp return v
52b1dc587037eac2698bbc68380165a90e5f0021
535,049
def squeeze_list(listA, val=-1):
    """
    Compact a list of lists into a single list.

    Squeezes (spaghettify) a list of lists into a single list. The lists are
    concatenated into a single one, and to separate them it is used a
    separating value to mark the split location when unsqueezing the list.

    Parameters
    ----------
    listA : list
        List of lists.
    val : number, optional
        Value to separate the lists.

    Returns
    -------
    list
        A list with all lists concatenated into one.

    Examples
    --------
    Compact a list of lists into a single list.

    >>> from dbcollection.utils.pad import squeeze_list
    >>> squeeze_list([[1,2], [3], [4,5,6]], -1)
    [1, 2, -1, 3, -1, 4, 5, 6]
    """
    concatA = [l + [val] for l in listA]
    out = [li for l in concatA for li in l]
    return out[:-1]
8504cdcb3126ae2a65691ad87fd4ef45557c1b8e
538,858
import re def npm_package(package): """Convert the npm package name to remove unsupported character""" # Scoped package names (with the @) use the same naming convention # as the 'npm pack' command. if package.startswith("@"): return re.sub("/", "-", package[1:]) return package
51b27592c5fa390b179280ef5a68b30f84b6053a
429,996
def get_nearest_coords(cube, latitude, longitude, iname, jname): """ Uses the iris cube method nearest_neighbour_index to find the nearest grid points to a given latitude-longitude position. Args: cube (iris.cube.Cube): Cube containing a representative grid. latitude (float): Latitude coordinates of spot data site of interest. longitude (float): Longitude coordinates of spot data site of interest. iname (str): String giving the name of the y coordinates to be searched. jname (str): String giving the names of the x coordinates to be searched. Returns: Tuple[int, int]: Grid coordinates of the nearest grid point to the spot data site. """ i_latitude = cube.coord(iname).nearest_neighbour_index(latitude) j_longitude = cube.coord(jname).nearest_neighbour_index(longitude) return i_latitude, j_longitude
711cb2a7bd1dd85fc69992df87c972b5557e79ce
53,350
def transform_to_bytes(content: str): """ Transform a string to bytes Parameters ---------- - content (str): The string to convert Raises ------ ValueError: the string is not valid Returns ------- - bytes: the converted string in bytes """ if isinstance(content, bytes): return content if isinstance(content, str): return content.encode(encoding='utf-8') else: raise ValueError(("In order to transform the object to bytes " "you need to provide a string."))
cc3084c84444519f04c42a4f721fdd2272852deb
661,363
def mk_url(address, port, password): """ construct the url call for the api states page """ url = '' if address.startswith('http://'): url += address else: url += 'http://' + address url += ':' + port + '/api/states?' if password is not None: url += 'api_password=' + password return url
ff2a5714d447407a0fa8192eb2f61c9148ca23cb
248,015
def _group(template, resource, action, proid): """Render group template.""" return template.format( resource=resource, action=action, proid=proid )
0719b662bfc444188d268f809b47f2b57187af60
469,356
def filter_single_end_samples(df): """Filter single end samples from a dataframe Parameters ---------- df: DataFrame Dataframe as obtained from SRAb.sra_convert() Returns ------- df: DataFrame DataFrame with only single end samples """ df = df[~df["library_strategy"].str.contains("PAIRED")] return df
f91107939e6e9b8d3618ee84b0097d1eb1bba218
308,644
def reverse_labels_to_dict(labels: str) -> dict: """ Converts labels to dict, mapping a number to label Args: labels: Returns: dictionary of number -> label """ return dict([(i, c) for (i, c) in enumerate(labels)])
9a6d982f02796dece2f1ffbb13bf563e6211c1f1
328,629
def xor(_a_: bool, _b_: bool) -> bool: """XOR logical operation. :param _a_: first argument :param _b_: second argument :return: xor-ed value """ #pylint: disable-msg=superfluous-parens return bool((not(_a_) and _b_) or (_a_ and not(_b_)))
f13a98b29471e928b1758131639ea574be4ebb00
233,707
def expandtabs(s, tabstop=8, ignoring=None):
    """Expand tab characters `'\\t'` into spaces.

    :param tabstop: number of space characters per tab (defaults to the
                    canonical 8)
    :param ignoring: if not `None`, the expansion will be "smart" and go from
                     one tabstop to the next. In addition, this parameter
                     lists characters which can be ignored when computing the
                     indent.
    """
    if '\t' not in s:
        return s
    if ignoring is None:
        return s.expandtabs(tabstop)

    outlines = []
    for line in s.split('\n'):
        if '\t' not in line:
            outlines.append(line)
            continue
        p = 0
        s = []
        for c in line:
            if c == '\t':
                n = tabstop - p % tabstop
                s.append(' ' * n)
                p += n
            elif not ignoring or c not in ignoring:
                p += 1
                s.append(c)
            else:
                s.append(c)
        outlines.append(''.join(s))
    return '\n'.join(outlines)
90220575225518c6db0082c157901dfd19448b9f
647,379
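Illustrative calls for the `expandtabs` record above (not part of the dataset):

expandtabs("a\tb", tabstop=4)                # -> 'a   b' (3 spaces to the next tab stop)
expandtabs(">\tb", tabstop=4, ignoring='>')  # -> '>    b' ('>' is not counted toward the indent)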
def phredqual_to_prob(phredqual): """ Turns a phred quality into an error probability >>> '%.2f' % phredqual_to_prob(20) '0.01' """ assert isinstance(phredqual, int) #assert phredqual >= 0, ("Phred-quality must be >= 0, but is %s" % phredqual) # also works for phredqual=0 return 10**(-phredqual/10.0)
e1367f01146ca86c6badd244c24a8b4e66f2e69c
254,451
def unpack_uncertainty_parameter(u): """Unpack uncertainty parameter (integer or single character). See: https://www.minorplanetcenter.net/iau/info/MPOrbitFormat.html Args: u (str): Packed uncertainty parameter. Returns: int: Uncertainty parameter, or -1 if invalid. str: Empty string or letter if uncertainty parameter was invalid. """ try: u = int(u) u_flag = '' except ValueError: u_flag = u u = -1 return u, u_flag
17260dc1ee0b1bddde52431d61cf4b8ea8197816
630,495
def _prepare_response_data(message, data=None): """Prepare response output. Returns a simple dict containing a key `message` and optionally a key `data`. """ output = {"message": message} if data is not None: output["data"] = data return output
95b342dd13fb4a8be40b26c8291d9ece35378d43
405,328
def do_not_convert_internal(f): """Decorator that marks internal functions which do not need conversion.""" setattr(f, '__ag_compiled', True) return f
dc7ec04d637b4a0a7a4b23a0f61da5824468dcbb
170,499
import time def write_generator_lines(conctimes, concaz, concel, concva, concve, az_flags, el_flags): """ Produces a list of lines in the format necessary to upload to the ACU to complete a generated scan. Params are the outputs of generate. Params: conctimes (list): List of times starting at most recently used time for the ACU to reach associated positions concaz (list): List of azimuth positions associated with times concel (list): List of elevation positions associated with times concva (list): List of azimuth velocities associated with times concve (list): List of elevation velocities associated with times az_flags (list): List of flags associated with azimuth motions at associated times el_flags (list): List of flags associated with elevation motions at associated times """ fmt = '%j, %H:%M:%S' start_time = 10. true_times = [start_time + i for i in conctimes] fmt_times = [time.strftime(fmt, time.gmtime(t)) + ('%.6f' % (t%1.))[1:] for t in true_times] all_lines = [('%s;%.4f;%.4f;%.4f;%.4f;%i;%i\r\n' % (fmt_times[n], concaz[n], concel[n], concva[n], concve[n], az_flags[n], el_flags[n])) for n in range(len(fmt_times))] return all_lines
2fb671e5111a9b8ee7ca7f64f107e69a69a4dc2c
640,485
def read_file(f): """ Read an entire file and return the contents """ with open(f) as f: return f.read()
5fd04024806fc529df539d051ab15da88eaf6562
106,964
def _get_issue_tracker(obj): """Get issue_tracker dict from obj if dict is based on existing tracker""" if not obj: return None ret = obj.issue_tracker if not ret.get('_is_stub'): return ret return None
8e8d490f48d51be88706bf83121c4055c46ac5fd
679,039
import re def find_clades_by_name(tree, terminal=True, contains=[], contains_all=False, pattern=None): """Find clades by their name and whether or not they are terminal clades. Parameters ---------- terminal : bool If True, only the terminal nodes of the `tree` are searched. If False, only the non-terminal nodes are searched. If None, all nodes are searched. contains : iterable of str Collection of strings whose presence will be checked in a clade name. If `contains` is given, then `pattern` should not be given. contains_all: bool If True, all str specified in `contains` must be present in the clade name. If False, any str specified in `contains` gives a name of interest. pattern : str Regular expression pattern to match the clade name. If `pattern` is given then `contains` should not be given. Returns ------- generator expression Iterating over the returned generator will yield clades whose names match the given search criteria. Notes ----- Slightly faster implementation than BioPython's Phylo package find_clades search functionality. """ if contains: logic = all if contains_all else any return (clade for clade in tree.find_clades(terminal=terminal) if clade.name and logic(x in clade.name for x in contains)) elif pattern: pattern = re.compile(pattern) return (clade for clade in tree.find_clades(terminal=terminal) if clade.name and pattern.match(clade.name)) else: return (clade for clade in tree.find_clades(terminal=terminal))
66cf7e92502eaa6ba21d49a1741cc1759a5536a7
586,534
from typing import Dict def flatten_outputs_paging(raw_response: Dict) -> Dict: """ flatten the paging section of the raw_response - i.e removes 'cursors' key. Args: raw_response: response of an API call Returns: outputs dict """ paging: Dict paging = raw_response.get('paging', {}) outputs = raw_response.copy() cursor_before = paging.get('cursors', {}).get('before') cursor_after = paging.get('cursors', {}).get('after') outputs.pop('paging', None) outputs['paging'] = { 'before': cursor_before, 'after': cursor_after, } return outputs
eacf69e8aacd5c8d65979ea3712b72edf3c8a28e
273,333
import json def get_current_version_directory(top_dir): """ The current version of the data parameters and models is kept in the current_version file in the top directory. This function reads this file and returns the current version directories. """ f = open(top_dir + "current_version", "r") directories = json.load(f) f.close() return directories
3b71fe198480451fb7bdbb939f0aef3f16847854
463,988
def max_len_string_encoded(d_rev): """ Calculate maximum length of huffman encodings Args: d_rev: dict encoded --> element Returns: maximum number of characters """ maxs = 0 for x in d_rev: len_bit = len(x) if(len_bit > maxs): maxs = len_bit return maxs
1d3989cb55ba1feda53a6ca1f1bd78ffa2596b32
524,951
import torch


def focal_loss(output, target, gamma=2, alpha=0.25):
    """Compute focal loss for label arrays.

    See https://arxiv.org/abs/1708.02002

    Args:
        output: (tensor) with shape (num_labels, h, w). Each value is a logit
            representing a label (ie. class).
        target: (tensor) with same shape as output. Has one if label is
            present, zero otherwise.

    Returns:
        (tensor) with single float value
    """
    # Add epsilon to avoid overflow when p is 0.0
    epsilon = 0.00001
    p = torch.sigmoid(output) + epsilon
    pt = (1 - target) * (1 - p) + target * p
    alphat = (1 - target) * (1 - alpha) + target * alpha
    bce = -torch.log(pt)
    weights = alphat * (1 - pt).pow(gamma)
    loss_arr = weights * bce
    return loss_arr.sum()
c2071e9c28ad21f5ddd3409d44efd013b0fa960b
483,209
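A minimal usage sketch for the `focal_loss` record above; the shapes are hypothetical and only meant to match the documented (num_labels, h, w) layout:

import torch

logits = torch.randn(3, 4, 4)                     # logits for 3 labels on a 4x4 grid
targets = torch.randint(0, 2, (3, 4, 4)).float()  # binary presence map, same shape
loss = focal_loss(logits, targets, gamma=2, alpha=0.25)
print(loss)  # scalar tensor (sum over all positions)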
import math def dist_2d(x1, y1, x2, y2): """Returns the distance between (x1,y1) and (x2,y2).""" return math.sqrt(math.pow(x2 - x1, 2) + math.pow(y2 - y1, 2))
cea845bf7be56ad14fa5576a320685e260b0166e
592,244
from typing import Dict from typing import List def _custom_text_tokenizer(text: str, lang: str, dictionary_terms: Dict[str, str]) -> List[str]: """Helper function to split text by a comma.""" del lang del dictionary_terms return text.split(',')
8489d1d02e06670938ae73ac650ee0c08c0892ec
60,838
def _abberation_correction(R): """Calculate the abberation correction (delta_tau, in degrees) given the Earth Heliocentric Radius (in AU)""" return -20.4898/(3600*R)
7fea54c0bc6fffc9e8ab40057b011da327fe0bd7
651,479
def unanimous(seq): """ Checks that all values in an iterable object are the same Args: seq: Iterable object Returns bool: True if all values are the same """ it = iter(seq.values()) try: first = next(it) except StopIteration: return True else: return all(i == first for i in it)
57b3164e9c71bd450ba03fe6ff37a53852328b9a
471,879
import tempfile import tarfile def extract_temporary_directory(file_path): """Extract the source tar archive contents to a temporary directory. Positional arguments: file_path (str) -- the file path to the source package tar file """ temporary_directory = tempfile.mkdtemp() source = tarfile.open(file_path) source.extractall(temporary_directory) source.close() return temporary_directory
de8edd5961cf42f13bb7eba31b504ea5ef5c5d73
187,461
def timedelta_seconds(delta):
    """
    Converts the given timedelta to seconds.

    :type delta: timedelta
    :rtype: float
    """
    return delta.days * 24 * 60 * 60 + delta.seconds + \
        delta.microseconds / 1000000.0
a43133a46bbafe1831024685cd1ed1aa40e755f6
170,382
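Usage sketch for the `timedelta_seconds` record above (illustrative only):

from datetime import timedelta

timedelta_seconds(timedelta(days=1, seconds=30, microseconds=500000))  # -> 86430.5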
def answer_yn(question=None): """Prints a simple yes or no question and returns bool""" while True: answer = input(question or 'Are you sure? Y/n').lower() if answer == '' or answer == 'y' or answer == 'yes': return True elif answer == 'n' or answer == 'no': return False print('Please enter a valid answer (Y/n)')
a35d55101e72608de635e13e34edb90a73ba6696
620,924
def bounding_box(X): """ Calculates the boundaries of a given dataset on all sides Parameters: X - a data matrix where each row is a dimensional array of an item to be clustered Returns: Two tuples, where the first contains the minimum and maximum x values, and the second contains the minimum and maximum y values """ xmin, xmax = min(X,key=lambda a:a[0])[0], max(X,key=lambda a:a[0])[0] ymin, ymax = min(X,key=lambda a:a[1])[1], max(X,key=lambda a:a[1])[1] return (xmin,xmax), (ymin,ymax)
98b3b9d1e5f31a2e6375a34e6ba24d4b985ff0ec
618,544
from pathlib import Path def getSubdirs(path): """ Get all subdirectories Args: path (:class:`pathlib.Path`): path to get subdirectories from Returns: :class:`list` """ return [x for x in Path(path).iterdir() if x.is_dir()]
90ad1a8e2c1b6da627ce0916565c3ed80f491dee
536,098
def set_log_range(module, releases, earlier, later, releases_list): """ Set range to get logs for from parsed args or defaults if not given. Args: module(str): Module to get logs for releases(list of str): Releases range earlier(str): Specified start point later(str): Specified end point releases_list(list of str): List of releases from repository Raises: :class:`exception.ValueError`: Module <module> does not have a release <start>/<end> Returns: str, str: Start and end point to list releases for """ if releases and len(releases) == 2: start = releases[0] end = releases[1] elif earlier: start = earlier end = 'HEAD' elif later: start = "" end = later else: start = "" end = 'HEAD' # Check that releases exist if start not in releases_list and start != "": raise ValueError("Module " + module + " does not have a release " + start) if end not in releases_list and end != 'HEAD': raise ValueError("Module " + module + " does not have a release " + end) return start, end
0a8e938c9cfb31c30f9dcc181bdf4ee8757cf6ad
287,293
def convert_traces_mapping(traces_raw, mapping): """ Convert traces activity name using a given mapping Parameters ----------------------- traces: List, List of traces mapping: dict: Dictionary containing activities mapping Returns ----------------------- List of converted traces """ traces = [] for trace in traces_raw: current_trace = [] for act in trace: current_trace.append(mapping[act]) traces.append(current_trace) return traces
13f6513cfc0054a96c3cb298c2551ba1f9871780
405,404
from typing import Dict from typing import Any from typing import List def handle_foreign_rows( card_data: Dict[str, Any], card_uuid: str ) -> List[Dict[str, Any]]: """ This method will take the card data and convert it, preparing for SQLite insertion :param card_data: Data to process :param card_uuid: UUID to be used as a key :return: List of dicts ready for insertion """ foreign_entries = [] for entry in card_data["foreignData"]: foreign_entries.append( { "uuid": card_uuid, "flavorText": entry.get("flavorText", ""), "language": entry.get("language", ""), "multiverseId": entry.get("multiverseId", ""), "name": entry.get("name", ""), "text": entry.get("text", ""), "type": entry.get("type", ""), } ) return foreign_entries
b723cbe82d994be9a0102d8ee4da89d436bdbc93
453,972
from typing import Type from typing import Any from typing import Optional from typing import Dict import dataclasses def _has_default_value(cls: Type[Any], field: str) -> Any: """Returns if the given field of a class has a default value (requires a @dataclass-based class, others will silently return 'False') """ dataclass_fields: Optional[Dict] = getattr( cls, "__dataclass_fields__", None) if dataclass_fields: field_entry: Optional[dataclasses.Field] = dataclass_fields.get( field, None) if field_entry: has_value = field_entry.default is not dataclasses.MISSING has_factory = field_entry.default_factory is not dataclasses.MISSING return has_value or has_factory return False
96ab58e826abd3c35edb29ccae9eb3e612a62fbb
150,478
def my_dummy_dir(tmpdir_factory): """An empty directory.""" dummy_dir = tmpdir_factory.mktemp('singlecell_dummy') return dummy_dir
aef5a2ff31127eec2d7a76b1c3b8e2d9678ebe1f
529,615
def register_car(registration, company, plate): """ Registers a new car. NOTE: this function should not modify the registration dictionary that is given, instead it should create a new dictionary. NOTE: this function should not introduce duplicates in the registration system. Specifically, if a car is already registered with the given company it should return an identical registration information. If the car is registered with a different company it should remove the first registration. NOTE: if the company is not listed in the dictionary, it should not introduce it. Instead it should just return an identical registration. E.g., register_car({'Stark Industries': ['IRNMN']}, 'Stark Industries', 'JARVIS') is {'Stark Industries': ['IRNMN', 'JARVIS']} E.g., register_car({'Stark Industries': ['IRNMN']}, 'Wayne Enterprises', 'IMBTMN') is {'Stark Industries': ['IRNMN']} :param registration: preexisting registration information :param company: company to register the car for :param plate: license plate of the car to register :return: new registration information dictionary with added registration :rtype: dict """ my_output_dict = {**registration} my_company_list = [] my_plate_list = [] my_dual_list = [] for my_key, my_value in registration.items(): my_company_list.append(my_key) my_plate_list.append(my_value) my_dual_list.append([my_key, my_value]) my_plate_list = [x for sub in my_plate_list for x in sub] if company in my_company_list and plate not in my_plate_list: my_output_dict[company].append(plate) elif company in my_company_list and plate in my_plate_list: for my_list_item in my_dual_list: if my_list_item[0] == company: my_list_item[1].append(plate) for my_list_item in my_dual_list: if plate in my_list_item[1] and my_list_item[0] != company: my_list_item[1].remove(plate) for my_row in my_dual_list: my_output_dict[my_row[0]] = list(set(my_row[1])) return my_output_dict
a749c719152ead6c6cad83aa83c82f6efd1bf3c9
280,082
def convert_keys_to_string(dictionary): """Recursively converts dictionary keys to strings.""" if not isinstance(dictionary, dict): return dictionary return dict((str(k), convert_keys_to_string(v)) for k, v in dictionary.items())
8da7c34e48944b6f9c36e13e73f3cbf1406c3a0e
524,325
def find_project(testrun_url): """ Find a project name from this Polarion testrun URL. :param testrun_url: Polarion test run URL :returns: project name eg "CEPH" or "ContainerNativeStorage" """ url_suffix = testrun_url[59:] index = url_suffix.index('/') return url_suffix[:index]
a19019846fa084398a4967cb99417e7aebc90499
5,230
import torch def get_masks(slen, lengths, causal): """ Generate hidden states mask, and optionally an attention mask. """ assert lengths.max().item() <= slen bs = lengths.size(0) alen = torch.arange(slen, dtype=torch.long, device=lengths.device) mask = alen < lengths[:, None] # attention mask is the same as mask, or triangular inferior attention (causal) if causal: attn_mask = alen[None, None, :].repeat(bs, slen, 1) <= alen[None, :, None] else: attn_mask = mask # sanity check assert mask.size() == (bs, slen) assert causal is False or attn_mask.size() == (bs, slen, slen) return mask, attn_mask
f71942a5d089372e8bbff6d08b8dab9df86e375a
668,238
from typing import Any def minmax(val: Any, minval: Any, maxval: Any) -> Any: """Return bound for the value within min and max""" return max(min(val, maxval), minval)
2c92aa5845e633be747303a2546ef8f81f3c3551
130,654
from typing import Counter def get_number_per_class_label(arr): """ Function that returns the total number of images per (cloud) class labels, ignoring the missing entries. Parameters ---------- arr : array-like The Image_Label column of the training.csv (after dropping the missing entries, where the entries are str dtype in the form "XXX.jpg_(class_name)". Here class_name is {Fish, Flower, Gravel, Sugar} Returns ------- number_per_labels : Counter A Counter object with the total number of images per (cloud) class labels. Examples -------- >>> arr = pd.DataFrame({'A': ["1.jpg_Fish", "1.jpg_Sugar", "2.jpg_Flower"]}) >>> get_number_per_class_label(arr.A) Counter({'Fish': 1, 'Sugar': 1, 'Flower': 1}) """ class_per_entries = arr.apply(lambda x: str.split(x, '_')[1]) number_per_labels = Counter(class_per_entries) return number_per_labels
08df218f786dcc5d1d6c90dcad0fee1e3d715540
346,659
import json def is_blacklisted(address): """ Checks if a particular address is known to be Fraudulent :param str address: Address to check for in blacklist :return: Boolean of whether the address is in the black list """ with open('blacklist.json') as blacklist_f: blacklist = json.load(blacklist_f) for entry in blacklist: if entry['address'] == address: return True return False
3477a341b43e26a68f03a88385ab72959f59b78a
415,008
def count_mismatches_before_variant(reference_prefix, cdna_prefix): """ Computes the number of mismatching nucleotides between two cDNA sequences before a variant locus. Parameters ---------- reference_prefix : str cDNA sequence of a reference transcript before a variant locus cdna_prefix : str cDNA sequence detected from RNAseq before a variant locus """ if len(reference_prefix) != len(cdna_prefix): raise ValueError( "Expected reference prefix '%s' to be same length as %s" % ( reference_prefix, cdna_prefix)) return sum(xi != yi for (xi, yi) in zip(reference_prefix, cdna_prefix))
59754e259de5331f8b330bfa4b5651544ee0d3d4
654,788
def parse_u24le(data):
    """ Parse a 24-bit little endian number """
    return data[0] | (data[1] << 8) | (data[2] << 16)
fceed0c2e5ea717df3daee1cc652df0f8b9a7048
107,789
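Illustrative calls for the `parse_u24le` record above (not part of the dataset):

parse_u24le(b'\x01\x00\x00')            # -> 1
parse_u24le(b'\xff\xff\xff')            # -> 16777215
parse_u24le(bytes([0x34, 0x12, 0x00]))  # -> 4660 (0x1234, least significant byte first)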
import re def tokenize_per_cluster_args(args, nclusters): """ Seperate per cluster arguments so that parsing becomes easy Params: args: Combined arguments nclusters(int): total number of clusters Returns: list of lists: Each cluster conf per list ex: [[cluster1_conf], [cluster2_conf]...] """ per_cluster_argv = list() multi_cluster_argv = list() common_argv = list() cluster_ctx = False regexp = re.compile(r"--cluster[0-9]+") index = 0 for i in range(1, nclusters + 1): while index < len(args): if args[index] == f"--cluster{i}": cluster_ctx = True elif regexp.search(args[index]): cluster_ctx = False break if cluster_ctx: per_cluster_argv.append(args[index]) else: common_argv.append(args[index]) index = index + 1 multi_cluster_argv.append(per_cluster_argv) per_cluster_argv = [] return multi_cluster_argv, common_argv
be4c8d0ef01a2d2431f46434bd1ca88127b75cb6
19,640
def integrate_euler_explicit(x_t, dx_dt, dt): """ Explicit euler integration x(t+1) = x(t) + dx/dt * dt :param x_t: known value at timestep t :param dx_dt: derivative dx/dt :param dt: timestep :return: x(t+1); solution for the time t+1 """ x_tp1 = x_t + dx_dt * dt return x_tp1
862feb02512142da98929aedc97707853b41242a
39,812
import itertools def pairwise(iterable): """ Takes an Iterable and returns an iterable of tuples that pair each of the elements with its respective successor. pairwise(range(5)) becomes [(0, 1), (1, 2), (2, 3), (3, 4)] """ t1, t2 = itertools.tee(iterable, 2) next(t2) return zip(t1, t2)
b105ad54596b2f7238def69b2056e25470b75da9
392,079
def profile_likelihood(likelihood, *dims):
    """
    Compute the profile (log)likelihood for one or more parameters.

    Parameters
    ----------
    likelihood : n-d array_like
        (log)likelihood grid
    *dims : int, int, ...
        parameters (grid dimensions) for which to compute the profile
        (log)likelihood

    Returns
    -------
    1-d or n-d array
        values of the profile (log)likelihood on the regular grid. The number
        of dimensions depends on the number of `*dims` arguments given.
    """
    Lp = likelihood.copy()
    ax = 0
    for d in range(likelihood.ndim):
        if d in dims:
            ax += 1
            continue
        Lp = Lp.max(axis=ax)
    return Lp
d6d7a305b41265041d1dbced095854ae76357d5d
348,435
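A usage sketch for the `profile_likelihood` record above; it assumes the grid is a NumPy array, which is consistent with the `.copy()`, `.ndim`, and `.max(axis=...)` calls in the function:

import numpy as np

# Hypothetical 3-D log-likelihood grid over parameters 0, 1 and 2;
# profile over parameter 0 by maximising out the other two.
logL = np.random.rand(5, 6, 7)
prof = profile_likelihood(logL, 0)
prof.shape  # -> (5,)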
def binary_search(lst: list, value: object) -> int:
    """
    Return the index <i> of the first occurrence of <value> in the list <lst>,
    else return -1.

    Precondition: assume that the list is sorted
    """
    start = 0
    end = len(lst) - 1
    while start <= end:
        mid = (start + end) // 2
        if lst[mid] < value:
            start = mid + 1
        else:
            end = mid - 1
    if start == len(lst) or lst[start] != value:
        return -1
    else:
        return start
d9f5c0ebdf158a2639042a8238850c50e010a3e3
280,310
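Illustrative calls for the `binary_search` record above (not part of the dataset):

binary_search([1, 2, 2, 2, 5, 7], 2)  # -> 1 (index of the first occurrence of 2)
binary_search([1, 2, 2, 2, 5, 7], 4)  # -> -1 (value not present)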
def _find_player(player_index, summary): """Find player in summary data.""" player = [player for player in summary['players'] if player['index'] == player_index] if not player: raise ValueError('player not found') return player[0]
f821d3384986feb98aafb2f3f0cfaa6a056920c7
421,530
def BHS_standard(err): """ Computes the BHS Standard metric Arguments: err {array} -- array of absolute error Returns: tuple -- tuple of percentage of samples with <=5 mmHg, <=10 mmHg and <=15 mmHg error """ leq5 = 0 leq10 = 0 leq15 = 0 for i in range(len(err)): if(abs(err[i])<=5): leq5 += 1 leq10 += 1 leq15 += 1 elif(abs(err[i])<=10): leq10 += 1 leq15 += 1 elif(abs(err[i])<=15): leq15 += 1 return (leq5*100.0/len(err), leq10*100.0/len(err), leq15*100.0/len(err))
a2a71a5fea63c7bc6965d54a7d7dcae7333e1838
53,596
def get_shape(obj):
    """
    Get the shape of a :code:`numpy.ndarray` or of a nested list.

    Parameters:
        obj: The object of which to determine the shape.

    Returns:
        A tuple describing the shape of the :code:`ndarray` or the nested
        list, or :code:`()` if obj is not an instance of either of these
        types.
    """
    if hasattr(obj, "shape"):
        return obj.shape
    elif type(obj) == list:
        if obj == []:
            return (0,)
        else:
            return (len(obj),) + get_shape(obj[0])
    else:
        return ()
d02d755f4b9e4a4dbde6c87ddfe0b5729a8c158e
705,152
from typing import Dict from typing import List def get_unique_relevant_docs_count( question_id_to_docs: Dict[str, List[dict]], relevance_threshold: float ) -> float: """Given a dictionary mapping a question id to a list of docs, find the number of unique relevant docs.""" unique_relevant_docs = set() for qid, docs in question_id_to_docs.items(): for doc in docs: if len(doc['answer']) > 0 and doc['f1'] >= relevance_threshold: unique_relevant_docs.add(doc['docid']) return len(unique_relevant_docs)
90df20570d18dc454eaf7265debe464f1cc3e6c2
297,145
def bubble_sort(arr):
    """
    Passes over a list comparing two elements and repeats with a smaller,
    sliced off end of the list each iteration until sorted.
    """
    for i in range(len(arr)):
        for j in range(len(arr) - i - 1):
            # swap if left element is greater than right element
            if (arr[j] > arr[j+1]):
                tmp = arr[j]
                arr[j] = arr[j+1]
                arr[j+1] = tmp
    return arr
ec4351cbf497623c60e283a3de0f9f29f3b20320
601,895
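Usage sketch for the `bubble_sort` record above (illustrative only):

bubble_sort([5, 1, 4, 2, 8])  # -> [1, 2, 4, 5, 8] (sorts in place and returns the list)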
def read_file(file_name): """ This function returns the contents of the file named file_name. """ with open(file_name,"r") as file: return file.read()
c6a52b69c611f6d093a59b709968562093e438b1
548,412
def searchfile(jobs, machines, path="../tests/"):
    """
    Given a number of jobs and a number of machines that define the complexity
    of a problem, this method looks for the file with the correct benchmarks.

    If a file for exactly the specified number of jobs and machines is not
    found, an exception is raised.
    """
    try:
        filename = f"t_j{jobs}_m{machines}.txt"
        f = open(path + filename)
        f.close()
        return filename
    except OSError:
        raise Exception("Benchmarks with required characteristics not found.")
ead4284c6cee76028b144053797cd6403bf984b5
332,860
def base_content(seq: str, base: str) -> float:
    """
    Compute the fraction of <base> in the <seq>

    Args:
        seq: DNA sequence
        base: 'A' gives the A content; 'GC' gives the GC content
    """
    seq = seq.upper()
    base = base.upper()
    ret = 0
    for b in set(base):
        ret += seq.count(b) / len(seq)
    return ret
fa7ef460db8c86f1ffbe220496e6f325bc8e708c
322,567
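Illustrative calls for the `base_content` record above (not part of the dataset):

base_content("ATGCGC", "GC")  # -> 0.666... (4 of 6 bases are G or C)
base_content("atgc", "a")     # -> 0.25 (comparison is case-insensitive)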
def _lookup_attributes(glyph_name, data): """Look up glyph attributes in data by glyph name, alternative name or production name in order or return empty dictionary. Look up by alternative and production names for legacy projects and because of issue #232. """ attributes = ( data.names.get(glyph_name) or data.alternative_names.get(glyph_name) or data.production_names.get(glyph_name) or {} ) return attributes
2706f0da8d1fc780c7afb8cf4251d24b09e0fe88
82,713
def _coo_gen_triples(A): """Converts a SciPy sparse matrix in **Coordinate** format to an iterable of weighted edge triples. """ row, col, data = A.row, A.col, A.data return zip(row, col, data)
05cc7702e2f19379ead29149e9610b2ac1c95214
439,904
def _extract_match_id(match_json): """ Extract the match_id from json response. { "matches": [ { "id": "0000...", "match_id": 1313, "similarity": "001000" } ] } """ matches = match_json.get('matches', []) if len(matches) == 1: return matches[0].get('match_id', None) return None
60109a70aceae4f5074338b268f498da56af65f0
635,362
import re def quoted_str(x): """ Strips the string representation of ``x`` to make it suitable for a ``dot2tex`` string, and especially a node label (``dot2tex`` gets confused by newlines, and braces) EXAMPLES:: sage: sage.graphs.dot2tex_utils.quoted_str(matrix([[1,1],[0,1],[0,0]])) '[1 1]\\n\\\n[0 1]\\n\\\n[0 0]' sage: print(sage.graphs.dot2tex_utils.quoted_str(matrix([[1,1],[0,1],[0,0]]))) [1 1]\n\ [0 1]\n\ [0 0] """ return re.sub("\n",r"\\n\\"+"\n", re.sub("\"|\r|}|{","", str(x)))
e9e5ef1eedb8c669756606e2c771df5cc69b6c90
602,492
def mjpeg_info_cmp(x,y): """ Comparison function for sorting a list of (camera_name, camera_info) pairs. """ name_x = x[0] name_y = y[0] value_x = int(name_x.replace('camera_', '')) value_y = int(name_y.replace('camera_', '')) if value_x > value_y: return 1 elif value_y > value_x: return -1 else: return 0
31d98998bd3ece11a591b841505d50e67af68182
62,060
import six def is_valid(value): """Validation to ensure timestamp is a string. """ if not isinstance(value, six.string_types): raise ValueError('Timestamp should be a String') return True
20079742e5cd169ce8c21d1799df36c76f774d4d
343,707
def empty_table(unique_database, request): """Create an empty table within the test database before executing test. The table will have the same name as the test_function itself. Setup and teardown of the database is handled by the unique_database fixture. Args: unique_database: pytest fixture defined in conftest.py request: standard pytest request fixture Returns: fq_table_name (str): the fully qualified name of the table: : dbname.table_name """ table_name = request.node.name fq_table_name = '.'.join([unique_database, table_name]) stmt = "CREATE TABLE %s (i integer, s string)" % fq_table_name request.instance.execute_query_expect_success(request.instance.client, stmt, query_options={'sync_ddl': 1}) return fq_table_name
5bbb079be2e2c1db2e55843937ac129e7317b3e3
679,238
def timedelta_to_hms(value): """ Return hours, minutes, seconds from a timedelta object """ hours, remainder = divmod(int(value.total_seconds()), 3600) minutes, seconds = divmod(remainder, 60) return hours, minutes, seconds
a9452da23670d7e5b8933c1728b956ea28aa8215
370,182
def get_fleet_instances(c, fleet_name): """Get all Spot Fleet instances for a Spot Fleet request.""" instances = [] next_token = None while True: if next_token is None: resp = c.describe_spot_fleet_instances(SpotFleetRequestId=fleet_name) else: resp = c.describe_spot_fleet_instances(SpotFleetRequestId=fleet_name, NextToken=next_token) instances.extend(resp['ActiveInstances']) next_token = resp.get('NextToken', None) if next_token is None: break return instances
0379889f2aafbc8663b4ce204ff87f361f9df311
343,579
def _attachment() -> str: """Returns a query term matching messages that have attachments.""" return 'has:attachment'
7a7882faf7b97e026da82379506ac71f6d2839e6
179,585
from typing import OrderedDict def display(choices, value): """ Get the display value for the selected choice. ie. get_FIELD_display() :type choices: dict|tuple :param choices: FIELD_CHOICE :param value: the value of the tuple :return: string """ if not choices: return value if isinstance(choices, tuple): choices = OrderedDict(choices) return choices.get(value, value)
728fd82cf6b6e9021b03842eef8e70940e30c734
152,488
import ipaddress def is_ipv6_address(ip): """Returns True/False if a string is a valid IPv6 address.""" ip = str(ip) try: if ipaddress.ip_address(ip).version == 6: return True else: return False except ValueError as e: print(f"{e}")
dc8a6bda250dbe8f3197577e7b93dc4cc58852d0
380,307
def memory_usage(job): """ Perform memory usage verification. :param job: job object :return: exit code (int), diagnostics (string). """ exit_code = 0 diagnostics = "" return exit_code, diagnostics
80ebee64aacd30a79a0a879933b9c49abf3066d0
359,893
def is_hidden_file(filename): """Does the filename start with a period?""" return filename[0] == '.'
534d3afe9b45393fb35866014b11eac55100d6ae
667,143
import yaml import json def get_json_data_from_file(filename): """Return json object from a json or yaml file""" if filename.lower().endswith(".yaml"): with open(filename, "rb") as fp: yaml_data = fp.read() # io.BytesIO(json.dumps(yaml.load(yaml_data, Loader=yaml.FullLoader)).encode()) return yaml.load(yaml_data, Loader=yaml.FullLoader) else: with open(filename, "r") as f: return json.load(f)
a685b9ec8c3c335944b1188970e3647ab4a545a3
144,383
from typing import Dict import yaml def read_config(path: str) -> Dict: """Load config based on provided filepath. Param --------- path: str Path to provided file. Returns --------- Dict configuration file in a dictionary form. """ with open(path, "rb") as confile: config = yaml.safe_load(confile) return config
6df96effbdf3bf39f8309994b4c44cd8f1e4f5a7
114,661
def _maybeEncode(someStr): """ Encode `someStr` to ASCII if required. """ if isinstance(someStr, str): return someStr.encode('ascii') return someStr
2b284e7aaa8fa2c2dcd0627a4b97e1fd5ca1bbfe
376,845
def length(value): """ Find the length of a value :type value: variable :param value: The value to find the length of """ # Try to return the length return len(value)
a7d842f432743eb8cbab137d19fb4a9937af7021
665,516
def ifstr(columnname, repls): """Returns a mipmap function string with encapsulated if statements for replacing given values of a column with predefined ones. This is used in a categorical/nominal column type Arguments: :param columnname: the column name(str) :param repls: list with Replacement namedtuples Replacement('source', 'target') """ local_repls = repls.copy() if len(repls) == 1: return 'if({} == \"{}\", \"{}\", null())'.format(columnname, repls[0].source, repls[0].target) elif len(repls) > 1: current = local_repls.pop(0) return 'if({} == \"{}\", \"{}\", {})'.format(columnname, current.source, current.target, ifstr(columnname, local_repls))
430103a764efe8bc4bb4a0d5c89b5dad9fdebe6e
213,112
def _find_physio(subject, session, bids_path): """Get physilogy data from BIDS dataset.""" physio_path = list( bids_path.glob(f"**/sub-{subject}_ses-{session}*_physio.tsv.gz") ) if physio_path and len(physio_path) == 1: return physio_path[0] else: raise ValueError("No associated physiology file")
a5938d6994f8898bdd204ac974f7e7d3374e3ebe
687,961
import re def delete_last_cell_if_empty(text: str) -> str: """Delete last cell in file if it contains no code and no comment. --- before --- # %% some_code = 'here' # %% --- after --- # %% some_code = 'here' """ # \Z matches only at end of string return re.sub(r"(?m)\s+^# %%\s*\Z", r"\n", text)
aae453c28d8784cfb5b91ca33d1a4ab91fd4501f
439,316
from urllib.request import urlopen import gzip def tomso_open(filename, *args, **kwargs): """Wrapper function to open files ending with `.gz` with built-in `gzip` module or paths starting with `http` using `urllib.request.urlopen`, otherwise use normal open. `.gz` and normal modes take the same arguments as `open` and `gzip.open` and return a file object.""" if filename.startswith('http'): return urlopen(filename) elif filename.lower().endswith('.gz'): return gzip.open(filename, *args, **kwargs) else: return open(filename, *args, **kwargs)
73e27a7e2b6ac928f3f368b129c115efb3c9ec15
121,680
def read_secret(session, *, secret_id): """ Retrieve a secret from Secrets Manager. """ secrets_client = session.client("secretsmanager") return secrets_client.get_secret_value(SecretId=secret_id)["SecretString"]
74d388f58c1778bd30fb9b2d991161d0600a68d6
467,846
def is_string_int(string): """ Checks if the string is a valid representation of an integer Examples -------- > is_string_int('12') > True > > is_string_int('a') > False """ try: int(string) return True except ValueError: return False
f857ae0ed69d6f12ce1a6f5984811b822bd94fe2
503,386
def add_matrices2D(mat1, mat2): """Adds two matrices element-wise""" rowLen1 = len(mat1) colLen1 = len(mat1[0]) rowLen2 = len(mat2) colLen2 = len(mat2[0]) if (rowLen1 != rowLen2) or (colLen1 != colLen2): return None ans = [[0] * colLen1 for i in range(rowLen1)] for i in range(rowLen1): for j in range(colLen1): ans[i][j] = mat1[i][j] + mat2[i][j] return ans
fc0feb4eaec2e856ff4a25fc763d130487e14fda
455,941
def to_resp_array(*parts: bytes):
    """Builds a RESP request"""
    request = bytearray(b'*%d\r\n' % len(parts))
    for part in parts:
        request += b'$%d\r\n' % len(part)
        request += b'%b\r\n' % part
    return bytes(request)
9b0368da6de82a4c2f280fb64d3bc4974c87ff94
306,969
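Usage sketch for the `to_resp_array` record above (illustrative only), encoding a Redis-style command as a RESP array of bulk strings:

to_resp_array(b'SET', b'key', b'value')
# -> b'*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n'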
def city_country(name, country): """Return neatly formatted city's name and country it is in.""" return f"{name.title()}, {country.title()}"
a313372109deb25595f1e3428b725c030390d763
468,822