Columns: content (string, lengths 39 to 9.28k), sha1 (string, length 40), id (int64, values 8 to 710k)
def get_app_name(obj): """ returns the app name of an object """ return type(obj)._meta.app_label
a6a32c5986bfc87bd2c892e8488210199c33c3b6
425,392
def get_pres_name(da): """ Returns name of pressure dimension in input array Author: Dougie Squire Date: 03/03/2018 Parameters ---------- da : xarray DataArray Array with coordinate corresponding to pressure Returns ------- name : str Name of dimension corresponding to pressure Examples -------- >>> A = xr.DataArray(np.random.normal(size=(2,2,2,2,2)), ... coords=[('lat', np.arange(2)), ('lon', np.arange(2)), ... ('depth', np.arange(2)), ('level', np.arange(2)), ... ('pfull', np.arange(2))]) >>> doppyo.utils.get_pres_name(A) 'pfull' """ if 'pfull' in da.dims: return 'pfull' elif 'phalf' in da.dims: return 'phalf' else: raise KeyError('Unable to determine pressure dimension') pass
417a74d4becb03ac895a8beb542f35fe6a90a89f
504,661
import difflib def file_compare(fn1, fn2): """ Compare two cQASM files and raise an AssertionError if they are different, ignoring the # comment block that the files start with. The assertion error contains a diff of the files. """ with open(fn1) as f, open(fn2) as g: flines = list(f.readlines()) while flines and flines[0].startswith('#'): flines.pop(0) glines = list(g.readlines()) while glines and glines[0].startswith('#'): glines.pop(0) d = difflib.Differ() diffs = [x for x in d.compare(flines, glines) if x[0] in ('+', '-')] if diffs: # all rows with changes raise AssertionError("Files not equal\n"+"".join(diffs)) else: return True
b8f62278d25ee38116bb061fdc0090efb8e63396
499,858
def Geq(a, b): """ Create an SMT greater-than-or-equal-to. See also the __ge__ overload (>= operator) for arithmetic SMT expressions. >>> x, y = Ints('x y') >>> Geq(x, y) x >= y """ return a >= b
28b69802fe88bd74b9e8e2cbb5868c05900876b6
192,178
def _get_reward(old_error, new_error): """Return RL agent reward. The reward is the reduction in error (old_error - new_error), plus a small term proportional to the remaining error so that outputs with higher error are prioritized. """ return (old_error - new_error) + 0.2 * new_error
a6413cc700af343a11e62366395db9cc2bbda70b
117,248
def finish_figure(fig, title=None, hspace=None, wspace=None): """Add finishing touches to a figure. This function can be used to add a title or adjust the spacing between subplot panels. The subplots will also be given a tight layout. Parameters ---------- fig: The figure object to update. title: str The title of the figure. Underscores are automatically modified so LaTeX doesn't complain. hspace: float The amount of vertical space between subplots. wspace: float The amount of horizontal space between subplots. """ if title: fig.suptitle(title.replace("_", r"\_")) fig.tight_layout(rect=[0.015, 0.015, 0.985, 0.985]) if hspace is not None: fig.subplots_adjust(hspace=hspace) if wspace is not None: fig.subplots_adjust(wspace=wspace) return fig
a07d419bc3263c4de13c33c27ffbf4343f0aa53a
546,635
import socket def fqdn_to_ip(hostname): """Provides the IP address of a resolvable name on the machine it is running from. There are many reasons that a valid FQDN may not be resolvable, such as a network error from your machine to the DNS server, an upstream DNS issue, etc. Args: hostname (str): An FQDN that may or may not be resolvable. Returns: ip (str): The IP Address of a valid FQDN. Example: >>> from netutils.dns import fqdn_to_ip >>> from netutils.ip import is_ip >>> is_ip(fqdn_to_ip("google.com")) True >>> Raises: socket.gaierror: If FQDN is not resolvable, leverage is_fqdn_resolvable to check first. """ # The data structure is complex, only require the first item, and drill down from there. return socket.getaddrinfo(hostname, 0)[0][4][0]
1790b21ecb21ea7675789f89e9b1d2303ce76d33
487,569
def _normalize_validate(validate): """ Coerces the validate attribute on a Marshmallow field to a consistent type. The validate attribute on a Marshmallow field can either be a single Validator or a collection of Validators. :param Validator|list[Validator] validate: :rtype: list[Validator] """ if callable(validate): return [validate] else: return validate
85116c071c00914334279f25ed991d99549894ff
277,136
import requests import time def request_json_with_backoff(ontology_url): """Retrieve an ontology listing from EBI OLS returns JSON payload of ontology, or None if unsuccessful """ # add timeout to prevent request from hanging indefinitely response = requests.get(ontology_url, timeout=60) # inserting sleep to minimize 'Connection timed out' error with too many concurrent requests time.sleep(0.25) if response.status_code == 200: return response.json() else: return None
097c94b38428cf9647624c4e8f183baac542359b
281,511
def FIND(find_text, within_text, start_num=1): """ Returns the position at which a string is first found within text. Find is case-sensitive. The returned position is 1 if within_text starts with find_text. Start_num specifies the character at which to start the search, defaulting to 1 (the first character of within_text). If find_text is not found, or start_num is invalid, raises ValueError. >>> FIND("M", "Miriam McGovern") 1 >>> FIND("m", "Miriam McGovern") 6 >>> FIND("M", "Miriam McGovern", 3) 8 >>> FIND(" #", "Hello world # Test") 12 >>> FIND("gle", "Google", 1) 4 >>> FIND("GLE", "Google", 1) Traceback (most recent call last): ... ValueError: substring not found >>> FIND("page", "homepage") 5 >>> FIND("page", "homepage", 6) Traceback (most recent call last): ... ValueError: substring not found """ return within_text.index(find_text, start_num - 1) + 1
c93647bee7dfd75da8cfb28ef1cdd3d78361e47f
432,338
def get_region_index(nodes): """Get region index""" return nodes['NEM_REGION'].unique().tolist()
f4265c2803721eba6cd5b3b260684716b3e5cd4a
574,451
import re def postprocess_output(text, max_length, stop_string, output_regex): """ Modify model output to satisfy stop_string and output_regex keywords. Args: text: A string or list of strings containing model outputs. max_length: Model output will be truncated to be at most this length. stop_string: Model output will be truncated to the shortest string which includes stop_string. If None, no truncation will be performed. e.g. if stop_string='.', the model output "The apple is on the ground. What is" will be truncated to "The apple is on the ground." before being returned. output_regex: Rather than returning the full output, return the first match to the python regular expression output_regex in the output string. If there is no match, an empty string will be returned. If output_regex=None, no regular expression matching will be performed. e.g. if output_regex=r"\\d+", and the model output is "= 42. 7*7 = 49. 7*8 =", then "42" will be returned. Returns: A string or list of strings, all processed as described for each argument. """ if isinstance(text, list): return [ postprocess_output(mo, max_length, stop_string, output_regex) for mo in text ] # Ensure it is a string (will convert from bytes, ... as needed) if not isinstance(text, str): text = str(text, "utf-8") # truncate at max_length if max_length: text = text[:max_length] # Remove all text after any stop_string if stop_string: index = text.find(stop_string) if index > 0: text = text[: index + len(stop_string)] # extract substring matching regex (empty string for no match) if output_regex: _text = text text = next(iter(re.findall(output_regex, text)), "") assert ( not type(text) is tuple ), f'Regex {output_regex} returned multiple matching groups when applied to string {_text}. Try using non-capturing groups, by starting regex groups with ?: (e.g. "(stuff)" -> "(?:stuff)").' return text
6f95fd488ad2924024acfe2bd45ffdc869d3a252
492,395
import uuid def random_filename(instance, filename): """ Generates random filename and return it """ extension = filename.split(".")[-1] return "{}.{}".format(uuid.uuid4(), extension)
95d384d4c3a51374949d24578dece28d22b091ce
219,116
def jarManifest(target, source, env, for_signature): """Look in sources for a manifest file, if any.""" for src in source: contents = src.get_text_contents() if contents[:16] == "Manifest-Version": return src return ''
83bdb3cd01e83f3ba949e2a9faa11f8c98d1589a
627,015
import torch def ground_caption(captions, n_ground=1, prefix="describe visual inputs:", sort=True): """ For VG Args: captions n_ground Returns: source_text target_text Ex) (in vocab ids) captions ['Yellow banana', 'red crayon', 'black cow', 'blue sky'] n_ground > 1 ground_indices [1, 0, 2] source_text describe visual inputs: <vis_extra_id_1> <vis_extra_id_0> <vis_extra_id_2> target_text <extra_id_0> red crayon <extra_id_1> Yellow banana <extra_id_2> black cow n_ground == 1 source_text describe visual inputs: <vis_extra_id_1> target_text red crayon """ n_boxes = len(captions) if sort: ground_indices = torch.randperm(n_boxes)[:n_ground].sort().values else: ground_indices = torch.randperm(n_boxes)[:n_ground] ground_indices = ground_indices.tolist() source_text = [prefix] target_text = [] if n_ground == 1: idx = ground_indices[0] source_text.append(f'<vis_extra_id_{idx}>') target_text.append(f'{captions[idx]}') else: for j, idx in enumerate(ground_indices): source_text.append(f'<vis_extra_id_{idx}>') target_text.append(f'<extra_id_{j}>') target_text.append(f'{captions[idx]}') # target_text.append('</s>') source_text = " ".join(source_text) target_text = " ".join(target_text) # return ground_indices, source_text, target_text return source_text, target_text
df340b82e51bd1ea18a70e9d11c41910b21cf919
512,362
def findInEdges(nodeNum, edges, att=None): """ Find the specified node index in either node_i or node_j columns of input edges df. Parameters ---------- nodeNum : int This is the node index not protein index. edges : pandas dataframe Dataframe in which to search for node. Returns ------- pandas dataframe """ edges = edges.copy() if att is not None: return edges[(edges.attribute == att) & ((edges.node_i == nodeNum) | (edges.node_j == nodeNum))] else: return edges[(edges.node_i == nodeNum) | (edges.node_j == nodeNum)]
83ffce762bf660c33787f58c0d132f8a38cf8db9
676,148
def hasTemplate (s): """ Return True if string s has string templates """ return '{' in s and '}' in s
ab2f68c0e5b77ab4336a8814eb686b2743c2f6e1
69,276
import pprint import hashlib def generate_hash(data_in): """ Generate a hash from the data_in that can be used to uniquely identify equivalent data_in. Args: data_in (any): The data from which a hash is to be generated. This can be of any type that can be pretty printed. Returns: str: A hexadecimal string which is a hash hexdigest of the data as a string. """ bytestring = pprint.pformat(data_in).encode("utf-8") return hashlib.sha256(bytestring).hexdigest()
7d6b6bba59c8ae77ca7bb60bb0e8a9116c7e3ebf
459,511
def join_ipv4_segments(segments): """ Helper method to join ip numeric segment pieces back into a full ip address. :param segments: IPv4 segments to join. :type segments: ``list`` or ``tuple`` :return: IPv4 address. :rtype: ``str`` """ return ".".join([str(s) for s in segments])
9b5196d6f7dc10a6f627c078eb90d7107daeb949
313,545
def WaitForOpMaybe(operations_client, op, asyncronous=False, message=None): """Waits for an operation unless the asyncronous flag is on. Args: operations_client: api_lib.ml_engine.operations.OperationsClient, the client via which to poll op: Cloud ML Engine operation, the operation to poll asyncronous: bool, whether to return immediately instead of waiting for the operation message: str, the message to display while waiting for the operation Returns: The Operation message if asyncronous is true, or the result of the operation otherwise """ if asyncronous: return op return operations_client.WaitForOperation(op, message=message).response
98221aaa1115c49c256824df072ef6f11b40e183
643,764
from typing import Union import math def truncate_to_block(length: Union[int, float], block_size: int) -> int: """ Rounds the given length to the nearest (smaller) multiple of the block size. """ return int(math.floor(length / block_size)) * block_size
cbcd3cfcb2bf32c0cc7d4f00a1b3d5d741113d7d
665,197
def argmax(d): """ Return key corresponding to maximum value in dictionary `d`. If several keys have the same maximum value, one of them will be returned. Parameters ---------- d : dict values must be numeric Returns ------- key in `d` """ return max(d, key = lambda k : d[k])
57f09b6b9470560aa49e67509575c496cbf25c02
516,113
def timedelta2duration(delta): """ Convert a datetime.timedelta to ISO 8601 duration format Parameters ---------- delta : datetime.timedelta Returns ------- str """ s = "P" if delta.days: s += f"{delta.days}D" if delta.seconds or delta.microseconds: sec = delta.seconds if delta.microseconds: # Don't add when microseconds is 0, so that sec will be an int then sec += delta.microseconds / 1e6 s += f"T{sec}S" if s == "P": s += "0D" return s
ffd550ce8852046d08c39388362e677741ad117c
642,721
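A small, hand-worked usage sketch for the timedelta2duration snippet above; it assumes the function is in scope, and the expected strings are derived by tracing the function body rather than taken from the original source.

from datetime import timedelta

# Days and fractional seconds are combined into a single ISO 8601 duration;
# a zero delta falls back to "P0D".
assert timedelta2duration(timedelta(days=2, seconds=3661, microseconds=500000)) == "P2DT3661.5S"
assert timedelta2duration(timedelta(seconds=30)) == "PT30S"
assert timedelta2duration(timedelta(0)) == "P0D"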
def terminate_vy_less_zero(x, y, vx, vy, ax, ay, t): """Returns True if the y velocity is less than zero, otherwise returns False""" return vy[-1] < 0
ec0864962437e2906859f1bbe6eab04be0eeaf6c
447,749
import random def random_choice(choices): """returns a random choice from a list of (choice, probability)""" # sort by probability choices = sorted(choices, key=lambda x:x[1]) roll = random.random() acc_prob = 0 for choice, prob in choices: acc_prob += prob if roll <= acc_prob: return choice
f477abe220fa9d87ee3692bed8c41973af4c637c
705,303
def get_records(line1): """Collects record reported from the line. Args: line1 (str): 1.st line of data block Returns: str: record reported """ record = line1.split(' ')[-1] return record if record != '' else '-'
ccb8707d7583982a73d9a94ad4db3e2d3f90c871
440,315
def descendants_count(tree): """For every node, count its number of descendant leaves, and the number of leaves before it.""" n = len(list(tree.nodes())) root = n - 1 left = [0] * n desc = [0] * n leaf_idx = 0 children = [list(tree.neighbors(node))[::-1] for node in range(n)] # children remaining to process stack = [root] while len(stack) > 0: node = stack[-1] if len(children[node]) > 0: stack.append(children[node].pop()) else: children_ = list(tree.neighbors(node)) if len(children_) == 0: desc[node] = 1 left[node] = leaf_idx leaf_idx += 1 else: desc[node] = sum([desc[c] for c in children_]) left[node] = left[children_[0]] assert node == stack.pop() return desc, left
3fdea98bf542df9700da08dcbb297f0e9eaca4ee
77,941
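A hedged usage sketch for the descendants_count snippet above. It assumes the tree is a networkx.DiGraph with parent-to-child edges and the root labelled n - 1, which is what the function's use of tree.neighbors and root = n - 1 suggests; the expected values were traced by hand.

import networkx as nx

# Leaves 0 and 1 hang off internal node 3; leaf 2 and node 3 hang off the root 4.
tree = nx.DiGraph()
tree.add_edges_from([(4, 3), (4, 2), (3, 0), (3, 1)])

desc, left = descendants_count(tree)
assert desc == [1, 1, 1, 2, 3]  # each leaf counts itself; node 3 spans 2 leaves, the root spans all 3
assert left[4] == 0             # the root's leftmost descendant leaf is the first leaf visited
print(left)                     # per-node count of leaves visited before its leftmost descendant leaf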
def _parameters_link(args, parameters): """Build arguments based on the arguments and parameters provided.""" args_ = {} for index, arg in enumerate(args): args_.setdefault(arg, []).append(index) args_ = {key: iter(value) for key, value in args_.items()} return [next(args_.get(p, iter([p]))) for p in parameters]
36080d299b08d700e4b74e3b1db2903feaf0313c
140,288
def convert_bytes_to_GB(bytes_number): """ Convert a number in bytes to a number in GB, then round to 4 digits """ GB_number = bytes_number / (1024 ** 3) GB_number = round(GB_number, 4) return GB_number
d5c0c048060485966fccc11f6ea166535997ad45
243,174
def get_value(table, row, col) : """Get value in given row and column""" return table.find_all('tr')[row].find_all('td')[col].string
4d1c96dc239654fb3df57b4f12618b00bfe869d3
81,442
import pkg_resources def version() -> str: """ Return the current version of Shillelagh. As an example:: sql> SELECT VERSION(); VERSION() ----------- 0.7.4 """ return pkg_resources.get_distribution("shillelagh").version
874aeaceb2ada0a166184109da3f4202ace36e33
433,411
def soma(parcela, parcela_2): """This function computes the sum of two addends :param parcela: number :param parcela_2: number :return: number """ return parcela + parcela_2
79a2a081daa64a8bc865355d8054e26d858f914a
365,376
import logging from typing import Union from pathlib import Path import warnings def add_logging_handler( logger: logging.Logger, logging_file_path: Union[str, Path] ) -> logging.FileHandler: """Add a logging file-handler to the logger. Parameters ---------- logger : logging.Logger The logger. logging_file_path : Union[str, Path] The file path for the file handler. Returns ------- logging.FileHandler The file-handler that is added to the given logger. """ if not isinstance(logging_file_path, Path): logging_file_path = Path(logging_file_path) if logging_file_path.exists(): warnings.warn( f"Logging file ({logging_file_path}) already exists. " f"This file will be overwritten!", RuntimeWarning, ) # Clear the file # -> because same FileHandler is used when calling this method twice open(logging_file_path, "w").close() f_handler = logging.FileHandler(logging_file_path, mode="w") f_handler.setFormatter( logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") ) f_handler.setLevel(logging.INFO) logger.addHandler(f_handler) return f_handler
128954e07ba1435a5b65c550bbdbd54c09496b0f
411,012
def no_change(msg: str) -> str: """Add no change tag to string""" return f"[no_change]{msg}[/no_change]"
4b268d65f9b8478e72e7d91ad3c05dafeb35d288
277,100
import json def _read_as_json(msg): """Decode message (UTF-8) and read into dictionary. :param msg: str to decode and deserialize :return: Dictionary from message """ msg_body = msg.get_body().decode("utf-8") return json.loads(msg_body)
f1fb041508f2840ee966516f08b1471d928427bd
467,075
import collections def most_common(row, *, default=0): """ Return the most-common element in the array; break ties arbitrarily. """ C = collections.Counter(row) return C.most_common(1)[0][0] if C else default
0d263b740c43d88509aec40cb6a9a6408d402acd
435,906
from typing import Union from typing import List def list_of_ints(string: str, delimiter: Union[str, None] = None) -> List[int]: """Cast a string to a list of integers. Args: string: String to be converted to a list of int's. delimiter: Delimiter between integers in the string. Default is to split with any whitespace string (see str.split() method). """ return [int(sp) for sp in string.split(sep=delimiter)]
53a1ad5ace4cc9b1e6687910b34d9bbb12f595c9
620,980
def generate_kmers(kmerlen): """make a full list of k-mers Arguments: kmerlen -- integer, length of k-mer Return: a list of the full set of k-mers """ nts = ['A', 'C', 'G', 'T'] kmers = [] kmers.append('') l = 0 while l < kmerlen: imers = [] for imer in kmers: for nt in nts: imers.append(imer+nt) kmers = imers l += 1 return kmers
755752a2ade73a6066a5ba07027fb20b80f86880
440,651
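A quick illustrative check of the generate_kmers snippet above (hand-derived expectations, assuming the function is in scope): the result enumerates all 4^k k-mers over the DNA alphabet in lexicographic-like order of construction.

kmers = generate_kmers(2)
assert len(kmers) == 16               # 4^2 dinucleotides
assert kmers[0] == 'AA' and kmers[-1] == 'TT'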
def num_channels(I): """number of channels in the image""" return I.shape[2]
12f485391dff5386225ad13905e6292fc9f0bda1
610,039
def ship_name(fleet, designated_no): """Return ship's name for specified designated number from the fleet.""" # has dictionary a key? Use syntax: key in dictionary if designated_no in fleet: return fleet[designated_no]
530e00e2613c2c5424eb4abdfb19658e6ee5413f
620,459
def has_view_restrictions(page): """Returns True if the page has view restrictions set up, False otherwise.""" return page.view_restrictions.count() > 0
97780a4091c1461bdbeabb04f2fd8cd5a28b18ae
626,932
def csr_data_indices(arr): """extract data and indices arrays from a dense vector (this may seem like it may be done faster with np.array.nonzero and similar tools, but this implementation is faster) :param arr: dense array :return: arrays with nonzero data elements, and corresponding indices """ data, indices = [], [] for i in range(len(arr)): if arr[i] != 0.0: data.append(arr[i]) indices.append(i) return data, indices
7a25c7991f1ae8ae5ee747319b4add24f9fe1926
259,856
def pass_bailout(output, step_number): """Print info and return if in timeout or crash pass states.""" if output.HasTimedOut(): # Dashed output, so that no other clusterfuzz tools can match the # words timeout or crash. print('# V8 correctness - T-I-M-E-O-U-T %d' % step_number) return True if output.HasCrashed(): print('# V8 correctness - C-R-A-S-H %d' % step_number) return True return False
5e7be9fddc06c7b6d0cb5c392a21ee2d4d7fcebd
255,186
def _clamp_transpose(transpose_amount, ns_min_pitch, ns_max_pitch, min_allowed_pitch, max_allowed_pitch): """Clamps the specified transpose amount to keep a ns in the desired bounds. Args: transpose_amount: Number of steps to transpose up or down. ns_min_pitch: The lowest pitch in the target note sequence. ns_max_pitch: The highest pitch in the target note sequence. min_allowed_pitch: The lowest pitch that should be allowed in the transposed note sequence. max_allowed_pitch: The highest pitch that should be allowed in the transposed note sequence. Returns: A new transpose amount that, if applied to the target note sequence, will keep all notes within the range [min_allowed_pitch, max_allowed_pitch]. """ if transpose_amount < 0: transpose_amount = -min(ns_min_pitch - min_allowed_pitch, abs(transpose_amount)) else: transpose_amount = min(max_allowed_pitch - ns_max_pitch, transpose_amount) return transpose_amount
008389fd9c9baedb74dfac9feb4e8080968b505a
239,283
import re def camel_case_to_underscore(name=''): """ Change string from camel case to using underscores. According to PEP8, class names should follow camel case convention, whereas methods, functions, and variables should use underscores. Parameters ---------- name : :class:`str` Name to be changed from camel case to using underscores. Returns ------- name : :class:`str` Name changed from camel case to using underscores. """ name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()
bc9a1d28b6d63d2e3d8ad94dd093246cabf2d28a
341,695
def A002275(n: int) -> int: """Repunits: (10^n - 1)/9. Often denoted by R_n.""" if n == 0: return 0 return int("1" * n)
2fb82627f80991a954937837754011671829e48f
260,209
def get_user_response(prompt): # pragma: no cover """Display prompt and return user response lowercased.""" response = input(prompt) return response.lower()
bb07e4057be8ebaa96d6a645a9c794386b321a16
242,078
def append_collection_links(request, response, link_dict): """ Convenience method to append to a response object document-level links. """ data = response.data if not 'collection_links' in data: data['collection_links'] = {} for (link_relation_name, url) in link_dict.items(): data['collection_links'][link_relation_name] = url return response
8f25910c182f68c46172354f1a3bf9b336154126
636,594
def sanitize_filename(f): """Removes invalid characters from file name. Args: f (:obj:`str`): file name to sanitize. Returns: :obj:`str`: sanitized file name including only alphanumeric characters, spaces, dots or underlines. """ keepchars = (" ", ".", "_") return "".join(c for c in f if c.isalnum() or c in keepchars).rstrip()
d1f71503c6e85886ffdcbcbbde1ff5c22c363fc8
371,096
def to_sorted_subfloats(float_string): """Convert string of floats into sorted list of floats. This function will first remove the "TSH" and turn the string of floats into the list of floats and sort them from smaller to larger. Args: float_string (str): A string begin with "TSH" and followed by a sequence of floats. Returns: list: a list of pure sorted floats. """ float_list = float_string.split(",") float_list.remove("TSH") float_list = [float(i) for i in float_list] float_list.sort() return float_list
7764b648449c4ca79a8fe9aecd8a1b3b73acee1c
660,587
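A minimal usage sketch for the to_sorted_subfloats snippet above, assuming the function is in scope; the input value is illustrative only.

# The "TSH" marker is dropped, the remaining fields become floats, sorted ascending.
assert to_sorted_subfloats("TSH,2.5,0.3,1.7") == [0.3, 1.7, 2.5]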
def header(token): """ returns a generic header used for insert and update Deployment Manager API calls. :param token: bearer token :return: header dict as expected by requests """ return { 'Metadata-Flavor': 'Google', 'Authorization': f'Bearer {token}', 'Accept': 'application/json', 'Content-Type': 'application/json' }
0b36f056feaaf2c94c50d3a282e79af61ae6f4b5
190,848
def compute_usage_metrics(session, args, fqdn, user, collection, experiment, channel): """Add metrics to cloudwatch Args: session (boto3.session): args (dict): contains [x|y|z]_[start|stop] for computing extents fqdn (str): fully qualified domain name of the endpoint user (str): name of user invoking downsample collection (str): name of collection experiment (str): name of experiment channel (str): name of channel """ def get_cubes(axis, dim): extent = args['{}_stop'.format(axis)] - args['{}_start'.format(axis)] return -(-extent // dim) ## ceil div cost = ( get_cubes('x', 512) * get_cubes('y', 512) * get_cubes('z', 16) / 4 # number of cubes for a downsampled volume * 0.75 # assume the frame is only 75% filled * 2 # 1 for invoking a lambda # 1 for time it takes lambda to run * 1.33 # add 33% overhead for all other non-base resolution downsamples ) dimensions = [ {'Name': 'user', 'Value': user}, {'Name': 'resource', 'Value': '{}/{}/{}'.format(collection, experiment, channel)}, {'Name': 'stack', 'Value': fqdn}, ] client = session.client('cloudwatch') client.put_metric_data( Namespace = "BOSS/Downsample", MetricData = [{ 'MetricName': 'InvokeCount', 'Dimensions': dimensions, 'Value': 1.0, 'Unit': 'Count' }, { 'MetricName': 'ComputeCost', 'Dimensions': dimensions, 'Value': cost, 'Unit': 'Count' }] )
23d88341296c1568db41956c87ef16fef11e4bed
250,862
import uuid import random def make_ds_id() -> str: """Generate a dataset ID like DataLad would. This is intended for lightweight tests that don't create full-fledged datasets. """ return str(uuid.UUID(int=random.getrandbits(128)))
c85f4662f93df6dd7c3035170f806d50e3480c30
649,968
import random def uniformly_sample_list(vals): """ Returns a single value uniformly sampled from the list `vals`. """ return random.choice(vals)
f23a980fd00bf82696ace66348d5338f25acd91a
174,533
def correction_byte_table_q() -> dict[int, int]: """Table of the number of correction bytes per block for the correction level Q. Returns: dict[int, int]: Dictionary of the form {version: number of correction bytes} """ table = { 1: 13, 2: 22, 3: 18, 4: 26, 5: 18, 6: 24, 7: 18, 8: 22, 9: 20, 10: 24, 11: 28, 12: 26, 13: 24, 14: 20, 15: 30, 16: 24, 17: 28, 18: 28, 19: 26, 20: 30, 21: 28, 22: 30, 23: 30, 24: 30, 25: 30, 26: 28, 27: 30, 28: 30, 29: 30, 30: 30, 31: 30, 32: 30, 33: 30, 34: 30, 35: 30, 36: 30, 37: 30, 38: 30, 39: 30, 40: 30 } return table
240303c1812429f75731d80f4a51f3aa0a066e6e
598,070
def get_normal_form(obj_name, title=True): """Replaces underscores with spaces. Transforms to title form if title is True. (product_groups -> Product Groups). """ obj_name_wo_spaces = obj_name.replace("_", " ") return obj_name_wo_spaces.title() if title else obj_name_wo_spaces
276d1beab4e21f1a89e235c3f8afdabd08230679
262,735
import typing def has_duplicates(data: typing.Sequence) -> bool: """ Returns ``True`` if ``data`` has duplicate elements. It works both with hashable and not-hashable elements. """ try: return len(set(data)) != len(data) except TypeError: n = len(data) for i in range(n): for j in range(i + 1, n): if data[i] == data[j]: return True return False
8a49e4495ca581887fb39a0adf96c9c9f6016c62
137,014
def fibonacci(n): """Returns n Fibonacci numbers Args: n: number of Fibonacci numbers to return Returns: List that contains n Fibonacci numbers Raises: ValueError when n is not a positive integer """ if n < 1: raise ValueError("n must be a positive integer") output = [] f1 = 0 f2 = 1 fn = 1 for _ in range(n): output.append(fn) fn = f1 + f2 f1 = f2 f2 = fn return output
ef8e26e63327a3fce20bd36f3d14bc9902cb44b8
665,449
def doolittle(matrix_a): """ Doolittle's Method for LU-factorization. :param matrix_a: Input matrix (must be a square matrix) :type matrix_a: list, tuple :return: a tuple containing matrices (L,U) :rtype: tuple """ # Initialize L and U matrices matrix_u = [[0.0 for _ in range(len(matrix_a))] for _ in range(len(matrix_a))] matrix_l = [[0.0 for _ in range(len(matrix_a))] for _ in range(len(matrix_a))] # Doolittle Method for i in range(0, len(matrix_a)): for k in range(i, len(matrix_a)): # Upper triangular (U) matrix matrix_u[i][k] = float(matrix_a[i][k] - sum([matrix_l[i][j] * matrix_u[j][k] for j in range(0, i)])) # Lower triangular (L) matrix if i == k: matrix_l[i][i] = 1.0 else: matrix_l[k][i] = float(matrix_a[k][i] - sum([matrix_l[k][j] * matrix_u[j][i] for j in range(0, i)])) # Handle zero division error try: matrix_l[k][i] /= float(matrix_u[i][i]) except ZeroDivisionError: matrix_l[k][i] = 0.0 return matrix_l, matrix_u
03ba90c29dfb67ffe1edf939b49f3ab537931831
12,715
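A hand-worked sketch for the doolittle snippet above (assuming the function is in scope): it factors a small matrix and checks, in plain Python without numpy, that L times U reproduces the input.

A = [[4.0, 3.0],
     [6.0, 3.0]]
L, U = doolittle(A)
assert L == [[1.0, 0.0], [1.5, 1.0]]   # unit lower-triangular factor
assert U == [[4.0, 3.0], [0.0, -1.5]]  # upper-triangular factor
# Reconstruct A from the factors.
n = len(A)
recon = [[sum(L[i][k] * U[k][j] for k in range(n)) for j in range(n)] for i in range(n)]
assert recon == A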
import re def email_parse(email: str) -> dict: """ Parses the given email string into its attributes and returns a dictionary :param email: string value of the email being processed :return: {'username': <part before the @ symbol>, 'domain': <part after the @ symbol>} | ValueError """ # for simplicity, assume the address contains no national characters and the top-level domain is at most 4 characters long re_mail = re.compile(r'^[0-9a-z_.-]+@[0-9a-z-]+\.[a-z]{2,4}$') if len(re_mail.findall(email)) == 1: parce_list = email.split('@') else: raise ValueError(f'wrong email: {email}') return dict(username=parce_list[0], domain=parce_list[1])
bfa3b9c2c0e829dd3b133a3aad8718f9a77b06a6
233,522
def replace_last(full, sub, rep=''): """ replaces the last instance of a substring in the full string with rep :param full: the base string in which the replacement should happen :param sub: to be replaced :param rep: replacement substring default empty :return: """ end = '' count = 0 for c in reversed(full): count = count + 1 end = c + end if sub in end: return full[:-count] + end.replace(sub, rep) return full
9bbb8ff3f26972740e400041357c517f8c3db042
327,105
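A brief usage sketch for the replace_last snippet above (illustrative inputs, function assumed in scope): only the final occurrence of the substring is replaced, and the string is returned unchanged when the substring is absent.

assert replace_last("a.b.c", ".", "-") == "a.b-c"
assert replace_last("no-match-here", "xyz") == "no-match-here"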
import random def next_unused_name_in_group(grp, length): """ Gives a name that isn't used in a Group. Generates a name of the desired length that is not a Dataset or Group in the given group. Note, if length is not large enough and `grp` is full enough, there may be no available names meaning that this function will hang. Parameters ---------- grp : h5py.Group or h5py.File The HDF5 Group (or File if at '/') to generate an unused name in. length : int Number of characters the name should be. Returns ------- name : str A name that isn't already an existing Dataset or Group in `grp`. """ # While # # ltrs = string.ascii_letters + string.digits # name = ''.join([random.choice(ltrs) for i in range(length)]) # # seems intuitive, its performance is abysmal compared to # # '%0{0}x'.format(length) % random.getrandbits(length * 4) # # The difference is a factor of 20. Idea from # # https://stackoverflow.com/questions/2782229/most-lightweight-way- # to-create-a-random-string-and-a-random-hexadecimal-number/ # 35161595#35161595 fmt = '%0{0}x'.format(length) name = fmt % random.getrandbits(length * 4) while name in grp: name = fmt % random.getrandbits(length * 4) return name
302c55264655ef5d5c011462527ea88565dedebe
669,180
def stripLabels(testFeatures): """ Strips label from a test sentence feature vector """ return [testFeatures[i][0] for i in range(len(testFeatures))]
24308c4e3fb0d56256b77fe0310381d8017f41ee
643,023
import re def sanitize_artist_name(name): """ Remove parenthetical number disambiguation bullshit from artist names, as well as the asterisk stuff. """ name = re.sub(r" \(\d+\)$", "", name) return re.sub(r"\*+$", "", name)
ba2e1122730957d1226c5392e4362e2bac3b1b4f
115,008
def bearer_auth(request, authn): """ Pick out the access token, either in HTTP_Authorization header or in request body. :param request: The request :param authn: The value of the Authorization header :return: An access token """ try: return request["access_token"] except KeyError: if not authn.startswith("Bearer "): raise ValueError('Not a bearer token') return authn[7:]
badcd4d6bba5f8868894f5981353283dc62946eb
503,970
def find_closest_frame(utc, timestamp): """ Takes timestamp as datetime object, returns the frame number corresponding to the closest frame to that time Briefly sets the index to the datetime column, then sets it back """ index_holder = utc.index utc.index = utc.datetime closest_loc = utc.index.get_loc(timestamp, method = "nearest") utc.index = index_holder return closest_loc
19b588176203777e04a596ac36fff7d0b96ad7f8
567,222
import math def H2N(H, e): """ Function: H2N Purpose: Maps the hyperbolic anomaly angle H into the corresponding mean hyperbolic anomaly angle N. Inputs: H = hyperbolic anomaly (rad) e = eccentricity (e > 1) Outputs: N = mean hyperbolic anomaly (rad) """ if e > 1.0: N = e * math.sinh(H) - H return N raise ValueError('Error: H2N() received e = {}, the value of e should be e > 1'.format(str(e)))
beb663a71f91700340591a051cdf784bcab27e6d
571,193
from pathlib import Path def get_number_of_files_in_dir(directory): """ Sums the number of files in a directory :param directory: Any directory with files :return: Number of files in directory """ directory = Path(directory) files = directory.iterdir() total_files = sum(1 for x in files) return total_files
2cf080d5f839bd155faa75e6db0d3e3a5cf49127
100,851
def normalize(image, low=0.0, high=1.0): """ Normalized the image to a range. :param image: :param low: lowest value after normalization. :param high: highest value after normalization. :return: """ image_01 = (image - image.min()) / (image.max() - image.min()) return image_01 * (high - low) + low
411cbe6226603e5004e5d208c72c2b6f17f16cb5
495,729
def sigma_eaton(es_norm, v_ratio, n): """ calculate effective pressure with the ratio of velocity and normal velocity Notes ----- .. math:: {\\sigma}={\\sigma}_{n}\\left(\\frac{V}{V_{n}}\\right)^{n} """ return es_norm * (v_ratio)**n
708b59edebd6ea14dffbdb59d91d05ebfae0d8b2
101,420
def drop_column(df, columns_to_drop): """ Removes columns from a DataFrame del df[name] Args: df (`pandas.DataFrame`): The dataframe to drop columns on columns_to_drop (:type:`list` of :type:`str`): A list of the columns to remove Returns: `pandas.DataFrame`: `df` with the provided columns removed """ for ctd in columns_to_drop: del df[ctd] return df
1eadbf301aff80752c93ca4393910dfa19a76b3a
38,203
def list_to_string(list_, **kwargs): """Convert list_ into string. Accepts keyword 'join', default linebreak.""" ## http://www.decalage.info/en/python/print_list_ ## http://stackoverflow.com/questions/1769403/understanding-kwargs-in-python ## str.strip -- http://stackoverflow.com/questions/7984169/using-strip-on-lists results = '' jointoken = '\n' if ('join' in kwargs): # http://stackoverflow.com/questions/14017996/python-optional-parameter jointoken = kwargs['join'] list_ = map(str, list_) ## convert any ints into strings list_ = map(str.strip, list_) ## strip whitespace list_ = jointoken.join(list_) ## join list items into multi-line string results = list_ return results
ec50f6a2ff231930056fa7c8a47a3b7b20d6d116
285,922
import re def guess_type_from_decl(typename, varname): """Guess the name of the type from the inputs. typename and varname are match 1 and 3 from regex: /^\s*(\w[^=]+)(\*?) (\S+) = new/ guess_type_from_decl(str, str) -> str >>> guess_type_from_decl('List<Moveable>', 'occupants') ('List<Moveable>', True) >>> guess_type_from_decl('Moveable', 'm') ('Moveable', True) >>> guess_type_from_decl('List<Coroutine>', '_Anims') ('List<Coroutine>', True) >>> guess_type_from_decl('Dictionary<int, Coroutine>', 'm_Things') ('Dictionary<int, Coroutine>', True) >>> guess_type_from_decl('var', 'list') ('List<>', True) >>> guess_type_from_decl('var', 'dict') ('Dictionary<>', True) >>> guess_type_from_decl('List<const int>* const', '_Anims') ('List<const int>', True) >>> guess_type_from_decl('byte[]', 'data') ('byte', False) >>> guess_type_from_decl('byte[,]', 'data') ('byte', False) """ if typename == 'var': t = varname # m_blah/_blah -> blah t = re.sub(r"^[mk]?_", '', t) if len(t) > 1: t = t[0].upper() + t[1:] # Guess the variable name is close to the typename. # i.e, var list = new List<> list_names = [ 'List', 'Items', 'Elements', ] dict_names = [ 'Dict', 'Map', ] if t in list_names: t = "List<>" elif t in dict_names: t = "Dictionary<>" return t, True else: # Limited C++ support. t = typename t = re.sub(r"[*].*$", '', t) t = re.sub(r"^(public|internal|protected|private) ", '', t) t = re.sub(r"^(static |readonly |const )*", '', t) use_square = t.endswith(']') if use_square: first_square = t.find('[') t = t[:first_square] return t, not use_square
ad071ac50c7faaad497ab3a29379577d096402bb
353,231
def get_yearweek(yearweekstr: str) -> tuple: """Transform string of form '2020-W10' into tuple (2020, 10) """ return tuple(map(int, yearweekstr.split('-W')))
d8a4753d126e12709f41e1058753fd06762c765f
589,116
def _get_bit(x: int, i: int) -> bool: """Returns true iff the i'th bit of x is set to 1.""" return (x >> i) & 1 != 0
567ee4edb81ffd648f89dd1b0ed41b11efd21536
598,105
def get_whereclause(params): """Given a dictionary of params {key1: val1, key2: val2 } return a partial query like: WHERE key1 = val1 AND key2 = val2 ... """ query_parts = [] first = False for key, val in params.items(): if not first: first = True query_parts.append("WHERE %s = '%s'" % (key, val)) else: query_parts.append("AND %s = '%s'" % (key, val)) return " ".join(query_parts)
a9d2198ae3308be862eecdb148c484e0d63ccfac
63,417
def bottom_up(num_steps: int) -> int: """Compute number of steps using a bottom up approach This iterative appraoch is the best approach. Since it avoids the max recursion depth and uses O(1) space. Actually, the space complexity is more complicated. We only use 3 ints but the space needed by those 3 ints grows as n grows. I guess it actually uses O(log(n)) space? Args: num_steps: number of total steps Returns: The number of possible ways to climb the stairs """ if num_steps <= 2: return num_steps if num_steps == 3: return 4 a = 1 b = 2 c = 4 for _ in range(4, num_steps): a, b, c = b, c, a + b + c return a + b + c
8645158380859b5fe8a84ee8b5ac5059437ecdb5
51,171
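A hand-traced check of the bottom_up snippet above (function assumed in scope): with steps of size 1, 2 or 3, the counts follow a tribonacci-style recurrence.

# 1, 2, 4, then each term is the sum of the previous three: 7, 13, 24, ...
assert [bottom_up(n) for n in range(1, 7)] == [1, 2, 4, 7, 13, 24]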
from pathlib import Path def is_folder_included(folder): """ Checks if a folder should be included in the submission. """ return (Path(folder) / '.include_in_submission').exists()
0489e74530f1881ff8916bba712f7555b28e25b1
257,983
def get_e_rtd_default(hw_type): """Efficiency of the given water heater Args: hw_type(str): type of water heater / combined water heater and space heater Returns: float: efficiency of the given water heater """ if hw_type in ['ガス潜熱回収型給湯機', 'ガス潜熱回収型給湯温水暖房機']: return 0.836 elif hw_type in ['ガス従来型給湯機', 'ガス従来型給湯温水暖房機']: return 0.704 else: raise ValueError(hw_type)
efc90c2a19d894093e790518e4d67f9fa4d3d700
693,491
import string def remove_partsymbol(atom): """ strips the part symbol like C1_4b from an atom name :param atom: 'C1_4b' :type atom: string >>> remove_partsymbol('C2_4b') 'C2_4' >>> remove_partsymbol('C22_b') 'C22' >>> remove_partsymbol('C_5') 'C_5' >>> remove_partsymbol('SAME/SADI') 'SAME/SADI' >>> remove_partsymbol('C22_4^b') 'C22_4' >>> remove_partsymbol('C23^b') 'C23' >>> remove_partsymbol('C24_0^b') 'C24' >>> remove_partsymbol('C25_0b') 'C25' """ if '_' in atom or '^' in atom: if '^' in atom: # since SHELXL 2016/5, residue and part are divided by "^" name = atom.split('^')[0] if name.split('_')[-1] == '0': return name.split('_')[0] else: return name else: presuff = atom.split('_') prefix, suffix = presuff[0], presuff[-1].strip(string.ascii_letters) if not suffix: return prefix else: if suffix == '0': atom = prefix else: atom = prefix + '_' + suffix return atom
f877efebce884a16f6dada10d8af5c34f50aaa30
388,929
from typing import List import random def gen_sha(charset: List[str], length: int): """Partial function for generating a long string of random characters.""" def inner() -> str: return ''.join(random.choice(charset) for _ in range(length)) return inner
00ade63b5144be3828036636fa509f2dd162bf19
387,709
def charset_to_encoding(name): """Convert MySQL's charset name to Python's codec name""" if name == 'utf8mb4': return 'utf8' return name
b97c9e2d8e91cec0be4d49d78154f3dddb77fde4
569,605
def itofm(i): """Converts midi interval to frequency multiplier.""" return 2 ** (i / 12.0)
eea7e26e63b5b8e56af41b34444440ebe5b2b4bb
614,015
def splitIntoGroupsOf(groupSize, theList): """ splits a list into list of lists, where the inner lists have at most groupSize number of items. """ result = [] for i in range(0, len(theList), groupSize): result.append(theList[i:i + groupSize]) return result
13ed5d13a0438f77b6d4f2d5463baf03bc6df0fc
324,339
def _specific_humidity(ea, pair): """Specific humidity from actual vapor pressure Parameters ---------- ea : ee.Image or ee.Number Actual vapor pressure [kPa]. pair : ee.Image or ee.Number Air pressure [kPa]. Returns ------- q : ee.Image or ee.Number Specific humidity [kg/kg]. Notes ----- q = 0.622 * ea / (pair - 0.378 * ea) """ return ea.multiply(-0.378).add(pair).pow(-1).multiply(ea).multiply(0.622) # return ea.multiply(0.622).divide(ea.multiply(-0.378).add(pair))
fa400b784a8b8e117ed9370a334163f9be529822
198,165
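A plain-number sanity check of the algebra used in the _specific_humidity snippet above. It does not call the function itself (which operates on Earth Engine objects); the values are illustrative and only confirm that the chained expression equals the formula in the Notes section.

ea, pair = 1.6, 101.3  # kPa, illustrative values
q_direct = 0.622 * ea / (pair - 0.378 * ea)
# multiply(-0.378).add(pair).pow(-1).multiply(ea).multiply(0.622) written as plain arithmetic:
q_chained = (1.0 / (-0.378 * ea + pair)) * ea * 0.622
assert abs(q_direct - q_chained) < 1e-12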
from typing import Callable from typing import Optional from typing import Tuple def then_parser( first_parser: Callable[[str], Optional[Tuple[str, str]]], second_parser: Callable[[str], Optional[Tuple[str, str]]], text: str, ) -> Optional[Tuple[str, str]]: """ Uses one parser on the text, then uses the next parser on the remaining text from the first parse. """ first_result = first_parser(text) if first_result is None: return None else: parsed, rest = first_result second_result = second_parser(rest) if second_result is None: return None else: parsed_2, rest_2 = second_result return parsed + parsed_2, rest_2
7604deaed51af9177661defe5e62a13766a77065
44,139
def copy_with_meta(class_to_copy, meta_src): """ Return a copy of "class_to_copy" but with metadata (n_actions and n_obs) copied from meta_src. Usually class_to_copy is an agent-class and meta_src is an environment-class in whose instances instances of class_to_copy are to be run. """ class result(class_to_copy): n_actions, n_obs = meta_src.n_actions, meta_src.n_obs result.__name__ = class_to_copy.__name__ result.__qualname__ = class_to_copy.__qualname__ return result
1d2eb8fe03a5452407e39a40140e161b497a0524
574,964
def determine_var(w, q2): """ :param w: omega as estimated :param q2: allele freq of SNP in p2 :return: sigma2, estimate of variation """ return w * (q2 * (1 - q2))
68f4c6dce3327b263000bd812a8d253102347643
435,515
import re def format_id(s): """Formats the given string so it can be used as an ID. Example: >>> format_id('name.surname') name-surname """ if not s: return '_' return re.sub(r'[^-a-zA-Z0-9_]', '-', s)
22e2a3353453b9e1c38402fec850fdc666ac7815
125,384
def create_identifiers_lists(identifiers): """Splits identifiers in two lists.""" issn_list = [] isbn_list = [] for ident in identifiers: if ident["scheme"] == "ISSN": issn_list.append(ident["value"]) if ident["scheme"] == "ISBN": isbn_list.append(ident["value"]) return issn_list, isbn_list
28ba76f72b29eb5625a13363fcd7eb84b29b9025
137,261
def invert_permutation_indices(indices): """Invert the permutation given by indices. """ inverted = [0] * len(indices) for i, index in enumerate(indices): inverted[index] = i return inverted
4d48c544c1d9684ba1f618c34293d4512d164180
491,375
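A short usage sketch for the invert_permutation_indices snippet above (function assumed in scope, example values hand-derived): composing a permutation with its inverse yields the identity.

perm = [2, 0, 3, 1]
inv = invert_permutation_indices(perm)
assert inv == [1, 3, 0, 2]
# Applying perm at the positions given by inv recovers 0..n-1.
assert [perm[i] for i in inv] == list(range(len(perm)))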
def last(y): """ Returns the last column from 2d matrix """ return y[:, (y.shape[1] - 1)]
981429949c74846bc079fb47f49c67e05884452a
244,215
def provider_names(providers): """ Returns the names of the :providers:, separated by commas. """ return ", ".join([p.provider_name for p in providers])
c89f7dc5c6d976753f4f44398d4fd906c32ec56a
588,862
def output_handler(data, context): """ Post-process TensorFlow Serving output before it is returned to the client """ if data.status_code != 200: raise ValueError(data.content.decode("utf-8")) return data.content, context.accept_header
cb7fedf142cd19ebfcc756295a087a603a789b43
468,604
def check_order(df, topcol, basecol, raise_error=True): """ Check that all rows are either depth ordered or elevation_ordered. Returns 'elevation' or 'depth'. """ assert basecol in df.columns, f'`basecol` {basecol} not present in {df.columns}' if (df[topcol] > df[basecol]).all(): return 'elevation' elif (df[topcol] < df[basecol]).all(): return 'depth' elif raise_error: raise ValueError('Dataframe has inconsistent top/base conventions') else: return None
9b4e7b9938bb2fe14ab99d5c111883a0f6d73337
9,005
def _split_sparse_columns(arff_data, include_columns): """ obtains several columns from sparse arff representation. Additionally, the column indices are re-labelled, given the columns that are not included. (e.g., when including [1, 2, 3], the columns will be relabelled to [0, 1, 2]) Parameters ---------- arff_data : tuple A tuple of three lists of equal size; first list indicating the value, second the x coordinate and the third the y coordinate. include_columns : list A list of columns to include. Returns ------- arff_data_new : tuple Subset of arff data with only the include columns indicated by the include_columns argument. """ arff_data_new = (list(), list(), list()) reindexed_columns = {column_idx: array_idx for array_idx, column_idx in enumerate(include_columns)} for val, row_idx, col_idx in zip(arff_data[0], arff_data[1], arff_data[2]): if col_idx in include_columns: arff_data_new[0].append(val) arff_data_new[1].append(row_idx) arff_data_new[2].append(reindexed_columns[col_idx]) return arff_data_new
5fd93b569222a50cae815731b7f0b40c8bc138bf
226,162
def name_of(obj): """Returns the name of function or class.""" class_name = type(obj).__name__ if class_name in ['function', 'type']: return obj.__name__ else: return class_name
fad30e12c9ef9a52700a8044094bab5b1ea5487e
540,053
def calc_upsampling_size(input_size: int, dilation: int = 1, tconv_kernel_size: int = 3, tconv_stride: int = 2, tconv_padding: int = 1, output_padding: int = 1) -> int: """ Helper function to calculate the upsampling size desired from the convolution parameters. This is the same formula as used by the transposed 3D convolution of PyTorch Args: - input_size (int): size of the input tensor - dilation (int): dilation of the convolution - tconv_kernel_size (int): kernel of the convolution - tconv_stride (int): stride of the convolution - tconv_padding (int): padding of the convolution - output_padding (int): Output padding to add """ return (input_size - 1) * tconv_stride - 2 * tconv_padding + dilation * ( tconv_kernel_size - 1) + output_padding + 1
138261e65211c2fa193c40f6c2882c18214f8a10
152,258
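A hand-worked check of the calc_upsampling_size snippet above (function assumed in scope): with the default kernel 3 / stride 2 / padding 1 / output_padding 1, each call doubles the spatial size, matching the transposed-convolution output formula it documents.

assert calc_upsampling_size(16) == 32
assert calc_upsampling_size(7) == 14
# Without output padding the result falls one short of a clean doubling.
assert calc_upsampling_size(16, output_padding=0) == 31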
def check_the_guess(guessed_number, number_to_guess, attempts_left): """ compare the player's guess with the random number to be guessed, provide a feedback """ if guessed_number == number_to_guess: return True else: if guessed_number > number_to_guess: print("Your number is too high!", end="") elif guessed_number < number_to_guess: print("Your number is too low!", end="") if attempts_left > 0: print(" Try it again!") print(f"\tAttempts left: {attempts_left} ...") return False else: print(f"\n\tAttempts left: {attempts_left} ...") return True
b412275c4a33faf4a86e4dbca7e01bb286067f07
394,152
def decrypt_single_char(char, key): """Decrypt a single char with a given key.""" assert isinstance(char, str) assert isinstance(key, int) upper_a_integer = ord('A') upper_z_integer = ord('Z') lower_a_integer = ord('a') lower_z_integer = ord('z') char_integer = ord(char) result = char_integer difference = (upper_z_integer - upper_a_integer) + 1 if upper_a_integer <= char_integer <= upper_z_integer: result -= key if not upper_a_integer <= result <= upper_z_integer: result += difference elif lower_a_integer <= char_integer <= lower_z_integer: result -= key if not lower_a_integer <= result <= lower_z_integer: result += difference return chr(result)
e96ced88c9b43cee45a17e417ce4f35a9136c8b8
373,506
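A brief usage sketch for the decrypt_single_char snippet above (function assumed in scope, expected characters traced by hand): shifting back by the key wraps within the same case, and non-letters pass through unchanged.

assert decrypt_single_char('D', 3) == 'A'
assert decrypt_single_char('a', 3) == 'x'   # wraps from 'a' back around to 'x'
assert decrypt_single_char('!', 5) == '!'   # non-alphabetic characters are untouched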