content: string, lengths 39 to 9.28k
sha1: string, lengths 40 to 40
id: int64, values 8 to 710k
def hdr3Dtohdr2D(hdr3D, verbose=True):
    """
    Removing the wavelength component of a hdr, i.e., converting the WCS
    from 3D (lambda, ra, dec) to 2D (ra, dec)

    --- INPUT ---
    hdr3D     The 3D hdr to remove wavelength components from
    verbose   Toggle verbosity
    """
    hdr2D = hdr3D.copy()  # copy so the input header is not mutated in place
    for key in list(hdr2D.keys()):
        if '3' in key:
            del hdr2D[key]
    hdr2D['WCSAXES'] = 2
    return hdr2D
2b851f719befe84662138cadccade78a32756de0
144,302
def sensitivity_calc(sign_residues_per_iter):
    """
    | Inputs the output of ``bootstrapped_residue_analysis`` and calculates the sensitivity of each residue.
    | The returned sensitivity of each residue is calculated by calculating ``residue_appearances / iterations``.
    | A sensitivity of 1 is ideal meaning that the residue was significant to all the iterations.

    Args:
        sign_residues_per_iter: A list of sets containing the residue ids of the significant residues on each iteration

    Returns:
        A dictionary of ``ResidueId(key), Sensitivity(value)`` for all the residues that appeared at least on one iteration
    """
    sens_dict = {}
    for which_iter in sign_residues_per_iter:
        for which_res in which_iter:
            try:
                sens_dict[which_res] += 1
            except KeyError:
                sens_dict[which_res] = 1
    # Get the sensitivity by calculating residue_appearances / total_iterations
    sens_dict = {k: v / len(sign_residues_per_iter) for k, v in sens_dict.items()}
    return sens_dict
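A quick usage sketch with made-up residue ids: residue 2 is significant in all three bootstrap iterations, so its sensitivity is 1.0, while residues 1 and 3 appear only once each.

iterations = [{1, 2}, {2, 3}, {2}]
print(sensitivity_calc(iterations))
# {1: 0.333..., 2: 1.0, 3: 0.333...}  (values shown rounded)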
f92772ea60d3ca660ee63aefa024047f8e3c5495
630,392
def host(ctx):
    """Returns true if player has host role"""
    if "Host" in [role.name for role in ctx.message.author.roles]:
        return True
    return False
e36c3a7e3977142bdded4ff48cca68c0831fc37a
91,072
import io
import tokenize


def _decode_source(source_bytes):
    """
    Decode bytes representing source code and return the string. Universal newline support
    is used in the decoding. Based on CPython's implementation of the same functionality:
    https://github.com/python/cpython/blob/3.9/Lib/importlib/_bootstrap_external.py#L679-L688
    """
    # Local imports to avoid bootstrap issues
    # NOTE: both modules are listed in compat.PY3_BASE_MODULES and collected into base_library.zip.
    source_bytes_readline = io.BytesIO(source_bytes).readline
    encoding = tokenize.detect_encoding(source_bytes_readline)
    newline_decoder = io.IncrementalNewlineDecoder(decoder=None, translate=True)
    return newline_decoder.decode(source_bytes.decode(encoding[0]))
1bd6efc154cfb6bf384d0f70aff68ff2cb5a21e9
58,029
def max_tau_dist(K):
    """Return the maximum unnormalised distance of K rankings. In other words,
    the maximum number of swaps performed by the bubble sort algorithm to
    reverse a list of rankings"""
    return K * (K - 1) / 2
75f437490e9c47b807580572ab814551ae5b8720
342,602
def phred_to_prob(q):
    """Convert a phred score (Sanger or modern Illumina) into a probability.
    Given a phred score q, return the probability p of the call being right

    Args:
        q (int): phred score

    Returns:
        float: probability of basecall being right
    """
    p = 10 ** (-q / 10)
    return 1 - p
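A worked instance: a phred score of 30 corresponds to an error probability of 10^-3, so the basecall is right with probability 0.999.

print(phred_to_prob(30))  # 0.999
print(phred_to_prob(10))  # 0.9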
8d1c0304c0058650f4ac613b6dde958ca22faa7f
605,565
def bedl_matches_vcf(variant, line_split: list) -> bool:
    """
    Consecutive checks of whether the chromosome, start position and repeat unit
    in the supplied variant and (split + stripped) bedl line match.
    If all match, return True, otherwise return False
    """
    if not line_split[0] == variant.CHROM:
        return False
    if not int(line_split[1]) == variant.start + 1:
        return False
    try:
        if not variant.INFO.get('RU').upper() == line_split[4]:
            return False
    except AttributeError:
        return False
    return True
c8950677bdfab2152314acf95138662de6e373d1
665,570
def to_language(locale):
    """Turns a locale name (en_US) into a language name (en-us)."""
    p = locale.find('_')
    if p >= 0:
        return locale[:p].lower() + '-' + locale[p + 1:].lower()
    else:
        return locale.lower()
260735d10cd06bf5d149cbed126f1bb99659c0be
654,772
def get_session_summary(sessions):
    """
    Takes a list of sessions and extracts a summary dictionary.
    """
    summary = dict()
    summary['all'] = len(sessions)
    summary['waiting'] = len([s for s in sessions if s.proc_status == 'initialized'])
    summary['running'] = len([s for s in sessions if s.proc_status in ('running', 'stopping')])
    summary['done'] = len([s for s in sessions if s.proc_status in ('completed', 'stopped', 'canceled', 'failed')])
    summary['success'] = len([s for s in sessions if s.session_status == 'success'])
    summary['warning'] = len([s for s in sessions if s.session_status == 'warning'])
    summary['error'] = len([s for s in sessions if s.session_status == 'error'])
    return summary
cdaf2c25f9cdca9578fe2ef3413db0869645b36d
623,044
def mean(numbers):
    """
    Calculates the mean of an array
    """
    return float(sum(numbers)) / max(len(numbers), 1)
f27f8393547773f89dba587dc5cd25fa8e14a296
421,628
import re


def check_for_str(text, pattern):
    """
    Checks for pattern in text

    :param text: string to check for pattern in
    :param pattern: pattern to check for
    :return: boolean confirming or denying the presence of pattern in string
    """
    has_url = False
    urls = re.findall(pattern, text)
    if len(urls) != 0:
        has_url = True
    return has_url
e4f17499bbb86254e5ded3c0085e40dea6cca3fe
337,045
def identity(test_item):
    """Identity decorator
    """
    return test_item
799d2325e04066c0dfa405d24d5718b67eb64a00
37,709
def escape_tex(text):
    """Substitutes characters for generating LaTeX sources files from Python."""
    # raw strings avoid invalid-escape-sequence warnings for '\%' etc.
    return text.replace(
        '%', r'\%').replace(
        '_', r'\_').replace(
        '&', r'\&')
99ec41da9a1effb12b6cbb733ee0468530a8dd2d
333,103
import torch


def _broadcast_shape(shapes):
    """
    Given a list of tensor sizes, returns the size of the resulting broadcasted tensor.

    Args:
        shapes (list of torch.Size): list of tensor sizes
    """
    shape = torch.Size([1])
    for s in shapes:
        shape = torch._C._infer_size(s, shape)
    return shape
3804cea0144466d5a44e578512081f2ae141ec63
607,512
def traverse_item_heirarchy(root, keys):
    """
    Given a root and a list of keys, follow each key to get the value.
    e.g. traverse_item_heirarchy(root, [0, 'a', 'b']) is equivalent to root[0]['a']['b']
    """
    item = root
    for key in keys:
        item = item[key]
    return item
3eb8a090dd56a982b02933a801c45ecb57e5f4bb
319,881
from typing import Tuple
import re


def parse_assignment_name(name: str) -> Tuple[str, str]:
    """Splits an assignment id into its type and number"""
    matches = re.match(r'^(HW|LAB|WS)(\d+)$', name, re.IGNORECASE)
    if not matches:
        raise ValueError('Invalid assignment ID: {}'.format(name))
    else:
        match_groups = matches.groups()
        return match_groups[0], match_groups[1]
500b1a9c4ad93b74b75e2b01fef2f80053f23938
376,446
def modify_config_for_default_image_exp(config):
    """
    Modifies a Config object with experiment, training, and observation settings that
    were used across all image experiments by default.

    Args:
        config (Config instance): config to modify
    """
    assert config.algo_name not in ["hbc", "iris"], "no image training for HBC and IRIS"

    with config.experiment.values_unlocked():
        # save model during every evaluation (every 20 epochs)
        config.experiment.save.enabled = True
        config.experiment.save.every_n_epochs = 20

        # every epoch is 500 gradient steps, and validation epoch is 50 gradient steps
        config.experiment.epoch_every_n_steps = 500
        config.experiment.validation_epoch_every_n_steps = 50

        # do 50 evaluation rollouts every 20 epochs
        # NOTE: horizon will generally get set depending on the task and dataset type
        config.experiment.rollout.enabled = True
        config.experiment.rollout.n = 50
        config.experiment.rollout.horizon = 400
        config.experiment.rollout.rate = 20
        config.experiment.rollout.warmstart = 0
        config.experiment.rollout.terminate_on_success = True

    with config.train.values_unlocked():
        # only cache low-dim info, and use 2 data workers to increase fetch speed for image obs
        config.train.num_data_workers = 2
        config.train.hdf5_cache_mode = "low_dim"

        # batch size 16 and 600 training epochs
        config.train.batch_size = 16
        config.train.num_epochs = 600

    with config.observation.values_unlocked():
        # default low-dim observation is eef pose, gripper finger position
        # default image observation is external camera and wrist camera
        config.observation.modalities.obs.low_dim = [
            "robot0_eef_pos",
            "robot0_eef_quat",
            "robot0_gripper_qpos",
        ]
        config.observation.modalities.obs.rgb = [
            "agentview_image",
            "robot0_eye_in_hand_image",
        ]
        config.observation.modalities.goal.low_dim = []
        config.observation.modalities.goal.rgb = []

        # default image encoder architecture is ResNet with spatial softmax
        config.observation.encoder.rgb.core_class = "VisualCore"
        config.observation.encoder.rgb.core_kwargs.feature_dimension = 64
        config.observation.encoder.rgb.core_kwargs.backbone_class = 'ResNet18Conv'  # ResNet backbone for image observations (unused if no image observations)
        config.observation.encoder.rgb.core_kwargs.backbone_kwargs.pretrained = False  # kwargs for visual core
        config.observation.encoder.rgb.core_kwargs.backbone_kwargs.input_coord_conv = False
        config.observation.encoder.rgb.core_kwargs.pool_class = "SpatialSoftmax"  # Alternate options are "SpatialMeanPool" or None (no pooling)
        config.observation.encoder.rgb.core_kwargs.pool_kwargs.num_kp = 32  # Default arguments for "SpatialSoftmax"
        config.observation.encoder.rgb.core_kwargs.pool_kwargs.learnable_temperature = False  # Default arguments for "SpatialSoftmax"
        config.observation.encoder.rgb.core_kwargs.pool_kwargs.temperature = 1.0  # Default arguments for "SpatialSoftmax"
        config.observation.encoder.rgb.core_kwargs.pool_kwargs.noise_std = 0.0

        # observation randomizer class - set to None to use no randomization, or 'CropRandomizer' to use crop randomization
        config.observation.encoder.rgb.obs_randomizer_class = "CropRandomizer"

        # kwargs for observation randomizers (for the CropRandomizer, this is size and number of crops)
        config.observation.encoder.rgb.obs_randomizer_kwargs.crop_height = 76
        config.observation.encoder.rgb.obs_randomizer_kwargs.crop_width = 76
        config.observation.encoder.rgb.obs_randomizer_kwargs.num_crops = 1
        config.observation.encoder.rgb.obs_randomizer_kwargs.pos_enc = False

    return config
8fb5179d6ef5a81f73a26e870d6541778b55268b
311,722
def check_compatibility(f):
    """
    Decorator to assert that a preprocessing function returns compatible train and test sets.
    Specifically it asserts that the number of columns is equal for both sets.

    :param f: The preprocessing function
    :return: The preprocessed train and test sets
    """
    def wrap(*args, **kwargs):
        train, test = f(*args, **kwargs)
        assert train.shape[1] == test.shape[1]
        return train, test
    return wrap
24f131362c68d60be0b5fbace0dd517acf9eb379
464,976
def get_datastore_id(client, name):
    """return the id of a datastore

    arguments:
    client -- an authenticated trove client
    name -- the name of the datastore to find

    returns: the id of the datastore or None if not found
    """
    try:
        store = client.datastores.find(name=name)
        return store.id
    except Exception:
        return None
d7a9e8e9d4ed1c12d40128516aedfbdbbf44bd74
615,968
def analizar_anio(anio: int) -> str:
    """
    Analyzes the year received as a parameter.

    Parameters:
        anio (int): Year to analyze. Must be a positive integer.

    Returns:
        str: Message of the form "El año X hace parte del milenio Y, siglo Z, y década W."
    """
    # The following instructions compute the millennium,
    # the century and the decade of the year entered by the user
    milenio = anio // 1000 + 1
    siglo = anio // 100 + 1
    decada = (anio % 100) // 10 + 1

    # Build the response string
    respuesta = "El año " + str(anio) + " hace parte del milenio " + \
                str(milenio) + ", siglo " + str(siglo) + " y década " + \
                str(decada) + "."

    # Return the string
    return respuesta
0b92a8f2d2ddb0d333ce151bef55a17b262fb327
455,423
def get_voted_content_for_user(user):
    """Returns a dict where:
    - The key is the content_type model
    - The values are lists of ids of the different objects voted by the user
    """
    if user.is_anonymous():
        return {}
    user_votes = {}
    for (ct_model, object_id) in user.votes.values_list("content_type__model", "object_id"):
        # renamed from `list` to avoid shadowing the builtin
        ids = user_votes.get(ct_model, [])
        ids.append(object_id)
        user_votes[ct_model] = ids
    return user_votes
799eff5efd184da0b5121f59025c39d0ddb593a3
701,082
def _prevent_sbd(doc):
    """
    Disables spaCy's sentence boundary detection
    """
    for token in doc:
        token.is_sent_start = False
    return doc
f749e38a156825b63b527aff52487fa72b9fa345
285,619
def normalize_timestamp(timestamp):
    """
    Format a timestamp (string or numeric) into a standardized
    xxxxxxxxxx.xxxxx (10.5) format.

    Note that timestamps using values greater than or equal to November 20th,
    2286 at 17:46 UTC will use 11 digits to represent the number of seconds.

    :param timestamp: unix timestamp
    :returns: normalized timestamp as a string
    """
    return "%016.05f" % (float(timestamp))
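For example, both numeric and string inputs normalize to the same fixed-width form:

print(normalize_timestamp(1234))   # 0000001234.00000
print(normalize_timestamp('1.5'))  # 0000000001.50000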
fa87b8e1c06343362c8c5496ad016a4671b8f125
380,907
import torch


def min_max_norm(x):
    """Min-Max normalization using PyTorch."""
    x_max = torch.max(x)
    x_min = torch.min(x)
    return (x - x_min) / (x_max - x_min)
027489c8325a250bcd33481bb61a35338a776324
49,911
import torch


def torch_multinomial(input, num_samples, replacement=False):
    """
    Like `torch.multinomial()` but works with cuda tensors.
    Does not support keyword argument `out`.
    """
    if input.is_cuda:
        return torch_multinomial(input.cpu(), num_samples, replacement).cuda()
    else:
        return torch.multinomial(input, num_samples, replacement)
bed9ac316c6f54f429d3da86a91d2337cd3b4e22
651,830
def calculate_target_shape(volume, header, target_spacing):
    """
    Calculates a new number of pixels along a dimension determined by multiplying the
    current dimension by a scale factor of (source spacing / target spacing)

    The dimension of the volume, header spacing, and target spacing must all match,
    but the common usecase is 3D

    :param volume: as numpy.ndarray
    :param header: ITK-SNAP header
    :param target_spacing: as tuple or list
    :return: target_shape as list
    """
    src_spacing = header.get_voxel_spacing()
    target_shape = [int(src_d * src_sp / tar_sp) for src_d, src_sp, tar_sp in
                    zip(volume.shape, src_spacing, target_spacing)]
    return target_shape
27a4b8aac20dead14e12c89142111c9940903c17
245,675
import random


def random_user_agent() -> str:
    """Create a randomly generated sorta valid User Agent string."""
    uas = {
        "Edge": (
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/98.0.4758.80 Safari/537.36 Edg/98.0.1108.43"
        ),
        "Chrome": (
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/97.0.4692.99 Safari/537.36"
        ),
        "Firefox": "Gecko/20100101 Firefox/96.0",
        "iphone": (
            "AppleWebKit/605.1.15 (KHTML, like Gecko) "
            "Version/15.2 Mobile/15E148 Safari/604.1"
        ),
        "Safari": (
            "AppleWebKit/605.1.15 (KHTML, like Gecko) "
            "Version/11.1.2 Safari/605.1.15"
        ),
    }
    os = {
        "windows": "Windows NT 10.0; Win64; x64",
        "iphone": "iPhone; CPU iPhone OS 15_2_1 like Mac OS X",
        "mac": "Macintosh; Intel Mac OS X 10_11_6",
    }
    template = "Mozilla/5.0 ({os}) {ua}"
    return template.format(
        os=random.choice(list(os.values())), ua=random.choice(list(uas.values()))
    )
52d2fe258abe3770dafcd40488a0aeef6438c9b5
136,076
def allow(whitelist):
    """
    Decorates a handler to filter all except a whitelist of commands

    The decorated handler will only be called if message.command is in the
    whitelist:

        @allow(['A', 'B'])
        def handle_only_a_and_b(client, message):
            pass

    Single-item whitelists may be passed as a string:

        @allow('THIS')
        def handle_only_this(client, message):
            pass
    """
    whitelist = [whitelist] if isinstance(whitelist, str) else whitelist

    def inner_decorator(handler):
        def wrapped(client, message):
            if message.command in whitelist:
                handler(client=client, message=message)
        return wrapped
    return inner_decorator
c193dddb1a530132d69a8b7d378751430aec0198
626,779
def max_common_prefix(a):
    """
    Given a list of strings (or other sliceable sequences), returns the longest common prefix

    :param a: list-like of strings
    :return: the smallest common prefix of all strings in a
    """
    if not a:
        return ''
    # Note: Try to optimize by using a min_max function to give me both in one pass.
    # The current version is still faster
    s1 = min(a)
    s2 = max(a)
    for i, c in enumerate(s1):
        if c != s2[i]:
            return s1[:i]
    return s1
e301b89e19e17d4fe40140501510c4f239b6286b
164,594
def qmap(f, q):
    """
    Apply `f` post-order to all sub-terms in query term `q`.
    """
    if hasattr(q, '_fields'):
        attrs = []
        for field in q._fields:
            attr = getattr(q, field)
            attrs.append(qmap(f, attr))
        cls = type(q)
        obj = cls(*attrs)
        return f(obj)
    elif isinstance(q, (list, tuple)):
        cls = type(q)
        return cls(qmap(f, x) for x in q)
    return f(q)
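A minimal sketch of the post-order traversal using a hypothetical namedtuple term (namedtuples expose `_fields`, so they take the first branch): leaves are transformed first, then each rebuilt node is passed through f.

from collections import namedtuple

Add = namedtuple('Add', ['left', 'right'])

def double(x):
    # double integer leaves; pass rebuilt nodes through unchanged
    return x * 2 if isinstance(x, int) else x

print(qmap(double, Add(Add(1, 2), 3)))
# Add(left=Add(left=2, right=4), right=6)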
993dc15cd875a786f89379968d793599ad5af8c9
117,609
def load_text_file(filename):
    """
    Load text file ``filename`` and return the (stripped) lines as list entries.

    :param filename: path to the file to be loaded
    :type filename: str
    :return: list of strings consisting of the (stripped) lines from filename
    """
    res = []
    with open(filename, 'r') as f:
        for line in f:
            if line == "\n":
                res.append(line)
            else:
                res.append(line.strip())
    return res
a3ea196fedf0246dcef88e440d9c6be1516296fe
484,257
def chinese_cycle(date):
    """Return 'cycle' element of a Chinese date, date."""
    return date[0]
0734a1062e81ffd661b9fe3cb12bdbcd34a793be
115,974
import re


def symbolize(s):
    """Drop non-symbol characters and convert to lowercase."""
    return re.sub(r'(?u)[^\w\-_]', '', s).lower()
8cc7638c2d35a13439bb8924cef8fba904d10aff
433,079
def apply_geo_transform(inx, iny, gt):
    """
    Apply a geotransform

    @param inx: Input x coordinate (double)
    @param iny: Input y coordinate (double)
    @param gt: Input geotransform (six doubles)

    @return: outx, outy Output coordinates (two doubles)
    """
    outx = gt[0] + inx * gt[1] + iny * gt[2]
    outy = gt[3] + inx * gt[4] + iny * gt[5]
    return outx, outy
60f48499156aefeb3de04826503d956a735d7107
522,276
import ast


def get_module_docstring(path):
    """get a .py file docstring, without actually executing the file"""
    with open(path) as f:
        return ast.get_docstring(ast.parse(f.read()))
e253372bfb6f65907a5461332d14c414c2370c66
707,283
def slice_dict(d, keys):
    """
    Only return subset of dict keys

    :param d: dict
    :param keys: list
    :return: dict
    """
    keys = set(keys).intersection(d.keys())
    sd = {k: d[k] for k in keys}
    return sd
4cba1ae4ac5c42eb7d19ddd956e9837892a6fd90
418,438
def getIndices(lines):
    """Returns list of tuples: (index1, index2, pair probability).
    """
    index_list = []
    # For each line that ends with 'ubox' (which denotes lines with indices)
    for line in filter(lambda x: x.endswith('ubox'), lines):
        # split on whitespace
        up_index, down_index, probability, ubox = line.split()
        # Build tuple with indices and pair probability
        index_list.append((int(up_index), int(down_index),
                           float(probability)))
    return index_list
286a76562b3a4307aa5ba42de872bfdb540d2dcf
97,670
import six


def _transform(providers, transform_fun, out_mapping_type=dict):
    """Syntactic sugar for transforming a providers dict.

    Args:
        providers: provider dictionary
        transform_fun: transform_fun takes a (vpkg, pset) mapping and runs it
            on each pair in nested dicts.
        out_mapping_type: type to be used internally on the transformed
            (vpkg, pset)

    Returns:
        Transformed mapping
    """
    def mapiter(mappings):
        if isinstance(mappings, dict):
            return six.iteritems(mappings)
        else:
            return iter(mappings)

    return dict(
        (name, out_mapping_type(
            [transform_fun(vpkg, pset) for vpkg, pset in mapiter(mappings)]
        ))
        for name, mappings in providers.items())
cb731daa44bf92e53c635677c6f399c9e656dee1
147,944
import itertools
import collections


def count_valid_jolt_chains(adapters: list[int], gap: int) -> int:
    """
    Count the number of configurations of functioning jolt chains from the
    charging outlet (0) to the built-in adapter (where two consecutive power
    devices must be within the given gap).

    This function implements a dynamic programming algorithm with O(nk)
    running time where n is the number of adapters and k is the input gap
    size. An improved version of this algorithm using O(n) time (not
    implemented here) is to maintain a sliding window of preceding config
    counts plus their total sum.
    """
    builtin_adapter = max(adapters) + gap
    jolt_chain = itertools.chain([0], sorted(adapters), [builtin_adapter])
    config_counts = collections.Counter({0: 1})
    for jolt in jolt_chain:
        for downstep in range(-gap, 0):
            config_counts[jolt] += config_counts[jolt + downstep]
    return config_counts[builtin_adapter]
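A small worked example: with adapters [1, 2, 3] and gap 3, the built-in adapter sits at 6 jolts, and the four valid chains are 0-3-6, 0-1-3-6, 0-2-3-6, and 0-1-2-3-6.

print(count_valid_jolt_chains([1, 2, 3], gap=3))  # 4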
c62a971b006a931f01c14145f06efb8c8536583e
176,583
def image_crop(src, x1, y1, x2, y2):
    """
    Crop image from (x1, y1) to (x2, y2).

    Parameters
    ----------
    :param src: Input image in BGR format
    :param x1: Initial coordinates for image cropping
    :param y1: Initial coordinates for image cropping
    :param x2: End coordinates of image cropping
    :param y2: End coordinates of image cropping
    """
    # NOTE: the first slice runs over axis 0 (rows), so x here indexes rows
    return src[x1:x2, y1:y2]
6ab70dc644d0d7054ea70fadcf7ec0ca381918d8
699,802
def get_selfcontrol_out_pattern(content_pattern):
    """Returns a RegEx pattern that matches SelfControl's output with the provided content_pattern"""
    return r'^.*org\.eyebeam\.SelfControl[^ ]+\s*' + content_pattern + r'\s*$'
e8bf556cbf14ef7c4ad20a60d84f049057bc4ffe
607,776
def assoc(d, key, val):
    """
    Return copy of d with key associated to val
    """
    d = d.copy()
    d[key] = val
    return d
ff69a77e20b588f8a40cf4f74947df45374b0972
500,767
def fib(n):
    """Return the n-th number in the fibonacci series.

    >>> fib(0)
    0
    >>> fib(1)
    1
    >>> fib(2)
    1
    >>> fib(3)
    2
    >>> fib(4)
    3
    >>> fib(5)
    5
    >>> fib(29)
    514229
    """
    if n <= 0:
        return 0
    elif n == 1:
        return 1
    return fib(n - 1) + fib(n - 2)
f14d884171d027e3c83c4f433248bd57530e465e
468,255
def expand_tensor(tensor, length):
    """
    :param tensor: dim: N x M
    :param length: l
    :return: tensor of (N * l) x M with every row intercalated and extended l times
    """
    rows, cols = tensor.size()
    repeated = tensor.repeat(1, length)
    return repeated.view(rows * length, cols)
5682f59bca4b93d6d0544c55afbe14de1f5e7876
256,399
def create_msearch_payload(host, st, mx=1):
    """
    Create an M-SEARCH packet using the given parameters.
    Returns a bytes object containing a valid M-SEARCH request.

    :param host: The address (IP + port) that the M-SEARCH will be sent to.
        This is usually a multicast address.
    :type host: str
    :param st: Search target. The type of services that should respond to the search.
    :type st: str
    :param mx: Maximum wait time, in seconds, for responses.
    :type mx: int
    :return: A bytes object containing the generated M-SEARCH payload.
    """
    data = (
        "M-SEARCH * HTTP/1.1\r\n"
        "HOST:{}\r\n"
        'MAN: "ssdp:discover"\r\n'
        "ST:{}\r\n"
        "MX:{}\r\n"
        "\r\n"
    ).format(host, st, mx)
    return data.encode("utf-8")
cb0da629716e992dd35a5b211f26cae3ad949dcb
698,547
import re


def verify_email(email):
    """
    This function validates the given email using a regular expression.

    Args:
        email: Email to be verified.

    Returns:
        True if email format is correct otherwise False.
    """
    email_rule = r"^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$"
    # bool() so the function returns True/False as documented,
    # rather than a match object or None
    return bool(re.search(email_rule, email))
99e27d71660d67ddb50d4d6ba759496e053fdaa9
571,481
def _ensure_aggregate(ctx, agg_uuid):
    """Finds an aggregate and returns its UUID (which is the same as the
    supplied parameter). If not found, creates the aggregate with the supplied
    UUID and returns the new aggregate's UUID.
    """
    query = """
    MERGE (agg:AGGREGATE {uuid: '%s'})
    RETURN agg
    """ % agg_uuid
    result = ctx.tx.run(query).data()
    return agg_uuid
34b09277b89db00a4e7cedaefafb16becf22d4a7
481,660
def heuristic(node, goal) -> int:
    """
    Heuristic function that estimates the cost of the cheapest path from node
    to the goal. In our case it is simple Manhattan distance.

    :param node:
    :param goal:
    :return: Manhattan distance of nodes
    """
    # expand nodes
    x1, y1 = node.get_pos()
    x2, y2 = goal.get_pos()
    # compute
    return abs(x1 - x2) + abs(y1 - y2)
b625872a0eef65e860a41f768975f724b9f8cbb6
109,402
def _string_score(input_str):
    """Sum the ord(c) for characters in a string."""
    return sum([ord(x) for x in input_str])
4674ca9c091afc2a47dd2d0a2b54e8f9adf90c71
596,949
def get_any_idi(sipper):
    """
    Returns the interdrink intervals for a Sipper,
    disregarding side or bottle contents

    Parameters
    ----------
    sipper : Sipper
        sipper data loaded into the Sipper class

    Returns
    -------
    idi_minutes : pandas.Series
        array of the interdrink intervals in minutes
    """
    data = sipper.data
    combined = data['LeftCount'].diff() + data['RightCount'].diff()
    combined.dropna(inplace=True)
    combined = combined[combined > 0]
    idi_delta = combined.index.to_series().diff().dropna()
    idi_minutes = idi_delta.dt.total_seconds() / 60
    return idi_minutes
178fa152d432a7bc33ede3594a7c3577f89860f0
152,057
from typing import Dict
from typing import List
from typing import Tuple
import re


def _extract_attributes(line: str) -> Dict[str, str]:
    """Extracts attributes from an m3u8 #EXT-X tag to a python dictionary."""
    attributes: Dict[str, str] = {}
    line = line.strip().split(':', 1)[1]
    # For a tighter search, append ',' and search for it in the regex.
    line += ','
    # Search for all KEY=VALUE
    matches: List[Tuple[str, str]] = re.findall(
        r'([-A-Z]+)=("[^"]*"|[^",]*),', line)
    for key, value in matches:
        attributes[key] = value
    return attributes
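A quick usage sketch on a hypothetical #EXT-X-STREAM-INF tag; note that quoted values keep their surrounding quotes.

tag = '#EXT-X-STREAM-INF:BANDWIDTH=1280000,RESOLUTION="1920x1080"'
print(_extract_attributes(tag))
# {'BANDWIDTH': '1280000', 'RESOLUTION': '"1920x1080"'}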
2154b95a9ec018d70af4fa0481a84107cf64e8b0
441,417
from pathlib import Path


def getfilepath(filename: str) -> Path:
    """
    Searches for the filepath in the current directory and parent directory
    If an empty string or nothing is passed as the parameter, sets the current
    working directory as path

    Args:
        filename: An str object that represents a file or a folder path

    Return:
        Path to the file object
    """
    filepath = None
    if not filename:
        filepath = Path(__file__).parent
    else:
        filepath = Path(filename)
        if not filepath.exists():
            check_in_parent = Path(__file__).parent / filename
            if not check_in_parent.exists():
                raise FileNotFoundError
            else:
                filepath = check_in_parent
    return filepath
ef4efe7b333ca25c676e3ffc7e83d6fb59f32f91
246,803
def is_in_element(coords, data_labeled):
    """
    Evaluate if the given coords are in an element.

    Parameters
    ---------
    coords: Tuple
        Contains the indices to check
    data_labeled : NdArray
        Data with elements labeled

    Returns
    ---------
    Boolean
        True if coords point to an element of the data, False otherwise.
    """
    return data_labeled[coords] != 0
85968c2f034955cf74f5ef47afcf9d4aaf5bde2e
590,895
from typing import Tuple


def calculate_rules_indices(rules_idx: list, fold_id: int, num_folds: int) -> Tuple[list, list]:
    """
    Calculates the indices of the samples which are to be included in training and test sets for k-fold cross validation

    :param rules_idx: all rules indices (shuffled) that are to be split into cw training & cw test set rules
    :param fold_id: number of a current hold-out fold
    :param num_folds: the whole number of folds the data should be split into
    :return: two arrays containing indices of rules that will be used for cw training and cw test set accordingly
    """
    test_rules_idx = rules_idx[fold_id::num_folds]
    train_rules_idx = [rule_id for rule_id in rules_idx if rule_id not in test_rules_idx]
    if not set(test_rules_idx).isdisjoint(set(train_rules_idx)):
        raise ValueError("Splitting into train and test rules is done incorrectly.")
    return train_rules_idx, test_rules_idx
9be1004ed0d1d2934f97d965358ea0a237d4b50b
170,592
def nillable_string(func):
    """Decorator that returns None if input is None."""
    def wrapper(cls, string):
        if string is None:
            return None
        else:
            return func(cls, string)
    return wrapper
e4dc2fda61334e6ed1368dfca431bdc5b8479e6c
696,322
def reformat_replace_star_list_with_dash_list(text):
    """
    Replaces bullet lists starting with `*` with the lists starting with `-`

    :param text: Original text
    :return: New text without `*` bullet lists
    """
    lines = text.split('\n')
    new_text = ''
    for line in lines:
        if line.strip().startswith('* '):
            line = line.replace('* ', '- ', 1)
        new_text += line + '\n'
    return new_text
d80d1abae6c54daecabf9434e083ec3abd24bcc3
649,111
def subclasses(cls):
    """List all subclasses of cls recursively."""
    p = {cls}
    subclasses = set()
    while p:
        subclasses.update(p)
        q = set()
        for base in p:
            for subclass in type.__subclasses__(base):
                if subclass not in subclasses:
                    q.add(subclass)
        p = q
    return subclasses
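A quick sketch of the breadth-first walk on a hypothetical three-level hierarchy; note the result includes cls itself.

class A: pass
class B(A): pass
class C(B): pass

print(sorted(k.__name__ for k in subclasses(A)))  # ['A', 'B', 'C']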
836fffe9a6bc34b39b3ae9ba1b9f9ca2b8b9456f
375,563
def hc_response() -> str:
    """Generate a health check response."""
    response = 'HTTP/1.1 200 OK\n'
    response_body = 'Healthy'
    # print('sending healthcheck')
    response_headers = {
        'Content-Type': 'text/html; encoding=utf8',
        'Content-Length': len(response_body),
        'Connection': 'close',
    }
    response += ''.join(f'{k}: {v}\n' for k, v in response_headers.items())
    response += f'\n{response_body}'
    return response
81be8fb786594a2a228606dcebc0c7d109eb2109
245,205
def decimal_normalize(value):
    """
    Normalize a decimal value, e.g. strip trailing zeros.
    """
    return value.normalize()
e9737bcb3d0b09a247ec89c3db257ca62550d5c6
50,129
from pathlib import Path


def get_data_dir() -> Path:
    """Return the path of the data volume."""
    return Path("/app/data")
e9f437046d9b58313f8356d1fc94304331413d85
313,123
def days_with_errors_query(limit=None):
    """Return an SQL string to query for days with many HTTP errors.

    Args:
        limit (int): The maximum number of results to query for. If omitted,
            query for all matching results.

    Returns:
        str: An SQL string. When used in a query, the result will contain two
            columns describing days which had an HTTP response error rate of
            more than 1%:
            - The date as a string (e.g. "December 31, 2000").
            - The rate of HTTP requests for that day which did not result in
              a "200 OK" response (e.g. "1.11% errors").
    """
    return """
        WITH date_errpercent AS (
            SELECT time::date AS date, (
                -- Count the number of errors on this date
                sum(CASE WHEN status != '200 OK' THEN 1 ELSE 0 END)
                -- Calculate percentage compared to the total requests
                -- Convert from integer so that we get decimal places
                / count(*)::decimal * 100
            ) as err_percent
            FROM log
            GROUP BY date
        )
        SELECT to_char(date, 'FMMonth FMDD, YYYY') AS date,
               format('%s%% errors', round(err_percent, 2)) AS errors
        FROM date_errpercent
        WHERE err_percent > 1
        ORDER BY err_percent
        LIMIT {number};
    """.format(number=limit or 'ALL')
40f031676915ac6d3987c3db0feaae0232725150
480,012
def fit_beta_mean_uncertainty(mean, var):
    """
    Compute parameters from the mean and variance following this parameterization:
    https://en.wikipedia.org/wiki/Beta_distribution#Mean_and_variance
    """
    EPSILON = 1E-2
    mean = EPSILON if mean == 0 else 1 - EPSILON if mean == 1 else mean
    nu = -1 + (mean * (1 - mean)) / var
    alpha = mean * nu
    beta = (1 - mean) * nu
    if alpha < 0 or beta < 0:
        pass
    return alpha, beta
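A worked instance of the parameterization: for mean 0.5 and variance 0.05, nu = -1 + 0.25/0.05 = 4, giving alpha = beta = 2.

print(fit_beta_mean_uncertainty(0.5, 0.05))  # (2.0, 2.0)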
a5097d368f93746c6a03fc0ad6144e14694931c4
447,380
def has_undir_edge(g, x, y):
    """
    Returns whether there is an undirected edge from x to y in Graph g
    """
    return g.has_edge(x, y) and g.has_edge(y, x)
922019db92855515d98236740e62df01cef572eb
519,257
def get_week_sec(epoch, epoch_start):
    """Returns seconds since the beginning of the week

    Parameters
    ----------
    epoch : datetime.datetime
    epoch_start : datetime.datetime

    Returns
    -------
    seconds : float
    """
    current_epoch = epoch - epoch_start
    week = current_epoch.days / 7.
    seconds = (week - int(week)) * (7 * 24 * 60 * 60)
    seconds += current_epoch.seconds
    return seconds
3810daaa4165286c4e063b284578255a2e4700a1
99,950
def signal_lf_hf_ratio(lf, hf):
    """Computes the ratio between the low and high frequency components of a signal

    Parameters
    :param lf: scalar
        Summation of low frequency energies in a signal
    :param hf: scalar
        Summation of high frequency energies in a signal
    :return: scalar
        Ratio of the low and high frequency energies in a signal
    """
    return lf / hf
95f9033d964210ffc98716b6cb64ec0bb30d0e8b
83,532
def distribute_atoms(atoms, n):
    """
    split a 1D list atoms into n nearly-even-sized chunks.
    """
    k, m = divmod(len(atoms), n)
    return [atoms[i*k + min(i, m): (i+1)*k + min(i+1, m)] for i in range(n)]
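For example, splitting ten items into three chunks puts the remainder in the leading chunks:

print(distribute_atoms(list(range(10)), 3))
# [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]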
12a21245f2e1cb412bdb35aadf3c7d130d11107f
692,409
def str_starts_with_any_in_list(string_a, string_list):
    """Check if string_a starts with any string in the provided list of strings

    Parameters
    ----------
    string_a : str
        Any string
    string_list : list
        A list of strings

    Returns
    -------
    bool
        True if ``string_a`` starts with any string in ``string_list``
    """
    for string_b in string_list:
        if string_a.startswith(string_b):
            return True
    return False
a0431d9e0461c25bb3011197f4cd68211ae62266
256,578
def get_attrib_recursive(element, *attribs):
    """Find the first attribute in attribs in element or its closest ancestor
    that has any of the attributes in attribs.

    Usage examples:
        get_attrib_recursive(el, "fallback-langs")
        get_attrib_recursive(el, "xml:lang", "lang")

    Args:
        element: an etree element where to search for attributes in attribs
        attribs: one or more attribute label(s) to search for

    Returns:
        the value of the first attribute in attribs found in element or the
        closest ancestor that has any of the attributes in attribs, or None
    """
    for attrib in attribs:
        # We could also use element.attrib[attrib] instead of xpath, but it only
        # works for attributes without a namespace, like attrib="lang", while xpath
        # also works for attributes with a namespace, like attrib="xml:lang".
        path = element.xpath("./@" + attrib)
        if path:
            return path[0]
    if element.getparent() is not None:
        return get_attrib_recursive(element.getparent(), *attribs)
    else:
        return None
d04ba71a280bd1697cc61b79af21df46023473d2
10,012
def get_distance(point_cloud):
    """
    :param point_cloud: (B, C, N) Point cloud data.
    :return: (B, N) Distance of each point in the point cloud.
    """
    distance = point_cloud.pow(2).sum(axis=1).sqrt()
    return distance
e8f77872d1e8d36a45740230a60b1c9c6f76f7af
136,037
def oct2dec(x):
    """
    Convert octal string to decimal number. For instance: '11' -> 9
    """
    return int(x, 8)
e94f5b66262852c99e976574fd699becd8363a5c
344,193
def f_to_k(temp):
    """
    Converts Fahrenheit to Kelvin.
    """
    return (temp + 459.67) * 5/9
8f7bafca6e55acd3937d4783c681b477fefce5af
424,786
def BuildCvt(cvt):
    """Organizes information about a single CoordinatedVideoTiming object.

    Full object name: coordinated_video_timings.CoordinatedVideoTiming.

    Args:
        cvt: A single CoordinatedVideoTiming object.

    Returns:
        A dictionary of coordinated video timing information.
    """
    return {
        'Active vertical lines': cvt.active_vertical_lines,
        'Aspect ratio': cvt.aspect_ratio,
        'Preferred refresh rate': cvt.preferred_vertical_rate,
        'Supported refresh rates': cvt.supported_vertical_rates
    }
baf3caaf881abaee69c078984de2c2960c44d82c
124,556
def bucopt(self, method="", nmode="", shift="", ldmulte="", rangekey="", **kwargs):
    """Specifies buckling analysis options.

    APDL Command: BUCOPT

    Parameters
    ----------
    method
        Mode extraction method to be used for the buckling analysis:

        LANB - Block Lanczos

        SUBSP - Subspace iteration

    nmode
        Number of buckling modes (i.e., eigenvalues or load multipliers) to
        extract (defaults to 1).

    shift
        By default, this value acts as the initial shift point about which
        the buckling modes are calculated (defaults to 0.0).

    ldmulte
        Boundary for the load multiplier range of interest (defaults to ).

    rangekey
        Key used to control the behavior of the eigenvalue extraction method
        (defaults to CENTER):

        CENTER - Use the CENTER option control (default); the program
        computes NMODE buckling modes centered around SHIFT in the range of
        (-LDMULTE, +LDMULTE).

        RANGE - Use the RANGE option control; the program computes NMODE
        buckling modes in the range of (SHIFT, LDMULTE).

    Notes
    -----
    Eigenvalues from a buckling analysis can be negative and/or positive.
    The program sorts the eigenvalues from the most negative to the most
    positive values. The minimum buckling load factor may correspond to the
    smallest eigenvalue in absolute value, or to an eigenvalue within the
    range, depending on your application (i.e., linear perturbation buckling
    analysis or purely linear buckling analysis).

    It is recommended that you request an additional few buckling modes
    beyond what is needed in order to enhance the accuracy of the final
    solution. It is also recommended that you input a non zero SHIFT value
    and a reasonable LDMULTE value (i.e., a smaller LDMULTE that is closer
    to the last buckling mode of interest) when numerical problems are
    encountered.

    When using the RANGE option, defining a range that spans zero is not
    recommended. If you are seeking both negative and positive eigenvalues,
    it is recommended that you use the CENTER option.

    This command is also valid in PREP7. If used in SOLUTION, this command
    is valid only within the first load step.

    Distributed ANSYS Restriction: Both extraction methods (LANB and SUBSP)
    are supported within Distributed ANSYS. However, the subspace iteration
    eigensolver (SUBSP) is the only distributed eigensolver that will run a
    fully distributed solution. The Block Lanczos eigensolver (LANB) is not
    a distributed eigensolver; therefore, you will not see the full
    performance improvements with this method that you would with a fully
    distributed solution.
    """
    command = f"BUCOPT,{method},{nmode},{shift},{ldmulte},{rangekey}"
    return self.run(command, **kwargs)
0c1256711940b6511ff6a7e6a5aa9ab0b0d2d625
627,008
import click


def module_template_options(func):
    """Merge the module template option decorators into a single one."""
    template_dec = click.option("--template", "-t",
                                default="csharp",
                                show_default=True,
                                required=False,
                                type=click.Choice(["c", "csharp", "java", "nodejs", "python", "csharpfunction"]),
                                help="Specify the template used to create the default module")
    group_id_dec = click.option("--group-id", "-g",
                                default="com.edgemodule",
                                show_default=True,
                                help="(Java modules only) Specify the groupId")
    return template_dec(group_id_dec(func))
246b65725c199ac21dd5e22aa355f830143929fd
95,148
def endpoints(edge):
    """
    Return a pair with the edge's **endpoints** (the nodes it connects).
    """
    if len(edge) == 2:
        return tuple(edge)
    else:
        return tuple(edge) + tuple(edge)
bc958c9933bf44af801b6c9c53be368ed9f67d3d
392,188
def invite_user(slack_client, user, channel):
    """
    Invite a user to a given channel.
    """
    response = slack_client.api_call("channels.invite",
                                     channel=channel,
                                     user=user)
    return response
6519e85299ef817aca5a9a734599e38d7b6a7905
66,281
def get_epsilon_rate(data) -> str:
    """
    Computes the epsilon rate by taking the least common bit for each bit
    in all binary entries of the given data set.

    :param data: Array of binary numbers arranged as Strings.
    :return: Epsilon rate as a string of binary values.
    """
    _output = ''
    for elem in range(len(data[0])):
        _count_zeroes = 0
        _count_ones = 0
        # Iterate over each bit of the entries
        for bit in range(len(data)):
            if int(data[bit][elem]) == 0:
                _count_zeroes += 1
            elif int(data[bit][elem]) == 1:
                _count_ones += 1
        # Check if there are more '1' or '0' for the current bit
        if _count_ones > _count_zeroes:
            _output += '0'
        else:
            _output += '1'
    return _output
f5c2ce8e07e08c0a6673f6ea81948b68d7c5e36b
568,826
def xgcd(a: int, b: int) -> tuple:
    """Extended Euclidean (GCD) algorithm

    gcd(a, b) = u*a + v*b

    Returns gcd, u, v
    """
    x, x1, y, y1 = 1, 0, 0, 1
    while b:
        q, a, b = a // b, b, a % b
        x, x1 = x1, x - q * x1
        y, y1 = y1, y - q * y1
    return a, x, y
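A classic worked case: gcd(240, 46) = 2 with Bezout coefficients u = -9 and v = 47, since -9*240 + 47*46 = 2.

g, u, v = xgcd(240, 46)
print(g, u, v)           # 2 -9 47
print(u * 240 + v * 46)  # 2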
3dba9fee305f2c423f84607177ca1b9025d33978
16,118
def is_scheduled(num_words, frequency, words_per_iteration):
    """Checks if an event is scheduled to be performed within given number of
    updates after this point.

    For example, words_per_iteration=9, frequency=2:

    num_words:     1   2   3   4  [5]  6   7   8  [9] 10  11  12
    * frequency:   2   4   6   8  10  12  14  16  18  20  22  24
    modulo:        2   4   6   8   1   3   5   7   0   2   4   6

    :type num_words: int
    :param num_words: number of words so far

    :type frequency: int
    :param frequency: how many times per iteration the event should be
        performed

    :type words_per_iteration: int
    :param words_per_iteration: number of words in one iteration

    :rtype: bool
    :returns: whether the operation is scheduled to be performed
    """
    modulo = num_words * frequency % words_per_iteration
    return modulo < frequency
9949e1af3384b3e592ef673e76e6d7dd88d8876e
134,129
def row_major_form_to_ragged_array(flat, indices):
    """Convert [1, 2, 3, 5, 6], [0, 3] to [[1,2,3], [5,6]] for serialization
    """
    endices = indices[1:] + [None]
    return [flat[start:end] for start, end in zip(indices, endices)]
e085735f2b3e6f73f63a37d6f7024aaca5eddf5b
137,420
def pix_to_xyz(pixel, height, bounds, pixel_size, skip_height=False):
    """Convert from pixel location on heightmap to 3D position."""
    u, v = pixel
    x = bounds[0, 0] + v * pixel_size
    y = bounds[1, 0] + u * pixel_size
    if not skip_height:
        z = bounds[2, 0] + height[u, v]
    else:
        z = 0.0
    return (x, y, z)
06d05b273865ad08e44e507a8172f7d495dc58d9
512,282
def fib(n: int) -> int:
    """Returns the n'th Fibonacci number.

    Note that this function uses zero style indexing, meaning that the first
    Fibonacci number has an index of zero (n=0)."""
    f_p, f_c = 0, 1
    for _ in range(n):
        f_n = f_c + f_p
        f_p = f_c
        f_c = f_n
    return f_c
431c0ba58b2dfb25eca03c14890d7b6ecab88f0d
27,213
def _consolidate_bounds_with_equality_constraints(equality_pc, params):
    """Consolidate bounds with equality constraints.

    Check that there are no incompatible bounds on equality constrained parameters
    and set the bounds for equal parameters to the strictest bound encountered on
    any of them.

    Args:
        equality_pc (list): List of constraints of type "equality".
        params (pd.DataFrame): see :ref:`param`.

    Returns:
        pp (pd.DataFrame): Copy of params with stricter bounds.
    """
    pp = params.copy()
    lower = pp["lower_bound"].copy()
    upper = pp["upper_bound"].copy()
    for eq in equality_pc:
        lower.iloc[eq["index"]] = lower.iloc[eq["index"]].max()
        upper.iloc[eq["index"]] = upper.iloc[eq["index"]].min()
    pp["lower_bound"] = lower
    pp["upper_bound"] = upper
    return pp
a9034b7dc3c1c33913363fb5e996117027d19013
344,260
def to_int(value):
    """Converts the given string value into an integer. Returns 0 if the
    conversion fails."""
    try:
        return int(value)
    except (TypeError, ValueError):
        return 0
f219844de96d1d2236e94c4427c0ad27cc4b587b
701,832
def FormatDate(DateTime):
    """
    Return an HL7 formatted DateTime (YYYYMMDDHHmmss).

    Arguments:
    DateTime -- The datetime.datetime object to format
    """
    return DateTime.strftime('%Y%m%d%H%M%S')
a98d81480637287792d9cb82009550fbac979851
297,698
def hit_count(result):
    """Parse the hit count from an ElasticSearch response

    ES7: result['hits']['total']['value'], ES6: result['hits']['total']
    """
    return result['hits']['total']['value'] \
        if type(result['hits']['total']) is dict \
        else result['hits']['total']
254241975f5e5198cd96e423c175d728f81ef4b2
182,429
def is_member_of(userObj, groupObj):
    """
    Will determine if a user is a member of a group.
    Returns ``True`` or ``False``
    """
    if userObj.groups:
        for group in userObj.groups:
            if group.group_name == groupObj.group_name:
                return True
        return False
    else:
        return False
0e08688ce975dc6333ce75742d209b17e08b8f0f
374,917
def get_object_id(obj: dict) -> str:
    """Returns unique id of the object."""
    return obj["name"].rpartition("/")[-1]
806942ca0c5e033e9bdf6343e65a0e6895f75d92
383,362
import json


def read_json(read_path):
    """Returns dict from JSON file"""
    with open(read_path, 'r') as f_in:
        # read in JSON file as dict
        data = json.load(f_in)
    return data
e54ce61645012b6dc5efdd9cc9c81f35cdb6a377
240,866
def identity(x):
    """The identity activation function. Shortcut is ``linear``.

    Parameters
    ----------
    x : Tensor
        input.

    Returns
    -------
    Tensor
        A ``Tensor`` in the same type as ``x``.
    """
    return x
6815fd8d46c0ec3921a39ff588ee7d9b19ed0e67
507,345
def derived_P_species(df_R, f_TDP):
    """
    Calculate:
        Total P as the sum of TDP and PP
        SRP as a function of TDP
    for both the daily mass flux and the concentrations

    Inputs: reach results dataframe; value to multiply TDP by to get SRP

    Returns: dataframe of reach results, with extra columns added
    """
    # Calculate total P as the sum of TDP and PP
    df_R['TP_mgl'] = df_R['TDP_mgl'] + df_R['PP_mgl']           # Concentration (mg/l)
    df_R['TP_kg/day'] = df_R['TDP_kg/day'] + df_R['PP_kg/day']  # Mass (kg/day)

    # Calculate SRP from TDP using a constant user-supplied factor
    df_R['SRP_mgl'] = df_R['TDP_mgl'] * f_TDP                   # Concentration (mg/l)
    df_R['SRP_kg/day'] = df_R['TDP_kg/day'] * f_TDP             # Mass (kg/day)

    return df_R
3c5193c38847421fcffbfbf367aa643ca817aeb6
655,826
def _undefined_pattern(value, fn, undefined):
    """
    If ``fn(value) == True``, return `undefined`, else `value`.
    """
    if fn(value):
        return undefined
    return value
634629dce914be4c948105c9b4b385fb22dcccd4
224,994
def address_pairs(fields):
    """
    Zips address fields into pairs, appending the last field if the
    total is an odd number.
    """
    pairs = list(zip(fields[::2], fields[1::2]))
    if len(fields) % 2:
        pairs.append(fields[-1])
    return pairs
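For example, with a hypothetical odd-length field list the final element is left unpaired:

fields = ['1 Main St', 'Springfield', 'IL', '62704', 'USA']
print(address_pairs(fields))
# [('1 Main St', 'Springfield'), ('IL', '62704'), 'USA']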
d77150a67b117f9f023dd2fd0c2bd261c4c38b5f
131,130
def trim_image(image):
    """
    Returns an image with extra whitespace cropped out.

    >>> name = "examples/images/playerShip1_orange.png"
    >>> source_image = PIL.Image.open(name)
    >>> cropped_image = trim_image(source_image)
    >>> print(source_image.height, cropped_image.height)
    75 75
    """
    bbox = image.getbbox()
    return image.crop(bbox)
23426bd57abd93a90df03e20f3ff91b2f36ab555
505,015
def _recall(tp, fn):
    """Calculate recall from true positive and false negative counts."""
    if fn == 0:
        return 1  # by definition.
    else:
        return tp / (tp + fn)
ef6b40b984a5620e36fda8aae54d7a0c1ffcfbe2
92,385
def _fix_date_offset_format(date_str):
    """
    Remove ':' in the UTC offset, for example:

    >>> print(_fix_date_offset_format("2018-10-15T08:49:00+02:00"))
    2018-10-15T08:49:00+0200
    """
    return date_str[:-3] + date_str[-2:]
90661b2b3ef539c044975f9c32880ca56237b8b0
142,055
def get_resources_list(object_dict, resources_list):
    """
    Method to get the list of resources labels

    :param object_dict: dictionary to describe XNAT object parameters
    :param resources_list: list of resources requested from the user
    :return: None if empty, 'all' if all selected, list otherwise
    """
    # Get list of resources' labels
    if resources_list == 'all':
        # object_dict is a dictionary, so look the key up with .get()
        # (the original called object_dict directly, which would raise a TypeError)
        res_list = object_dict.get('resources', None)
    else:
        res_list = resources_list
    return res_list
ec68442c18ad0bc185b3bfae2fb8007d3ff42cf5
375,807
from typing import Tuple


def _color_rgb_to_int(rgb_color_tuple: Tuple[int, int, int]) -> int:
    """Convert an RGB color tuple to a 24 bit integer.

    Parameters
    ----------
    rgb_color_tuple : Tuple[int, int, int]
        The color as RGB tuple. Values must be in the range 0-255.

    Returns
    -------
    int
        Color as 24 bit integer
    """
    return int("0x{:02x}{:02x}{:02x}".format(*rgb_color_tuple), 0)
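For instance, pure red packs to 0xff0000:

print(_color_rgb_to_int((255, 0, 0)))  # 16711680 (0xff0000)
print(_color_rgb_to_int((0, 0, 255)))  # 255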
ffd04cecfba4868fc4acc228056e029a9011d0ae
515,033
def make_desc_dict(ecfp):
    """
    Unzip fingerprint information into separate feature and value tuples

    :param ecfp: Tuple of fingerprint features and values
    :return: tuple of fingerprint features, tuple of fingerprint values
    """
    ecfp_feat, ecfp_val = zip(*ecfp)
    return ecfp_feat, ecfp_val
5925cda8d428fad5c9bfd46a7a26276e34256569
34,002
def CombineListResults(result1, result2):
    """Combines two outputs of list operations into one.

    Either result can be None, or a JSON dictionary representing the result
    of a list operation.

    Args:
        result1: First list result to combine, or None
        result2: Second list result to combine, or None

    Returns:
        Combined result containing resources from both result1 and result2.
    """
    if result1 is None:
        return result2
    if result2 is None:
        return result1
    items = []
    items.extend(result1.get('items', []))
    items.extend(result2.get('items', []))
    kind = result1.get('kind') or result2.get('kind')
    return {'kind': kind, 'items': items}
7b2cbced73fc39056fcf29ff299f578da4c646d3
409,197