content (string, 39–9.28k chars) · sha1 (string, 40 chars) · id (int64, 8–710k)
from typing import Optional
from typing import Mapping
from typing import Union


def error_response(
    reason: str,
    message: Optional[str] = None
) -> Mapping[str, Union[str, Mapping[str, str]]]:
    """Build error response structure.

    :param reason: error reason
    :type reason: str
    :param message: error message, defaults to None
    :type message: Optional[str], optional
    :return: error response structure
    :rtype: Mapping[str, Union[str, Mapping[str, str]]]
    """
    return {
        'status': 'error',
        'error': {
            'reason': reason,
            'message': message or '',
        }
    }
886ea8dfbe5f6155ed172eb6049adc2da0792e75
300,567
def _fan_in_concat_shape(input_shape, axis=-1):  # pylint: disable=invalid-name
    """Helper to determine the shape of FanInConcat output."""
    ax = axis % len(input_shape[0])
    concat_size = sum(shape[ax] for shape in input_shape)
    out_shape = input_shape[0][:ax] + (concat_size,) + input_shape[0][ax + 1:]
    return out_shape
59a0589d626dfa4a1b2e196f9cbd2b2d1b3b86c1
190,516
def file2set(file_obj):
    """Turn lines in a file into a set."""
    return set(line.strip() for line in file_obj)
d22269e8e177d361e61d838ab122d6fdf646455a
372,863
from collections import Counter
import re
import math


def cosine(text1, text2):
    """
    Find the similarity between two strings using cosine vectors.

    Returns float score value, 0.0 being completely different strings
    and 1.0 being equal strings.

    Arguments:
        text1: main string to compare against.
        text2: second string to compare to text1.
    """
    try:
        vec1 = Counter(re.compile(r"\w+").findall(text1))
        vec2 = Counter(re.compile(r"\w+").findall(text2))
        intersection = set(vec1.keys()) & set(vec2.keys())
        numerator = sum(vec1[x] * vec2[x] for x in intersection)
        sum1 = sum(vec1[x] ** 2 for x in vec1)
        sum2 = sum(vec2[x] ** 2 for x in vec2)
        denominator = math.sqrt(sum1) * math.sqrt(sum2)
        if not denominator:
            return 0.0
        return float(numerator) / denominator
    except TypeError:  # non-string input; narrowed from a bare `except`
        return None
1444c57501e94ef1da7df0406332c6681fe447a8
257,136
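A minimal usage sketch of cosine(), added for illustration (the inputs are hypothetical, not from the original row):

# identical strings score exactly 1.0; strings sharing no words score 0.0
assert cosine("the quick brown fox", "the quick brown fox") == 1.0
assert cosine("apples oranges", "bicycle") == 0.0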
import torch


def impute(x, lengths, channels_last=True):
    """
    Set elements of a batch of a sequence of tensors to zero according to
    sequence lengths.

    :param x: A tensor with shape [batch, time_step, ...] or [batch, ..., time_step]
    :param lengths: A tensor with shape [batch]
    :param channels_last: A bool. If true, the time_step dimension is the
        second dimension, otherwise the last.
    :returns: A tensor with the same shape of x, with elements time_step >
        corresponding length set to 0.
    """
    if channels_last:
        max_length = x.shape[1]
    else:
        max_length = x.shape[-1]
    mask = torch.arange(max_length, device=lengths.device)[None, :] < lengths[:, None]  # [B, T]
    for _ in range(len(x.shape) - 2):
        if channels_last:
            mask = mask.unsqueeze(-1)
        else:
            mask = mask.unsqueeze(1)
    return x * mask
b1f6fa73a0ce68b726c17fd2a533c2fc41651f12
634,924
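A small shape check for impute(), added as an example (tensor values are hypothetical):

import torch

x = torch.ones(2, 4, 3)            # [batch=2, time=4, features=3]
lengths = torch.tensor([2, 4])
out = impute(x, lengths)           # channels_last=True by default
assert out.shape == (2, 4, 3)
assert out[0, 2:].sum() == 0       # steps past length 2 are zeroed
assert out[1].sum() == 12          # full-length sample is untouched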
def _parse_nodata_values(input_str):
    """
    Helper callback function to parse no-data values.
    """
    return [int(part) for part in input_str.split(",")]
ecdb917de5b45fa61dcaf72d4edf3bcd17452df8
190,715
def inchesToPoints(inches):
    """
    Converts the given number of inches to points.
    """
    return inches * 72.0
fcaede227e314e3cf4654a711b7084556d8a9a36
543,582
def linear_search(arr, value):
    """
    My Python implementation of linear search

    Searches an array and returns either the index of the value (if found)
    or -1 (if not found)

    Time complexity: O(n)
    Space complexity: O(1)
    """
    for i in range(len(arr)):  # O(n)
        if arr[i] == value:
            return i
    return -1
3bd79ace661d0019ca0ce9e6f533a160146832c8
380,595
def _GetIndentedString(indentation, msg):
    """
    Return `msg` indented by `indentation` number of spaces
    """
    return ' ' * indentation + msg
8161f3b850ff9d2804541bcf7c12f94087d5f7fd
345,491
def update_rbac_assignment(
    self,
    username: str,
    roles: str = "null",
    asset: str = "null",
) -> bool:
    """Create or update rbac assignment

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - rbacAssignment
          - POST
          - /rbac/assignment

    :param username: Username for assignment
    :type username: str
    :param roles: Comma separated list of roles. Required if asset value
        is not provided, e.g. "role1, role2, role3", defaults to "null"
    :type roles: str, optional
    :param asset: Appliance access group or asset name. Required if roles
        value is not provided, e.g. "group1", defaults to "null"
    :type asset: str, optional
    :return: Returns True/False based on successful call
    :rtype: bool
    :raises ValueError: Both optional parameters cannot be value of "null"
    """
    if roles == "null" and asset == "null":
        raise ValueError("Roles and asset variables cannot both be 'null'")
    data = {
        "username": username,
        "roles": roles,
        "asset": asset,
    }
    return self._post(
        "/rbac/assignment",
        data=data,
        return_type="bool",
    )
53bfa4adb35ba8c50762b06b614de97f59aca63e
72,080
def table(rows, margin=0, columns=[]):
    """
    Return string representing table content, returns table as string and
    as a list of strings.

    It is okay for rows to have different sets of keys, table will show
    union of columns with missing values being empty spaces.

    :param rows: list of dictionaries as rows
    :param margin: left space padding to apply to each row, default is 0
    :param columns: extract listed columns in provided order, other columns
        will be ignored
    :return: table content as string and as list
    """
    def projection(cols, columns):
        return [(x, cols[x]) for x in columns if x in cols] if columns else cols.items()

    def row_to_string(row, columns):
        values = [(row[name] if name in row else "").rjust(size) for name, size in columns]
        return "|%s|" % ("|".join(values))

    def header(columns):
        return "|%s|" % ("|".join([name.rjust(size) for name, size in columns]))

    def divisor(columns):
        return "+%s+" % ("+".join(["-" * size for name, size in columns]))

    data = [dict([(str(a), str(b)) for a, b in row.items()]) for row in rows]
    cols = dict([(x, len(x) + 1) for row in data for x in row.keys()]) if data else {}
    for row in data:
        for key in row.keys():
            cols[key] = max(cols[key], len(row[key]) + 1)

    proj = projection(cols, columns)  # extract certain columns to display (or all if not provided)
    table = [divisor(proj), header(proj), divisor(proj)] + \
        [row_to_string(row, proj) for row in data] + [divisor(proj)]
    table = ["%s%s" % (" " * margin, tpl) for tpl in table] if margin > 0 else table
    table_text = "\n".join(table)
    return (table_text, table)
b307d23dd8b90641cbc2782a096578ea8737dcc3
519,976
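An illustrative call of table(), added here; the row data is hypothetical:

rows = [{"name": "alpha", "count": 3}, {"name": "beta"}]
text, lines = table(rows, margin=2, columns=["name", "count"])
print(text)  # union of columns; beta's missing "count" renders as spaces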
def ar(vi, vf, D=0, W=0):
    """The Arithmetic Return is the simplest way of calculating the rate of
    return on an investment. To calculate it, you need the amount of growth,
    which is simply the final value `Vf` minus the initial value `Vi`. Then
    you just divide the amount of growth by the initial amount.

    Args:
        vi: Initial value of investment
        vf: Final value of investment
        D: The total deposit made into the investment
        W: Total of any withdrawals

    Returns:
        The arithmetic return of a given investment.

    Example:
        By providing initial and final value of investment you can get the
        percentage return of your investment:

        >>> import malee
        >>> malee.ar(100, 140)
        0.4
    """
    return (vf - D + W - vi) / vi
4ef06750e6c1b9e1e1b5931ca69b20d851e40c44
437,564
def un_nested(l):
    """Returns list `l` in un-nested form (i.e., if it is a one-element list
    whose first element is a list, returns l [0]). This is handy if you want
    to support the passing of a list to a `* args' argument without using
    `apply`.

    >>> un_nested (list (range (3)))
    [0, 1, 2]
    >>> un_nested ([list (range (3))])
    [0, 1, 2]
    >>> un_nested ([list (range (3)), list (range (2))])
    [[0, 1, 2], [0, 1]]
    """
    if l and len(l) == 1 and isinstance(l[0], (list, tuple)):
        l = l[0]
    return l
d3712e7f6518ed81087030b264b905fc2da4355d
393,882
def payment_amount(balance, payment):
    """
    The amount of a payment given a balance is whichever is less
    """
    curr_payment = min(balance, payment)
    return curr_payment
4678ac5354c5d5413754ff51accafa21481f7363
506,431
def less(x, y):
    """
    Check if x < y

    :param x: Left Comparative Value
    :param y: Right Comparative Value
    :return: Boolean
    """
    return x < y
0761fad6c8fadf9b71fb786ca2922bd64a7bafb3
268,097
import random
import string


def rand_name(len_=4):
    """generates a random string for naming recording sessions"""
    return ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits)
                   for _ in range(len_))
438040d8333c1ac8586242f70974929675923aed
60,334
def lower_username(username: str) -> str:
    """Return the username lowercased, stripped, and with spaces removed"""
    return username.strip().lower().replace(" ", "")
374240011a8f4a10d456e81535c8631a1d6871a0
540,294
def calculate_distance(p1, p2):
    """Calculates the distance between two points

    Args:
        p1 (tuple): (x, y) of first point
        p2 (tuple): (x, y) of second point

    >>> calculate_distance((1,1), (1,1))
    0.0
    >>> calculate_distance((52, 3), (26, 77))
    78.43468620451031
    """
    x1, y1 = p1
    x2, y2 = p2
    distance = ((x1 - x2)**2 + (y1 - y2)**2)**0.5
    return distance
6e94b51c86500bce0cef1089ef9aeb53367c3e86
182,753
def do_range(stop):
    """
    Wrap the standard range() method, to enable things like
    {% for i in range(6) %} ...
    """
    return list(range(stop))
692c0c9708299bf6c323578fcee73804993bc512
242,813
def normalise_series(series):
    """Normalise a Pandas data series.

    i.e. subtract the mean and divide by the standard deviation
    """
    ave = series.mean()
    stdev = series.std()
    return (series - ave) / stdev
97d53c0697a56e5ab559d2564c5d7386125ed254
17,402
import torch


def accuracy(predictions, labels):
    """
    Compute the average accuracy of the given predictions.
    Inspired on https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html

    Args:
        predictions (torch.Tensor): Tensor containing the output of the
            linear output layer of the network.
        labels (torch.Tensor): Tensor containing the labels of the mini-batch

    Returns (float): average accuracy of the given predictions
    """
    _, pred_labels = torch.max(predictions.data, 1)
    total = labels.size(0)
    correct = (pred_labels == labels).sum().item()
    return correct / total
f80e27f342e30b6904f1856c968c61100abfcd56
619,591
def to_string(number):
    """
    Convert float/int to string

    >>> to_string(13)
    '13'
    """
    return str(int(number))
7f1d0f90c6de7cde246b755f020a121975330ddf
643,425
def classify(op):
    """
    Given an operation name, decide whether it is a constructor or an
    annihilator. The convention is that constructors are operations starting
    with 'c', and all other operations are annihilators.

    >>> classify("c")
    'C'
    >>> classify("c2df")
    'C'
    >>> classify("a")
    'A'
    >>> classify("h")
    'A'
    """
    # Compare by value, not identity: the original `is 'c'` relied on
    # string interning.
    if op[0] == 'c':
        return 'C'
    else:
        return 'A'
63bbab6fbb25bab8d0ae3bc68cdf9afa2e9e6347
240,894
from pathlib import Path


def load_texmfbd(lsR, ctanDict):
    """
    Create a dictionary from the ls-R database from the LaTeX installation

    :param lsR: The path to the TEXMF/ls-R
    :param ctanDict: A dictionary built from the CTAN package list
    :return: a tuple (lsRdb, allfiles)
        - lsRdb is a dictionary mapping, for every package in ctanDict, all
          the .sty, .cls, .def files listed under that directory in lsR
        - allfiles is an array of all the .sty, .cls, .def files listed in lsR
    """
    lsRdb = {}
    pkg = None
    pkgfiles = []
    allfiles = []
    allpkgs = ctanDict.keys()
    with open(lsR) as fd:
        for line in fd:
            line = line.rstrip('\n')
            if line.startswith('./doc') or line.startswith('./source'):
                # Skip source and doc entries
                continue
            if line.endswith(':') and line.startswith('./'):
                # Enter a new package
                pkg = None
                for part in reversed(Path(line[0:-1]).parts):
                    if part in allpkgs:
                        pkg = part
                        break
                pkgfiles = []
            if pkg is None:
                continue
            if line != '':
                pkgfile = Path(line)
                if pkgfile.suffix in ['.sty', '.def', '.cls']:
                    pkgfiles.append(pkgfile.name)
                    allfiles.append(pkgfile.name)
            else:
                if len(pkgfiles) > 0:
                    if pkg not in lsRdb:
                        lsRdb[pkg] = []
                    lsRdb[pkg].extend(pkgfiles)
                pkg = None
    return (lsRdb, allfiles)
01e5bb2d613124c8351764799ca416bcc34465f6
554,045
def extend_pyflow_docstring(docstring):
    """
    Updates a doc string to include the usage instructions for default
    pyflow parameters
    """
    default_docstring = """
    Pyflow arguments:
    Optional arguments:
        --run_mode          Valid options: (Default) 'local', 'sge'
        --nCores            Number of threads to use (Default: 1)
        --memMb             memory available in local mode in MB
        --mailTo            Email address to send updates on pipeline running progress.
        --pyflow_dir        Directory to store pyflow tracking data.
        --dry               Set this flag to run in dry mode (does not launch tasks).
        --isContinue        Modify the continuation method of pyflow. Default: auto
        --forceContinue     If isContinue is set, allows task definitions to change.
        --startFromTasks    A comma-delimited list of task labels as strings. Any tasks
                            which are not in this set or descendants of this set will be
                            marked as completed.
        --ignoreTasksAfter  A comma-delimited list of task labels as strings. All
                            descendants of these task labels will be ignored.
        --resetTasks        A comma-delimited list of task labels as strings. These tasks
                            and all of their descendants will be reset to the "waiting"
                            state to be re-run. Note this option will only affect a
                            workflow which has been continued from a previous run. This
                            will not override any nodes altered by the startFromTasks
                            setting in the case that both options are used together.
        --schedulerArgList  A list of additional parameters related to the scheduler
    """
    return docstring + default_docstring
025f24c09d93c5baef793a49b55677783feee468
377,054
from typing import Dict
from typing import Any


def _make_liveness_probe(port: int) -> Dict[str, Any]:
    """Generate liveness probe.

    Args:
        port (int): TCP port the probe connects to.

    Returns:
        Dict[str, Any]: liveness probe.
    """
    return {
        "tcpSocket": {
            "port": port,
        },
        "initialDelaySeconds": 45,
        "timeoutSeconds": 5,
    }
2d6b6ebf079565e163adb10af8ba7bcb7d3bfa1d
523,290
def _reverse(object2idx):
    """
    Reverse 1-to-1 mapping function.

    Return reversed mapping.

    :param object2idx: Mapping of objects to indices or vice versa.
    :type object2idx: `dict`
    :rtype: `dict`
    """
    return {v: k for k, v in object2idx.items()}
37a421e5a1bf6dc86322ed13e0baedb50e10dcc8
370,538
def horizontal_strip_gridmap(width, alternating=True):
    """
    Determines the pixel number for a grid with strips arranged horizontally.

    :param width: grid width in pixels
    :param alternating: Whether or not the lines in the grid run alternate
        directions in a zigzag
    :return: mapper(x, y)
    """
    def mapper(x, y):
        if alternating and y % 2:
            return y * width + (width - 1 - x)
        return y * width + x

    return mapper
474f420adc69af45b3ccebe3a406ec7c5a87a122
345,901
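A quick check of the zigzag mapping, added for illustration: on a hypothetical 4-pixel-wide grid, odd rows run right-to-left.

mapper = horizontal_strip_gridmap(4)
assert mapper(0, 0) == 0   # row 0 runs left-to-right
assert mapper(0, 1) == 7   # row 1 is reversed: 1*4 + (4 - 1 - 0)
assert mapper(3, 1) == 4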
import hashlib


def hasher(string, size=8):
    """Simple function to generate a SHA-256 hash of a string.

    Parameters:
    - string : string or bytes
        The string to be hashed.
    - size : int
        Size of the output hash string.

    Returns:
    - h : string
        Hash string truncated to size.
    """
    string = str(string)
    h = hashlib.sha256(string.encode()).hexdigest()
    return h[:size]
a591ae9da62acd7b958b4fd407a7f31bcda90db1
101,563
def yintercept(x, y, slope):
    """Get the y intercept of a line segment"""
    if slope is not None:
        return y - slope * x
    else:
        return None
22660b6f71e1b84a95755c141780b075773fb825
241,054
def knapsack_unbounded(w, wt, vt, n):
    """
    It's an unbounded knapsack problem as we can use 1 or more instances of
    any resource. A simple 1D array, say dp[W+1] can be used such that dp[i]
    stores the maximum value which can be achieved using all items and i
    capacity of knapsack. Note that we use 1D array here which is different
    from classical knapsack where we used 2D array. Here number of items
    never changes. We always have all items available.

    We can recursively compute dp[] using below formula:
        dp[i] = 0
        dp[i] = max(dp[i], dp[i - wt[j]] + val[j])
            where j varies from 0 to n-1 such that: wt[j] <= i

    :param w: total capacity
    :type w: int
    :param wt: weight of each element
    :type wt: list[int]
    :param vt: value of each element
    :type vt: list[int]
    :param n: number of elements
    :type n: int
    :return: the maximum value that can be put in a knapsack of capacity w
    :rtype: int
    """
    # dp[i] is going to store maximum value with knapsack capacity i
    dp = [0 for _ in range(w + 1)]

    # fill dp[] using above recursive formula
    for w_idx in range(w + 1):
        for n_idx in range(n):
            if wt[n_idx] <= w_idx:
                dp[w_idx] = max(dp[w_idx], dp[w_idx - wt[n_idx]] + vt[n_idx])

    return dp[w]
703f73cb7632c050c98b659618a257862a4bbe6e
577,844
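A worked call, added as an example with hypothetical items: with capacity 100, ten copies of the weight-10/value-30 item are optimal.

assert knapsack_unbounded(100, [5, 10, 15], [10, 30, 20], 3) == 300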
def get_field_names_for_custom_model(model, conf, use_attname=False):
    """
    Obtain fields for a custom model `model`, depending on its configuration
    `conf`. If `use_attname` is True, use the `attname` property of the
    field, else the `name` property ("attname" has a "_id" suffix for
    ForeignKeys).
    """
    if 'fields' in conf:
        fields = conf['fields']
    else:
        fields = [f.attname if use_attname else f.name for f in model._meta.fields]

    exclude = set(conf.get('exclude_fields', []))
    return [f for f in fields if f not in exclude]
7170705b1ef220f24cc44fa17fdef8b189bbb41e
141,853
def is_pr(issue):
    """
    :param issue: an issue. May also be a PR.
    :return: True iff the issue is actually a PR.
    """
    return issue.html_url and "pull" in issue.html_url
913ef274579baa74b344bbd1158bb2dc47248626
502,560
def str_to_bool(s):
    """Convert string boolean values to bool.

    The conversion is case insensitive.

    :param s: input string
    :return: True if s is 'true' otherwise False
    """
    return str(s).lower() == 'true'
9580e9f1dbcfd2a9f89b69eb3b2fe4c6276b3f9d
138,044
import time


def get_local_time_str(for_file_name=False) -> str:
    """Return current time str in the format %Y-%m-%d %H:%M:%S, or the
    file-name-safe %Y-%m-%d--%H-%M-%S when `for_file_name` is set"""
    if not for_file_name:
        cur_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    else:
        cur_time = time.strftime("%Y-%m-%d--%H-%M-%S", time.localtime())
    return cur_time
aeaae88c1ed33aa30318c486a3ecb8a413ce2f34
626,358
def get_column(dtf, column_name):
    """
    Get a column from a dataframe if it exists
    """
    if column_name in dtf.columns:
        return dtf[column_name]
    return None
83e8dbec67b4828a0d2a1b24c06423f00e72028a
528,803
def single_timeseries_to_json(value=None, unit="", label="", asset_type=""):
    """format the information about a single timeseries in a specific JSON"""
    if value is None:
        value = []
    return {"value": value, "unit": unit, "label": label, "asset_type": asset_type}
f617bd6e1b0f82effe3c1090e6c417d82f997fe8
385,904
def filter_letter_to_num(letter):
    """
    This filter turns one letter into a number. A is 0, B is 1, etc.
    This is not case sensitive.
    """
    letter = letter[0].lower()
    return ord(letter) - ord('a')
266a3a46281c8fe6ccf503cb92463e65d73ad0bc
475,398
def colons(make, v1, *values):
    """Create a hash over a sequence of values. Colons are interspersed
    between sequence elements."""
    result = make(v1)
    for value in values:
        if value:
            # assumes a hashlib-style object fed bytes, hence b':' (the
            # original passed a str, which hashlib rejects in Python 3)
            result.update(b':')
            result.update(value)
    return result
40d394c8743ccf135dc6d9a08f8899b25f5e0d65
160,353
from typing import Dict


def linear_utility(
    exchange_params_by_currency: Dict[str, float],
    balance_by_currency: Dict[str, int],
) -> float:
    """
    Compute agent's utility given her utility function params and a good bundle.

    :param exchange_params_by_currency: exchange params by currency
    :param balance_by_currency: balance by currency
    :return: utility value
    """
    money_utility = [exchange_params_by_currency[currency] * balance
                     for currency, balance in balance_by_currency.items()]
    return sum(money_utility)
9f1789800b0b4f6207cfa64bf59670ce77028944
233,641
import math


def value_within_range(value, minimum, maximum):
    """
    Checks whether the given value falls within the given minimum/maximum
    range, inclusive
    """
    if math.isclose(value, minimum, rel_tol=1e-6) or math.isclose(value, maximum, rel_tol=1e-6):
        return True
    if value >= minimum and value <= maximum:
        return True
    return False
273e53786d554dc70ed59b23e0c1b3b312ebe408
421,176
def solve(grades):
    """
    Return grades rounded up to nearest 5 if above 38, otherwise, do not
    round the score.
    """
    rounded_grades = []
    for grade in grades:
        if grade >= 38:
            # integer division gives the next multiple of 5; the original
            # used `/`, which only rounded correctly under Python 2
            rounded = ((grade + 5) // 5) * 5
            if rounded - grade < 3:
                grade = rounded
        rounded_grades.append(grade)
    return rounded_grades
42012d042ec6e997abb419f5cbcac78b607ca0b0
639,182
import inspect
from typing import Dict
from typing import List


def parse_method_signature(
    signature: inspect.Signature, param_dict: Dict[str, str]
) -> List[List[str]]:
    """Combines Signature with the contents of a param_dict to create rows
    for a parameter table.

    Args:
        signature: the output from running `inspect.signature` on a method
        param_dict: dictionary of parameter: parameter description key:
            values for the inspected method.

    Returns:
        A list containing lists of strings, each of which represents one row
        in a parameter's table. The rows are in the format [parameter name,
        parameter typing, parameter default value, parameter description].
    """
    sig_results = []
    for key, value in signature.parameters.items():
        # reset per parameter; the originals were initialised once outside
        # the loop, leaking one parameter's type/default into the next
        sig_type = ""
        sig_default = ""
        value = str(value)
        if ":" in value:
            sig_parameter, remainder = value.split(":", 1)
            if "=" in remainder:
                sig_type, sig_default = remainder.rsplit("=", 1)
            else:
                sig_type = remainder
        elif "=" in value:
            sig_parameter, sig_default = value.split("=", 1)
        else:
            sig_parameter = value
        sig_results.append(
            [sig_parameter, sig_type, sig_default, param_dict.get(sig_parameter, "")]
        )
    return sig_results
91e333a6ac37e938fdadadb7d47f312d3c43edb3
632,907
import yaml


def _get_yaml_versions(yaml_file):
    """
    Return a dict with packages and versions from requirements.yaml.
    """
    with open(yaml_file, "r") as stream:
        env = yaml.safe_load(stream)
    versions = {}
    for dependency in env["dependencies"]:
        # remove channel prefix
        if "::" in dependency:
            dependency = dependency.split("::")[1]
        # split tool and version (ignore build if present)
        package, version = dependency.split("=")[0:2]
        versions[package] = version
    return versions
3b4f0c7741a7fb0c18660f0b714580bb0436803d
505,540
import torch


def getMaskFromTensor(tensor: torch.Tensor):
    """
    This function extracts the mask from the unthresholded output of a NN

    Args:
        tensor (Tensor): Tensor image of size (1, H, W)

    Returns:
        Tensor: sigmoid probabilities in [0, 1] (no threshold applied).
    """
    # Convert logits to sigmoid probabilities
    # tensor = F.relu(tensor)
    tensor = torch.sigmoid(tensor)
    return tensor
f7907938073de282d36a9303edcfb43ad12ce957
534,721
def interval_to_cat(interval, breakpoint):
    """Compare an MIC interval with a breakpoint and assign a resistance
    category.
    """
    x, y = interval
    assert x <= y
    if x >= breakpoint:
        return "R"
    elif y < breakpoint:
        return "S"
    else:
        return "NA"
3673a406328220375a6e472b83dfa08f85788880
255,459
def dict_get_path(data, path, default=None):
    """
    Returns the value inside nested structure of data located at period
    delimited path

    When traversing a list, as long as that list is containing objects of
    type dict, items in that list will have their "name" and "type" values
    tested against the current key in the path.

    Args:
        data (dict or list): data to traverse
        path (str): '.' delimited string

    Kwargs:
        default: value to return if path does not exist
    """
    keys = path.split(".")
    for k in keys:
        if isinstance(data, list):
            found = False
            for item in data:
                name = item.get("name", item.get("type"))
                if name == k:
                    found = True
                    data = item
                    break
            if not found:
                return default
        elif isinstance(data, dict):
            if k in data:
                data = data[k]
            else:
                return default
        else:
            return default
    return data
00cc29d35f23ebff77c8d66ac95c863b70240f17
18,229
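An added example of both traversal modes in dict_get_path() — dict keys and name-matched list items (data is hypothetical):

data = {"a": {"b": [{"name": "c", "value": 1}]}}
assert dict_get_path(data, "a.b.c") == {"name": "c", "value": 1}
assert dict_get_path(data, "a.b.missing", default=0) == 0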
def get_filtered_key(input_key, indices):
    """Filter input keys to indices.

    Args:
        input_key (tuple): Contains input key information.
        indices: Which indices to select from the input key.

    Returns:
        filtered_key (tuple): The components of the input key designated by
            indices.
    """
    return tuple([input_key[i] for i in indices])
fa62171def87f56da954e78d786daddcb18b69f4
398,818
# `pow` here is Spark's column function; import it explicitly rather than
# relying on the Python builtin
from pyspark.sql.functions import pow


def exp_transformation(data, orig_col, new_col, power):
    """
    Performs feature transformation by raising the indicated feature column
    to the given power.

    Parameters:
    _________________
    data: dataframe containing training data
    orig_col: (str) column in data to be transformed
    new_col: (str) header for new transformed column
    power: (int) number to raise the original column by

    Returns:
    _________________
    transformed_df: new dataframe with transformed column added as the last col
    """
    transformed_df = data.withColumn(new_col, pow(data[orig_col], power))
    print('The transformed DataFrame is:')
    transformed_df.show()
    return transformed_df
0fb10d848d3bec1c6a8b2a3a0776a3f0bb501b0d
34,139
def find_which_schedule_this_belongs_to(schedule_array, sample_val):
    """
    Takes a sample and determines which schedule this belongs to.

    Note: A schedule is task * task sized

    :param schedule_array: list of (low, high) index bounds, one per schedule
    :param sample_val: an int
    :return: schedule num, or None if no schedule contains the sample
    """
    for i, each_array in enumerate(schedule_array):
        if each_array[0] <= sample_val <= each_array[1]:
            return i
    return None
7ae648152bd5f26344ffa6c51ec1a4ec8c367015
443,363
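A short added sketch of the inclusive-bounds lookup (bounds are hypothetical):

schedule_array = [(0, 9), (10, 19), (20, 29)]
assert find_which_schedule_this_belongs_to(schedule_array, 12) == 1
assert find_which_schedule_this_belongs_to(schedule_array, 30) is None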
def flatten_dict(d: dict) -> dict:
    """Flatten dictionary d

    Example
        >>> flatten_dict(d={"a":{1}, "b":{"yes":{"more detail"}, "no": "level below"}})
        returns {'a': {1}, 'b.yes': {'more detail'}, 'b.no': 'level below'}
    """
    def items():
        for key, value in d.items():
            if isinstance(value, dict):
                for subkey, subvalue in flatten_dict(value).items():
                    yield key + "." + subkey, subvalue
            else:
                yield key, value

    return dict(items())
ce1e6a30c9e374c7ff4e68c275d9f94e98b84eaf
444,183
import pickle


def load_keypoints_boxes(vid_path, i):
    """
    Given a video path, with .pkl file included, load the keypoints & boxes
    """
    keypoints = pickle.load(open(vid_path + "keypoints_vid" + str(i) + ".pkl", "rb"))
    keypoints[:, 0, :] = keypoints[:, 0, :] * 1.40625 + 280
    keypoints[:, 1, :] = keypoints[:, 1, :] * 1.40625
    boxes = pickle.load(open(vid_path + "boxes_vid" + str(i) + ".pkl", "rb"))
    boxes = boxes.reshape(-1, 4)
    boxes[:, 0] = boxes[:, 0] * 1.40625 + 280
    boxes[:, 1] = boxes[:, 1] * 1.40625
    boxes[:, 2] = boxes[:, 2] * 1.40625
    boxes[:, 3] = boxes[:, 3] * 1.40625
    return keypoints, boxes
7a3de4d529541ea2bc66801f6b3ad261de146132
615,017
def _(dtobj, fmt='%Y-%m-%d %H:%M:%S'):
    """Get datetime str from datetime obj"""
    return dtobj.strftime(fmt)
047073128989a87a85ada0a825a7f3ac625abb79
455,247
import re


def to_snakecase(string):
    """
    Converts the given string to snake-case.

    >>> to_snakecase('HelloWorld')
    'hello_world'
    >>> to_snakecase('__Init__File__')
    '__init_file__'
    >>> to_snakecase('')
    ''
    >>> to_snakecase('already_snake_case')
    'already_snake_case'
    >>> to_snakecase('   string  ')
    '___string__'
    >>> to_snakecase('ABCde.F.G..H..IH')
    'a_b_cde.f.g..h..i_h'

    :param string: The string to convert.
    :return: The snake-cased string.
    """
    string = re.sub(r'(\s)', lambda match: '_', string)
    string = re.sub(r'^(_*)([^_])',
                    lambda match: match.group(1) + match.group(2).lower(),
                    string)
    string = re.sub(r'(\w*)([.]+)([A-Z])',
                    lambda match: (match.group(1) + match.group(2) +
                                   match.group(3).lower()),
                    string)
    string = re.sub(r'(?<=[^_])_+([^_])',
                    lambda match: '_' + match.group(1).lower(),
                    string)
    return re.sub(r'[A-Z]', lambda match: '_' + match.group(0).lower(), string)
88a81d731e2efe2de35f38b7180f85ffdc38be0f
289,145
def calc_number_of_spikes(in_array, threshold=0.0):
    """
    :param in_array: array of values
    :param threshold: value that if in_array passes, counts as a spike
    :return: num_spikes: integer value of the number of spikes
    """
    num_spikes = 0
    # iterate over adjacent pairs; [:-1] includes the final pair (the
    # original [0:-2] skipped it)
    for in_ind, in_val in enumerate(in_array[:-1]):
        if in_val < threshold < in_array[in_ind + 1]:
            num_spikes += 1
    return num_spikes
2465427b824295830d782de2fdda6c915611c0a0
121,197
from typing import List
from typing import Callable
import inspect


def find_scrapers(module) -> List[Callable]:
    """
    Build a list of 'scraper' functions contained in a module. These consist
    of every public function (i.e., does not begin with _) that is defined,
    but not imported, in the module

    :param module: Module containing local scraper functions
    :return: List of scraper functions, each of which returns a List of
        Event dicts
    """
    def predicate(event):
        return inspect.isfunction(event) and inspect.getmodule(event) == module

    scrapers = inspect.getmembers(module, predicate)
    return [s[1] for s in scrapers if not s[0].startswith('_')]
7f6c3ebdf41f16a5998b4cba679b895b05d491da
643,276
def filter_samples(sample_dirs, changed_files):
    """Filters the list of sample directories to only include directories
    that contain changed files."""
    result = []
    for sample_dir in sample_dirs:
        if sample_dir.startswith('./'):
            sample_dir = sample_dir[2:]
        for changed_file in changed_files:
            if changed_file.startswith(sample_dir):
                result.append(sample_dir)
    return list(set(result))
9bf488bae52c37b69635a282ccf04d9821428bd5
614,488
import re


def condense_zero_units(css):
    """Replace `0(px, em, %, etc)` with `0`."""
    return re.sub(r"([\s:])(0)(px|em|%|in|cm|mm|pc|pt|ex)", r"\1\2", css)
93982e353c1fab75c4249efcfa7b0b526cf56d6c
605,187
def get_recommended_simplification_params(warning_len):
    """Return the recommended geometry simplification tolerance and buffer.

    These settings are based on the number of warnings present, and designed
    to prevent the map interface from lagging if many warnings are present.

    Parameters
    ----------
    warning_len : int
        number of warnings in the warning list.

    Returns
    -------
    dict
        {'tol': float, 'buf': float}. Parameters which determine the degree
        of shape approximation recommended for mapping.
    """
    if warning_len < 10:
        return {'tol': 0.000, 'buf': 0.000}
    tol = (round(warning_len, -1) - 10) * 0.000025
    buf = (round(warning_len, -1) - 10) * 0.00005
    return {'tol': tol, 'buf': buf}
f9f144dffb9fd5d0817fd7ca76b5d061d1b2bce7
335,358
def cached_and_cgi(name, template_func, render):
    """Return 2 functions for testing template in cached and cgi modes."""
    _template = template_func()

    def test_cached():
        # reuse early created template
        render(_template)
    test_cached.__doc__ = "test_%s" % name

    def test_cgi():
        # create new template on each call
        render(template_func())
    test_cgi.__doc__ = "test_%s_cgi" % name

    return test_cached, test_cgi
da616817b7a45cfa0c340f7cbde970e009c35f73
16,957
import torch
import unittest


def require_multi_gpu(test_case):
    """
    Decorator marking a test that requires a multi-GPU setup. These tests
    are skipped on a machine without multiple GPUs.
    """
    if torch.cuda.device_count() < 2:
        return unittest.skip("test requires multiple GPUs")(test_case)
    else:
        return test_case
84b32c17519ed6030346691a2812f03248730057
572,669
def cost(distance):
    """
    Calculates the cost to run a trip. Returns a float.

    July 21, 2020: $0.89/L of fuel in NYC
    Fuel Efficiency (Avg Estimate): 9.4L/100 km

    c(x) = price * ((distance * price)/fuel efficiency)

    ==Parameters==
    distance: distance in metres
    """
    d = distance / 1000
    price = 0.89
    fuel_e = 9.4
    amount = (d * price) / fuel_e
    return price * amount
da0039e01cbfbdb8b80eedde8d2b853ed40e239a
556,555
def _split_interaction_name_field(interactions):
    """take the interaction field "chr:start-stop,score" and return
    component parts as pandas Series
    """
    interactions = interactions.str.split(",", n=2, expand=True)
    chrom = interactions[0].str.split(":", n=2, expand=True)[0]
    start = interactions[0].str.split(
        ":", n=2, expand=True)[1].str.split("-", n=2, expand=True)[0]
    stop = interactions[0].str.split(
        ":", n=2, expand=True)[1].str.split("-", n=2, expand=True)[1]
    score = interactions[1]
    return chrom, start, stop, score
90b1acda92af98f038b8819bce4c7ecf64d6ecac
497,294
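An added illustration with a single hypothetical interaction string:

import pandas as pd

s = pd.Series(["chr1:100-200,5"])
chrom, start, stop, score = _split_interaction_name_field(s)
assert chrom[0] == "chr1" and start[0] == "100"
assert stop[0] == "200" and score[0] == "5"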
def index(lst, trunc):
    """
    Converts an n-ary index to a 1-dimensional index.
    """
    return sum([lst[i] * trunc**(len(lst) - i - 1) for i in range(len(lst))])
0df82084ff01c703e780e4a43924965ed398da3e
586,271
from typing import Iterable


def multi_assert_not_in(items: Iterable, container: Iterable) -> bool:
    """Return True if none of the items is in container."""
    for item in items:
        if item in container:
            return False
    return True
4e316cdec97d99a52a193c5f0590fb1d348ca746
353,722
import copy


def CountUnique(inputList):
    """
    Count the number of unique entries in the supplied list
    """
    lInputList = copy.copy(inputList)
    lInputList.sort()

    count = 0
    if len(lInputList) > 0:
        count += 1
    for i in range(1, len(lInputList)):
        if not lInputList[i] == lInputList[i - 1]:
            count += 1
    return count
965610acf91339bd6b6fff93602c3fe5da5ca6d6
653,819
def sort_by_median(medians, verbosity):
    """
    Make a list of taxonomic groups ordered by decreasing median median
    relative abundance

    Arguments:
        medians (dict) -- maps tax groups to their median median relative
            abundance across all samples
        verbosity (int) -- user defined parameter specifying whether or not
            to print rel abs of each tax group

    Returns:
        tax_order (list) -- names of tax groups in order of decreasing
            median median relative abundance
    """
    tax_order = []
    for key, value in sorted(medians.items(), key=lambda x: x[1], reverse=True):
        tax_order.append(key)
        if verbosity > 0:
            print(key, value)
    return tax_order
0eed7bc8b6c712d229c09586c94ff38f8aab6afb
564,980
import binascii


def encode_to_base64(content):
    """
    Calculate base64 of input byte array.

    :param content: Input byte array.
    :return: base64 encoding of input byte array.
    """
    return binascii.b2a_base64(content).strip().decode('utf-8')
8d87681e06dffba1af68ee373c391110c21793dd
235,791
def no_normal_check(inclimav):
    """
    Check if a climatological average is equal to None

    :param inclimav: the input value
    :type inclimav: float
    :return: 1 if the input value is None, 0 otherwise
    :return type: integer
    """
    result = 0
    if inclimav is None:
        result = 1
    return result
389eaf1d826ce1e05646b204b3ed3595d8093c86
193,079
def nested_pairs2dict(pairs):
    """Create a dict using nested pairs

    >>> nested_pairs2dict([["foo",[["bar","baz"]]]])
    {'foo': {'bar': 'baz'}}

    :param pairs: pairs [key, value]
    :type pairs: list
    :returns: created dict
    :rtype: dict
    """
    d = {}
    try:
        for k, v in pairs:
            if isinstance(v, list):
                v = nested_pairs2dict(v)
            d[k] = v
    except ValueError:
        return pairs
    return d
a66383253d46efa775a9ced85a0ab5128c604968
335,628
def create_freq2opacity(n: int) -> dict:
    """Create mapping between variant frequency (lower-bound) and opacity
    level"""
    freq2opacity = {}
    for i in range(n):
        freq2opacity[i] = (1. / n) * (i + 1)
    return freq2opacity
6095ec4ee9fc4b635f3b3338999dbdbbe91c2bed
402,555
def crc_calc(input_string):
    """
    Calculates cumulative XOR CRC of inputted string

    :param input_string: Str to CRC
    :return: 2 char CRC str
    """
    crc = 0
    for next_char in input_string:
        ascii_val = ord(next_char)
        crc ^= ascii_val
    hex_crc = hex(crc)
    if crc <= 0xF:
        crc_byte = '0' + hex_crc[2]
    else:
        crc_byte = hex_crc[2:4]
    return crc_byte
551dd3f04ff1f1c0c069cd4d9aa40085f4ca723e
614,077
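A worked example, added for illustration: the XOR of 'a', 'b', 'c' is 0x60.

assert crc_calc("abc") == "60"   # 97 ^ 98 ^ 99 == 0x60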
def fsummarizer(pepseq, prop_dict, func):
    """
    Summarize a peptide sequence by mapping amino acids to prop_dict entries
    and applying a summary func.

    Parameters
    ----------
    pepseq : str
        sequence.
    prop_dict : dict
        amino acid property mapping.
    func : function
        Summary function (sum, std, max, ..).

    Returns
    -------
    func applied to mapped AA outcomes.
    """
    return func([prop_dict[i] for i in pepseq if i.isupper()])
3b85a6cf920ec23e36bf42e122f6228e90203650
597,838
async def read_root():
    """My index route for testing purposes"""
    return {"result": "Welcome to the covid-19 Chatbot Back-end"}
d4820fc49d2b91c64b61851af52e30443efb7385
95,933
def _normalize_protease_rules(rule):
    """
    Given a rule like: "AB, C; D, EF, G"
    convert to: ["", "", "AB", "C", "D", "EF", "G", ""]
    """
    n_side, c_side = rule.split(";")
    n_side = [i.strip() for i in n_side.split(",")]
    c_side = [i.strip() for i in c_side.split(",")]
    eight_tuple = [""] * (4 - len(n_side)) + n_side + c_side + [""] * (4 - len(c_side))
    assert len(eight_tuple) == 8
    return eight_tuple
40912ec3c8b35e5819ef67474042c91d5692dd2b
487,280
from operator import not_
from operator import eq


def not_eq(a, b):
    """Functional form of "not-equal"."""
    return not_(eq(a, b))
5e49cdff5d32255471fcd62a961dc3d7cc145600
517,444
from pathlib import Path
import csv


def read_csv(path: Path, *args, **kwargs) -> csv.DictReader:
    """Return a csv.DictReader, removing commented lines (which start with a #)."""
    csvfile = open(path, newline="")
    # skip comment lines before handing the stream to DictReader
    return csv.DictReader(filter(lambda row: row[0] != "#", csvfile), *args, **kwargs)
d212a9c64078c97c21ada455af8bc35f644312e8
417,551
import sys


def get_file_content(file: str) -> str:
    """ Get file content. """
    try:
        with open(file, 'r') as f:
            content = f.read()
        return content
    except IOError as e:
        print(e)
        print('Exiting...')
        sys.exit(1)
c10407d73ba2cd2d84eb99c0f131d3895ede460d
704,327
def contain_filter(file, filters=None):
    """
    Check if a file contains one or many of the substrings specified in
    filters

    :param file: file name or path to test
    :param filters: iterable of substrings to look for
    :return bool:
    """
    if filters is None:
        return True
    for substring in filters:
        # splitting yields >= 2 parts exactly when substring occurs in file
        if len(file.split(substring)) >= 2:
            return True
    return False
11d01cdf1fb9a9a1fd4dd355de7fc4d2d1f25de9
307,044
def resorted(values):
    """
    Sort values, but put numbers after alphabetically sorted words.

    This function is here to make outputs diff-compatible with Aleph.

    Example::
        >>> sorted(["b", "1", "a"])
        ['1', 'a', 'b']
        >>> resorted(["b", "1", "a"])
        ['a', 'b', '1']

    Args:
        values (iterable): any iterable object/list/tuple/whatever.

    Returns:
        list of sorted values, but with numbers after words
    """
    if not values:
        return values

    values = sorted(values)

    # look for first word
    first_word = next(
        (cnt for cnt, val in enumerate(values) if val and not val[0].isdigit()),
        None
    )

    # if not found, just return the values
    if first_word is None:
        return values

    words = values[first_word:]
    numbers = values[:first_word]
    return words + numbers
9d9f6608fddc7e2cef82d939a980aaf4e97893cb
521,364
def decToDegMinSec(dd: float) -> tuple:
    """
    Converts decimal degrees to deg/min/sec.

    Parameters:
        dd (float): Decimal Degrees

    Returns:
        tuple: (degrees, minutes, seconds) of integers
    """
    isPositive = dd >= 0
    dd = abs(dd)
    minutes, seconds = divmod(dd * 3600, 60)
    degrees, minutes = divmod(minutes, 60)
    degrees = degrees if isPositive else -degrees
    return (round(degrees), round(minutes), round(seconds))
c0c46ab9be29812084a4a88efde08a9e5702757c
48,581
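Added checks against sample coordinates (values chosen for illustration):

assert decToDegMinSec(0.5) == (0, 30, 0)
assert decToDegMinSec(-73.9857) == (-73, 59, 9)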
def weekday_difference(weekday_ref, weekday_target):
    """
    Return
        1 if `weekday_target` is one day ahead of `weekday_ref`
        0 if `weekday_target` and `weekday_ref` are the same
        -1 if `weekday_target` is one day behind of `weekday_ref`
        None otherwise
    """
    if weekday_target == weekday_ref:
        return 0
    if (weekday_target + 7 - weekday_ref) % 7 == 1:
        return 1
    if (weekday_target + 7 - weekday_ref) % 7 == 6:
        return -1
    return None
5ff9a550b996fb0e49b0b67317a90d0cf9ec88be
170,029
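Added examples using Python's Monday=0 weekday convention:

MON, SUN = 0, 6
assert weekday_difference(SUN, MON) == 1    # Monday is one day after Sunday
assert weekday_difference(MON, SUN) == -1
assert weekday_difference(MON, MON) == 0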
def parse_post_meta(raw_postmeta):
    r"""
    Parse meta-data of a post contained in the special latex command
    \postmeta{...}.

    Example:
        \postmeta{
        Title: Compactness and Open Sets in $\mathbb{R}^{d}$
        Date: 2014-06-29
        Tags: topology, compactness
        Slug: compactness_open_sets
        }

    Return a dictionary
    """
    lines = raw_postmeta.split('\n')
    D = {}
    for l in lines:
        if l.strip() != '':
            # split only on the first colon so values may contain colons
            colon_splits = l.split(':', 1)
            key = colon_splits[0].strip()
            val = colon_splits[1].strip()
            D[key] = val
    return D
26b97249c73d7aebcd5d2b630d4c1f5bf4d898ad
122,606
import hashlib


def _hash_gene(gene, length=6):
    """Get the first letters of the SHA-1 digest of the gene."""
    encoded = sum(
        v * 4 ** i for i, v in enumerate(gene)
    ).to_bytes(13, byteorder='big')
    digest = hashlib.sha1(encoded).hexdigest()
    return digest[0:length]
981722aeafeef4899a858da9998fe95046207218
647,560
def rotate_address(level, address, rotate_num):
    """Rotates the address with respect to rotational symmetries of SG

    Args:
        level: A nonnegative integer representing the level of SG we're
            working with.
        address: np.array of size (level+1) representing the address vector
            of a point in some SG graph.
        rotate_num: A number in {0, 1, 2} representing the type of rotation
            we're making. 0 represent no rotation, 1 represent
            counterclockwise 2*np.pi/3, 2 represent counterclockwise
            4*np.pi/3.

    Returns:
        new_address: np.array of size (level+1) representing the address
            vector of the rotated point in some SG graph.
    """
    new_address = []
    for i in range(level):
        new_address.append(int((address[i] + rotate_num) % 3))
    new_address.append(int(address[-1]))
    return new_address
98048b7d1a6de2a35da55e181f8d7ac4a1bc00f9
673,253
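An added sanity check at level 2 (a hypothetical address vector):

assert rotate_address(2, [0, 1, 2], 1) == [1, 2, 2]   # last entry is untouched
assert rotate_address(2, [0, 1, 2], 0) == [0, 1, 2]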
import glob
import shutil


def copy_egg_info(dest_dir):
    """Copies the .egg-info directory to the specified location.

    Args:
        dest_dir: str. The destination directory.

    Returns:
        0 on success, 1 on failure.
    """
    # Remove any trailing slash on the destination directory.
    dest_dir = dest_dir.rstrip('/')
    egg_srcs = glob.glob('google_nucleus-*-py*.egg-info')
    if not egg_srcs:
        print('Could not find source .egg-info directory')
        return 1
    egg_src = egg_srcs[0]
    print('Copying egg-info from ', egg_src, ' to ', dest_dir)
    shutil.copytree(egg_src, dest_dir)
    return 0
39edab7773f9d02beac07f58e9eb5191f7f65321
644,769
def is_float(s):
    """
    Determine if a string can be converted to a floating point number.
    """
    try:
        float(s)
    except (TypeError, ValueError):  # narrowed from a bare `except`
        return False
    return True
2df52b4f8e0835d9f169404a6cb4f003ca661fff
1,752
def WindowWithTitleExists(driver, title):
    """Verifies if one of the open windows has the specified title.

    Args:
        driver: Active window for this Chromedriver instance.
        title: Title of the window we are looking for.

    Returns:
        True if an open window in this session with the specified title was
        found. False otherwise.
    """
    for handle in driver.window_handles:
        # switch_to.window replaces the switch_to_window API removed in
        # Selenium 4
        driver.switch_to.window(handle)
        if driver.title == title:
            return True
    return False
0aebdacf85e52e522a90538527fcbd1a84340f35
384,396
import re


def _un_camel(name):
    """
    Convert `CamelCase` to `camel_case`.

    EXAMPLES::

        sage: sage.interfaces.mathematica._un_camel('CamelCase')
        'camel_case'
        sage: sage.interfaces.mathematica._un_camel('EllipticE')
        'elliptic_e'
        sage: sage.interfaces.mathematica._un_camel('FindRoot')
        'find_root'
        sage: sage.interfaces.mathematica._un_camel('GCD')
        'gcd'
    """
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
3cae8883e206d961327e271967efbe6c00868899
543,353
def path_nodes_to_edges(path):
    """
    Returns an ordered list of edges given a list of nodes

    Args:
        path - a path, as a sequence of nodes

    Returns:
        An ordered list of edges.
    """
    # Edge sequence initialization
    edge_sequence = []
    for i in range(len(path) - 1):
        edge_sequence.append((path[i], path[i + 1]))
    return edge_sequence
57cdb3fb930533b4bb3081c924072791619de27b
277,073
def command(f):
    """Decorator for Tubot command methods."""
    f.is_command = True
    return f
08acd38367a3019ce2ba01f37616ae3e9fe12c31
434,139
import colorsys
import random
import math


def init(world):
    """Color of an agent: random hue, full saturation and value."""
    # clamp to 255 so a component of exactly 1.0 cannot map to 256
    return [min(int(math.floor(i * 256)), 255)
            for i in (list(colorsys.hsv_to_rgb(random.random(), 1.0, 1.0)) + [1.0])]
9bb7d5e10022f808701150fde7457dcd012d65b8
215,089
def split_dt(dt):
    """Split a numpy.datetime64 value so as to just keep the date part."""
    return str(dt).split('T')[0]
ab66ba29a895a94c70ce0e24224233a9ad0a986a
556,494
def get_name(node_name):
    """
    Omit any parenting information from the provided node name.

    :param str node_name:
    :return: Name
    :rtype: str
    """
    return node_name.rsplit("|", 1)[-1]
dad13b8544110b96f6b1da7ee6a3bfcf71b83a89
58,212
import random


def roll(min, max):
    """
    Returns a random number between `min` and `max`.
    """
    return random.randint(min, max)
a7ddda0df3bede0295b234527ecbc288163ada28
376,894
def get_volume_shape(vol):
    """Returns the shape of a gridded volume"""
    return (
        len(vol.tensor_u),
        len(vol.tensor_v),
        len(vol.tensor_w))
410522d5b3fef0f1bc9dcd3d24ba0afe4f42de69
371,429
def find_bucket_key(s3path):
    """
    This is a helper function that given an s3 path such that the path is of
    the form: bucket/key
    It will return the bucket and the key represented by the s3 path, e.g.
    if s3path == 's3://bmsrd-ngs-data/P-234' it returns
    ('bmsrd-ngs-data', 'P-234')
    """
    if s3path.startswith('s3://'):
        s3path = s3path[5:]
    s3components = s3path.split('/')
    bucket = s3components[0]
    s3key = ""
    if len(s3components) > 1:
        s3key = '/'.join(s3components[1:])
    return bucket, s3key
3b3305f8f4d3e1b2f24387f45f4a8664bd1d021a
156,722
def _get_datetime_beginning_of_day(dt):
    """
    Truncates hours, minutes, seconds, and microseconds to zero on given
    datetime.
    """
    return dt.replace(hour=0, minute=0, second=0, microsecond=0)
12ddcaed68db08740e4edc851a299aa08c23f91c
24,243
import torch


def decode_landm(pre, priors, variances):
    """Decode landm from predictions using priors to undo the encoding we
    did for offset regression at train time.

    Args:
        pre (tensor): landm predictions for loc layers,
            Shape: [N, num_priors, 10]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [num_priors, 4].
        variances: (list[float]) Variances of priorboxes

    Return:
        decoded landm predictions
    """
    priors = priors[None]
    landms = torch.cat((
        priors[:, :, :2] + pre[:, :, :2] * variances[0] * priors[:, :, 2:],
        priors[:, :, :2] + pre[:, :, 2:4] * variances[0] * priors[:, :, 2:],
        priors[:, :, :2] + pre[:, :, 4:6] * variances[0] * priors[:, :, 2:],
        priors[:, :, :2] + pre[:, :, 6:8] * variances[0] * priors[:, :, 2:],
        priors[:, :, :2] + pre[:, :, 8:10] * variances[0] * priors[:, :, 2:],
    ), dim=2)
    return landms
c301c309b924a2cf1b43a2a998d67eaf5e916df9
157,151
def replicaset_members(replicaset_document):
    """
    Returns the members section of the MongoDB replicaset document
    """
    return replicaset_document["members"]
927cbfd7b88c7b1a1d45118745ce8f84e1c4e108
540,779