| content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64, 8–710k) |
|---|---|---|
import torch
def btranspose(tensor: torch.Tensor) -> torch.Tensor:
"""Batch-wise transpose. Assumes that tensor has dimension of 3: [batch, features, samples]"""
if tensor.dim() != 3:
raise ValueError("The given shape is not supported.")
return torch.transpose(tensor, 1, 2)
|
4b238672a2cfca33abb86116949acd6d392434f0
| 38,266 |
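A minimal usage sketch for btranspose above; the shapes are illustrative:

import torch

x = torch.randn(8, 64, 100)   # [batch, features, samples]
y = btranspose(x)             # -> [batch, samples, features]
assert y.shape == (8, 100, 64)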
def _comp_brat_ann(ann_a, ann_b):
"""Compare brat annotation records by their border indices.
:param ann_a: annotation A
:param ann_b: annotation B
    :return: 1 if a < b; 0 if a == b; -1 if a > b (inverted relative to the usual cmp convention)
:rtype: int
"""
hash_a = (ann_a.get_left_border(), ann_a.get_right_border())
hash_b = (ann_b.get_left_border(), ann_b.get_right_border())
if hash_a < hash_b:
return 1
elif hash_a == hash_b:
return 0
else:
return -1
|
a2a13cb1d0292e85d26befb538ad63b7c4643953
| 565,874 |
def _set_visible_scopes_for_type_definition(type_definition, visible_scopes):
"""Sets current_scope and visible_scopes for the given type_definition."""
return {
"current_scope": type_definition.name.canonical_name,
# In order to ensure that the iteration through scopes in
# _find_target_of_reference will go from innermost to outermost, it is
# important that the current scope (type_definition.name.canonical_name)
# precedes the previous visible_scopes here.
"visible_scopes": (type_definition.name.canonical_name,) + visible_scopes,
}
|
23a0ee76a5e2f09c185599942a132f19ef29b570
| 216,869 |
def insert_query(connection, query):
"""
Inserts a query into the corresponding database table
:param connection: database connection
:param query: user query
    :return: row id of the last inserted row
"""
cursor = connection.cursor()
sql = ("INSERT INTO Query(Topic, StartDate, EndDate, MinLikes, MinRetweets)\n"
"VALUES(?, ?, ?, ?, ?);")
values = query
cursor.execute(sql, values)
return cursor.lastrowid
|
cb12f9ee8cca167222ba0bed5492a69f96816777
| 165,189 |
async def consume_aiter(iterable):
"""consume an async iterable to a list"""
result = []
async for item in iterable:
result.append(item)
return result
|
897c8e9380f9c631f2dd8721884ec1ccbe462d48
| 12,769 |
def gettext_noop(value):
"""Mark a string for translation without translating it. Returns
value.
"""
return value
|
ea738b7b93b31015ab9c2d02d569b90053fba59d
| 212,995 |
import logging
from typing import Any
def _value(record: logging.LogRecord, field_name_or_value: Any) -> Any:
"""
Retrieve value from record if possible. Otherwise use value.
:param record: The record to extract a field named as in field_name_or_value.
:param field_name_or_value: The field name to extract from record or the default value to use if not present.
"""
try:
return getattr(record, field_name_or_value)
except AttributeError:
return field_name_or_value
|
994c306ad15e972817128f70ac61c8de7071e719
| 672,404 |
def solve(captcha):
"""Solve captcha.
:input: captcha string
:return: sum of all paired digits that match
>>> solve('1212')
6
>>> solve('1221')
0
>>> solve('123425')
4
>>> solve('123123')
12
>>> solve('12131415')
4
"""
a = len(captcha) // 2
return sum(int(x) for x, y in
zip(captcha, captcha[a:] + captcha[:a]) if x == y)
|
edd3b556db7bb22ea9180c93bb4e5043ce884283
| 628,596 |
def gpsDecimalToDMS(decimal, loc):
"""Returns a GPS coordinate in DMS format.
Keyword arguments:
decimal -- a real number containing the lat or lon
loc -- an array of strings representing lat or lon
-- must be one of ["S", "N"] or ["W", "E"]
"""
if decimal < 0:
latlonRef = loc[0]
elif decimal > 0:
latlonRef = loc[1]
else:
latlonRef = ""
abs_value = abs(decimal)
deg = int(abs_value)
t = (abs_value-deg)*60
min = int(t)
sec = round((t - min) * 60, 6)
return (deg, min, sec, latlonRef)
|
9c5efcb38ee7c52cec5683e531a3e27721906985
| 394,560 |
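A quick worked example for gpsDecimalToDMS above, using the loc convention from its docstring:

deg, minutes, sec, ref = gpsDecimalToDMS(-122.4194, ["W", "E"])
print(deg, minutes, sec, ref)  # 122 25 9.84 W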
def merge(basepath, relpath, has_authority):
"""Merge two URI path components into a single path component.
Follows rules specified in Section 5.2.3 of RFC 3986.
The algorithm in the RFC treats the empty basepath edge case
differently for URIs with and without an authority section, which
is why the third argument is necessary.
"""
if has_authority and basepath == '':
return '/' + relpath
slash = basepath.rfind('/')
return basepath[:slash+1] + relpath
|
70b10eaff290f95cff5719ae272df3ff3cc605a9
| 535,432 |
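A short sketch of how merge above behaves under the RFC 3986 rules it cites (inputs chosen by hand, easy to verify):

print(merge('/a/b/c', 'd', False))  # '/a/b/d'  (relpath replaces the last segment)
print(merge('', 'd', True))         # '/d'      (empty base with an authority gains a leading slash)
print(merge('', 'd', False))        # 'd'       (rfind returns -1, so basepath[:0] is empty)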
def get_default_arg_name(pos: int) -> str:
"""
Generate the default name for argument.
"""
return f"arg{pos}"
|
8a5f3347fe768a8385fd70c206ef183e7d6312d1
| 409,089 |
import torch
def mask_out(tensor, start_ind, end_ind, value, dim=1):
""" Set the elements before start_ind and after end_ind (both inclusive) to the value. """
if dim != 1:
raise NotImplementedError
batch_size, time = list(tensor.shape)[:2]
# (oleg) This creates the indices every time, but doesn't seem to affect the speed a lot.
inds = torch.arange(time, device=tensor.device, dtype=start_ind.dtype).expand(batch_size, -1)
mask = (inds >= end_ind[:, None]) | (inds <= start_ind[:, None])
tensor[mask] = value
return tensor, mask
|
1326220320679c32d728ce727d174e00065eaa0a
| 45,467 |
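A hedged usage sketch for mask_out above; the shapes follow the [batch, time] convention the code implies:

import torch

t = torch.arange(12, dtype=torch.float32).reshape(2, 6)
start = torch.tensor([1, 2])
end = torch.tensor([4, 5])
out, mask = mask_out(t, start, end, 0.0)
# per row, positions <= start_ind and >= end_ind are zeroed (both inclusive)
print(out)
# tensor([[ 0.,  0.,  2.,  3.,  0.,  0.],
#         [ 0.,  0.,  0.,  9., 10.,  0.]])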
from typing import Dict
import torch
def get_emb_matrix(vocab: Dict, emb_path: str) -> torch.Tensor:
"""Takes a embedding file and convert them into embedding matrix using the
vocab dictionary. Eg. if the word 'the' is mapped to id 5 in the vocab, then
the 5th row of the embedding matrix will represent the embedding vector
of the word 'the'.
Args:
vocab (Dict): mapping of words to id used by tokenizer
emb_path (str): path of embedding file
Returns:
torch.Tensor: torch tensor of embedding matrix
"""
with open(emb_path, "r") as emb_file:
file_lines = emb_file.readlines()
emb_dict = {}
first_line = file_lines[0].rstrip().split()
if len(first_line) != 2:
raise ValueError(
"The first line in W2V embeddings must be the pair (vocab_size, emb_dim)"
)
emb_dim = int(first_line[1])
emb_vocab_size = int(first_line[0])
for line in file_lines[1:]:
tokens = line.rstrip().split()
word = tokens[0]
vector = [float(token) for token in tokens[1:]]
if len(vector) != emb_dim:
raise ValueError("The number of dimensions does not match the header info")
emb_dict[word] = vector
if emb_vocab_size != len(emb_dict.keys()):
raise ValueError("Vocab size does not match the header info")
vocab_len = len(vocab.keys())
emb_matrix = torch.zeros((vocab_len, emb_dim))
for word, index in vocab.items():
if word in emb_dict.keys():
emb_matrix[index] = torch.Tensor(emb_dict[word])
return emb_matrix
|
01ec0b37b9567eb6435ff240b08d30a02f6f2e70
| 295,205 |
from typing import List
from collections import OrderedDict
import itertools
import math
def distributedConnectionMap(names: List[str]) -> OrderedDict:
"""
Create a map where every node is connected every other node.
Assume each key in the returned dictionary to be connected to each item in
its value(list).
:param names: a list of node names
:return: a dictionary of name -> list(name).
"""
names.sort()
combos = list(itertools.combinations(names, 2))
    maxPer = math.ceil(len(combos) / len(names))
# maxconns = math.ceil(len(names) / 2)
connmap = OrderedDict((n, []) for n in names)
for a, b in combos:
if len(connmap[a]) < maxPer:
connmap[a].append(b)
else:
connmap[b].append(a)
return connmap
|
511fbcf4fc48c675923d00fb87ed185080e20278
| 283,946 |
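Example output for distributedConnectionMap above (names chosen arbitrarily):

print(distributedConnectionMap(['a', 'b', 'c', 'd']))
# OrderedDict([('a', ['b', 'c']), ('b', ['c', 'd']), ('c', ['d']), ('d', ['a'])])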
def switch_month(month: str):
"""
    Translates an English month abbreviation to its French equivalent. For example: 'Jan' becomes '(01)Janvier'.
month : the month that will be translated
"""
return {
"Jan": "(01)Janvier",
"Feb": "(02)Fevrier",
"Mar": "(03)Mars",
"Apr": "(04)Avril",
"May": "(05)Mai",
"Jun": "(06)Juin",
"Jul": "(07)Juillet",
"Aug": "(08)Aout",
"Sep": "(09)Septembre",
"Oct": "(10)Octobre",
"Nov": "(11)Novembre",
"Dec": "(12)Decembre",
}[month]
|
2a292c64333d9455ca6ce5b7f41b9e83eee2cb82
| 674,133 |
def isdigit(s: str) -> bool:
    """
    Same as `str.isdigit()` except it supports negative numbers (x < 0)
    """
    return s.isdigit() or (s.startswith('-') and s[1:].isdigit())
|
c723a20e72e3b4c498cfe1fba7956bfc687e4932
| 353,556 |
def parse_url(url):
"""
Parse a 'swift://CONTAINER/OBJECT' style URL
:param url:
:return: dictionary with "container" and "obj" keys
"""
url = url.replace("swift://", "")
if url.find("/") == -1:
raise ValueError("Swift url must be 'swift://container/object'")
pieces = url.split("/")
containername = pieces[0]
objname = "/".join(pieces[1:])
return {
"container": containername,
"obj": objname,
}
|
90f8cc6d26cbaf2f277c806c4f1beb4cb0e3cbef
| 218,576 |
from typing import Optional
import random
def random_subset(a, *, prob=0.5, min_size: Optional[int] = None, max_size: Optional[int] = None):
"""Choose a random subset of a.
Args:
a: the input sequence
prob: Probability to pick an element
(when called without size constraints).
min_size: Minimal size of the result.
max_size: Maximal size of the result.
Returns:
A random subset of a
"""
    def subset(a, prob):
        res = []
        for el in a:
            if random.random() < prob:
                res.append(el)
        return res
if min_size is None:
min_size = 0
if max_size is None:
max_size = len(a)
min_size = max(min_size, 0)
max_size = min(max_size, len(a))
if min_size > max_size:
raise ValueError(f'Invalid size range for random_subset: [{min_size}, {max_size}]')
tmp = random.sample(a, max_size)
return tmp[:min_size] + subset(tmp[min_size:], prob)
|
23a019e1a797b90252ec7fc034b6be7196a245a9
| 219,997 |
def _process_dbn(process_tuple):
"""
Extract the best path through the state space in an observation sequence.
This proxy function is necessary to process different sequences in parallel
using the multiprocessing module.
Parameters
----------
process_tuple : tuple
Tuple with (HMM, observations).
Returns
-------
path : numpy array
Best path through the state space.
log_prob : float
Log probability of the path.
"""
# pylint: disable=no-name-in-module
return process_tuple[0].viterbi(process_tuple[1])
|
94627952cebd420c9870c90b06b8ea322819432c
| 569,476 |
def configuration_(env):
"""
Connectx configuration.
"""
return env.configuration
|
59bc68d54fb09dea6d4545444b7f489b732c8534
| 436,761 |
def doc_returns(f):
"""
Decorator that adds the "Returns" section at the end of the
docstrings of the decorated function.
Parameters
----------
f : function
function to decorate.
Returns
-------
function
returns the same function with the docstring modified.
"""
description = f.__doc__
item_fmt = "{} : {}\n {}".format
outputs = [
("mol_objects", "list", "Embedded molecules."),
("name_returns", "list", "Names for the embedded molecules"),
(
"coord_maps",
"list",
"Mappings to the Idx of the atoms afected by the embedding",
),
("alg_maps", "list", "Mappings to the Idx of the core to do alignments"),
("mol_templates", "list", "List of template molecules used"),
]
outs_txt = "\n".join([item_fmt(*items) for items in outputs])
f.__doc__ = f"{description}\nReturns\n-------\n{outs_txt}\n"
return f
|
2bb935429cfb5180cfa94caa4b4eacf3b93d2e2d
| 434,172 |
def UpdateBoardConfigs(board_configs, boards, *args, **kwargs):
"""Update "board_configs" for selected boards.
Args:
board_configs: Dict in CreateBoardConfigs format to filter from.
boards: Iterable of boards to update in the dict.
args: List of templates to apply.
kwargs: Individual keys to update.
  Returns:
    Copy of the board_configs dict with the given boards updated with the
    templates and values applied.
  """
result = board_configs.copy()
for b in boards:
result[b] = result[b].derive(*args, **kwargs)
return result
|
ff8f673a15e6e0f9652477c2c24e4d9fe5b21b9d
| 226,935 |
def _wls_linearfit_predict(x, w, wx, wy, wxx, wxy, select):
"""
Predict a point according to a weighted least squares linear fit of the
data
This function is a helper function for :py:func:`quality`. It is not
supposed to be called directly.
Parameters
----------
x : float
The position for which to predict the function value
w : ndarray
The pre-calculated weights :math:`w_l`
wx : ndarray
The pre-calculated weighted `x` data :math:`w_l x_l`
wy : ndarray
The pre-calculated weighted `y` data :math:`w_l y_l`
wxx : ndarray
The pre-calculated weighted :math:`x^2` data :math:`w_l x_l^2`
wxy : ndarray
The pre-calculated weighted `x y` data :math:`w_l x_l y_l`
select : indexing array
To select the subset from the `w`, `wx`, `wy`, `wxx`, `wxy` data
Returns
-------
float, float
The estimated value of the master curve for the selected subset and the
squared standard error
"""
# linear fit
k = w[select].sum()
kx = wx[select].sum()
ky = wy[select].sum()
kxx = wxx[select].sum()
kxy = wxy[select].sum()
delta = k * kxx - kx ** 2
m = 1. / delta * (k * kxy - kx * ky)
b = 1. / delta * (kxx * ky - kx * kxy)
b_var = kxx / delta
m_var = k / delta
bm_covar = - kx / delta
# estimation
y = b + m * x
dy2 = b_var + 2 * bm_covar * x + m_var * x**2
return y, dy2
|
9f89bbb2757a70740bfd60711576d754c5026285
| 518,955 |
def strip(text):
"""strip str without Exception"""
try:
return text.strip()
except AttributeError:
return text
|
eeadc7ad19f33f107edd129727cf6487be850ea7
| 528,386 |
def side_of_line(l, p):
"""Returns on which side of line `l` point `p` lies.
Line `l` must be a tuple of two tuples, which are the start and end
point of the line. Point `p` is a single tuple.
Returned value is negative, 0, or positive when the point is right,
collinear, or left from the line, respectively. If the line is horizontal,
then the returned value is positive.
Source: http://stackoverflow.com/a/3461533/466781
"""
a,b = l
return ((b[0] - a[0]) * (p[1] - a[1]) - (b[1] - a[1]) * (p[0] - a[0]))
|
c3cdf2bcf5f4bb083b884e69e8209375831b4f6e
| 572,339 |
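A small check of side_of_line above, with points chosen by hand:

line = ((0, 0), (10, 0))            # horizontal line, left to right
print(side_of_line(line, (5, 3)))   # 30  -> positive: point is left of (above) the line
print(side_of_line(line, (5, -3)))  # -30 -> negative: point is right of (below) the line
print(side_of_line(line, (5, 0)))   # 0   -> collinear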
def message(file_checker, which=0):
"""Return a string of the message lines associated with the message of a file checker."""
assert(len(file_checker.messages) > which)
return "\n".join(file_checker.messages[which].message)
|
8575f96d756e19b2a4cceb205d28e59cf0ac0d34
| 583,543 |
def api_package(pytester):
"""Create package with structure
api \
mobile.py
web.py
mobile.py and web.py has similar content except entrypoint path
"""
# Re-use our infrastructure layer
pytester.copy_example('tests/conftest.py')
# Create api/web.py and api/mobile.py files with same methods
entrypoint_tpl = """
from fastapi import Body
from typing import List
import fastapi_jsonrpc as jsonrpc
api_v1 = jsonrpc.Entrypoint(
'{ep_path}',
)
@api_v1.method()
def probe(
data: List[str] = Body(..., example=['111', '222']),
amount: int = Body(..., gt=5, example=10),
) -> List[int]:
return [1, 2, 3]
"""
api_dir = pytester.mkpydir('api')
mobile_py = api_dir.joinpath('mobile.py')
mobile_py.write_text(
entrypoint_tpl.format(ep_path='/api/v1/mobile/jsonrpc'),
)
web_py = api_dir.joinpath('web.py')
web_py.write_text(
entrypoint_tpl.format(ep_path='/api/v1/web/jsonrpc'),
)
return api_dir
|
72fed02f8f3cd7071efc7d68c157334062da15ee
| 185,328 |
def Prandtl(**kwargs):
"""
Calculates Prandtl number based upon either of two input variable sets.
First method:
Pr(C_p = specific heat capacity,
mu = dynamic viscosity,
k = thermal conductivity)
Second method:
Pr(nu = kinematic viscosity,
alpha = thermal diffusivity)
"""
if ('C_p' in kwargs) and ('k' in kwargs) and ('mu' in kwargs):
prandtl_number = (kwargs['C_p'] * kwargs['mu']) / kwargs['k']
elif ('nu' in kwargs) and ('alpha' in kwargs):
prandtl_number = kwargs['nu'] / kwargs['alpha']
else:
raise KeyError('Incorrect variable assignment')
return prandtl_number
|
a5326aaa6590dd18b37cce738c68a80c657ffe9f
| 442,158 |
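Both call styles for Prandtl above, with made-up property values:

print(Prandtl(C_p=1005.0, mu=1.8e-5, k=0.026))  # ~0.6957 (air-like values)
print(Prandtl(nu=1.5e-5, alpha=2.2e-5))         # ~0.6818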
def _get_comm_key_send_recv(my_rank, my_gpu_idx, peer_rank, peer_gpu_idx):
"""Return a key given source and destination ranks for p2p tasks.
The p2p key is in the following form:
[min_rank]_[gpu_index]:[max_rank]_[gpu_index].
Args:
my_rank (int): the rank of the source process.
my_gpu_idx (int): the source gpu index on the process.
peer_rank (int): the rank of the destination process.
peer_gpu_idx (int): the destination gpu index on the process.
Returns:
comm_key (str): a string key to query the communication cache.
"""
if my_rank < peer_rank:
lower_key = str(my_rank) + "_" + str(my_gpu_idx)
higher_key = str(peer_rank) + "_" + str(peer_gpu_idx)
elif my_rank > peer_rank:
lower_key = str(peer_rank) + "_" + str(peer_gpu_idx)
higher_key = str(my_rank) + "_" + str(my_gpu_idx)
else:
raise RuntimeError(
"Send and recv happens on the same process. alpa.collective "
"does not support this case as of now. Alternatively, consider "
"doing GPU to GPU memcpy?")
comm_key = lower_key + ":" + higher_key
return comm_key
|
607110079c2aa92ccb3a156d6fe723f01c524f6d
| 188,516 |
def combine_token_label_pairs(pairs):
"""Combines a list of [(token, label), (token, label)] to give
(token,label,label).
"""
return pairs[0][0:] + tuple(pair[1] for pair in pairs[1:])
|
f2420d96ea9bc06fbcc8f593913d20ef72ef7f12
| 252,949 |
import logging
def num_beats(data):
""" Counts number of heart beats found
:param data: list of indices where heart-beat events were detected
:return: Number of heart beats
"""
logging.info("Counting Number of Beats...\n")
return len(data)
|
d93e8374e0419e15f526c8ed2ab27915441a76d2
| 268,749 |
def from_binary(bin_data: str, delimiter: str = " ") -> bytes:
"""Converts binary string into bytes object"""
if delimiter == "":
data = [bin_data[i:i+8] for i in range(0, len(bin_data), 8)]
else:
data = bin_data.split(delimiter)
data = [int(byte, 2) for byte in data]
return bytes(data)
|
f16706da2d5b9ae5984a35a13ebd02ae94581153
| 3,567 |
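A quick round-trip for from_binary above:

print(from_binary("01101000 01101001"))     # b'hi'
print(from_binary("0110100001101001", ""))  # b'hi' (no delimiter: fixed 8-bit slices)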
from typing import Optional
from typing import Dict
from typing import Any
def update_draft(
access_key: str,
url: str,
owner: str,
dataset: str,
*,
draft_number: int,
state: Optional[str] = None,
title: Optional[str] = None,
description: Optional[str] = None,
) -> Dict[str, Any]:
"""Execute the OpenAPI `PATCH /v2/datasets/{owner}/{dataset}/drafts/{draft_number}`.
Arguments:
access_key: User's access key.
url: The URL of the graviti website.
owner: The owner of the dataset.
dataset: Name of the dataset, unique for a user.
draft_number: The updated draft number.
state: The updated draft state which could be "CLOSED" or None.
Where None means no change in state.
title: The draft title.
description: The draft description.
Returns:
The response of OpenAPI.
Examples:
Update the title or description of the draft:
>>> update_draft(
... "ACCESSKEY-********",
... "https://api.graviti.com",
... "MNIST",
... draft_number=2,
... title="draft-3"
... )
{
"number": 2,
"title": "draft-3",
"description": "",
"branch": "main",
"state": "OPEN",
"parent_commit_id": "85c57a7f03804ccc906632248dc8c359",
"creator": "graviti-example",
"created_at": "2021-03-03T18:58:10Z",
"updated_at": "2021-03-04T18:58:10Z"
}
Close the draft:
>>> update_draft(
... "ACCESSKEY-********",
... "https://api.graviti.com",
... "MNIST",
... draft_number=2,
... state="CLOSED"
... )
{
"number": 2,
"title": "draft-3",
"description": "",
"branch": "main",
"state": "CLOSED",
"parent_commit_id": "85c57a7f03804ccc906632248dc8c359",
"creator": "graviti-example",
"created_at": "2021-03-03T18:58:10Z",
"updated_at": "2021-03-05T18:58:10Z"
}
"""
url = f"{url}/v2/datasets/{owner}/{dataset}/drafts/{draft_number}"
patch_data: Dict[str, Any] = {"draft_number": draft_number}
if state:
patch_data["state"] = state
if title is not None:
patch_data["title"] = title
if description is not None:
patch_data["description"] = description
return open_api_do( # type: ignore[no-any-return]
"PATCH", access_key, url, json=patch_data
).json()
|
57566bdf3d953ec52134dcb177b49ed62e955f8b
| 609,044 |
def _cpp_field_name(name):
"""Returns the C++ name for the given field name."""
if name.startswith("$"):
dollar_field_names = {
"$size_in_bits": "IntrinsicSizeInBits",
"$size_in_bytes": "IntrinsicSizeInBytes",
"$max_size_in_bits": "MaxSizeInBits",
"$min_size_in_bits": "MinSizeInBits",
"$max_size_in_bytes": "MaxSizeInBytes",
"$min_size_in_bytes": "MinSizeInBytes",
}
return dollar_field_names[name]
else:
return name
|
accbdfeb2447564607437a58c68137a8716b07c8
| 504,376 |
def collapse_lists(list1,list2,compf,pathf):
"""Function to collapse two lists into a single list based on comparison
and path functions.
:param list1: First list
:param list2: Second list
:param compf: Comparator function to compare values returned by
path function.
:param pathf: Function that takes a list of elements and returns a
value of the same type.
:returns: A list with length = len(list2)
.. note::
* Both lists must have values of the same type 'T'
* Comparison function should take a list of values of type 'T' and
return a value of type 'T'.
* Path function should take a list of values of type 'T' and return a
value of type 'T'.
* The function calculates path totals based on paths from list1 to
list2.
* The difference between lengths of list1 and list2 should be 1
For example: To calculate maximum sum of path values -
path function => sum, comparison function => max::
>>> list1 = [12,37,53,46]
>>> list2 = [23,34,47]
>>> compf = max
>>> pathf = sum
        >>> collapse_lists(list1,list2,compf,pathf)
        [60, 87, 100]
"""
result = []
len1 = len(list1)
len2 = len(list2)
    if (abs(len1-len2) != 1):
        # lengths must differ by exactly 1; otherwise an empty result is returned
        pass
elif(list1 == []):
result = list(list2)
elif(list2 == []):
result = list(list1)
else:
small2large = True
if(len1 > len2):
small2large = False
for j in range(len2):
if(small2large):
if(j == 0):
val = pathf([list2[j],list1[0]])
elif(j == len2-1):
val = pathf([list2[j],list1[len1-1]])
else:
pv1 = pathf([list2[j],list1[j-1]])
pv2 = pathf([list2[j],list1[j]])
val = compf([pv1, pv2])
result.append(val)
else:
pv1 = pathf([list2[j],list1[j+1]])
pv2 = pathf([list2[j],list1[j]])
val = compf([pv1, pv2])
result.append(val)
return result
|
ce9db32b8807909d7a59b24940a025a1ebf26b01
| 526,829 |
import math
def get_num_jobs(total_events, batch_size):
"""
Get the number of jobs to use for MC simulation
:param total_events: total number of events to generate
:param batch_size: number of events to generate per job
:return: number of jobs to use
"""
num_jobs = int(math.ceil(total_events / float(batch_size)))
return num_jobs
|
ead6f86f017896f6b81e549d1e84ce3870ca6431
| 129,987 |
def parse_bool(b):
"""Parsers string boolean into boolean."""
if not b: return False
if b.lower() in ('false', 'off', 'no', '0'): return False
return True
|
10b78f7f4afcc658c526e1626456375eb9ccc3c2
| 216,773 |
def typeset_latex_math(variable) -> str:
"""
Returns `variable` as a string with LaTeX math typesetting.
Args:
variable: The python variable to be saved.
Returns:
        str: String according to LaTeX typesetting.
"""
has_unit = hasattr(variable, 'u')
if has_unit:
if str(variable.u) == 'dimensionless':
suffix = None
else:
suffix = r'\,' + '{:Lx}'.format(variable.u)
value = variable.magnitude
else:
suffix = None
value = variable
has_error = hasattr(value, 'std_dev')
if has_error:
payload = '{:L}'.format(value)
else:
payload = str(value)
if suffix is not None:
if has_error:
payload = '({}){}'.format(payload, suffix)
else:
payload += suffix
return payload
|
c7638ba3818c44f3f1d25d85ee19f2153e361c57
| 115,042 |
def get_panel_groups_at_depth(group, depth=0):
"""Return a list of the panel groups at a certain depth below the node group"""
assert depth >= 0
if depth == 0:
return [group]
else:
assert group.is_group()
return [
p
for gp in group.children()
for p in get_panel_groups_at_depth(gp, depth - 1)
]
|
1bbd8b94f39d738587262995953c4d7cd5f34dbe
| 676,314 |
import torch
def has_tensor(obj) -> bool:
"""
Given a possibly complex data structure,
check if it has any torch.Tensors in it.
"""
if isinstance(obj, torch.Tensor):
return True
elif isinstance(obj, dict):
return any(has_tensor(value) for value in obj.values())
elif isinstance(obj, (list, tuple)):
return any(has_tensor(item) for item in obj)
else:
return False
|
ed70ce8443625ab66d7b36b3aa57c799855b7fc5
| 641,640 |
import torch
def loc2bbox(bbox_a, loc):
""" compute bounding boxes from offsets and scales
Args:
bbox_a: (R, 4) contains (x1, y1, x2, y2)
loc: (R, 4) contains (tx, ty, tw, th)
Returns:
(R, 4)
"""
assert bbox_a.size(1) == loc.size(1) == 4 and bbox_a.size(0) == loc.size(0)
w_a = bbox_a[:, 2] - bbox_a[:, 0]
h_a = bbox_a[:, 3] - bbox_a[:, 1]
ctr_x_a = bbox_a[:, 0] + 0.5 * w_a
ctr_y_a = bbox_a[:, 1] + 0.5 * h_a
w = w_a * torch.exp(loc[:, 2])
h = h_a * torch.exp(loc[:, 3])
ctr_x = w_a * loc[:, 0] + ctr_x_a
ctr_y = h_a * loc[:, 1] + ctr_y_a
bbox = torch.zeros_like(bbox_a)
bbox[:, 0] = ctr_x - 0.5 * w
bbox[:, 1] = ctr_y - 0.5 * h
bbox[:, 2] = ctr_x + 0.5 * w
bbox[:, 3] = ctr_y + 0.5 * h
return bbox
|
9577f8f8358fbef37aeee26ce9a6236da6527927
| 169,086 |
import re
def github_qualify_references(log, repo_userorg, repo_name):
""" Replace "unqualified" GitHub references with "fully qualified" one
GitHub automatically links issues and pull requests if they have a specific
format. Links can be qualified with the user/org name and the repository
name, or unqualified, if they only contain the issue or pull request number.
This function converts all unqualified references to qualified ones.
See https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests
for a documentation of all supported formats.
"""
r = re.compile(r"(^|[^\w])(?:#|[gG][hH]-)(\d+)\b")
repl_str = r'\1%s/%s#\2' % (repo_userorg, repo_name)
    return [r.sub(repl_str, line) for line in log]
|
b6b8421ace259c19f0adeba61e1dc3c686a5cb23
| 593,850 |
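An illustrative call to github_qualify_references above; the user/org and repo names are placeholders:

log = ["Fix overflow (#123)", "See GH-7 for details"]
print(github_qualify_references(log, "octocat", "hello-world"))
# ['Fix overflow (octocat/hello-world#123)', 'See octocat/hello-world#7 for details']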
def convert_character_dict_to_str(character_dict: dict) -> str:
"""
Given a character dict return it as a string that is comparable to how the
set of characters are encoded in a character string.
"""
return (
str(character_dict["Teams"])
.replace("{", "")
.replace("}", "")
.replace("'", "")
.replace(",", ";")
)
|
c046e0e10d251d850982165666fbba224a16e95c
| 230,897 |
def lrstrip(line: str) -> str:
"""
Do lstrip and rstrip on a string
"""
return line.lstrip().rstrip()
|
c32b24384d324a6243509c6b4a472bebbc4d5052
| 167,894 |
from typing import Optional
from typing import Tuple
import re
def _parse_addr(addr: str, allow_wildcard: bool = False) -> Optional[Tuple[str, str]]:
"""Safely parse an email, returning first component and domain."""
m = re.match(
(
r'([a-zA-Z0-9\-_\+\.]+)' +
('?' if allow_wildcard else '') +
r'@([a-zA-Z0-9\-_\+\.]+)$'
),
addr,
)
if not m:
return None
name, domain = m.group(1), m.group(2)
if '.' in domain:
return name, domain
return None
|
ab62e2a99b9ad460163f08997e5c9bd9d6096bac
| 668,704 |
def bar(a, /, b, *, c=1):
"""Calculate c * (a ** b), with c defaulting to one.
    `a` is positional-only, `b` is positional-or-keyword,
    `c` is keyword-only.
"""
return c * (a ** b)
|
38cd34323eb73b55aae1900753432a06c24bc459
| 331,567 |
def combine_cols(cols):
"""Combine column names into one list of names.
Args:
cols (list{str | list{str}}):
A list of names of columns or list of column names.
Return:
list{str}:
Combined names of columns.
"""
combined_cols = []
for col in cols:
if isinstance(col, str):
combined_cols.append(col)
else:
combined_cols += col
return combined_cols
|
3280f6dd317be8ddb60498c386fdf6a7ae5cdedf
| 562,762 |
import decimal
def get_decimal_quantum(precision):
"""Return minimal quantum of a number, as defined by precision."""
assert isinstance(precision, (int, decimal.Decimal))
return decimal.Decimal(10) ** (-precision)
|
551f3171f4c878ce63b1ef673627b6057ffc1e27
| 142,470 |
def remaining_evals(cur_step, epoch, train_steps_per_epoch, evals_per_epoch):
"""Helper function to calculate remaining evaluations for a trainer.
Args:
cur_step: current step of the supervised trainer
epoch: current epoch of the RL trainer
train_steps_per_epoch: supervised trainer steps per RL epoch
evals_per_epoch: supervised trainer evals per RL epoch
Returns:
number of remaining evals to do this epoch
Raises:
ValueError if the provided numbers indicate a step mismatch
"""
if epoch < 1:
raise ValueError('Epoch must be at least 1, got %d' % epoch)
prev_steps = (epoch - 1) * train_steps_per_epoch
done_steps_this_epoch = cur_step - prev_steps
if done_steps_this_epoch < 0:
raise ValueError('Current step (%d) < previously done steps (%d).'
% (cur_step, prev_steps))
train_steps_per_eval = train_steps_per_epoch // evals_per_epoch
if done_steps_this_epoch % train_steps_per_eval != 0:
    raise ValueError('Done steps (%d) must be divisible by train steps per eval (%d).'
% (done_steps_this_epoch, train_steps_per_eval))
return evals_per_epoch - (done_steps_this_epoch // train_steps_per_eval)
|
b897af4f78e9a6946326f28392834d2d9e085471
| 671,900 |
import math
def torad(deg):
""" convert degrees to radians """
return math.pi/180.0*deg
|
655fdb5377f93eae1ecc8736f341b3683d41493a
| 89,030 |
def get_trial_params(trial_id, experiment):
"""Get params from trial_id in given experiment"""
best_trial = experiment.get_trial(uid=trial_id)
if not best_trial:
return {}
return best_trial.params
|
33a96478299ce67e4a09f1df861efcbfc37efd67
| 475,624 |
def get_center_of_geometry(atoms):
"""
Computes center of geometry.
Args:
atoms (ase.Atoms): atoms object of molecule
Returns:
center of geometry
"""
return atoms.arrays["positions"].mean(0)
|
a464887cd0753bd02601f08042c537d8cbf317dd
| 381,703 |
def lagrange_four_point(x, y0, y1, y2, y3):
"""The third order polynomial p(x) with p(-1)=y0, p(0)=y1, p(1)=y2, p(2)=y3."""
    # Cubic coefficients, scaled by 6 so the arithmetic stays integral;
    # the final 1/6 factor is applied in the return expression.
    a2 = 3 * (y0 + y2 - y1 - y1)
    a3 = 3 * (y1 - y2) + y3 - y0
    a1 = -a3 + 3 * (y2 - y0)
    return y1 + x * (a1 + x * (a2 + x * a3)) * 0.16666666666666666  # * 1/6
|
b60da1f8567c5b9babbc9e158b1444e30424bb1f
| 701,581 |
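A spot check that lagrange_four_point above interpolates its four anchor points (values arbitrary):

y = (2.0, 5.0, 3.0, 7.0)
for x, expected in zip((-1, 0, 1, 2), y):
    assert abs(lagrange_four_point(x, *y) - expected) < 1e-9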
def lifetime_init(rule='1/e'):
"""Initialize a lifetime object.
Parameters
----------
rule : str {'1/e', 'trapz', 'simpson'}, optional
Name of the method to integrate the correlation curve. \n
'1/e' uses the 1/e rule and assumes an exponential decay. It linearly
interpolates the time when the correlation goes below the value 1/e.
When all values are > 1/e it returns the max lead time.
When all values are < 1/e it returns the min lead time.\n
'trapz' uses the trapezoidal rule for integration.\n
'simpson' uses the Simpson's rule for integration.
Returns
-------
out : dict
The lifetime object.
"""
list_rules = ['trapz', 'simpson', '1/e']
if rule not in list_rules:
raise ValueError("Unknown rule %s for integration.\n" % rule
+ "The available methods are: "
+ str(list_rules))
lifetime = {}
lifetime["lifetime_sum"] = 0.0
lifetime["n"] = 0.0
lifetime["rule"] = rule
return lifetime
|
246bf31672fcd98a7ccd272ec7967c8a5e51817b
| 643,897 |
def make_layout(rows, columns):
"""Create a layout of rooms represented by a set of coordinates."""
locations = set()
for y in range(rows):
for x in range(columns):
locations.add((x, y))
return locations
|
ac70739f019c187d4431fb5e1860f3a7eed23939
| 353,797 |
def get_essential( m ):
""" Get the "essential" leds, along with the associated tri.
Format: 'ess[led] = tri'
An essential LED is an LED that lights a triangle surface, where that LED is the only LED to light that triangle surface.
i.e. without that LED, the given triangle will never be lit.
"""
ess = {}
for k in m:
if len(m[k]) == 1:
led = m[k][0]
ess[led] = k #ess[led] = tri
return ess
|
3cc4570724aa98e6a7d1b2b73cbc45abb53304fd
| 116,572 |
def intersects(box_a, box_b, grace, dynamic_grace):
"""
    Checks whether two rectangles intersect or one lies inside the other.
Args:
box_a <tuple> : First rectangle (x, y, w, h)
box_b <tuple> : Second rectangle (x, y, w, h)
grace <list> : Relaxation for intersecting rectangles, [x_grace, y_grace]
dynamic_grace <boolean> : Set True if using relaxation based on rectangle's height otherwise False
Returns:
        <Boolean> : True if the rectangles intersect or one lies inside the other, considering grace values; otherwise False
"""
ax1, ay1, aw, ah = box_a
ax2, ay2 = ax1+aw-1, ay1+ah-1
bx1, by1, bw, bh = box_b
bx2, by2 = bx1+bw-1, by1+bh-1
x_grace = y_grace = 0
if dynamic_grace:
x_grace = float(grace[0] * (ah+bh)/2) # min(ah, bh) # note: here height is used instead of width
y_grace = float(grace[1] * min(ah, bh))
x_grace = round(x_grace)
y_grace = round(y_grace)
if ax1-bx2 > x_grace or bx1-ax2 > x_grace or ay1-by2 > y_grace or by1-ay2 > y_grace:
return False
else:
return True
|
2e047459ca8b3d5cc596e53890bdf14e81e262bb
| 307,006 |
def get_children(node):
"""
    Return the children of the node. The children are all the elements of the
    list except the first
:param node: The node
:type node: list
:return: The children of the node
:rtype: list
"""
# Take a slice of the list except the head
return node[1:]
|
d53edbf5f54c9a1e25ec85d412203b31a10ca621
| 291,546 |
def batch(lst, n=5):
"""Yield successive n-sized chunks from list lst
adapted from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
Input
lst: list
n: selected batch size
Return
List: Nested list that contains batches of len(lst)/n lists
"""
batched_list = []
for i in range(0, len(lst), n):
batched_list.append(lst[i:i + n])
return batched_list
|
691ad93cefc4761507113fecb22520f79688d8b9
| 305,726 |
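Example of batch above:

print(batch(list(range(7)), n=3))  # [[0, 1, 2], [3, 4, 5], [6]]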
import importlib
def find_class_using_name(module, class_name):
"""
module: example `deepcage.models`
class_name: example `shapes`
"""
# the file "module/class_name.py"
# will be imported.
class_filename = "." + class_name.lower()
modellib = importlib.import_module(class_filename, module)
# In the file, the class called ClassName() will
# be instantiated. It is case-insensitive.
matched_class = None
target_class_name = class_name.replace('_', '')
for name, found_class in modellib.__dict__.items():
if name.lower() == target_class_name.lower():
matched_class = found_class
if matched_class is None:
raise ImportError("In %s.py, there should be a class with a name that matches %s in lowercase." % (class_filename, target_class_name))
return matched_class
|
c0affeae822578c1006bfcd2a48e31cebed9c2d2
| 242,358 |
def _obj2dict(obj):
""" Convert plain object to dictionary (for json dump) """
d = {}
for attr in dir(obj):
if not attr.startswith('__'):
d[attr] = getattr(obj, attr)
return d
|
6eff6f7fa301d100b43601f2675272da5f0f267a
| 379,858 |
import re
def sanitize_filename(filename: str) -> str:
"""
Ensure valid filenames.
Given a filename, remove all characters that are
potentially hazardous in a filename.
The only chars allowed are
- Word characters ([a-zA-Z0-9_])
- Dashes
- Periods
- Spaces
- Parenthesis
Parameters
----------
filename : str
The filename to clean.
Returns
-------
str
The cleaned up filename.
Examples
--------
>>> sanitize_filename('python is fun 🐍.py')
'python is fun _.py'
"""
return re.sub(r'[^\w\-_. ()]', '_', filename)
|
2ac35868be52f5757655ad8311a2e1f00b7c3f36
| 234,727 |
def get_text_between_substrings(input_str, left_sub, right_sub):
"""Returns the text located between two given substrings.
Args:
input_str: String. The text will be extracted from it
left_sub: String. The substring on the left of the text
right_sub: String. The substring on the right of the text
Returns:
String: the text between the substrings (or '' if not found)
"""
res = ""
if left_sub in input_str and right_sub in input_str:
if input_str.find(left_sub) < input_str.find(right_sub):
right_part = input_str.split(left_sub)[1]
res = right_part.split(right_sub)[0].strip()
return res
|
c5e10abc6767eac28f72ab8cb49b6349cfd0b4c7
| 550,831 |
def raw(text):
"""
Just return the message in the same state that it was submitted.
"""
return text
|
f231d5211582c665a1f591a830c4ef98432bb364
| 523,658 |
import torch
def make_valid_from_train(dataset, cut=0.9):
"""
Split training data to get validation set
:param dataset: Training dataset
:param cut: Percentage of dataset to be kept for training purpose
"""
tr_ds, val_ds = [], []
for task_ds in dataset:
x_t, y_t = task_ds
# shuffle before splitting
perm = torch.randperm(len(x_t))
x_t, y_t = x_t[perm], y_t[perm]
split = int(len(x_t) * cut)
x_tr, y_tr = x_t[:split], y_t[:split]
x_val, y_val = x_t[split:], y_t[split:]
tr_ds += [(x_tr, y_tr)]
val_ds += [(x_val, y_val)]
return tr_ds, val_ds
|
3f6fa8fe97af132e9dd575df9416145874717f4a
| 668,211 |
def stream_word_matches_symbol(stream, word_number, *, symbol):
""" Returns an nMigen conditional that evaluates true if the given word of a stream matches the given symbol. """
return (
stream.valid &
(stream.data.word_select(word_number, 8) == symbol.value) &
(stream.ctrl[word_number] == symbol.ctrl)
)
|
b73473810710ee3e0fcc6738d0fcff88d8b06133
| 325,171 |
import socket
def get_addr_info(addr):
"""Use getaddrinfo to lookup all addresses for each address.
Returns a list of tuples or an empty list:
[(family, address)]
"""
results = set()
try:
tmp = socket.getaddrinfo(addr, 'www')
except socket.gaierror:
return []
for el in tmp:
results.add((el[0], el[4][0]))
return results
|
9d8e1ca9070bf4f036f83e038e28d3c12e2fa6fc
| 156,667 |
def str_to_int_v2(s: str) -> int:
"""Convert the given string into it's integer form.
Args:
s: The string whose integer form to be gotten.
Returns:
The integer form of the given string.
"""
res = 0
    initial_point = 0 if s[0] != '-' else 1  # '!=', not 'is not': identity checks on str literals are unreliable
for i in range(initial_point, len(s)):
cur_digit = ord(s[i]) - ord('0')
res = res * 10 + cur_digit
return (res if not initial_point else -res)
|
3c2ad54bb05b9e631299bcf68c52595a645f221c
| 524,263 |
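Example calls for str_to_int_v2 above:

print(str_to_int_v2("8421"))  # 8421
print(str_to_int_v2("-137"))  # -137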
def is_closed(instance, *args, **kwargs):
"""
Helper function to test if some instance is closed (to be used with
``raise_if`` decorator)
:param instance: some instance with ``is_closed`` property accessible
:returns: value of ``instance.is_closed`` or False
"""
return getattr(instance, 'is_closed', False)
|
307178eacc07a39743243fd693af414b40619c74
| 641,590 |
def _is_fft_sgn_negative(sicd, dimension):
"""
Check if the sicd structure has negative fft sign along the given dimension.
Parameters
----------
sicd : SICDType
dimension : int
Returns
-------
bool
"""
if dimension == 0:
if sicd.Grid is None or sicd.Grid.Row is None or sicd.Grid.Row.Sgn is None:
return True
return sicd.Grid.Row.Sgn == -1
else:
if sicd.Grid is None or sicd.Grid.Col is None or sicd.Grid.Col.Sgn is None:
return True
return sicd.Grid.Col.Sgn == -1
|
4414836fc6b9d6ab355c3b478776b6802d8e6154
| 413,798 |
def findall_text(node, path):
"""Find all n.text elements from a path.
"""
return [n.text for n in node.findall(path)]
|
2a40b2442e50e58a64320539153e27acfde15a8f
| 49,533 |
import yaml
def load_yaml(filename):
"""
Load a generic YAML file
"""
with open(filename) as f:
data = yaml.load(f, Loader=yaml.SafeLoader)
return data
|
75279c4e993f8e7c20de2c5c2171393feba2ca34
| 402,055 |
def num_physical_imei_shards(conn):
"""
    Helper function to return the number of physical shards for IMEI-sharded partitions.
Arguments:
conn: dirbs db connection object
Returns:
number of physical imei shard in the schema
"""
with conn.cursor() as cursor:
cursor.execute('SELECT phys_shards FROM schema_metadata')
return cursor.fetchone()[0]
|
ef31d9bff061c006c636840c387b29ed81d96fea
| 89,323 |
def getSortOnName(prefix=None):
"""convert the table prefix to the 'sort on' name used in forms"""
# useful for code that wants to get the sort on values themselves
sort_on_name = 'sort_on'
if prefix is not None:
if not prefix.endswith('.'):
prefix += '.'
sort_on_name = prefix + sort_on_name
return sort_on_name
|
e8a68d283cb189abab2784a1cc198255133e4c1d
| 97,496 |
def from_key_to_line_number(key):
"""
Takes a key and returns the line number in it
:param key: The key to parse
:return: line number
"""
n = key.split(".", 2)[1]
# Sometimes line number contains a redundant "l" at the end ("Q005624.1l" for example), so we ignore it.
if n[-1] == "l":
n = n[:-1]
if not n.isdigit():
return -1
line_number = int(n)
return line_number
|
a9876d9779284e1c7882cd3ac669da294e835919
| 90,293 |
from typing import Iterable
from functools import reduce
import operator
def prod(iterable: Iterable) -> float:
"""Returns the product of the elements of an iterable."""
return reduce(operator.mul, iterable, 1)
|
5bb7d725003a3b696d3aef45377e6d682a4be022
| 660,647 |
def trunc(s, n):
"""
Truncate a string to N characters, appending '...' if truncated.
trunc('1234567890', 10) -> '1234567890'
trunc('12345678901', 10) -> '1234567890...'
"""
if not s:
return s
return s[:n] + "..." if len(s) > n else s
|
fab4565ce4973e8374e842c7d3a469b34b698f33
| 445,815 |
from typing import List
from typing import Any
def _get_avg(values: List[Any]):
"""Get average value of a list.
Args:
values: A list of values.
Returns:
The average value in the list.
"""
return sum(values) / len(values)
|
6f22e60ecf6af15d5e0b885925b913673b33ee1d
| 576,606 |
def _seconds_to_hours(time):
"""Convert time: seconds to hours"""
return time / 3600.0
|
d6abd9144882587833601e64d5c2226446f1bbdc
| 705,526 |
def get_unit_concept_ids(df, measurement_concept_id=None):
"""
Retrieve a unique set of unit concept ids for a given df
:param df: dataframe
:param measurement_concept_id: an option measurement_concept_id
:return: a unique set of unit_concept_ids
"""
unit_concept_ids = []
if measurement_concept_id is None:
unit_concept_ids = df['unit_concept_id'].unique()
else:
unit_concept_ids = df.loc[df['measurement_concept_id'] == measurement_concept_id, 'unit_concept_id'].unique()
return unit_concept_ids
|
c9827ba15b6491544887bbd3c7c04a3493f644c6
| 322,127 |
def adjustrow(row):
"""
Convert a grid row to a list-table row.
:param row: a row of grid table text
:type row: str
:return: a row of list-table text
:rtype: str
"""
    if row.startswith('+'):
return('\n')
    new_row = row.split('|')
    try:
        new_row.pop(new_row.index(''))
    except ValueError:
        pass
convert = []
convert.append(' * - ' + new_row[0].strip())
for entry in new_row[1:]:
convert.append(('\n - ' + entry.strip()).rstrip())
result = ''.join(convert)
return(result)
|
c27685fa5498d2943dc07be6f9cafaf83eac0056
| 448,678 |
def unflatten_dict(flat_dict):
"""Convert a flattened dict to a nested dict.
Inverse of flatten_config.
Args:
flat_dict: A dictionary to unflatten.
Returns:
A dictionary with all keys containing `.` split into nested dicts.
{'a.b.c': 1} --> {'a': {'b': {'c': 1}}}
"""
result = {}
  for key, val in flat_dict.items():
parts = key.split('.')
cur = result
for part in parts[:-1]:
if part not in cur:
cur[part] = {}
cur = cur[part]
cur[parts[-1]] = val
return result
|
de01c934239eb29f33fe6ad8846cd828cabea61d
| 64,192 |
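A round-trip example for unflatten_dict above:

print(unflatten_dict({'a.b.c': 1, 'a.b.d': 2, 'e': 3}))
# {'a': {'b': {'c': 1, 'd': 2}}, 'e': 3}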
def cov_parse_rms_unit_of_weight(line: str) -> float:
"""
    Read the 'RMS unit of weight' value from a COV file.
    The value is found on line 7 of the file, which looks something like:
    'RMS OF UNIT WEIGHT: 0.0010 # OBS: 328817 # UNKNOWNS: 5424'
"""
return float(line[21:27])
|
ff339cba1ac2ea01802300ba2699ce9d994c92be
| 125,315 |
def accuracy_score(truth, pred):
""" Returns accuracy score for input truth and predictions. """
# Ensure that the number of predictions matches number of outcomes
if len(truth) == len(pred):
# Calculate and return the accuracy as a percent
return "Predictions have an accuracy of {:.2f}%.".format((truth == pred).mean()*100)
else:
return "Number of predictions does not match number of outcomes!"
|
5dabafdf88bee663afecbd9170e390055122322d
| 203,570 |
def snake_to_text(x: str) -> str:
"""Convert snake case to regular text, with each word capitalized."""
return " ".join([w.capitalize() for w in x.split("_")])
|
2aa2d3af53c0ffac43971d51e174c24f0566a6a4
| 566,880 |
def _connector_classes_str_to_dict(classes_conf_str):
"""
Input: 'foo:bar, baz'
Result: {'foo': 'bar', 'baz': 'baz'}
"""
if len(classes_conf_str) == 0:
return {}
classes_conf_dict = {}
for cc in classes_conf_str.split(','):
cc_t = cc.strip().split(':')
if len(cc_t) == 1:
classes_conf_dict[cc_t[0]] = cc_t[0]
else:
classes_conf_dict[cc_t[0]] = cc_t[1]
return classes_conf_dict
|
8a73c01b2ea74a463f6a8128466f5ea740a1b9c7
| 199,553 |
import mimetypes
def check_gzip_path(file_path):
"""Check if we have a gzipped file path"""
    # guess_type returns (type, encoding); the encoding is 'gzip' for .gz files
    _, encoding = mimetypes.guess_type(file_path)
    return encoding == 'gzip'
|
70da135199f87f0c318a2ff9ca610f60f081a860
| 431,858 |
def compute_target(difficulty_target_bits):
"""
Calculate a target hash given a difficulty.
"""
return 2 ** (256 - difficulty_target_bits)
|
65f4a5c9e01acf5f829edc949154171b6de96c19
| 26,796 |
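Worked arithmetic for compute_target above: with 16 difficulty bits the target is 2 ** 240, so a valid hash must have its top 16 bits zero (four leading hex zeros).

assert compute_target(16) == 2 ** 240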
def to_bytes(maybe_bytestring):
"""
Encode string to bytes.
Convenience function to do a simple encode('utf-8') if the input is not
already bytes. Returns the data unmodified if the input is bytes.
"""
if isinstance(maybe_bytestring, bytes):
return maybe_bytestring
else:
return maybe_bytestring.encode('utf-8')
|
8cb0db75d17c59a69dbc789534246f68f6dcdfe7
| 314,848 |
def get_all_descendants(root, children_map):
"""
Returns all descendants in the tree of a given root node, recursively
visiting them based on the map from parents to children.
"""
return {root}.union(
*[get_all_descendants(child, children_map)
for child in children_map.get(root, [])]
)
|
8a6485c25f05a572e97ec88b35223c0171a90128
| 673,415 |
def covariance(x, mean_x, y, mean_y):
"""
This function calculates the co-variance of x and y
Args:
x (np array): array
mean_x (float): mean of x array
y (np array): array
mean_y (float): mean of y array
Returns:
float: co-variance
"""
covar = 0.0
for i in range(x.shape[1]):
covar += (x[0][i]-mean_x) * (y[0][i]-mean_y)
return covar
|
50037ef544e19cc9fa64dd922174e3b48312a4ca
| 436,393 |
def stringify_tokens(tokens):
"""
Given a list or set of tokens, return them as a single string
"""
return ''.join(tokens)
|
357a8e3c99d87bbb31b60dd4c973de33f800e7ef
| 416,224 |
from typing import List
def remove_duplicates(tag_list: List[dict]) -> List[dict]:
"""Remove duoplicate elements from `tag_list`"""
no_duplicates = []
for i in tag_list:
if i not in no_duplicates:
no_duplicates.append(i)
return no_duplicates
|
9ed038daa66f703858207637a0a29a0bbb1f5ba2
| 485,618 |
import torch
def mape(target, predictions:list, total = True):
"""
    Calculate the mean absolute percentage error (MAPE, in %)
Parameters
----------
target : torch.Tensor
true values of the target variable
predictions : list
- predictions[0] = predicted expected values of the target variable (torch.Tensor)
total : bool, default = True
Used in other loss functions to specify whether to return overall loss or loss over
the horizon. This function only supports the former.
Returns
-------
torch.Tensor
A scalar with the mean absolute percentage error in % (lower the better)
Raises
------
NotImplementedError
When 'total' is set to False, as MAPE does not support loss over the horizon
"""
if not total:
raise NotImplementedError("MAPE does not support loss over the horizon")
return torch.mean(torch.abs((target - predictions[0]) / target)) * 100
|
79428dd9e8dc1edea65f6be44ab08d56af10366b
| 368,393 |
import math
def calculate_probability(x, mean, stdev):
"""
Calculates the probability that a given tier is a word or phone
Parameters
----------
x : float
duration of the object in question
mean : float
mean duration of that type of object
stdev : float
standard deviation from mean
"""
exponent = math.exp(-(math.pow(x - mean, 2) / (2 * math.pow(stdev, 2))))
return (1 / (math.sqrt(2 * math.pi) * stdev)) * exponent
|
ad229914f73adb5c8af1661c0cf0a71451404792
| 306,658 |
def _get_column_index(i, inputs):
"""
Taken from https://github.com/onnx/sklearn-onnx/blob/9939c089a467676f4ffe9f3cb91098c4841f89d8/skl2onnx/common/utils.py#L50.
Returns a tuples (variable index, column index in that variable).
The function has two different behaviours, one when *i* (column index)
is an integer, another one when *i* is a string (column name).
If *i* is a string, the function looks for input name with this name and returns (index, 0).
If *i* is an integer, let's assume first we have two inputs
*I0 = FloatTensorType([None, 2])* and *I1 = FloatTensorType([None, 3])*,
in this case, here are the results:
::
get_column_index(0, inputs) -> (0, 0)
get_column_index(1, inputs) -> (0, 1)
get_column_index(2, inputs) -> (1, 0)
get_column_index(3, inputs) -> (1, 1)
get_column_index(4, inputs) -> (1, 2)
"""
if isinstance(i, int):
if i == 0:
# Useful shortcut, skips the case when end is None
# (unknown dimension)
return 0, 0
vi = 0
return (vi, i)
else:
raise RuntimeError("Hummingbird currently support only int columns, {} is not supported.".format(i))
|
9a46a6314777ef1538a7056f998110d615abcd8c
| 586,279 |
def invertDictMapping(d):
""" Invert mapping of dictionary (i.e. map values to list of keys) """
inv_map = {}
    for k, v in d.items():
inv_map[v] = inv_map.get(v, [])
inv_map[v].append(k)
return inv_map
|
1fb43ece9a3e45724ba46a8d55469b95848f426b
| 195,302 |
def spaces(text):
"""Returns whitespace equal to the length of the given text.
This is useful for making things line up.
"""
return ' ' * len(text)
|
ba9ecdcaab19884521fbd297d7afc500e667a277
| 112,280 |
def dict_key_checker(current_dict, current_key):
"""
Function to check if a dictionary contains a key.
Parameters:
current_dict (dict): The dictionary.
current_key (str): They key.
Returns:
True if the dictionary contains the key. False otherwise.
"""
    return current_key in current_dict
|
029e9a8640326e0001b9aac85f4a261de25ea96b
| 374,810 |
def char_tokenizer(string):
"""
Splits a string into individual character symbols.
Args:
string (string): The string to be split into characters.
Returns:
list: The characters of the string.
"""
return list(string)
|
2694f670bf862321e42768d63970f04b76782f07
| 72,580 |