content (string, lengths 39 – 9.28k) | sha1 (string, length 40) | id (int64, 8 – 710k)
---|---|---
import re
def remove_underlines(word):
"""Removes both underlines and the occasional grammar mark from words"""
return re.sub("/.*$", "", word).replace("_", " ")
|
6d3f56df2d97953e27957587f4537aafc1846998
| 74,165 |
import re
def _split_regexp(restr):
"""Return a 2-tuple consisting of a compiled regular expression
object and a boolean flag indicating if that object should be
interpreted inversely."""
if restr[0] == "!":
return re.compile(restr[1:]), 1
return re.compile(restr), 0
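# Usage sketch with made-up patterns: a leading "!" marks the filter as inverted.
_rx, _inverted = _split_regexp("!^tmp/")
assert _rx.pattern == "^tmp/" and _inverted == 1
_rx, _inverted = _split_regexp(r"\.py$")
assert _rx.pattern == r"\.py$" and _inverted == 0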
|
48b8bb22429c09fa93fe1d70d96a9388a45a61ee
| 64,789 |
def _try_format_numeric(text):
"""remove leading/trailing zeros, leading "+", etc. from numbers. Non numeric values are left untouched."""
try:
numeric = float(text)
if int(numeric) == numeric: # remove trailing .0
numeric = int(numeric)
text = str(numeric)
except ValueError:
pass
return text
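# Quick illustrative checks: numeric strings are normalised, non-numeric text passes through unchanged.
assert _try_format_numeric("007.50") == "7.5"
assert _try_format_numeric("+3.0") == "3"
assert _try_format_numeric("n/a") == "n/a"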
|
c21760fccf6e4f340a5f47ec79af925508df9b84
| 103,687 |
def parse_content(content):
"""
    Parse data into a hash map,
    where the key is a word (str) and the value is its number representation (int).
"""
items = content.split("\n")
words = {item.split()[0]: int(item.split()[1]) for item in items}
return words
|
2c5b8ee55502c5313dffb8db7ae4a4f00cfedb78
| 529,969 |
import json
def create_label_json(path, labels=[], start_time=None):
""" Write a json file from a list of labels
Parameters
----------
path: str
Path of the future json file.
labels: list
List of labels, each label is a dictionary with keys 'id', 'start', 'end' and 'annotation'
start_time: float
Start time of audio extract in seconds
"""
    labels = labels if labels is not None else []  # avoid a shared mutable default argument
    start_time = start_time if start_time is not None else 0.0  # subtracting None would raise TypeError
    data_dict = []
for label in labels:
data_dict.append({
'id': 'none',
'start': label['start'] - start_time,
'end': label['end'] - start_time,
'annotation': 'bird'
})
with open(path, 'w') as outfile:
json.dump(data_dict, outfile)
return None
|
4a5892d4831e248ac091683134f4c7b813612d3c
| 420,381 |
def read_header_and_channels(filename, chtrig):
"""
Reads a txt file with a header and channels and separates them
Parameters
----------
filename: str
path to the txt Labchart file
chtrig : int
index of trigger channel
Returns
-------
header: list
header lines
channel_list:list
channel lines in list
"""
header = []
channel_list = []
with open(filename, 'r') as f:
for line in f:
            line = line.rstrip('\n').split('\t')
            while line and line[-1] == '':
                line.pop()  # sometimes there is an extra empty field at the end
            # drop comment fields (starting with '#') without mutating the list while iterating over it
            line = [item for item in line if item and not item.startswith('#')]
            if not line:
                continue
            try:
                float(line[0])
            except ValueError:
                header.append(line)
                continue
            line = [float(i) for i in line]
            channel_list.append(line)
return header, channel_list
|
649d252709cd036264cb9a93abebe5bcf2b1d6b2
| 665,102 |
def merge_dict(base, patch):
"""Recursively merge `patch` into `base`
If a key exists in both `base` and `patch`, then:
- if the values are dicts, they are merged recursively
- if the values are lists, the value from `patch` is used,
but if the string `'+merge'` occurs in the list, it is replaced
with the value from `base`.
"""
result = dict(base)
for key, value in patch.items():
if key not in result:
result[key] = value
continue
previous = base[key]
if isinstance(value, dict):
result[key] = merge_dict(previous, value)
elif isinstance(value, list):
result[key] = new = []
for item in value:
if item == '+merge':
new.extend(previous)
else:
new.append(item)
else:
result[key] = value
return result
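# Usage sketch with made-up configs: '+merge' in a patch list splices in the list from `base`,
# and nested dicts are merged recursively.
_base = {"opts": ["-v"], "env": {"A": 1}}
_patch = {"opts": ["+merge", "-x"], "env": {"B": 2}}
assert merge_dict(_base, _patch) == {"opts": ["-v", "-x"], "env": {"A": 1, "B": 2}}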
|
ac8814741a95517fe2b4f7bfdcdf0ee975322dfd
| 355,607 |
import hashlib
def gimmeh_sha1(buffer):
"""
Returns a SHA1 for a buffer
"""
sha1 = hashlib.sha1()
sha1.update(buffer)
return sha1.hexdigest()
|
c385bf753eef67080c2242eb3fb8205ad23eb8ed
| 348,854 |
def permute_unique_i(nums: list[int]) -> list[list[int]]:
"""Compute all the *unique* permutations of the elements in a given input array
Args:
nums: array of possibly non-distinct elements
Returns: all *unique* permutations of elements in `nums`
Examples:
>>> sorted(permute_unique_i([1,1,2]))
[[1, 1, 2], [1, 2, 1], [2, 1, 1]]
>>> sorted(permute_unique_i([1,2,1,1]))
[[1, 1, 1, 2], [1, 1, 2, 1], [1, 2, 1, 1], [2, 1, 1, 1]]
>>> sorted(permute_unique_i([1,2,3]))
[[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]
"""
"""ALGORITHM"""
def get_last_valid_insertion_idx(perm, item): # nosemgrep
"""Restrict insertion past the first occurrence of a duplicate value (if one exists)"""
# equivalent to `(perm + [item]).index(item)`
try:
return perm.index(item)
except ValueError:
return len(perm)
# DS's/res
uniq_perms = [[]]
# Build unique permutations
# increasing the permutation size by 1 at each iteration
for curr_num in nums:
np = []
for perm in uniq_perms:
for insertion_idx in range(
get_last_valid_insertion_idx(perm, curr_num) + 1
):
new_perm = perm.copy()
new_perm.insert(insertion_idx, curr_num)
np.append(new_perm)
uniq_perms = np
return uniq_perms
|
9ba00cdd30c818a18bfa7c637b515ba404551dc8
| 454,647 |
def translate(array, del_x, del_y):
"""
Translates a numpy array
Args:
array: array to be translated
del_x: the change in x
del_y: the change in y
Returns:
Translated array
"""
c = array.copy()
c[:, 0] += del_x
c[:, 1] += del_y
return c
|
8e2f2a3bf3d8e8103e4228a310be50b76b12e064
| 282,215 |
import torch
def add_coord_channels(image_tensor: torch.Tensor, device: str = 'cpu') -> torch.Tensor:
""" Adds channels containing pixel indices (x and y coordinates) to an image. """
B, C, H, W = image_tensor.shape
x_grid = torch.linspace(start=-1.0, end=1.0, steps=H).view((1, 1, H, 1)).to(device)
x_map = torch.tile(x_grid, (B, 1, 1, W)).to(device)
y_grid = torch.linspace(start=1.0, end=-1.0, steps=W).view((1, 1, 1, W)).to(device)
y_map = torch.tile(y_grid, (B, 1, H, 1)).to(device)
return torch.cat([image_tensor, x_map, y_map], dim=1).to(device)
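# Illustrative shape check: two coordinate channels are appended along dim 1.
_img = torch.zeros(2, 3, 8, 6)
assert add_coord_channels(_img).shape == (2, 5, 8, 6)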
|
6b7ff0332464dc959980b44aa70a7afad0a453b7
| 375,767 |
import yaml
def read_yml_config(path):
"""Read YAML formatted configuration file.
Args:
path (str): path to configuration file to read
Returns:
dict: machines with attributes
"""
with open(path, 'rt') as f:
data = yaml.load(f.read(), Loader=yaml.Loader)
# Machines are entries with a 'mac' attribute
data = {machine: desc for machine, desc in data.items() if 'mac' in desc}
return data
|
9e0cc7223f6eb196a21c950c8b4e68e496d88150
| 416,629 |
def calc_g(r, aa):
"""
DESCRIPTION TBD (FUNCTION USED IN GENERAL CONSTANTS).
Parameters:
r (float): radius
aa (float): spin parameter (0, 1)
Returns:
g (float)
"""
return 2 * aa * r
|
d4dd9918ae3ec3912e74dfbd314afa858179005b
| 328,102 |
import socket
def connect(url, port):
""" Creates and returns a socket that is connected to the specified URL.
This socket should be used for only ONE transfer (one send & one receive).
Parameters
----------
    url : string
        the URL that the socket will attempt to connect to
    port : int
        the port that the socket will attempt to connect to
    Returns
    -------
    client_socket : socket or None
        the socket connected to the URL, or None if the connection could not be created
"""
try:
client_socket = socket.create_connection((url,port),timeout=10)
return client_socket
except Exception as err:
return None
|
896e9d67625e9672a1b0c3d42072ef76740e4f50
| 203,665 |
import re
def _remove_curly_braces(text):
"""Remove everything in curly braces.
Curly braces may be nested, so we keep track of depth.
Args:
text: a string
Returns:
a string
"""
current_pos = 0
depth = 0
ret = ""
for match in re.finditer("[{}]", text):
if depth == 0:
ret += text[current_pos:match.start()]
depth += 1 if text[match.start()] == "{" else -1
current_pos = match.end()
if depth != 0:
# Many articles have mismatched braces, but it still seems better to remove
# them than not.
pass
else:
ret += text[current_pos:]
return ret
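# Illustrative example: nested braces and everything inside them are stripped.
assert _remove_curly_braces("see {ref {nested}} here") == "see  here"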
|
aa7f42718b29f892be7e74cf5318720582e70f9f
| 178,741 |
def get_condition_immunities(monster_data) -> str:
"""Returns a string list of conditions to which the monster is
immune.
"""
return ", ".join([imm['index'] for imm in monster_data['condition_immunities']])
|
3fa61d06e706046874d6cedcd9b7d53aaa8529c8
| 406,404 |
def chunker(seq, size):
"""
originally from
https://stackoverflow.com/questions/434287/what-is-the-most-pythonic-way-to-iterate-over-a-list-in-chunks
"""
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
|
6c412621a2fadf91df2e659eb700b5552e6972f3
| 271,281 |
import inspect
def get_valid_instance_from_class(obj_or_class, valid_class_types=None, *instance_args, **instance_kwargs):
"""
2-Step function to instantiate/validate a given parameter.
First step: check if `obj_or_class` is a class or an instance.
If it is a class, create a new instance of that class.
Second step: (optional) validate if the instance is of type `valid_class_types`.
:param obj_or_class:
:param valid_class_types: list or tuple of valid class types.
:param instance_args:
:param instance_kwargs:
:raise ValueError: if valid_class_types is given and isinstance fails.
:return: instance of type `obj_or_class`/`obj_or_class.__class__`
"""
if obj_or_class is None:
raise ValueError("`obj_or_class` may not be None.")
# check if we already have an instance
if inspect.isclass(obj_or_class):
instance = obj_or_class(*instance_args, **instance_kwargs)
else:
instance = obj_or_class
# check if we need to validate for specific classes
if valid_class_types and not isinstance(instance, valid_class_types):
raise ValueError("%s is not of of type %s" % (instance, valid_class_types))
return instance
|
288e8d7fbb3964f2f344ad146379271e9f9e2b2c
| 569,955 |
def _get_instance_list(mig, field='name', filter_list=None):
"""
Helper to grab field from instances response.
:param mig: Managed Instance Group Object from libcloud.
:type mig: :class: `GCEInstanceGroupManager`
:param field: Field name in list_managed_instances response. Defaults
to 'name'.
:type field: ``str``
:param filter_list: list of 'currentAction' strings to filter on. Only
items that match a currentAction in this list will
be returned. Default is "['NONE']".
:type filter_list: ``list`` of ``str``
:return: List of strings from list_managed_instances response.
:rtype: ``list``
"""
filter_list = ['NONE'] if filter_list is None else filter_list
return [x[field] for x in mig.list_managed_instances()
if x['currentAction'] in filter_list]
|
efcb7c948583e7433ff30030cd934903f9953632
| 48,133 |
from typing import List
def list_to_sql_string(lst: List[str]) -> str:
"""turns python lists into a string that can be put into a postgres tuple and serve as a list in postgres
:param lst: a list of strings like ``['pg_catalog','information_schema']``
:return: a string of items in SQL format ``"'pg_catalog','information_schema','hi'"``
"""
return "'" + "','".join(lst) + "'"
|
e44443e3b814a506c50810b245da088cda132f9c
| 178,104 |
def get_package_name(manifest_tree):
"""Analyze manifest to see package name of app."""
root = manifest_tree.getroot()
return root.attrib['package']
|
f50105968bffa33c7293c8abacd06210ec3c7e6a
| 288,242 |
def _vpres(T):
""" Polynomial approximation of saturated water vapour pressure as
a function of temperature.
Parameters
----------
T : float
Ambient temperature, in Kelvin
Returns
-------
float
Saturated water vapor pressure expressed in mb
See Also
--------
es
"""
# Coefficients for vapor pressure approximation
A = [
6.107799610e0,
4.436518521e-1,
1.428945805e-2,
2.650648471e-4,
3.031240396e-6,
2.034080948e-8,
6.136820929e-11,
]
T -= 273 # Convert from Kelvin to C
vp = A[-1] * T
for ai in reversed(A[1:-1]):
vp = (vp + ai) * T
vp += A[0]
return vp
|
d26e3b3d6220b14fd2d05a444f9c51b10a08ab48
| 481,736 |
def xyz2lab(X, Y, Z, th=0.008856):
"""transform XYZ tristimulus arrays to Lab values
Parameters
----------
X : np.array, size=(m,n)
Y : np.array, size=(m,n)
Z : np.array, size=(m,n)
Returns
-------
L : np.array, size=(m,n)
a : np.array, size=(m,n)
b : np.array, size=(m,n)
See also
--------
rgb2xyz, xyz2lms, lms2lch
Notes
-----
.. [1] Ford & Roberts. "Color space conversion", pp. 1--31, 1998.
.. [2] Silva et al. "Near real-time shadow detection and removal in aerial
motion imagery application" ISPRS journal of photogrammetry and remote
sensing, vol.140 pp.104--121, 2018.
"""
Xn,Yn,Zn = 95.047, 100.00, 108.883 # D65 illuminant
YYn = Y/Yn
L_1 = 116* YYn**(1/3.)
L_2 = 903.3 * YYn
L = L_1
L[YYn<=th] = L_2[YYn<=th]
    def f(tau, th):
        # piecewise CIELAB companding applied to the passed-in ratio (not the outer X)
        fx = tau**(1/3.)
        fx[tau<=th] = 7.787*tau[tau<=th] + 16/116
        return fx
    a = 500*( f(X/Xn, th) - f(Y/Yn, th) )
    b = 200*( f(Y/Yn, th) - f(Z/Zn, th) )
return L, a, b
|
217031277139873120c7c25d59c8be2730c481a9
| 189,948 |
def dedent_initial(s, n=4):
# type: (str, int) -> str
"""Remove identation from first line of text."""
return s[n:] if s[:n] == ' ' * n else s
|
cf09c9aa846b2aeb3278401f921d00912b90bb1f
| 42,386 |
import re
def safe_subn(pattern, repl, target, *args, **kwargs):
"""
There are Unicode conversion problems with re.subn. We try to smooth
that over by casting the pattern and replacement to strings. We really
    need a better solution that is aware of the actual content encoding.
"""
return re.subn(str(pattern), str(repl), target, *args, **kwargs)
|
83cb77c93bf4589a14ed87f308250f0ebb2430f3
| 539,404 |
def KSA(key: bytes) -> list:
"""
    Key Scheduling Algorithm,
    the first step in RC4. It
    returns an 'extended' key (the permuted state table) which
    has higher entropy than the
    simple key concatenation.
params:
key: IV + Wi-Fi password
returns:
list of bytes, 'extended' key
"""
table = list(range(256))
y = 0
for x in range(256):
y = (y + table[x] + key[x % len(key)]) % 256
table[x], table[y] = table[y], table[x]
return table
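# Usage sketch with a made-up IV and password: the scheduled key is a permutation of 0..255,
# which the RC4 PRGA would then consume.
_state = KSA(b"\x01\x02\x03" + b"secret")
assert sorted(_state) == list(range(256))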
|
027bd1f2e8577929c8f82fa1a48c8ba57d9fa63b
| 123,536 |
def create_deets_message(time, size, image):
"""Creates message of image details for the GUI client
Image details returned include the time the image was
uploaded or processed and the image size in pixels. If
the image was original, the upload time is returned. If
the image was inverted, the processed time is returned.
Args:
time (str): timestamp of upload/processing
size (tuple): width, height of image in pixels
image (str): name of image
Returns:
str: message to be shown to user
"""
if "inverted" in image:
time_type = "processed"
else:
time_type = "uploaded"
width, height = size
deets_message = "Time {}: {}\n" \
"Image size: {} x {}" \
.format(time_type, time, width, height)
return deets_message
|
f9989d85f1cdd10df7901c2cfd879fe25fc6cf39
| 683,233 |
import time
def random_time_range(start, end, file_format, prop):
"""Get a time at a proportion of a range of two formatted times.
    start and end should be strings specifying times formatted in the
    given format (strftime-style), giving an interval [start, end].
    prop specifies the proportion of the interval to be taken after
    start. The returned time will be in the specified format.
"""
stime = time.mktime(time.strptime(start, file_format))
etime = time.mktime(time.strptime(end, file_format))
ptime = stime + prop * (etime - stime)
return time.strftime(file_format, time.localtime(ptime))
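# Illustrative check (assumes no DST change in the local timezone across the range):
# the midpoint of a two-day interval lands on the middle day.
assert random_time_range("2020-01-01 00:00", "2020-01-03 00:00", "%Y-%m-%d %H:%M", 0.5) == "2020-01-02 00:00"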
|
40d8e84b2d2c44d932967430a7e585f287a1ac9b
| 623,421 |
import requests
def get_page(url: str) -> str:
"""Gets the html content of a web page
"""
req = requests.get(url)
return req.text
|
7ccc113b546a49dd9db3164c76116be32ec4f9ac
| 362,930 |
import re
def cfg_to_group(cfg, return_list=False):
"""Return a wandb-safe group name for logging. Optionally returns group name as list."""
lst = [cfg.task, cfg.modality, re.sub('[^0-9a-zA-Z]+', '-', cfg.exp_name)]
return lst if return_list else '-'.join(lst)
|
39e551ac14e0d6f2eaa15054cda21230fe2b4e32
| 511,445 |
from typing import List
def get_union_args(tp) -> List:
""" Return a list of typing.Union args. """
return list(tp.__args__)
|
49614904ca7eef7ad2ccc11bd92b14f2e120b593
| 591,654 |
def parse_time(timestring, type_of=int):
"""
    Given a time string, return a list of converted values,
    i.e. "09:44" returns [9, 44] with the default args;
    you can pass any callable as the type argument.
:param timestring: String such as '09:44'
:param type_of: A type which the split string should be converted to,
suitable types are: :class:`int`, :class:`str` and
:class:`float`.
"""
    return list(map(type_of, timestring.split(":")))
|
b3c33e67b962538affc431a91cd43cc2a62f3d44
| 262,283 |
def _remove_namespace(subject: str) -> str:
"""If a namespace is present in the subject string, remove it."""
return subject.rsplit('}').pop()
|
084894a5a7501f631d62130d822ec2618a23e5ec
| 315,572 |
import torch
def reconstruction_loss(x, x_hat, *args, **kwargs):
"""
Returns the binary cross entropy loss
Parameters
----------
x : Tensor
the autoencoder input tensor
x_hat : Tensor
the autoencoder output tensor
Returns
-------
Tensor
the (1,) loss tensor
"""
return torch.nn.BCELoss()(x_hat, x)
|
ca1bb562ed11262e4ebdb5a45d1ca5669c828b1d
| 600,183 |
def filter_before(df, date_):
"""Return df excluding rows where index dates are before input date_"""
try:
return df[df.index >= date_]
except (AttributeError, TypeError):
return df
|
2c4bfcd95e64e558f977c206e9ebe62d72114783
| 578,861 |
def _get_group(username: str) -> str:
"""
Get the group from a username
Usernames are in the format <initials>-<group>. The group names are
therefore extracted by splitting the usernames about the hyphen.
"""
return username.split("-")[-1]
|
db43503520979487793b4a0cb71094d3214b103d
| 397,350 |
def declaration_path(decl, with_defaults=True):
"""
Returns a list of parent declarations names.
:param decl: declaration for which declaration path should be calculated
:type decl: :class:`declaration_t`
:rtype: [names], where first item contains top parent name and last item
contains the `decl` name
"""
if not decl:
return []
if not decl.cache.declaration_path:
result = [decl.name]
parent = decl.parent
while parent:
if parent.cache.declaration_path:
result.reverse()
decl.cache.declaration_path = parent.cache.declaration_path + \
result
return decl.cache.declaration_path
else:
result.append(parent.name)
parent = parent.parent
result.reverse()
decl.cache.declaration_path = result
return result
else:
return decl.cache.declaration_path
|
f1b9eeb4148b23d185fe016319206f58f5cda3e9
| 480,319 |
import math
def haversine(lat1, lon1, lat2, lon2):
"""
compute the distance in meters between two points in latlon
Parameters
----------
lat1: int or float
the latitude of point 1
lon1: int or float
the longitude of point 1
lat2: int or float
the latitude of point 2
lon2: int or float
the longitude of point 2
Returns
-------
float
the distance between point 1 and point2 in meters
"""
radius = 6371000
lat1, lon1, lat2, lon2 = map(math.radians, [lat1, lon1, lat2, lon2])
a = math.sin((lat2-lat1)/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin((lon2-lon1)/2)**2
c = 2 * math.asin(math.sqrt(a))
return radius * c
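# Rough sanity check: central Paris to central London is about 340 km.
_d = haversine(48.8566, 2.3522, 51.5074, -0.1278)
assert 330_000 < _d < 360_000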
|
38e27ed24615f5d099b79eecfbd1e918f4e1101c
| 488,165 |
def _transpose2Dlist(mat):
"""transpose a matrix that is constructed as a list of lists in pure
Python
The inner lists represents rows.
Parameters
----------
mat : list of lists (2-d list)
the 2D list represented as
| [[a_00, a_01, ..., a_0n],
| [a_10, a_11, ..., a_1n],
| ... ,
| [a_m0, a_m1, ..., a_mn]]
Returns
-------
matT : list of lists (2-d list)
transposed of ``mat``
Notes
-----
The function assumes that all the inner lists are of the same lengths.
It doesn't do any error checking.
"""
cols = len(mat[0])
matT = []
for i in range(cols):
matT.append([row[i] for row in mat])
return matT
|
55cccdeb5f061f96900e680c7e30558a71278bc2
| 224,547 |
import re
def regex_cnt(regex, text):
"""
    simple way to get the number of occurrences of a regex
Parameters
----------
regex: str, regex of an interested pattern
text: str
Return
------
    count of occurrences: int
"""
return len(re.findall(regex, text))
|
47893cea6dfc6f25133b069a125090d5f6dc1091
| 557,179 |
def edge_subgraph(G, edges):
"""Returns a independent deep copy subgraph induced by the specified edges.
The induced subgraph contains each edge in `edges` and each
node incident to any of those edges.
Parameters
----------
G : NetworkX Graph
edges : iterable
An iterable of edges. Edges not present in `G` are ignored.
Returns
-------
subgraph : SubGraph
        An edge-induced subgraph of `G`.
Examples
--------
>>> G = nx.path_graph(5)
>>> H = G.edge_subgraph([(0, 1), (3, 4)])
>>> list(H.nodes)
[0, 1, 3, 4]
>>> list(H.edges)
[(0, 1), (3, 4)]
"""
    return G.edge_subgraph(edges).copy()  # .copy() detaches the result; edge_subgraph alone returns a read-only view
|
d12d347ef98a632af11e96bd86f22aa5f6bd410d
| 173,050 |
def to_bytes(string: str) -> bytes:
"""Encodes the string to UTF-8."""
return string.encode("utf-8")
|
bc28913e34a8c5b741082a99312c8777930db691
| 582,240 |
def divide_by(value, arg):
"""
Returns the result of the division of value with arg.
"""
result = 0
try:
result = value / arg
    except (ZeroDivisionError, TypeError):
        # error in division; fall back to the default of 0
        pass
return result
|
f1ff4eee6f6392c9bb9ec8b3fd4fe5ec295c048f
| 626,888 |
def is_valid_month(month):
"""
Check if month's value is valid
:param month: int
:return: boolean
"""
return 12 >= month > 0
|
777fd1fd378c0c2ae742ffb2de385845a0abf6a6
| 240,420 |
from typing import Tuple
import io
import tokenize
def decode_bytes(src: bytes) -> Tuple[str, str, str]:
"""
Return a tuple of (decoded_contents, encoding, newline).
`newline` is either CRLF or LF but `decoded_contents` is decoded with
universal newlines (i.e. only contains LF).
"""
srcbuf = io.BytesIO(src)
encoding, lines = tokenize.detect_encoding(srcbuf.readline)
if not lines:
return "", encoding, "\n"
newline = "\r\n" if b"\r\n" == lines[0][-2:] else "\n"
srcbuf.seek(0)
with io.TextIOWrapper(srcbuf, encoding) as tiow:
return tiow.read(), encoding, newline
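# Illustrative example: CRLF sources decode to LF-only text but report "\r\n" as the newline.
_text, _enc, _nl = decode_bytes(b"x = 1\r\ny = 2\r\n")
assert _text == "x = 1\ny = 2\n" and _nl == "\r\n"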
|
93ecef939ff22853271676264a4ef2c02843a2a9
| 300,248 |
def one_hot_encode(label):
"""
Given a label - "red", "green", or "yellow". Returns a one-hot encoded label
"""
if label == "red":
return [1, 0, 0]
if label == "green":
return [0, 0, 1]
return [0, 1, 0]
|
6c6c2e60fa90fcf1a33c8c96814910113def9699
| 621,362 |
def giip(area=40, res_height=20, porosity=0.25, avg_water_saturation=0.4, gas_vol_factor=0.00533):
"""Returns the estimate for gas initially in place (SCF) given the area (acres), reservoir height (ft),
porosity (fraction), average water saturation (fraction), and the gas formation volume factor (RCF/SCF)."""
return (43560*area*res_height*porosity*(1-avg_water_saturation))/gas_vol_factor
|
ac9e189eb91c9c000f6d8e20e134451580e50653
| 601,756 |
def formated_timedelta(total_seconds):
"""
    Format a time delta as H:M:S
    :param total_seconds: seconds, e.g. from timedelta.total_seconds()
:return: formatted string
"""
if isinstance(total_seconds, str):
parts = total_seconds.split(':')
if len(parts) > 1:
# Assume already formatted
return total_seconds
try:
# Assume an integer as a string
total_seconds = int(total_seconds)
except ValueError:
# Don't know what this is, just return as is
return total_seconds
hours = int(total_seconds // 3600)
minutes = int((total_seconds % 3600) // 60)
seconds = int(total_seconds % 60)
return f'{hours}:{minutes}:{seconds}'
|
10950b760fb980ad2dab729ffe54c2f5abe0dd46
| 569,404 |
def extract_types(transaction):
"""
Extracts promo_type and service_type
Args:
transaction: input from FS chatplan
Returns:
promo_type: either U, B, D, G
service_type: either [local, outside] + [call, sms]
"""
needed_fields = ['U_', 'B_', 'D_', 'G_']
if transaction[:2] in needed_fields:
promo_type = transaction[:1]
service_type = transaction[2:]
else:
service_type = transaction
promo_type = ''
return promo_type, service_type
|
a22ec93da4ef24f6f34d98d7e2654f9b6ceedf05
| 209,238 |
def get_tool_info_from_test_id( test_id ):
"""
Test IDs come in the form test_tool_number
(functional.test_toolbox.TestForTool_toolshed_url/repos/owner/repository_name/tool_id/tool_version)
We want the tool ID and tool version.
"""
parts = test_id.replace( ')', '' ).split( '/' )
tool_version = parts[ -1 ]
tool_id = parts[ -2 ]
return tool_id, tool_version
|
50dbe084db963f425d3e3177d64ee54e3d47195d
| 506,346 |
from typing import List
def participant_de_dupe(participants) -> List:
""" Take a list of participants and return a deduped list (based on email). """
checked_emails = []
deduped = []
for participant in participants:
email = participant.email
if email not in checked_emails:
checked_emails.append(email)
deduped.append(participant)
return deduped
|
5c56ed8c16c3bc49a17ec3cf7465b9ca77811e27
| 134,911 |
def nonempty_region(r):
"""Return True if given region is nonempty.
>>> nonempty_region([1,2,3,4,5,6])
True
>>> nonempty_region([1,2,4,4,5,6])
False
"""
assert type(r) == list
assert len(r) == 6
return (r[0] != r[1] and
r[2] != r[3] and
r[4] != r[5])
|
9ab7c9b39498221acbe8662c8c983806e43dd00c
| 479,340 |
def deal_hands(deck, start):
"""
    Deal two hands from the deck, alternating cards starting at index `start`
"""
return (
[deck[start], deck[start + 2]],
[deck[start + 1], deck[start + 3]],
)
|
53c3c32eb8188d012cea2d9046c64f68a5cd28a8
| 666,806 |
def space_check(board, position):
"""
Returns a boolean indicating whether a space on the board is freely available.
:param board:
:param position:
:return:
"""
return board[position] == ' '
|
c09e93f3a51639e9a2fc94f8dfbd489745c0f40e
| 654,248 |
def add_visual_sphere(client, center=(0, 0, 0), radius=0.1,
rgba=(0.5, 0.5, 0.5, 0.5)):
"""Add a sphere to bullet scene (visual only, no physics).
Args:
client: pybullet client (or pybullet library handle).
center: Center of sphere.
radius: Sphere radius.
rgba: rgba color of sphere.
Returns:
Unique integer bullet id of constructed object.
"""
vis_obj_id = client.createVisualShape(
client.GEOM_SPHERE, radius=radius, rgbaColor=rgba)
obj_id = client.createMultiBody(
baseCollisionShapeIndex=-1, baseVisualShapeIndex=vis_obj_id,
basePosition=center)
return obj_id
|
f6d2d625517d5eac9d62d85490a7b4a67b540656
| 205,844 |
import tempfile
def _scrub_traceback_entry(code_name, entry):
"""Given the name of some code (e.g. "~ your sequence code ~") and
an entry from the trace list, scrubs any references to the names of
the files that contain the user code and replaces them with code_name.
"""
# The filenames in the trace will point to either Simulation/Vespa
# files or the user's pulse seq code. The latter is saved to the temp dir,
# so if the temp dir is present in the filename, it must refer to the
# user's code.
if tempfile.gettempdir() in entry[0]:
entry = tuple([code_name] + list(entry[1:]))
return entry
|
810c38ceae479db5dacdc29019fba4e96bcd55ec
| 313,028 |
def dcf_to_swap(dcf):
"""
Helper function transforms sorted discount factors to swap rates.
:param dcf: discount factors
:return: par swap rates
"""
num_dcf = len(dcf)
swap_rates = num_dcf * [0]
for index, dcf_ in enumerate(dcf):
if index == 0:
swap_rates[index] = (1 / dcf_) - 1
else:
swap_rates[index] = (1 - dcf_) / sum(dcf[0:index + 1])
return swap_rates
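# Worked example with made-up annual discount factors -> par swap rates.
_rates = dcf_to_swap([0.99, 0.97])
assert abs(_rates[0] - (1 / 0.99 - 1)) < 1e-12
assert abs(_rates[1] - (1 - 0.97) / (0.99 + 0.97)) < 1e-12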
|
9b03b2b6e0e08515b1e1b78cb1a72810a343926f
| 662,325 |
def parse_concepts(api_concepts):
"""Parse the API concepts data."""
return {concept['name']: round(100.0*concept['value'], 2)
for concept in api_concepts}
|
b56713f54dd276bbbf08422027a0c1126b1b5f35
| 586,563 |
def split_text_by_delims(text):
"""Splits the string by special characters specified.
Filters out empty strings (as a result of the splitting) before
returning.
"""
for ch in [":", ".", "/", "'", "\"",
"(", ")", "\\", "[", "]", ",", "\t", "\n", "*", "-"]:
text = text.replace(ch, " ")
return text.split(" ")
|
da1cf1772184c84fd4b6f06500f6701c25686b86
| 78,161 |
def to_odd(n):
"""
Converts a number to an odd number by getting the nearest odd number (greater than or equal to the input)
:param n: the number
    :return: an odd number
"""
return n if n % 2 == 1 else n + 1
|
c46020d8c10c19136491badd005059444da221b1
| 211,684 |
def zero_remove(line):
"""
This function just returns a sublist of the passed parameter after removing all zeros
It takes O(n) time --> linear runtime complexity
:param line:
:return non_zero:
"""
non_zero = []
for num in line:
if num != 0:
non_zero.append(num)
return non_zero
|
574e347e5e7656514bcd185dcd72851f406aad1c
| 427,711 |
def mk_inclusion_filter(include=(), key=None):
"""
Creates a function to perform inclusion filtering (i.e. "filter-in if x is in include list")
:param include: a list-like of elements for an inclusion filter
:param key: None (default), a callable, or a hashable. If
* None, the filter will be determined by: x in include
* callable, the filter will be determined by: key(x) in include
* hashable, the filter will be determined by: x[key] in include
:return: a filter function (a function that returns True or False)
>>> filt = mk_inclusion_filter(include=[2, 4])
>>> filter(filt, [1, 2, 3, 4, 2, 3, 4, 3, 4])
[2, 4, 2, 4, 4]
>>> filt = mk_inclusion_filter(include=[2, 4], key='val')
>>> filter(filt, [{'name': 'four', 'val': 4}, {'name': 'three', 'val': 3}, {'name': 'two', 'val': 2}])
[{'name': 'four', 'val': 4}, {'name': 'two', 'val': 2}]
>>> filt = mk_inclusion_filter(include=[2, 4], key=2)
>>> filter(filt, [(1, 2, 3), (1, 2, 4), (2, 7, 4), (1, 2, 7)])
[(1, 2, 4), (2, 7, 4)]
>>> filt = mk_inclusion_filter(include=[2, 4], key=lambda x: x[0] * x[1])
>>> filter(filt, [(1, 2, 'is 2'), (2, 2, 'is 4'), (2, 3, 'is 6'), (1, 4, 'is 4')])
[(1, 2, 'is 2'), (2, 2, 'is 4'), (1, 4, 'is 4')]
"""
include = set(include)
if key is None:
def filter_func(x):
return x in include
else:
if callable(key):
def filter_func(x):
return key(x) in include
else:
def filter_func(x):
return x[key] in include
return filter_func
|
4ba96f257cb2ba267b551f00bc573bee0969a839
| 675,089 |
def fix_output_name(name: str):
"""Removes the "Identity:0" of a tensor's name if it exists"""
return name.replace("/Identity:0", "", 1)
|
daca1fe0853ee4372cddcc3c597c38159e1a4334
| 420,488 |
def line_func(p1: list, p2: list):
"""Take two x,y points. Return a linear function through them both.
:param p1: list of [x1, y1] ordered pair (point)
:param p2: list of [x2, y2]
:return: (linear) function in 1 variable, which passes through both points.
"""
assert len(p1) == len(p2) == 2
x1, y1, = p1
x2, y2 = p2
m = (y1 - y2) / (x1 - x2) # let it raise error if x1 == x2
return lambda x: m * (x - x1) + y1
|
8f86675d409e14cbd351185e54402e05f7b26792
| 595,186 |
from typing import Mapping
import json
def load_categories(filename: str) -> Mapping[int, str]:
"""
Load categories from specified file.
:param filename: path to category filename
:return: a dictionary of flowers id and name
"""
with open(filename, 'r') as f:
cat_to_name = json.load(f)
return cat_to_name
|
5abbc2cdb3218d7a59e30a536175a45d2e69f9e6
| 122,821 |
from typing import Any
def _get_log_name(var: Any) -> str:
"""Return var.__name__ if available, 'this object' otherwise."""
try:
return str(var.__name__)
except AttributeError:
return 'this object'
|
d66a5f9dfeedc77f5e6bef2630379124f69e7676
| 585,572 |
def token_seems_valid(token: str) -> bool:
"""check validity of an api token based on its characters and length
Args:
token (str): sure petcare api token
Returns:
bool: True if ``token`` seems valid
"""
return (
(token is not None) and token.isascii() and token.isprintable() and (320 < len(token) < 448)
)
|
82dfb01a5f06bbf1b3814f317b3df19c4badf1b5
| 262,001 |
def flatten(l: list):
"""
Accepts a list containing one or more nested lists and returns a flattened copy.
Parameters
----------
l: list
A list containing one or more lists. Can contain zero lists but there's no point
Returns
-------
list
A flattened version of the original list, no matter how deep. String elements
are preserved as to not spread them out into individual characters.
"""
new_list = []
for i in l:
if isinstance(i, list):
new_list.extend(flatten(i))
else:
new_list.append(i)
return new_list
|
31393053cc6e4c7fe8191cc1fa2890faa8ec3277
| 640,889 |
def map_to(num, a, b):
""" Map linearly num on the [-1, 1] range to the [a, b] range"""
return ((num + 1) / 2) * (b - a) + a
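# Quick illustrative check: -1 maps to a, +1 maps to b, 0 maps to the midpoint.
assert map_to(-1, 0, 10) == 0 and map_to(1, 0, 10) == 10 and map_to(0, 0, 10) == 5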
|
fa4bd607e0a2d511afc35db1e123c58edbb8b254
| 202,524 |
import datetime
from typing import Optional
def json_serialize_datetime(dt : datetime.datetime) -> Optional[str]:
"""
Serialize a datetime.datetime object into JSON (ISO format string).
"""
if isinstance(dt, datetime.datetime):
return dt.isoformat() + 'Z'
return None
|
a818d0544294c2adfd5c640e2ac03c33fab2f6e9
| 385,969 |
def solar_elevation_angle(solar_zenith_angle):
"""Returns Solar Angle in Degrees, with Solar Zenith Angle, solar_zenith_angle."""
solar_elevation_angle = 90 - solar_zenith_angle
return solar_elevation_angle
|
f896c5d0608171f3e5bd37cede1965fe57846d07
| 704,139 |
def get_offer_additional_assets(html_parser):
"""
This method returns information about the apartment's additional assets.
:param html_parser: a BeautifulSoup object
:rtype: list(string)
:return: A list containing the additional assets
"""
additional_group_assets = html_parser.findAll(class_="dotted-list")
assets = []
if additional_group_assets:
assets = [
asset.strip()
for group in additional_group_assets
for asset in group.text.split('\n')
if asset
]
return assets
|
73d9147672e93befb194f97ec625ad1c7d1b684d
| 315,015 |
def amdahls(x, p):
"""
    Computes Amdahl's Law speed-up
:param x: is the speedup of the part of the task that benefits from improved system resources
:param p: is the proportion of execution time that the part benefiting from improved resources originally occupied
:return: the computed speed-up
"""
return 1. / (1. - p + p/x)
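# Worked example: speeding up 90% of a task by 2x gives roughly a 1.82x overall speed-up.
assert abs(amdahls(2, 0.9) - 1. / (1. - 0.9 + 0.9 / 2)) < 1e-12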
|
9870a3738a8d7364b5ccd58ae87784bbcf64ab43
| 391,697 |
def build_allele_freq_map(allele_seq, freq_seq):
"""
Construct a <allele, freq> map, sorted by <freq> ascendingly.
0 in `allele_seq`, if exists, will be removed.
Returned data structure is a list of tuples, e.g.
    build_allele_freq_map(['A','G'], [0.2,0.8]) => [('A', 0.2), ('G', 0.8)]
"""
if len(allele_seq) == 0 or len(freq_seq) == 0:
# if `allele_seq` or `freq_seq` is empty list
return []
if '0' in allele_seq:
zero_index = allele_seq.index('0')
allele_seq.pop(zero_index)
freq_seq.pop(zero_index)
af_map = list(zip(allele_seq, freq_seq))
# import operator
# afMap = sorted(afMap, key=operator.itemgetter(1))
af_map = sorted(af_map, key=lambda x: x[1])
return af_map
|
943f2d1f4373737efc81c95aee5526ad9b20cb8a
| 142,579 |
def get_change_set_status(cf, cfn_stack_name, cfn_change_set_name):
"""Returns change set status
:param cf: cfn client
:param cfn_stack_name: stack name
    :param cfn_change_set_name: change set name
:return: status
"""
details = cf.describe_change_set(ChangeSetName=cfn_change_set_name, StackName=cfn_stack_name)
return details['Status']
|
fd6d79a6cffe62be713c5ed4aca20da8a0fcc7c9
| 567,003 |
def uid_to_string(uid):
"""
Convert a UID to a printable string
:param uid: an uid value of a card
:return: string version of uid param
"""
string = ""
for i in uid:
string = "%02X" % i + string
return string
|
20a05dbed7dae9b5c54e141f0acd0f61742fdd11
| 514,786 |
def rgb_to_hex(rgb):
"""Convert RGB color to hex color."
:param rgb: RGB color
:type rgb: tuple
:return: Hex color
:rtype: str
"""
return "#{0:02x}{1:02x}{2:02x}".format(*rgb)
|
e2e82cbef4aaf657a313bd2aa0e897e3a37ddeba
| 250,106 |
def _parse_html_colour(colour):
"""
Return a thruple of the red, green and blue elements of an HTML colour string
"""
colour = colour.lstrip('#')
if len(colour) == 3:
colour = "{}{}{}".format(
colour[0] * 2,
colour[1] * 2,
colour[2] * 2,
)
return (
int(colour[0:2], base=16),
int(colour[2:4], base=16),
int(colour[4:6], base=16),
)
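# Illustrative example: short and long hex forms parse to the same RGB triple.
assert _parse_html_colour("#fff") == _parse_html_colour("#ffffff") == (255, 255, 255)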
|
ca95c17da4213f1986dbce3493ad4ad371a577a8
| 455,468 |
def valid_integer(to_check):
"""
Check if the value in to_check is actually an integer. to_check can be a
string or already an integer.
returns True or False
"""
try:
int(to_check)
return True
    except (ValueError, TypeError):
return False
|
6df6ecf39c115d9cac4a84d30e05e6d58b8c9e9b
| 578,329 |
def build_md_table(repos):
"""
Build a reference definition list in markdown syntax, such as:
| Name | Desc |
| :-- | :-- |
| [k3color][] | create colored text on terminal |
| [k3common][] | dependency manager |
"""
res = [
'| Name | Desc |',
'| :-- | :-- |',
]
for repo in repos:
res.append('| [{name}][] | {description} |'.format(**repo))
return '\n'.join(res)
|
e8069482d5f4240a96dece02c9b153fa5e4571d8
| 394,308 |
def friendly_number(number):
"""
Produce a human-readable value for file size.
:param number: bytes.
:return: human-readable string.
"""
template = '%.1f%sB'
powers = ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
base = 1000
number = float(number)
for power in powers[:-1]:
if number < base:
return template % (number, power)
number /= base
return template % (number, powers[-1])
|
55c37c59aed99f661d4dc110fb60e7f6cf2e0082
| 254,795 |
def calculate_bmi(height, weight):
"""calculate the BMI given the height in inches and the weight in pounds"""
m_weight = 703 * weight
m_height = height**2
bmi = m_weight / m_height
return bmi
|
8d8b7392970d2ef3f675e0869ecc78a39c1a433a
| 390,505 |
import hashlib
def md5file(path):
"""Computes the MD5 hash."""
m = hashlib.md5()
with open(path, 'rb') as f:
m.update(f.read())
return m.hexdigest()
|
f502b3a304a8a7d5bca2b760849d8bf82616573f
| 254,263 |
def scheme_true(val):
"""All values in Scheme are true except False."""
return val is not False
|
550342a39a3d1e06b9d8632f0517c2e30b5f9bb0
| 570,859 |
def hex(space, w_val):
"""Return the hexadecimal representation of an integer."""
return space.hex(w_val)
|
b67f6598c619f0e3023d0e4d75919f1f4f5a7d84
| 496,487 |
import torch
def _get_anchor_negative_triplet_mask(labels):
"""Return a 2D mask where mask[a, n] is True iff a and n have distinct labels.
Args:
labels: torch.Tensor with shape [batch_size]
Returns:
mask: Variable with torch.ByteTensor with shape [batch_size, batch_size]
"""
# Check if labels[i] != labels[k]
# Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
labels_equal = torch.eq(torch.unsqueeze(labels, 0), torch.unsqueeze(labels, 1))
mask = ~labels_equal
return mask
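# Quick illustrative check: entries are True exactly where the two labels differ.
_labels = torch.tensor([0, 0, 1])
assert _get_anchor_negative_triplet_mask(_labels).tolist() == [
    [False, False, True], [False, False, True], [True, True, False]]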
|
a00bd917946886f7d8586bf6fe92f31b0c8b1891
| 665,662 |
def get_countersigners(advice_to_countersign):
"""Get a set of user ids representing the users that have already
countersigned the advice supplied by `advice_to_countersign`.
"""
countersigned_by = set()
for user_advice in advice_to_countersign.values():
for advice in user_advice:
if advice["countersigned_by"]:
countersigned_by.add(advice["countersigned_by"]["id"])
return countersigned_by
|
f67a6f42519790a7042adc536c1a13772679ce65
| 595,106 |
def find_different_char_index(start_string, edit_string):
"""Compares standard and modified string and identifies the index of the modified character
ex: find_char_difference_index("CCAGG","CFAGG") = 1
:param start_string: starting string
:param edit_string: string with a single character edit from start_string
:return: index of string difference
"""
    assert len(start_string) == len(edit_string), "strings must be the same length"
pos = [i for i in range(len(start_string)) if start_string[i] != edit_string[i]]
assert len(pos) == 1, "Only one character difference allowed. " \
"start_string={}, edit_string={}".format(start_string, edit_string)
return pos[0]
|
0fc22631419416144a1cd9a451e573453ed96582
| 137,228 |
def disable() -> dict:
"""Disables console domain, prevents further console messages from being reported to the client."""
return {"method": "Console.disable", "params": {}}
|
05a4c8fc3286bf4c6fed259b8e35d430d5e2d1ac
| 283,221 |
def any_key_from_list_in_dict(test_list, test_dict):
"""
Takes a list and a dictionary and checks if any of the keys from the list are
present in the dict. Raises a KeyError with the key if found, returns False
otherwise.
"""
for key in test_list:
if key in test_dict:
raise KeyError(key)
return False
|
4c9f9a52e4dc14e2bc77ad8354e93ca514a02d96
| 156,404 |
def get_iscam_mws(intensities, mw_intensity_line_pars=None):
"""
Calculate the molecular weights of the intensities with the line parameters
`mw_intensity_line_pars`
Parameters
----------
intensities : np.ndarray
        intensities of an iSCAM measurement
mw_intensity_line_pars : array like
[slope, intercept] of the mw to intensities linear function.
"""
mw_intensity_line_pars = [1, 0] if mw_intensity_line_pars is None \
else mw_intensity_line_pars
slope, intercept = mw_intensity_line_pars
mws = (intensities - intercept) / slope
return mws
|
9fdc17f17fe7341c9567e8626f025bac1c42d625
| 689,494 |
def filter_bad_boxes(boxes_coor):
""" Filter the boxes with wrong coordinates """
    filtered_boxes = list()
    for box_coor in boxes_coor:
        [xmin, ymin, xmax, ymax] = box_coor
        if xmin < xmax and ymin < ymax:
            filtered_boxes.append(box_coor)
    return filtered_boxes
|
33fe05771cc305e1f620917a73b807869bdc237c
| 561,152 |
def is_magic_packet(data):
"""
Checks if a packet is a magic packet, returns
True or False.
Args:
data (bytes): the payload from a packet
"""
# convert data to lowercase hex string
data = data.hex().lower()
# magic packets begin with 'f'*12 (called a synchronization stream)
sync = data[:12]
if sync == 'f'*12:
# the mac address follows (next 12 chars)
mac = data[12:24]
# and the mac address is repeated 16 times
magic = sync + mac*16
if len(data) == len(magic):
return magic == data
else:
# allow for a SecureON password, which adds another
# 12-character hex string to the end of the packet
return magic == data[:-12]
else:
return False
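# Usage sketch with a made-up MAC address: a hand-built magic packet is recognised,
# while an all-zero payload of the same length is not.
_mac = bytes.fromhex("aabbccddeeff")
assert is_magic_packet(b"\xff" * 6 + _mac * 16)
assert not is_magic_packet(b"\x00" * 102)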
|
92876604102c649ed09cd9692dd3b057cf6c53c8
| 255,387 |
def vars_to_dic(vars):
""" Converts list of tf variables to a dictionary
where the var names are the keys
Args:
vars: list of variables
Returns:
dic
"""
return dict((var.name.split(":")[0], i)
for i, var in enumerate(vars))
|
0b74ac80c7d1b0018a7e5f1fe21596ef19fae2be
| 632,917 |
def get_window(hr, ft):
"""Estimate duration of cardiac cycle with heart rate and frame time.
(seconds/beat) / (seconds/frame) = frames/beat
"""
window = int(((60 / hr) / (ft / 1000)))
return window
|
922a24724f905e80fe8146a8aef3b22a207316c4
| 284,887 |
def get_last_revision_in_list(revision_list):
"""Gets the last revision in list."""
return revision_list[-1]
|
67b35032f1b54296a1e336b3b676ddb0e60592e7
| 522,811 |
def to_bio2(tags):
"""
Convert the original tag sequence to BIO2 format. If the input is already in BIO2 format,
the original input is returned.
Args:
tags: a list of tags in either BIO or BIO2 format
Returns:
new_tags: a list of tags in BIO2 format
"""
new_tags = []
for i, tag in enumerate(tags):
if tag == 'O':
new_tags.append(tag)
elif tag[0] == 'I':
if i == 0 or tags[i-1] == 'O' or tags[i-1][1:] != tag[1:]:
new_tags.append('B' + tag[1:])
else:
new_tags.append(tag)
else:
new_tags.append(tag)
return new_tags
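# Illustrative example: a leading or post-O "I-" tag is rewritten to "B-", the rest is untouched.
assert to_bio2(["I-PER", "I-PER", "O", "I-LOC"]) == ["B-PER", "I-PER", "O", "B-LOC"]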
|
c0430d7f04c9a7b3e7399c35c26c4cee2a430c76
| 635,633 |
def hey(sentence):
"""Return bob's answer on sentence."""
sentence = sentence.strip()
if sentence.isupper():
return "Whoa, chill out!"
elif not sentence:
return "Fine. Be that way!"
elif sentence.endswith("?"):
return "Sure."
else:
return "Whatever."
|
ac6f9897bb23c8571c0069e7386f560a1724b437
| 173,468 |
def cool_number(value, num_decimals=6):
"""
Django template filter to convert regular numbers to a
cool format (ie: 2K, 434.4K, 33M...)
:param value: number
:param num_decimals: Number of decimal digits
"""
int_value = int(value)
formatted_number = '{{:.{}f}}'.format(num_decimals)
if int_value < 1000:
return str(int_value)
elif int_value < 1000000:
return formatted_number.format(int_value/1000).split(".")[0] + 'K'
elif int_value < 1000000000:
return formatted_number.format(int_value/1000000).split(".")[0] + 'M'
else:
return formatted_number.format(int_value/1000000000).split(".")[0] + 'B'
|
d17c3fe4c8a9b7f47ee80ae4e54da3b161713780
| 494,537 |
import re
def parse_keyslots_luks2(luks_dump):
"""Lists the used keyslots in a LUKS2 device. These may or may not be
bound to clevis.
Return: <used keyslots> <error>"""
if not luks_dump:
return None, {"msg": "Empty dump provided"}
    # This is the pattern we are looking for, e.g.:
    #   0: luks2
    #   1: luks2
pattern = r"^\s+(\d+): luks2$"
match = re.findall(pattern, luks_dump, re.MULTILINE | re.DOTALL)
if not match:
errmsg = "parse_keyslots_luks2: no used key slots"
return None, {"msg": errmsg}
return match, None
|
ae62f3b736723aff89aa4793a1f2c04a955611bf
| 52,229 |