content (string, 39 to 9.28k chars) | sha1 (string, 40 chars) | id (int64, 8 to 710k)
---|---|---|
def get_vtkTableHeaders(vtk_table):
""" Returns the vtkTable headers (column names) as a Python list """
headers = []
for icol in range( vtk_table.GetNumberOfColumns() ):
headers.append( vtk_table.GetColumn(icol).GetName() )
return headers | fff7eb3728791e1a2064595dfdfa7490f480ba08 | 579,260 |
import re
def get_device_number(device):
"""Extract device number.
Ex: "D1000" → "1000"
"X0x1A" → "0x1A
"""
device_num = re.search(r"\d.*", device)
if device_num is None:
raise ValueError("Invalid device number, {}".format(device))
else:
device_num_str = device_num.group(0)
return device_num_str | 4a62aae822ed931a12574c31feafa3108bf783e3 | 80,792 |
def create_headers(bearer_token):
"""Returns authorization header
Args:
bearer_token: Bearer token
"""
headers = {"Authorization": "Bearer {}".format(bearer_token)}
return headers | ba99315ac65ff16996dbaff72c560a1905612e73 | 564,846 |
from typing import Any
def generate_nonetype(value: Any) -> Any:
"""
Filter for converting the string "None" to the None literal in Jinja templating.
Note that the string is converted to title-case before being tested, so any variation of casing works too, such as
"none," "NONE," "NoNe," etc.
Args:
value: The value to be adjusted.
Returns:
A literal None if "None" was passed in, otherwise returns the value that was passed in.
"""
if isinstance(value, str) and value.title() == "None":
result = None
else:
result = value
return result | 25d0a090abc0bd4340cba6254166c2e8721f8645 | 375,641 |
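A quick usage sketch for the Jinja filter above, assuming jinja2 is available; the filter name "nonetype" is an illustrative choice, not part of the original snippet:

from jinja2 import Environment

env = Environment()
env.filters["nonetype"] = generate_nonetype
print(env.from_string("{{ ('NoNe' | nonetype) is none }}").render())  # True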
def _parse_repository_spec_list(repository_specs):
"""
Given a list containing either strings or repository maps (see above), returns a list containing repository maps.
"""
repos = []
for repo in repository_specs:
        if isinstance(repo, str):
repos.append({ "repo_url": repo })
else:
repos.append(repo)
return repos | 5b52876c080bc65245881ce07d8e616ecf0a4319 | 140,674 |
def set_nested_dict(dd, flatkey, val, sep=':', prefix='', verbose=0):
"""
Set a value inside nested dicts using a key string.
Example:
dd = {'key1':{'key2':{'key3':9}}}
set_nested_dict(dd, 'P:key1:key2:key3', 4, prefix='P')
will set dd in place as:
{'key1': {'key2': {'key3': 4}}}
"""
if flatkey.startswith(prefix+sep):
flatkey=flatkey[len(prefix+sep):]
    keys = flatkey.split(sep)
d = dd
# Go through nested dicts
for k in keys[0:-1]:
d = d[k]
final_key = keys[-1]
# Set
if final_key in d:
d[final_key] = val
return True
else:
if(verbose>0):
            print(f'Error: flat key {flatkey}: final key {final_key} does not exist')
return False | de15cdb550d2bd3e0841b1f7f15ae5c94109b8c4 | 184,974 |
def view_kwargs(check_url):
"""Return a dict of valid kwargs to pass to SecurityView(**view_kwargs)."""
return {
"allow_all": False,
"allowed_referrers": ["lms.hypothes.is"],
"authentication_required": True,
"check_url": check_url,
} | 94fccd0fbc4897e7d4b5daed842211a7c2a7c4ca | 623,033 |
def list2set(seq):
"""
Return a new list without duplicates.
Preserves the order, unlike set(seq)
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)] | 6fcd073b366fb5b58a666ed994b3d5163a36916f | 589,606 |
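For illustration, order-preserving de-duplication:

assert list2set([3, 1, 3, 2, 1]) == [3, 1, 2]
assert list2set("abca") == ["a", "b", "c"]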
import re
def is_cif(datastr):
"""Detect if `datastr` is a CIF file."""
assert isinstance(datastr, str), \
f'`datastr` is not str: {type(datastr)} instead'
cif_loop = re.compile('[lL][oO][oO][pP]_')
return bool(cif_loop.search(datastr)) | abcde9c7f36f30d0d7bf8d2f85a83a0179eecb6d | 404,606 |
def is_in_list(item, list_, kind):
"""Check whether an item is in a list; kind is just a string."""
if item not in list_:
raise KeyError(f"Specify {kind} from {list_}: got {item}")
return True | 25887a0b22061149777d2f6b5a2f792c6bcfc1a7 | 624,527 |
def pad(size, padding):
"""Apply padding to width and height.
:param size: two-tuple of width and height
:param padding: padding to apply to width and height
:returns: two-tuple of width and height with padding applied
"""
width = size[0] + padding.left + padding.right
height = size[1] + padding.top + padding.bottom
return (width, height) | 9e1021ec6dac2598e58779db440eae853418cbc4 | 679,502 |
def odds(mfrlist):
"""Return a new list with all the odd numbers in the list"""
return mfrlist.filter(lambda x: x % 2 == 1) | 822ead3e251afe6420d8ee34a3a9723b40f14d9c | 459,592 |
def _sentence_contains_after(sentence, index, phrase):
"""Returns sentence contains phrase after given index."""
for i in range(len(phrase)):
if len(sentence) <= index + i or phrase[i].lower() not in \
{sentence[index + i][field].lower() for field in ('text', 'lemma')}:
return False
return True | f8b412af4d074c19ea4144b18bdd24c461978df3 | 153,325 |
def levenshtein_distance(s1, s2):
"""
    Computes the Levenshtein distance between two strings
    :param s1: String 1
    :param s2: String 2
    :return: Levenshtein distance between s1 and s2
"""
if len(s1) > len(s2):
s1, s2 = s2, s1
distances = range(len(s1) + 1)
for index2, char2 in enumerate(s2):
new_distances = [index2 + 1]
for index1, char1 in enumerate(s1):
if char1 == char2:
new_distances.append(distances[index1])
else:
new_distances.append(1 + min((distances[index1],
distances[index1 + 1],
new_distances[-1])))
distances = new_distances
return distances[-1] | ae264de708a808e99adff56a847c394db40a3387 | 607,005 |
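For illustration (the classic example needs three edits to turn "kitten" into "sitting"):

assert levenshtein_distance("kitten", "sitting") == 3
assert levenshtein_distance("", "abc") == 3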
def getImageCharacteristics(img):
"""Gets the height and width characteristics of an image
:param img:
:return:
"""
img_char = img.shape
return img_char | 2466d177c142b2ea8b1ef071f3aab010f5c7b9a7 | 213,077 |
def can_double_bet(player_bets, player_cash):
"""
    Return True if the player has at least as much money as the first bet,
    otherwise return False.
    :param player_bets: The player's bets.
    :param player_cash: The player's available cash.
    :return: True or False
"""
if player_cash < sum(player_bets[0]):
return False
else:
return True | f4e7890b3d180d0636411cfd2d79f916f0b43733 | 536,223 |
import hashlib
import logging
def uniquify_server_name(server_name: str, experiment_name: str) -> str:
"""Create a unique name for the server.
Args:
server_name: name of server. There could be multiple of these per single
training job.
        experiment_name: name of the experiment. This is shared across all machines
for a given training job. Often this is the log directory.
Returns:
name: The name of the server.
"""
hmod = hashlib.sha256()
hmod.update(experiment_name.encode("utf-8"))
hval = hmod.hexdigest()[0:20]
logging.info(f"Hashing experiment name [{experiment_name}] => {str(hval)}") #. pylint: disable=logging-fstring-interpolation
return str(hval) + "__" + server_name | 777b913c0501ba7a9b9ea8b4c3de7ddf49747f06 | 350,649 |
def linear_function(x, a, b):
""" Equation for a line.
Parameters:
        x: array
            The independent variable where the data is measured.
        a: float
            The linear coefficient (intercept).
        b: float
            The angular coefficient (slope).
Returns:
f: array
The linear function.
"""
f = a+b*x
return f | 542ff23efb35e273043d64f0f13027792d6394a5 | 691,993 |
def prepare_input_parameters(caller, default_parameters, custom_parameters=None):
"""Prepares an input parameter dictionary for operator benchmarks. Performs the union of default_parameters and
custom_parameters i.e., takes the parameters provided, if any, in custom_parameters and replaces them in the
default_parameters.
Throws ValueError if custom_parameters contains a key not found in default_parameters.
:param default_parameters: Dictionary of default parameters - name/value
:param custom_parameters: (Optional) Dictionary of custom parameters - name/value. That will replace the corresponding
parameter name/value in default_parameters.
:param caller: str, Name of the caller (Operator Name) to be used in error message.
:return: Dictionary of parameters which is a union of default_parameters and custom_parameters.
"""
if custom_parameters is None:
return default_parameters
for key, value in custom_parameters.items():
if key not in default_parameters.keys():
raise ValueError("Invalid parameter provided for benchmarking operator - '{}'. "
"Given - '{}'. Supported - '{}'".format(caller, key, default_parameters.keys()))
default_parameters[key] = value
return default_parameters | 738fdd196d9dfaecd286f69f9620e4c0a2385303 | 171,466 |
def yesno(boolean, yes, no):
"""ternary in python: boolean? yes:no"""
return (no, yes)[boolean] | 3db66f6aa3e3d243f6763ac1746e4e8f8aa4d97d | 495,525 |
def evaluate_request(request):
""" Evaluate a request and returns a tuple of application iterator,
status code and list of headers. This method is meant for testing
purposes.
"""
output = []
headers_set = []
def start_response(status, headers, exc_info=None):
headers_set[:] = [status, headers]
return output.append
result = request(request.environ, start_response)
# any output via (WSGI-deprecated) write-callable?
if output:
result = output
return (result, headers_set[0], headers_set[1]) | 7bd4c07cf7606b7a02e19feba26e32ce842669f1 | 381,570 |
def get_logging_params(verbose):
"""
Adjusts the logging verbosity based on the `verbose` parameter
0 - No logging
1 - Strategy Level logs
2 - Transaction Level logs
3 - Periodic Logs
"""
verbosity_args = dict(
strategy_logging=False,
transaction_logging=False,
periodic_logging=False,
)
if verbose > 0:
verbosity_args["strategy_logging"] = True
if verbose > 1:
verbosity_args["transaction_logging"] = True
if verbose > 2:
verbosity_args["periodic_logging"] = True
return verbosity_args | b71478a277509efa7c5be5e6c83267812b8186d8 | 619,544 |
from typing import Iterable
def is_iter(v):
"""Returns True only for non-string iterables.
>>> is_iter('abc')
False
>>> is_iter({'a': 1, 'b': 2, 'c': 3})
True
>>> is_iter(map(lambda x: x, 'abc'))
True
"""
return not isinstance(v, str) and isinstance(v, Iterable) | ede626edd664a14d575fd6b3e5600062799c2623 | 474,684 |
import torch
def calculate_gradient_penalty(model,step,alpha, real_images, fake_images, lambd ,device):
"""Calculates the gradient penalty loss for WGAN GP"""
eps = torch.rand(real_images.size(0), 1, 1, 1, device=device)
eps = eps.expand_as(real_images)
x_hat = eps * real_images + (1 - eps) * fake_images.detach()
x_hat.requires_grad = True
px_hat = model(x_hat,step=step,alpha=alpha)
grad = torch.autograd.grad(outputs = px_hat.sum(),
inputs = x_hat,
create_graph=True
)[0]
grad_norm = grad.view(real_images.size(0), -1).norm(2, dim=1)
gradient_penalty = lambd * ((grad_norm - 1)**2).mean()
return gradient_penalty | 3e95eb450e3eb1942100e25e6d004170d2b3250e | 52,828 |
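A minimal usage sketch for the penalty above; ToyCritic, the tensor shapes, and lambd=10.0 are illustrative assumptions, not part of the original snippet:

import torch
import torch.nn as nn

class ToyCritic(nn.Module):
    """Toy critic that accepts the (x, step, alpha) call signature used above."""
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 1, kernel_size=3, padding=1)

    def forward(self, x, step=0, alpha=1.0):
        return self.conv(x).mean(dim=(1, 2, 3))

device = torch.device("cpu")
critic = ToyCritic().to(device)
real = torch.rand(4, 3, 8, 8, device=device)
fake = torch.rand(4, 3, 8, 8, device=device)

gp = calculate_gradient_penalty(critic, step=0, alpha=1.0,
                                real_images=real, fake_images=fake,
                                lambd=10.0, device=device)
# Typical WGAN-GP critic loss: maximize D(real) - D(fake), penalized by gp.
loss = critic(fake).mean() - critic(real).mean() + gp
loss.backward()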
def transition_func(settings):
"""A no-op transition function."""
return settings | 38ba47b2a8d911b5faccf7289e6c9e8383f26642 | 505,558 |
def kvhead(store, n=1):
"""Get the first item of a kv store, or a list of the first n items"""
if n == 1:
for k in store:
return k, store[k]
else:
return [(k, store[k]) for i, k in enumerate(store) if i < n] | 61b7b2f8915034d1fb384c26e4429c05e75a6880 | 210,406 |
def flatten_image(image):
"""
    Flatten the input 2D image into a 1D image while preserving the channels of the input image,
    with shape == [height x width, channels]
:param image: Input 2D image (either multi-channel color image or greyscale image)
:type image: numpy.ndarray
:return: The flatten 1D image. shape==(height x width, channels)
    :rtype: numpy.ndarray
"""
assert len(image.shape) >= 2, "The input image must be a 2 Dimensional image"
if len(image.shape) == 3:
image = image.reshape((-1, image.shape[2]))
elif len(image.shape) == 2:
image = image.reshape((-1, 1))
return image | 6733b30adc9131fea237ab11afd3853bec0c2c44 | 691,043 |
def node2str(node, strip=True):
"""Return lilv.Node to string.
By default, strips whitespace surrounding string value.
If passed node is None, return None.
"""
if node is not None:
node = str(node)
if strip:
node = node.strip()
return node | 5f5c4a4ede5791ad88ed7c21024da9cad4abac86 | 346,758 |
def fullname(obj):
""" Resolve the full module-qualified name of an object. Typically used for logger naming. """
return obj.__module__ + "." + obj.__class__.__name__ | 30d69b5820596df9f30d1a9eeaf67acbc79b1d49 | 256,242 |
def from_keywords_format(keywords: list) -> dict:
"""Convert a list of keywords in a specific format into a dictionary.
Args:
keywords: A list in the format specified in wrapper/output_format.py
Returns:
The keywords as a dictionary with the keyword as key and its counter as
value.
"""
keywords_dict = {}
for keyword in keywords:
keywords_dict[keyword.get("text", "Unknown")] = keyword.get("value", 0)
return keywords_dict | 07ea893325a637c519505b725a3a132f32c8a82d | 539,904 |
def handle_500(error):
"""
Flask error handler for internal server error
"""
return str(error), 500 | 279be25674c8f9d6c3fea1633e93fdc8f78c04e3 | 139,951 |
def get_voxel_resolution(pc, patch_size):
"""
This function takes in a pointcloud and returns the resolution
of a voxel given that there will be a fixed number of voxels.
For example if patch_size is 40, then we are determining the
    side length of a single voxel in meters. So voxel_resolution
    may end up being something like 0.01 for a 1 cm^3 voxel size.
:type pc: numpy.ndarray
:param pc: nx3 numpy array representing a pointcloud
:type patch_size: int
:param patch_size: int, how many voxels are there going to be.
:rtype voxel_resolution: float
"""
if not pc.shape[1] == 3:
raise Exception("Invalid pointcloud size, should be nx3, but is {}".format(pc.shape))
min_x = pc[:, 0].min()
min_y = pc[:, 1].min()
min_z = pc[:, 2].min()
max_x = pc[:, 0].max()
max_y = pc[:, 1].max()
max_z = pc[:, 2].max()
max_dim = max((max_x - min_x),
(max_y - min_y),
(max_z - min_z))
voxel_resolution = (1.0 * max_dim) / patch_size
return voxel_resolution | e341bd6d52de9ae33288dec34d9475d2802990c2 | 215,230 |
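For illustration, a toy two-point cloud whose largest extent is 0.4 m, split into 40 voxels per side (numpy assumed available):

import numpy as np

pc = np.array([[0.0, 0.0, 0.0],
               [0.4, 0.1, 0.2]])
print(get_voxel_resolution(pc, 40))  # approximately 0.01 (0.4 m extent / 40 voxels)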
import math
def EulerToQuaternion(roll_pitch_yaw):
"""Roll Pitch Yaw to Quaternion"""
roll, pitch, yaw = roll_pitch_yaw
sr = math.sin(roll/2.)
cr = math.cos(roll/2.)
sp = math.sin(pitch/2.)
cp = math.cos(pitch/2.)
sy = math.sin(yaw/2.)
cy = math.cos(yaw/2.)
qx = sr * cp * cy - cr * sp * sy
qy = cr * sp * cy + sr * cp * sy
qz = cr * cp * sy - sr * sp * cy
qw = cr * cp * cy + sr * sp * sy
return [qx, qy, qz, qw] | d6def3ebf31ddc09eb654e781bf6741aea1ef5db | 190,493 |
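For illustration, the identity rotation and a 180-degree yaw:

import math

print(EulerToQuaternion([0.0, 0.0, 0.0]))      # [0.0, 0.0, 0.0, 1.0]
print(EulerToQuaternion([0.0, 0.0, math.pi]))  # approximately [0, 0, 1, 0]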
def cls_sets(cls, wanted_cls, registered=True):
""" Return a list of all `wanted_cls` attributes in this
class, where `wanted_cls` is the desired attribute type.
"""
sets = []
for attr in dir(cls):
if attr.startswith('_'):
continue
val = getattr(cls, attr, None)
if not isinstance(val, wanted_cls):
continue
if (not registered) and getattr(val, '_registered', False):
continue
sets.append(val)
return sets | 12b0a7e2c2855dfd44448a7d013d4fbab164c62c | 413,886 |
import re
def makeFeaClassName(name, existingClassNames=None):
"""Make a glyph class name which is legal to use in feature text.
Ensures the name only includes characters in "A-Za-z0-9._", and
isn't already defined.
"""
name = re.sub(r"[^A-Za-z0-9._]", r"", name)
if existingClassNames is None:
return name
i = 1
origName = name
while name in existingClassNames:
name = "%s_%d" % (origName, i)
i += 1
return name | 05fe6182961dff0ff4065b28a161f280422d67cc | 601,658 |
def merge_shapes(shape, merged_axis: int, target_axis: int) -> list:
"""
Merge two axes of a shape into one, removing `merged_axis` and multiplying the size of the `target_axis` by the size
of the `merged_axis`.
:param shape: The full shape, tuple or tf.TensorShape.
:param merged_axis: The index of the axis to remove.
:param target_axis: The index of the axis to add to.
:return: A list representing the merged shape.
"""
shape = list(shape)
shape[target_axis] *= shape[merged_axis]
shape.pop(merged_axis)
return shape | d2ca4f91f7a99f9cef232d163c36ccac0e89fa18 | 30,537 |
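For illustration, folding the time axis of a (batch, time, features) shape into the batch axis:

assert merge_shapes((8, 10, 64), merged_axis=1, target_axis=0) == [80, 64]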
def is_should_create(exists, expected, create_anyway):
"""If we should create a resource even if it was supposed to exist.
:param exists: Whether we found the resource in target API.
:param expected: Whether we expected to find the resource in target API.
:param create_anyway: If we should create the resource, even though it
was supposed to exist and did not.
    :return: True if the resource should be created, False otherwise.
"""
return (not exists and not expected) or \
(not exists and expected and create_anyway) | 1e5db7f2691de95e1861b2fc77ba3803a48c93d6 | 522,481 |
def valid_eyr(eyr):
"""eyr (Expiration Year) - four digits; at least 2020 and at most 2030."""
if len(eyr) == 4 and int(eyr) >= 2020 and int(eyr) <= 2030:
return True
else:
return False | 7e63c4302123d03ee9d8bfd5e51703cd3c55ee96 | 126,362 |
import struct
def padandsplit(message):
"""
    Returns a two-dimensional array X[i][j] of 32-bit integers, where j ranges
    from 0 to 15.
    First pads the message by adding a byte 0x80 and then 0x00 bytes until the
    message length in bytes is congruent to 56 (mod 64). Then adds the little-endian
64-bit representation of the original length. Finally, splits the result
up into 64-byte blocks, which are further parsed as 32-bit integers.
"""
origlen = len(message)
padlength = 64 - ((origlen - 56) % 64) # minimum padding is 1!
message += b"\x80"
message += b"\x00" * (padlength - 1)
message += struct.pack("<Q", origlen * 8)
assert (len(message) % 64 == 0)
return [
[
struct.unpack("<L", message[i + j:i + j + 4])[0]
for j in range(0, 64, 4)
]
for i in range(0, len(message), 64)
] | ea06a3fc91e19ed0dbea6ddcc2ee6d554fb5a40f | 706,647 |
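For illustration, a 3-byte message pads out to a single 64-byte block of sixteen 32-bit words (the input must be bytes):

blocks = padandsplit(b"abc")
assert len(blocks) == 1 and len(blocks[0]) == 16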
def find_license(classifiers, license_name):
"""Find `license_name` in `classifiers`."""
for classifier in classifiers:
if classifier.startswith("License ::") and license_name in classifier:
return classifier
return None | 75fe549331da9619c2365672af20a221080abe48 | 443,315 |
import re
def GSURLRegexHelper(gsurl):
"""Helper to do regex matching on a Google Storage URL
Args:
gsurl: Google Storage URL to match.
Returns:
Regex Match Object with groups(board, type, & build_name) or None if there
was no match.
"""
return re.match(r'gs://.*/(trybot-)?(?P<board>[\w-]+)-(?P<type>\w+)/'
r'(?P<build_name>R\d+-[\d.ab-]+)', gsurl) | 2899ef13fb79878aca65560c7cacf9659e5235c4 | 296,756 |
import pickle
def load(file_name):
"""A basic wrapper around Pickle to deserialize a list of prediction and/or
an algorithm that were dumped on drive using :func:`dump()
<surprise.dump.dump>`.
Args:
file_name(str): The path of the file from which the algorithm is
to be loaded
Returns:
A tuple ``(predictions, algo)`` where ``predictions`` is a list of
:class:`Prediction
<surprise.prediction_algorithms.predictions.Prediction>` objects and
``algo`` is an :class:`Algorithm
<surprise.prediction_algorithms.algo_base.AlgoBase>` object. Depending
on what was dumped, some of these may be ``None``.
"""
    with open(file_name, 'rb') as dump_file:
        dump_obj = pickle.load(dump_file)
return dump_obj['predictions'], dump_obj['algo'] | d1fb2469a1a773b635b254af4f3ffc8dd04e34db | 324,504 |
from typing import Tuple
from typing import Optional
from typing import List
import string
import random
def random2Dcharacters(
shape: Tuple[int, int], samples: Optional[List[str]] = None
) -> List[List[str]]:
"""A random 2D characters array is generated.
Args:
shape: (m, n) shape of the characters array.
        samples: customized letters for sampling, e.g., ['a', 'b'].
"""
m, n = shape
letters = string.ascii_lowercase
if samples:
letters = samples
return [[random.choice(letters) for i in range(n)] for _ in range(m)] | 92cfdbf7318a42284edf30aff5f7aca2210324df | 346,106 |
from pathlib import Path
def __base_cond(path: Path):
"""
Base condition for all link operations.
:param path: the base file/dir path
:return: True if the base file/dir name passed the base condition check
"""
return path.name.lower() not in (
'.git', '.gitignore', '.gitkeep', '.directory', '.gitmodules',
'.github', '.travis.yml'
) | 01896dfdfacf90584d30504a78e7d613856c0642 | 290,312 |
def find_listing(bs):
"""Get articles soups
Parameters
----------
bs : bs4 object
Returns
----------
listing : bs4 object
"""
return bs.find_all("div", {"data-context": "listing"}) | fc035688785c317614fbb13fd6f188b676506aad | 526,983 |
def _BackslashEscape(s):
"""Double up backslashes.
Useful for strings about to be globbed and strings about to be IFS escaped.
"""
    # Similar to GlobEscape and splitter.Escape().
    return s.replace('\\', '\\\\') | 4c107203117d699c65fd00158913914ae6530b97 | 699,074 |
def ia(ta, r, a, b, c):
"""Chicago design storm equation - intensity after peak. Helper for i function.
Args:
ta: time after peak in minutes (measured from peak towards end)
r: time to peak ratio (peak time divided by total duration)
a: IDF A parameter - can be calculated from getABC
b: IDF B parameter - can be calculated from getABC
c: IDF C parameter - can be calculated from getABC
Returns:
Returns intensity in mm/hr.
"""
return a*((1-c)*ta/(1-r)+b)/((ta/(1-r))+b)**(c+1) | e522691484b81631901791ce2fc103f3f50fb435 | 374,885 |
def urlencode(s):
"""urlencode(s) -> str
URL-encodes a string.
Example:
>>> urlencode("test")
'%74%65%73%74'
"""
return ''.join(['%%%02x' % ord(c) for c in s]) | 09905ba226368b23c081ccc7845a7deb3b4f69bc | 600,003 |
def s_to_hms(seconds):
""" Get tuple (hours, minutes, seconds) from total seconds """
h, r = divmod(seconds, 3600)
m, s = divmod(r, 60)
return h, m, s | 10ca2a72128d9328744c282f902f5fb1e626e12f | 505,559 |
import pwd
def get_uid_name(uid):
"""Get defined login uid fullname"""
return pwd.getpwuid(uid)[4] | f8f06bcd6b2b679ec3546d3fe579fba549c2f01d | 313,188 |
def strip_extension(path):
"""Strip classical image extensions (.nii.gz, .nii, .mgz, .npz) from a filename."""
path = path.replace('.nii.gz', '')
path = path.replace('.nii', '')
path = path.replace('.mgz', '')
path = path.replace('.npz', '')
return path | c5d4d20c2d57a18aa823ff5cbb81c206493c4d35 | 519,596 |
def get_channel_id(data: dict) -> str:
"""Return channel id from payload"""
channel = data['channel']
return channel | 74b9ff9ad70bacd25d7ebe9ad93ef3cd827d627e | 681,968 |
import configparser
def parse_config_to_dict(cfg_file, section):
""" Reads config file and returns a dict of parameters.
Args:
cfg_file: <String> path to the configuration ini-file
section: <String> section of the configuration file to read
Returns:
cfg: <dict> configuration parameters of 'section' as a dict
"""
cfg = configparser.ConfigParser()
cfg.read(cfg_file)
if cfg.has_section(section):
return dict(cfg.items(section))
else:
print("Section '%s' not found in file %s!" % (section, cfg_file))
return None | 021e3594f3130e502934379c0f5c1ecea228017b | 709,820 |
def Q_deph(P_mass, r_dist, R):
"""
Calculates the heat load of dephlegmator.
Parameters
----------
P_mass : float
        The mass flow rate of the distillate, [kg/s]
R : float
The reflux number [dimensionless]
r_dist : float
        The heat of vaporization of the distillate, [J/kg]
Returns
-------
Q_deph : float
The heat load of dephlegmator, [W] , [J/s]
References
----------
    Dytnerskiy, formula 2.2, p. 45
"""
return P_mass * (R + 1) * r_dist | ac8dc09d6b0a7513e32c47b4334ba4876de52daf | 692,161 |
import math
def Phi(x):
"""
Cumulative standard normal distribution.
"""
# constants
a1 = 0.254829592
a2 = -0.284496736
a3 = 1.421413741
a4 = -1.453152027
a5 = 1.061405429
p = 0.3275911
# Save the sign of x
sign = 1
if x < 0:
sign = -1
x = abs(x)/math.sqrt(2.0)
# A&S formula 7.1.26
t = 1.0/(1.0 + p*x)
y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*math.exp(-x*x)
return 0.5*(1.0 + sign*y) | 6a427a7d9f5d01d319de40ed3e248bf7100944f8 | 451,967 |
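For illustration, the approximation reproduces familiar standard-normal CDF values to several decimal places:

print(round(Phi(0.0), 4))    # 0.5
print(round(Phi(1.96), 4))   # approximately 0.975
print(round(Phi(-1.96), 4))  # approximately 0.025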
def target_types_from_symbol_table(symbol_table):
"""Given a LegacySymbolTable, return the concrete target types constructed for each alias."""
aliases = symbol_table.aliases()
target_types = dict(aliases.target_types)
for alias, factory in aliases.target_macro_factories.items():
target_type, = factory.target_types
target_types[alias] = target_type
return target_types | f9c0d9abe59b0b4debd8394bc6dc1bad584036e8 | 230,798 |
def _select_win_size(max_ref_len, select_win, win):
"""
Set smoothing window size
:param max_ref_len: length of reference
:param select_win: True if window size to be selected
:param win: window size
    :return: window size, bool whether to select win
"""
if win == 0 or select_win:
win = int(max_ref_len / 30)
select_win = True
if win % 2 != 0:
win += 1
if win < 6:
win = 1
return win, select_win | 22d1388e58d300357e7b515f49caae1418f9acc0 | 231,935 |
def sieve_of_eratosthene(num):
"""
Computes prime numbers using sieve of Eratosthenes.
    :param num: The number up to which to find prime numbers (exclusive).
:returns: List of prime numbers.
"""
sieve = list(range(num))
sieve[1] = 0 # All non-prime nums we'll replace by zeros.
for checked_num in sieve[2:]:
if checked_num != 0:
multiplier = checked_num * 2
while multiplier < num:
sieve[multiplier] = 0
multiplier += checked_num
return [n for n in sieve if n != 0] | 7fc8499b4f7d04a0cd94ce67b2a135cbd27faaa9 | 693,162 |
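For illustration, primes below 30:

assert sieve_of_eratosthene(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]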
def shift_leftward(n, nbits):
"""Shift positive left, or negative right. Same as n * 2**nbits"""
if nbits < 0:
return n >> -nbits
else:
return n << nbits | d9f83bb8a3c6389d9b56ff4b2397f42ecabb327a | 465,730 |
def scalar_multiply(scalar, vect):
""" Multiply components by scalar """
return [scalar * v_i for v_i in vect] | a73d6cabc59833cee9b21a33e42ab9a4a979be95 | 392,859 |
def _get_author_weight(authors_in_paper):
""" Method that returns the weight of a single author in a paper"""
    # If there is at least one author, the weight is 1/N; otherwise it is 0.
if len(authors_in_paper) > 0:
return 1./len(authors_in_paper)
else:
return 0 | 7cf7c723572cd48e26672bcffb7916d044e4ee97 | 585,657 |
def list2dict_param(param):
"""Convert list to dictionary for quicker find."""
if isinstance(param, list):
out = {}
for p in param:
out[p["name"]] = p
return out
return param | 94faba92821dac3f8fae8618dbe197ab5ffe2ac0 | 206,431 |
import hashlib
def get_file_content_hash(file_path):
"""
Return the file content hash for a file.
"""
with open(file_path, 'rb') as content:
hasher = hashlib.sha256()
hasher.update(content.read())
return hasher.hexdigest() | 07f6ff32632a151699ab455e86527f69624a3cf5 | 608,906 |
def format_dimension(key, value, expiration_secs):
"""Formats a dimension to a string. Opposite of parse_dimension."""
if expiration_secs:
return '%d:%s:%s' % (expiration_secs, key, value)
return '%s:%s' % (key, value) | dd38693d9f8fdb63fedfae17751d97c326299e00 | 367,051 |
import re
def blockToGenome(block, startInBlock, endInBlock):
"""Transform block to genomic coordinates.
@param block: Block (chrom.blockStart-blockEnd)
@param startInBlock: Start coordinate in block
    @param endInBlock: End coordinate in block
@returns: (chrom, gStart, gEnd)
"""
    prog = re.compile(r'[\.|:]|-')
chrom,blockStart,blockEnd = prog.split(block)
gStart = int(blockStart)+startInBlock-1
gEnd = int(blockStart)+endInBlock-1
return chrom,gStart,gEnd | 9470b5ffcb4e5785af2c4162ea06c9bf74dade03 | 449,441 |
def parse_urn(urn):
"""
Parses a URN, returning a pair that contains a list of URN namespace parts, followed by the
URN's unique ID.
"""
if not urn.startswith("urn:"):
return None
parts = urn[len("urn:") :].split(":")
return (parts[0 : len(parts) - 1], parts[len(parts) - 1]) | bcfc311e678644719d6c23bc046b3eacb341560a | 278,633 |
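For illustration:

assert parse_urn("urn:ietf:rfc:2648") == (["ietf", "rfc"], "2648")
assert parse_urn("not-a-urn") is None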
def output_fn(prediction, accept='text/csv'):
"""Format prediction output.
Args:
prediction (pandas.core.frame.DataFrame): A DataFrame with predictions.
accept (str): A string expected to be 'text/csv'.
Returns:
df: str (in CSV format)
"""
df = prediction.to_csv(index=False, header=None)
return df | 438be66c8d897986ec117a9127aa080f31b6ce98 | 478,242 |
def type_name(obj):
"""Fetch the type name of an object.
This is a cosmetic shortcut. I find the normal method very ugly.
:param any obj: The object you want the type name of
:returns str: The type name
"""
return type(obj).__name__ | 080f4826de4af3c5f14e08973003416e37ae8dd0 | 676,214 |
def any(name, alternates):
"""Return a named group pattern matching list of alternates."""
return "(?P<%s>" % name + "|".join(alternates) + ")" | 147f0e7cc2efad00493ba983d0ef8766b24fb051 | 183,674 |
def get_temperature_number(temp_str):
"""
Given a temperature string of the form "48.8 °C", return the first number
(in this example, 48). Also handles strings of the form "48,8 °C" that
apparently can exist (at least when reading the response from a file)
:param temp_str: Temperature string
:return: Temperature number (in string form)
"""
if 'Â' in temp_str:
return temp_str[:-6]
else:
return temp_str[:-5] | 30b9985a1d2ff4471ad4286903b035f2d1c6d589 | 680,895 |
def lammps_copy_files(job):
"""Check if the submission scripts have been copied over for the job."""
return job.isfile("submit.pbs") | 9d801d379155f02b93389252fd3352712f55fb2c | 663,476 |
def _ascii_encode(ascii: str) -> bytes:
"""Encode string of ASCII charactors to bytes.
Args:
ascii (str): ASCII charactors
Returns:
str: Converted bytes
"""
return ascii.encode("ascii", "replace") | 8f02c632f72d2c715a1e6136edb832db77e961ee | 137,521 |
def base_info(name, player, occupation, age, gender, residence, birthplace):
"""Creates the basic info of the character.
Args:
name (str): The character's name.
player (str): The player's name.
        occupation (str): The character's occupation.
        age (int): The age of the character.
        gender (str): The gender the character identifies as.
        residence (str): The character's place of residence.
        birthplace (str): The character's place of birth.
Return:
dict: The basic character information.
"""
return {
"base_info": {
"name": name,
"player": player,
"occupation": occupation,
"age": age,
"gender": gender,
"residence": residence,
"birthplace": birthplace,
}
} | 0a871629f6bbfff1167a3de4a5b103141ecfed0e | 344,089 |
import re
def get_protocol_and_host(url):
"""Validate a given url and extract the protocol and
host from given url.
    If the url is invalid, a tuple of (None, None) is
returned to the caller.
param url: Input url to be parsed
returns: Tuple of protocol and host. So if input is
https://compute.jiocloud.com, the method returns
(https, compute.jiocloud.com)
"""
    url_regex = re.compile(r'(http[s]?)://((?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)')
url_parts = url_regex.match(url)
if not url_parts:
return (None, None)
else:
return (url_parts.group(1), url_parts.group(2)) | 5a717812ac1d6cae1ecae6e2f7e1864d4793dd05 | 540,222 |
def isbit(integer, nth_bit):
"""Tests if nth bit (0,1,2..) is on for the input number.
Args:
integer (int): A number.
nth_bit (int): Investigated bit.
Returns:
bool: True if set, otherwise False.
Raises:
ValueError: negative bit as input.
Examples:
>>> isbit(4, 1)
False
>>> isbit(4, 2)
True
See also:
utils.setbit()
"""
if nth_bit < 0:
raise ValueError('Negative bit number.')
mask = 1 << nth_bit
return integer & mask > 0 | a94f6b81284ac484a8441f96ac8b374c2cb9b6ae | 644,964 |
def hash_code(key, HASH_SIZE):
"""
Return the hash value of given key and the size of hash table
:param key: given key
:type key: str
:param HASH_SIZE: size of hash table
:type HASH_SIZE: int
:return: hash value
:rtype: int
"""
n = len(key)
res = 0
for i in range(n):
# hash(abc) = (a * 33^2 + b * 33 + c) % M
# = (33(33(33 * 0 + a) + b) + c) % M
# = (33(33(33 * 0 + a) % M + b) % M + c) % M
res = 33 * res + ord(key[i])
res %= HASH_SIZE
return res | af890f853c4774551526518dcc0018fe46b1d266 | 53,969 |
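For illustration, hashing "abc" into a 100-slot table gives ((97*33 + 98)*33 + 99) mod 100 = 66:

assert hash_code("abc", 100) == 66
assert 0 <= hash_code("hello world", 1000) < 1000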
def cell(content, colspan=None, css_class=None, cell_id=None):
""" Returns the given parameters as a well-formed HTML table cell tag.
content: (str) The cell's inner content. Ex: Hello World!
colspan: (int) HTML colspan tag content.
    css_class: (str) HTML css class.
    cell_id: (str) HTML id attribute for the cell.
    """
cell_str = '<td'
if cell_id:
cell_str += ' id="' + cell_id + '"'
if colspan:
cell_str += ' colspan=' + str(colspan)
if css_class:
cell_str += ' class="' + css_class + '"'
cell_str += '>' + content + '</td>'
return cell_str | 48b55f0fab53c5a3579e6c3a495728a6ca662db1 | 437,964 |
def string_parse(input_str):
"""
    Converts the passed string into *args and **kwargs:
Args:
input_str(str): input string in format -
"1, 2, 3, a\nvalue3=4, value1=arg1, value2=arg2"
Returns:
tuple(*args, **kwargs): parsed args, and kwargs values
"""
args_str, kwargs_str = input_str.split('\n')
args_raw = args_str.split(',')
kwargs_raw = kwargs_str.split(', ')
args = [item.strip() for item in args_raw]
kwargs = dict((item.split('=') for item in kwargs_raw))
return args, kwargs | 3c1dc3d81c0539019c6095c00741f0daf0f5c188 | 76,360 |
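For illustration, parsing the format shown in the docstring:

args, kwargs = string_parse("1, 2, 3, a\nvalue3=4, value1=arg1, value2=arg2")
assert args == ["1", "2", "3", "a"]
assert kwargs == {"value3": "4", "value1": "arg1", "value2": "arg2"}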
def ekstraksi_aman(sup):
"""Mengekstraksi sup dan mengembalikan .text.strip()-nya secara aman."""
if sup:
return sup.extract().text.strip()
return "" | f4572519454b508f23730b6734c5a16164e9720f | 185,545 |
def annotation_filter(annotations, condition):
"""
Filter annotations.
`annotations`: the annotations to filter
`condition`: the filter callback
return: the filtered annotations
"""
filtered = dict()
for (key, value) in annotations.items():
if condition(key, value):
filtered[key] = value
return filtered | 517e19ab9888964d77326a8bec8f4d469e22bc10 | 651,272 |
from typing import Dict
from typing import List
from typing import Union
def decode_json_table(json: Dict[str, List[Union[str, List[str]]]]) -> List[dict]:
"""Transforms a 'JSON-table' (of the format below) into a List[dict], with each dict containing
the data for a single object.
Args:
json (dict): A dictionary containing the json-table. Must be of the form:
{
'header': List[str],
'rows': List[List[str]]
}
Note:
See (https://github.com/servalproject/pyserval-dna/blob/development/doc/REST-API.md#json-table)
for detailed description
Returns:
List[dict]: List of dictionaries containing separate JSON-objects
"""
data = []
for row in json["rows"]:
# transform row of table
# into dictionary for single object
data.append(dict(zip(json["header"], row)))
return data | 21377ee8c81759148f5cb1593d6011ecfe9f1f3b | 451,253 |
import pickle
def get_earlier_cpds(month):
""" Finds all compounds which were inputted into SureChemBL prior to or equal
to a given month
Args:
month (string): Month, in the form YYYY-MM
Returns:
pandas dataframe: dataframe containing SureChemBL patent id, month of
first entry, and igraph index
"""
#Read in master compound-date-index dataframe
agave_fp = "Data/Cpd_Data/master_cpd_date_index_df.p"
#drive_fp = "G:/Shared drives/SureChemBL_Patents/Cpd_Data/master_cpd_date_index_df.p"
df = pickle.load(file=open(agave_fp, "rb"))
# #Small dataframe analysis
# check_indicies(df)
return df[df["Month"] <= month] | b6f7c976d523f3c308eb647bb31851b99a8b7856 | 33,341 |
def get_resolution(cv2_image):
"""takes a cv2 image and returns its resolution: height, width"""
height, width, _ = cv2_image.shape
return height, width | 723536a820680d046a5e35e0e043a9565d5d979e | 221,152 |
def map_ores_code_to_int(code):
"""
    Takes a 1-2 letter code from ORES and turns it into an int.
    ORES score map:
Stub - 0
Start - 1
C - 2
B - 3
GA - 4
FA - 5
"""
return {
'Stub': 0,
'Start': 1,
'C': 2,
'B': 3,
'GA': 4,
'FA': 5,
}[code] | 5730b720ebab91db0d9a5708b458404c233d6b94 | 63,810 |
def center_bin(bins):
"""Shift a bin by half a step and remove the last item.
This function can be used to prepare a suitable x axis for plotting.
"""
return 0.5*(bins[1] - bins[0]) + bins[:-1] | 6d60a78c7b03f93f68c8005d226f5f4f098607ed | 82,565 |
def append_terminal_token(dataset, terminal_token='<e>'):
"""Appends end-of-sequence token to each sequence.
:param dataset - a 2-d array, contains sequences of tokens.
:param terminal_token (Optional) which symbol to append.
"""
return [sample + [terminal_token] for sample in dataset] | b6f83a0663f1690715f686e4a1115d4d3e8535ba | 357,601 |
def lerp(a, b, i):
"""Linearly interpolates from a to b
Args:
a: (float) The value to interpolate from
b: (float) The value to interpolate to
i: (float) Interpolation factor
Returns:
float: Interpolation result
"""
return a + (b-a)*i | 8422222416668a70feb2a75790a25d7ab1f0af14 | 19,512 |
def merge_sort(collection):
"""
Counts the number of comparisons (between two elements) and swaps
between elements while performing a merge sort
:param collection: a collection of comparable elements
:return: total number of comparisons and swaps, and the sorted list
"""
comparisons, swaps = 0, 0
mid = len(collection) // 2
left, right = collection[:mid], collection[mid:]
if len(left) > 1:
left_result = merge_sort(left)
comparisons += left_result[0]
swaps += left_result[1]
left = left_result[2]
if len(right) > 1:
right_result = merge_sort(right)
comparisons += right_result[0]
swaps += right_result[1]
right = right_result[2]
sorted_collection = list()
# while elements in both sub lists
while left and right:
if left[0] <= right[0]:
sorted_collection.append(left.pop(0))
else:
sorted_collection.append(right.pop(0))
comparisons, swaps = comparisons + 1, swaps + 1
# if elements are left in only one sublist, merge them (guaranteed sorted)
# this does not count as swapping as it is just a copy operation
# no need for swap += len(left or right)
sorted_collection += (left or right)
return comparisons, swaps, sorted_collection | 1886106c30b2aa44bac1a3a63b13e3029a65c48c | 495,601 |
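For illustration (exact comparison/swap counts depend on the input ordering):

comparisons, swaps, result = merge_sort([5, 2, 4, 1, 3])
assert result == [1, 2, 3, 4, 5]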
def mockup_return(*args, **kwargs):
"""Mockup to replace regular functions for error injection."""
return False | 92172e58a11e48a09c8f181ac55aa717b5fbb94d | 703,312 |
import socket
def connect(host, port):
"""
create connection
:param host: server address
:param port: server port
:return: socket fd
"""
_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_sock.connect((host, port))
return _sock | 2f570a59bde9e694fd8110e8dead18220a26688d | 405,285 |
import re
def hexclean(dirtyhex):
"""Remove whitespace, comments & newlines from hex string"""
return re.sub(r'\s', '', re.sub(r'#.*\n', '\n', dirtyhex)) | 470bc79fc742c4a5e4df5d8ca725530cd617ea91 | 454,755 |
def serialize_quantity(o):
"""
Serializes an :obj:`astropy.units.Quantity`, for JSONification.
Args:
o (:obj:`astropy.units.Quantity`): :obj:`Quantity` to be serialized.
Returns:
A dictionary that can be passed to :obj:`json.dumps`.
"""
return dict(
_type='astropy.units.Quantity',
value=o.value,
unit=o.unit.to_string()) | 47f52de2840ccfddbb1d9105ee254641443e8413 | 234,425 |
# The variable "token" is assumed to be an iDoneThis API token defined elsewhere (e.g., a module-level constant).
import requests
def get_json_data(url):
"""
fetch dones from the iDoneThis api, return list of dones from the json response
"""
headers = {'content-type': 'application/json', 'authorization': 'token %s' % (token)}
r = requests.get(url, headers=headers)
data = r.json()
dones = data['results']
return dones | 57b1be965a6779f697765294cc908b0ba6aec795 | 178,810 |
def audio_acquisition(r, mic):
"""Return an input audio from a microphone.
Args:
r: Recognizer instance.
mic: Microphone instance.
Returns:
audio: Audio clip in the SpeechRecognition format.
"""
print("Say something!")
with mic as source:
audio = r.listen(source)
return audio | 58e11c0578c05e377f01e705eb0083efc8030b90 | 573,545 |
def parse_encoding_header(header):
"""
Parse the HTTP Accept-Encoding header into a dict of the form,
{encoding: qvalue}.
>>> parse_encoding_header('') == {'': 1, 'identity': 1.0}
True
>>> parse_encoding_header('*') == {'*': 1, 'identity': 1.0}
True
>>> expected = {'identity': 1.0, 'gzip': 1.0, 'compress': 0.5}
>>> parse_encoding_header('compress;q=0.5, gzip;q=1.0') == expected
True
>>> expected = {'*': 0.0, 'gzip': 1.0, 'identity': 0.5}
>>> parse_encoding_header('gzip;q=1.0, identity; q=0.5, *;q=0') == expected
True
"""
encodings = {'identity': 1.0}
for encoding in header.split(","):
encoding, sep, params = encoding.partition(';')
encoding = encoding.strip()
key, sep, qvalue = params.partition('=')
encodings[encoding] = float(qvalue or 1)
return encodings | 3d2d2e4018ac06b0fedc57cbbddb8d9e1bf95d49 | 77,064 |
def create_poll_options() -> list[str]:
"""Create options for poll"""
options = [
"18.00",
"19.00",
"20.00",
"21.00",
"No-op",
"Maybe Baby <3",
]
return options | 8300184caf855074b661b5e7f47ca5242efa951f | 603,982 |
def make_bigrams(bigram_mod, texts):
"""Form bigrams in text
Args:
bigram_mod (gensim.models.phrases.Phraser): Gensim bigram model
texts (list): 2-dimensional list of words
Returns:
(list): bigrams formed from texts
"""
return [bigram_mod[doc] for doc in texts] | 8a1aafb0c51852ee98432012743aa21494fb86c0 | 57,018 |
import re
def clean_text(text):
"""
Removes apostrophes, non-alphabet letters, newlines and sets
everything to lowercase
Parameters
----------
text (string):
Input text string
Returns
-------
text (string):
Cleaned text string
"""
# Remove apostrophes
text = re.sub("\'", "", text)
# Leave only alphabet letters
text = re.sub("[^a-zA-Z]"," ", text)
# Remove new-lines
text = re.sub("\\n", "", text)
# Convert to lower-case
text = text.lower()
return text | 8778be6a94e875e85767a78d15be058edd037282 | 516,537 |
def active_queues(state):
"""Return information about the queues a worker consumes from."""
return [dict(queue.as_dict(recurse=True))
for queue in state.consumer.task_consumer.queues] | 27fe834c6812010980934ec85abf7c4e2efb393b | 194,086 |
def srf_dxy(srf):
"""
Retrieve SRF dx and dy.
Assumes all planes have same dx, dy.
srf: SRF file path to read from
"""
with open(srf, "r") as sf:
# version
sf.readline()
# planes definition
sf.readline()
# first plane
elon, elat, nstk, ndip, length, width = sf.readline().split()
return (
float("%.2f" % (float(length) / int(nstk))),
float("%.2f" % (float(width) / int(ndip))),
) | 3ad653a15cd4460262cfb3d8f5203d070401c01f | 584,806 |