content | sha1 | id
---|---|---|
from typing import Callable
import math
def binom_tree(s: float,
k: float,
r: float,
sig: float,
t: float,
n: int,
iv: Callable[[float, float], float],
fv: Callable) -> float:
"""
Price of an option from a binomial tree.
The tree is represented as dictionaries:
- the keys (i, u, d) denote the step (from i=0, present to i=n at time t in equal increments) and the number of
times there has been a move up (u) and down (d). While this is overspecified (i=u+d) this should make a subsequent
implementation of trinomial trees easier.
    - the values of the dictionary f hold the value of the option
    - the values of the dictionary p hold the probability of being in a node (probabilities sum to 1 within a time
index i)
The values of the underlying at each node are computed at decision time based on the keys.
Source: Chapter 13
:param s: Price of the underlying at time 0
:param k: Strike
:param r: Risk free rate
:param sig: volatility (non-negative)
:param t: Time to maturity
:param n: number of steps
    :param iv: function(s, k) -> float yielding the intrinsic value of the option given underlying s and strike k
:param fv: function (*, i, pu, i_u, pd, i_d, f, iv) -> float yielding value of the option given
i: time index i
n: number of time steps
t: total time at i=n
r: risk free rate
pu: (risk neutral) probability of up move
i_u: # of up moves at the node
pd: (risk neutral) probability of down move
i_d: # of down moves at node
f: dictionary with option values
iv: intrinsic value function
    The function may depend on only a subset of these arguments, but must then accept **kwargs.
:return: option price
"""
if sig == 0:
return iv(s, k)
    f = dict()  # value 'tree'
u = math.exp(sig * math.sqrt(t / n)) # up factor
d = math.exp(-sig * math.sqrt(t / n)) # down factor
pu = (math.exp(r * t / n) - d) / (u - d) # probability of up move
pd = 1 - pu # probability of down move
for i_u in range(n+1): # intrinsic value at T
f[(n, i_u, n - i_u)] = iv(s * u ** i_u * d ** (n - i_u), k)
for i in range(n-1, -1, -1): # update backward in time
for i_u in range(i+1):
f[(i, i_u, i - i_u)] = fv(s0=s, i=i, n=n, t=t, r=r, u=u, pu=pu, i_u=i_u, d=d, pd=pd,
i_d=i - i_u, f=f, iv=iv, k=k) # update
return f[(0, 0, 0)] | 93644712d6284db34b38c24ef1e23e347f346096 | 549,739 |
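# A minimal usage sketch (assumed, not from the source): a European-style fv
# callback that discounts the risk-neutral expectation one step back in time,
# then prices an at-the-money call on the tree above.
def european_fv(*, i, n, t, r, pu, i_u, pd, i_d, f, **kwargs):
    dt = t / n
    return math.exp(-r * dt) * (pu * f[(i + 1, i_u + 1, i_d)]
                                + pd * f[(i + 1, i_u, i_d + 1)])
price = binom_tree(s=100.0, k=100.0, r=0.05, sig=0.2, t=1.0, n=100,
                   iv=lambda s, k: max(s - k, 0.0), fv=european_fv)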
import functools
def rgetattr(obj, attr):
"""
>>> from types import SimpleNamespace
>>> args = SimpleNamespace(a=1, b=SimpleNamespace(c=2, d='e'))
>>> rgetattr(args, "a")
1
>>> rgetattr(args, "b.c")
2
"""
return functools.reduce(getattr, [obj] + attr.split('.')) | a3994923a6027c7c62cbe05e673a76de7988d76a | 401,674 |
def load_plain_file(file_path):
"""
Reads a path and returns the string information
Arguments:
- file_path (str): path to the file
Returns:
- text (str): the information to be retrieved
"""
with open(file_path, "r") as fp:
info = fp.read()
return info | d205b139cc7c2078e91fc133a87b3959aeb422e1 | 434,483 |
def _vpos(da):
"""
vPOS = Value at peak of season
"""
return da.max("time") | b245bbdb0eb8879301dc8c9e4e16a21a1a44d7e0 | 441,874 |
def parsehmmoutput(hmmresult):
"""parses the hmm output for housekeeping gene locations
parameters
----------
hmmresult
filename of parsable hmmsearch output
returns
----------
genelocs = dict, {gene:location}
"""
genelocs = {}
with open(hmmresult, "r") as f:
for line in f:
line = line.strip()
if not line.startswith("#"):
els = line.split()
loc = els[0]
gene = els[2]
                if gene not in genelocs:
                    genelocs[gene] = loc
    return genelocs | 2b87c097db6706cb543cd0dbeecd421750f5da7a | 357,115 |
def get_mapping(series):
"""
Map a series of n elements to 0, ..., n.
Parameters
----------
series : Pandas series (e.g. user-ids)
Returns
-------
mapping : Dict[]
"""
mapping = {}
i = 0
for element in series:
if element not in mapping:
mapping[element] = i
i += 1
return mapping | 452542dee5de04f6e50cc36ee6878dcd7c067bc6 | 201,767 |
def layer_size(X, Y):
"""
Get number of input and output size, and set hidden layer size
    :param X: input dataset's shape (m, 784)
    :param Y: input labels' shape (m, 1)
:return:
n_x -- the size of the input layer
n_h -- the size of the hidden layer
n_y -- the size of the output layer
"""
n_x = X.T.shape[0]
n_h = 10
n_y = Y.T.shape[0]
return n_x, n_h, n_y | 81add6bf528cfe872e62f622161bb25fb7dcb1d3 | 48,800 |
def _parse_volumes_kwarg(cli_style_volumes):
"""
The Python API for mounting volumes is tedious.
https://github.com/docker/docker-py/blob/master/docs/volumes.md
This makes it work like the CLI
"""
binds = {}
volumes = []
for volume in cli_style_volumes:
split = volume.split(':')
if len(split) == 1:
            volumes.append(split[0])
continue
host_path = split[0]
mountpoint = split[1]
if len(split) == 3:
read_only = split[2] == 'ro'
else:
read_only = False
volumes.append(mountpoint)
binds[host_path] = {'bind': mountpoint, 'ro': read_only}
return volumes, binds | 4c8d083ec2be02616491af42c725e2c5c14d419f | 280,810 |
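# A hedged usage sketch (assumed inputs): CLI-style strings become the
# volumes list and binds dict that docker-py expects.
volumes, binds = _parse_volumes_kwarg(['/host/data:/data:ro', '/cache'])
# volumes -> ['/data', '/cache'], binds -> {'/host/data': {'bind': '/data', 'ro': True}}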
import base64
def base64_encode(data: bytes) -> bytes:
"""Does a URL-safe base64 encoding without padding."""
return base64.urlsafe_b64encode(data).rstrip(b'=') | 341a6382a655ceb007bdba71e743d6c8656d4f7f | 578,773 |
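# A hypothetical inverse (assumed, not part of the original): restore the
# stripped '=' padding before decoding; the length mod 4 determines how many.
def base64_decode(data: bytes) -> bytes:
    return base64.urlsafe_b64decode(data + b'=' * (-len(data) % 4))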
def dict_to_list(d):
"""Converts an ordered dict into a list."""
# make sure it's a dict, that way dict_to_list can be used as an
# array_hook.
d = dict(d)
return [x[-1] for x in sorted(d.items())] | 6a56f890d3a5e6e9cb8a19fc5af8598bf3411d33 | 690,124 |
import re
def _remove_emoticons(tweet):
"""finds all emoticons, removes them from the
tweet, and then returns the tweet with emoticons
removed as well as a list of emoticons
Parameters:
-----------
tweet: str
contents of a tweet
Returns
-------
tweet_no_emoticons:
string of tweet with emoticons removed
emoticons:
list of emoticons
"""
emoticons_re = r'(?:[:;])(?:[-<])?(?:[()/\\|<>])'
emoticons = re.findall(emoticons_re, tweet)
tweet = re.sub(emoticons_re, '', tweet)
return tweet.strip(), emoticons | bbe7e1abed0228ccfd4aef6f191662a9a674a6ce | 64,629 |
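# Example (assumed input): emoticons are pulled out, the cleaned tweet is returned.
text, emoticons = _remove_emoticons('nice day :) see you ;-(')
# text -> 'nice day  see you', emoticons -> [':)', ';-(']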
def _color_strip(color):
""" 去除字符串中的多余空格
Parameters
----------
color : str
Returns
-------
str
返回去除了空格的颜色字符串
"""
return color.strip().replace(' ', '') | 8b7f815c64d1a9bf57cbd76db260e64f450783de | 71,093 |
import inspect
def form_of(state):
"""Return the form of the given state."""
if hasattr(state, "__form__"):
if callable(state.__form__) and not inspect.isclass(state.__form__):
return state.__form__()
else:
return state.__form__
else:
raise ValueError(f"{state} has no form") | e39aa7db7b324ab38b65232b34b987b862812c54 | 709,484 |
def extract_crs(da):
"""
Takes an xarray Dataset pulled from opendatacube and extracts crs metadata
if exists. Returns None if not found.
Parameters
----------
da: xarray Dataset
A single- or multi-dimensional array containing (or not) crs metadata.
Returns
-------
crs: str
A crs object.
"""
# notify user
print('Beginning extraction of CRS metadata.')
try:
# notify user
print('> Extracting CRS metadata.')
# extract crs metadata
crs = da.geobox.crs
# notify user
print('> Success!\n')
    except Exception:
# notify user
print('> No CRS metadata found. Returning None.\n')
crs = None
return crs | d6c7432663d8606f9ff9346a9eeb82ea40b84ab6 | 429,675 |
def _validate_float(value):
"""Validate value is a float."""
try:
value = float(value)
except ValueError as err:
raise ValueError("Could not convert to float") from err
return value | d22281d79c440ba9c28ff79cbd3321dddd77831a | 381,274 |
import math
def sum_log_scores(s1: float, s2: float) -> float:
"""Sum log odds in a numerically stable way."""
# this is slightly faster than using max
if s1 >= s2:
log_sum = s1 + math.log(1 + math.exp(s2 - s1))
else:
log_sum = s2 + math.log(1 + math.exp(s1 - s2))
return log_sum | 7e41d5e0838a3bf78b0dd7a5c2b01ddcd1216247 | 659,869 |
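# Identity used above: log(e^a + e^b) = max(a, b) + log(1 + e^(min - max));
# the exponent is always <= 0, so the computation cannot overflow.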
import torch
def reverse_padded_sequence(inputs, lengths, batch_first=False):
"""Reverses sequences according to their lengths.
Inputs should have size ``T x B x *`` if ``batch_first`` is False, or
``B x T x *`` if True. T is the length of the longest sequence (or larger),
B is the batch size, and * is any number of dimensions (including 0).
Arguments:
inputs (Variable): padded batch of variable length sequences.
lengths (list[int]): list of sequence lengths
batch_first (bool, optional): if True, inputs should be B x T x *.
Returns:
A Variable with the same size as inputs, but with each sequence
reversed according to its length.
"""
if batch_first:
inputs = inputs.transpose(0, 1)
max_length, batch_size = inputs.size(0), inputs.size(1)
if len(lengths) != batch_size:
raise ValueError('inputs is incompatible with lengths.')
ind = [list(reversed(range(0, length))) + list(range(length, max_length))
for length in lengths]
ind = torch.LongTensor(ind).transpose(0, 1)
for dim in range(2, inputs.dim()):
ind = ind.unsqueeze(dim)
ind = ind.expand_as(inputs)
if inputs.is_cuda:
ind = ind.cuda(inputs.get_device())
reversed_inputs = torch.gather(inputs, 0, ind)
if batch_first:
reversed_inputs = reversed_inputs.transpose(0, 1)
return reversed_inputs | 2794e1effb4227e509d97aa942a87f329a61b7b8 | 51,953 |
def get_first_and_last(input_string):
"""Get the first 10, and last 10 characters,
and returns them as a tuple
Args:
input_string (str): string that will be used as source
Returns:
(str, str): tuple of first and last 10 chars in specified string
"""
return input_string[0: 10], input_string[-10:] | 895859f64b394b0f778ccb6c33ffdc731ce299ca | 510,207 |
def clean_and_split_input(input):
""" Removes carriage return and line feed characters and splits input on a single whitespace. """
input = input.strip()
input = input.split(' ')
return input | 232f254db9083c23f49eed9305122f1cf9f5b936 | 212,558 |
def is_scalar(obj):
"""
Return True if the input is a scalar
"""
return obj.dims == [] | 6c1fae8c98758ed9f4da6140afbdef60b10d5cd0 | 534,635 |
def flatten(list_of_lists):
"""Flatten a given list of lists
Args:
list_of_lists (list): Given list of lists
Returns:
list: Flattened list
"""
return [x for each_list in list_of_lists for x in each_list] | fa1738d9ac2d9ec763a5461d7373a81fdef18b6a | 214,701 |
import re
from typing import List
def gen_flags_repr(flags: re.RegexFlag) -> str:
"""通过 RegexFlag 生成对应的字符串
Args:
flags (re.RegexFlag): 正则表达式的标记
Returns:
str: 对应的标记字符串
"""
flags_list: List[str] = []
if re.ASCII in flags:
flags_list.append("a")
if re.IGNORECASE in flags:
flags_list.append("i")
if re.LOCALE in flags:
flags_list.append("L")
if re.MULTILINE in flags:
flags_list.append("m")
if re.DOTALL in flags:
flags_list.append("s")
if re.UNICODE in flags:
flags_list.append("u")
if re.VERBOSE in flags:
flags_list.append("x")
return "".join(flags_list) | 60f5b6be4c46c04d4ed3a445ddec597505e50f54 | 549,046 |
def field_size(field_label):
"""
Helper function to determine the size of a binary
table field
Parameters
----------
field_label : PVLModule
The field label
Returns
-------
int :
The size of the one entry in bytes
"""
data_sizes = {
'Integer' : 4,
'Double' : 8,
'Real' : 4,
'Text' : 1
}
return data_sizes[field_label['Type']] * field_label['Size'] | afaff70bc18d4d9d023fb7c89d45560f1a691bcf | 699,111 |
def __date_to_iso8601(date):
"""Convert date to ISO-8601 format
"""
return '{year}-{month:02d}-{day:02d}T{hour:02d}:{minute:02d}:{second:02d}'.format(
year=date.year,
month=date.month,
day=date.day,
hour=date.hour,
minute=date.minute,
second=date.second) | c6e3f8bfd630f1d04656015013c32f3cdcf9dc1f | 474,510 |
def compute_gain(data,pol):
""" Compute the gain and apeture efficiency from the data.
Parameters
----------
data : heterogeneous record array containing 'calc_beam_height' and 'flux' records
Return
------
gain : The gains
"""
gain = data['beam_height_' + pol] / data['flux']
return gain | 740a90593f2d3e46d86ce9b9b4e9b9f92956e152 | 366,436 |
def dict2list(dict_: dict) -> list:
"""
Converts a dict into a list of key-value dictionaries and returns it.
Parameters
----------
dict_
Some dictionary to be converted.
Returns
-------
list_
Each element is a dict with one key-value pair. These key-value pairs
are those contained in dict_.
"""
    if not isinstance(dict_, dict):
raise TypeError(
f"Input argument must be of type 'dict', found '{type(dict_)}'."
)
list_ = []
for key, value in dict_.items():
list_.append({key: value})
return list_ | 16a066e41fe2dbb6dcc23e8d3e0b88da34aad84e | 331,744 |
import torch
def identity_grid(shape, dtype=None, device=None, jitter=False):
"""Returns an identity deformation field.
Parameters
----------
shape : (dim,) sequence of int
Spatial dimension of the field.
dtype : torch.dtype, default=`get_default_dtype()`
Data type.
device torch.device, optional
Device.
jitter : bool or 'reproducible', default=False
Jitter identity grid.
Returns
-------
grid : (*shape, dim) tensor
Transformation field
"""
mesh1d = [torch.arange(float(s), dtype=dtype, device=device)
for s in shape]
grid = torch.meshgrid(*mesh1d)
grid = torch.stack(grid, dim=-1)
if jitter:
reproducible = jitter == 'reproducible'
device_ids = [grid.device.index] if grid.device.type == 'cuda' else None
with torch.random.fork_rng(device_ids, enabled=reproducible):
if reproducible:
torch.manual_seed(0)
jitter = torch.rand_like(grid).sub_(0.5).mul_(0.1)
grid += jitter
return grid | 6c70d5f04bc15555d9c5369594acb9e1c50c1e56 | 381,320 |
def sdss2decam(g_sdss, r_sdss, i_sdss, z_sdss):
"""
Converts SDSS magnitudes to DECam magnitudes
Args:
[griz]_sdss: SDSS magnitudes (float or arrays of floats)
Returns:
g_decam, r_decam, z_decam
Note: SDSS griz are inputs, but only grz (no i) are output
"""
gr = g_sdss - r_sdss
ri = r_sdss - i_sdss
iz = i_sdss - z_sdss
# - DESI-1788v1 equations 4-6
g_decals = g_sdss + 0.01684 - 0.11169*gr
r_decals = r_sdss - 0.03587 - 0.14114*ri
z_decals = z_sdss - 0.00756 - 0.07692*iz
return g_decals, r_decals, z_decals | a0869eeb138c00eb30476dd34883eabe77066129 | 71,954 |
def get_options(num):
"""Convert bitmasked int options into a dict."""
return {
'require_id': bool(num & 1),
'register_id': bool(num & (1 << 1)),
} | 9141db068aa3e9ab909f37a99f851182ce69a1da | 146,953 |
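# Example: only bit 0 set -> require_id is True, register_id is False.
assert get_options(0b01) == {'require_id': True, 'register_id': False}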
def read_cfg_intrinsics(cfg, section, key, default):
"""
Read intrinsics from a config file
Args:
cfg: config file
section: [section] of the config file
key: key to be read
default: value if couldn't be read
Returns: resulting intrinsic dict
"""
if cfg.has_option(section, key):
        values = cfg.get(section, key).split(',')
        intrinsics = {
            "x0": 320,
            "y0": 240,
            "fx": float(values[0]),
            "fy": float(values[2])
        }
        return intrinsics
else:
return default | 6f419136e7851685308a1d43ce9585596ca7c802 | 405,768 |
def tile_to_quadkey(tile_x, tile_y, level):
"""This is a function that converts tile coordinates at a certain
level of detail of a Bing Map to a unique string identifier (QuadKey).
:param tile_x: The x axis coordinate of the tile at `level` level of detail
:param tile_y: The y axis coordinate of the tile at `level` level of detail
:param level: The level of detail of the Bing Map
:return: A `quadkey` string of length given by the `level` level of detail
"""
ql = []
for i in range(level, 0, -1):
digit = ord('0')
mask = 1 << (i-1)
if ((tile_x & mask) != 0):
digit+=1
if ((tile_y & mask) != 0):
digit+=2
ql.append(chr(digit))
quadkey = ''.join(ql)
return quadkey | 6416511a03e4bb65eef969fa223aacbb18b8ee32 | 624,657 |
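# Worked example matching the Bing Maps tile system: tile (3, 5) at level 3
# interleaves the y/x bits into base-4 digits 2, 1, 3.
assert tile_to_quadkey(3, 5, 3) == '213'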
def _is_fedora(distname):
"""detect Fedora-based distro (e.g Fedora, CentOS, RHEL)"""
distname = distname.lower()
for x in ["fedora", "centos", "red hat"]:
if x in distname:
return True
return False | 273407ec9eccd3ee5e5e209e3a9c6fe54187b188 | 260,837 |
def get_all_headers(rows_list):
"""Utility to get all the keys for a list of dicts"""
headers = []
    for row_dict in rows_list:
        for h in row_dict.keys():
            if h not in headers:
                headers.append(h)
return headers | 36d4f6e2804535c76ce087f69c17a916a467ae8a | 186,290 |
import math
def get_angle(x, y):
"""Get angle based on the unit circle.
Parameters
----------
x : float
x-value
y : float
y-value
Return: float
angle
"""
if x == 0:
if y > 0:
return 90
else:
return 270
elif x < 0:
return math.degrees(math.atan(y / x)) + 180
return math.degrees(math.atan(y / x)) | bf902232a1ae0aa5afff1b8ebfc27f744cdd0d3b | 242,774 |
def FindDocks(docks, dock_direction, dock_layer=-1, dock_row=-1, reverse=False):
"""
This is an internal function that returns a list of docks which meet
the specified conditions in the parameters and returns a sorted array
(sorted by layer and then row).
:param `docks`: a list of L{AuiDockInfo};
:param `dock_direction`: the L{AuiDockInfo} docking direction to analyze;
:param `dock_layer`: the L{AuiDockInfo} layer to analyze;
    :param `dock_row`: the L{AuiDockInfo} row to analyze;
    :param `reverse`: whether to sort the resulting list in reverse order.
"""
matchDocks = [(d.dock_layer, d.dock_row, d.dock_direction, d) for d in docks if \
(dock_direction == -1 or dock_direction == d.dock_direction) and \
((dock_layer == -1 or dock_layer == d.dock_layer) and \
(dock_row == -1 or dock_row == d.dock_row))]
arr = [x[-1] for x in sorted(matchDocks, reverse=reverse)]
return arr | 188d9afacb05914247f4e47e835315a5a38b51df | 317,174 |
import click
def extended_help_option(extended_help=None, *param_decls, **attrs):
"""
Based on the click.help_option code.
Adds a ``--extended-help`` option which immediately ends the program
printing out the extended extended-help page. Defaults to using the
callback's doc string, but can be given an explicit value as well.
This is intended for use as a decorator on a command to provide a 3rd level
of help verbosity suitable for use as a manpage (though not formatted as such explicitly).
Like :func:`version_option`, this is implemented as eager option that
prints in the callback and exits.
All arguments are forwarded to :func:`option`.
"""
def decorator(f):
def callback(ctx, param, value):
if value and not ctx.resilient_parsing:
if not extended_help:
ctx.command.help = ctx.command.callback.__doc__
click.echo(ctx.get_help(), color=ctx.color)
else:
ctx.command.help = extended_help
click.echo(ctx.get_help(), color=ctx.color)
ctx.exit()
attrs.setdefault('is_flag', True)
attrs.setdefault('expose_value', False)
attrs.setdefault('help', 'Show extended help content, similar to manpage, and exit.')
attrs.setdefault('is_eager', True)
attrs['callback'] = callback
return click.option(*(param_decls or ('--extended-help',)), **attrs)(f)
return decorator | b7c4f623c252baf17cdc7daf645a15ec67451cde | 522,298 |
import typing
def is_basic_iterable(obj: typing.Any) -> bool:
"""Checks if an object is a basic iterable.
By basic iterable we want to mean objects that are iterable and from a
basic type.
:param obj: Object to be analysed.
:return: True if obj is a basic iterable (see list below). False otherwise.
"""
return isinstance(obj, (list, tuple, bytearray, set, frozenset)) | 48a87ea430021fe7086ba9c17d973da1ef5f7fdd | 645,138 |
def calc_nchk(n,k):
"""
Calculate n choose k
"""
    accum = 1
    for i in range(1, k + 1):
        # integer division is exact here: each partial product is itself a binomial coefficient
        accum = accum * (n - k + i) // i
    return accum | 3b176bd05147b0f9bc549c885fed303faeb97bbe | 307,528 |
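# Example: 5 choose 2 = 10.
assert calc_nchk(5, 2) == 10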
def translate(s: str, src: str, dest: str) -> str:
"""
Converts characters from `s` that appear in `src` with the
characters at corresponding positions in `dest`
"""
    if len(src) != len(dest):
        raise RuntimeError("src and dest must have the same length")
    # str.translate maps every character in a single pass, so a character
    # produced by one substitution can never be re-substituted by a later
    # one (unlike chained str.replace calls).
    return s.translate(str.maketrans(src, dest)) | 52695fd7a56f4f2ba0a91f8f64683e6bda3d0b1a | 79,436 |
def contain_disambig_symbol(phones):
"""Return true if the phone sequence contains disambiguation symbol.
Return false otherwise. Disambiguation symbol is at the end of phones
in the form of #1, #2... There is at most one disambiguation
symbol for each phone sequence"""
    return phones[-1].startswith("#") | 807f81b999632164de13fdf2c0d182e2abb73f44 | 196,270 |
import json
def context_from_json_file(file_name):
"""
Load a json file into a dictionary.
Parameters
----------
file_name: str
Path to the json file.
    Returns
    -------
    dict
        Content of the file as dictionary.
"""
with open(file_name, encoding='utf-8') as json_file:
data = json.load(json_file)
return data | 47968c0eb0f267421006ddaebf15c8a4ef643c25 | 589,841 |
def is_odd(n):
"""
Determine whether n is odd.
:parameter n: int, n > 0
:return: bool, if it's odd
"""
return n % 2 != 0 | b254da356b3aa6b87e3a546de2448b26d42a8636 | 266,501 |
def convert_epa_unit(df, obscolumn="SO2", unit="UG/M3"):
"""
converts ppb to ug/m3 for SO2 in aqs and airnow datasets
See 40 CFR Part 50.5, Appendix A-1 to part 50, appendix A=2 to Part 50.
to convert from ppb to ug/m3 multiply by 2.6178.
Also will convert from ug/m3 to ppb.
Parameters
----------
df : pandas dataframe
self.df attribute from aqs or airnow class.
obscolumn : string
name of column with SO2 data in it.
unit : string
either 'UG/M3' or 'PPB' (not case sensitive)
will convert data to this unit.
Returns
-------
df : pandas dataframe
returns dataframe identical to original but with data converted to new
unit.
"""
factor = 2.6178
ppb = "ppb"
ugm3 = "ug/m3"
    if unit.lower() == ugm3:
        df = df[df["units"] == ppb].copy()  # select rows with units of 'ppb'
        df["units"] = unit.upper()
        df[obscolumn] = df[obscolumn] * factor
    elif unit.lower() == ppb:
        df = df[df["units"] == ugm3].copy()  # select rows with units of 'ug/m3'
        df["units"] = unit.upper()
        df[obscolumn] = df[obscolumn] / factor
return df | 4af9811c3ae465904b3320cc6d5dd0e29f1ff598 | 69,221 |
import functools
from pathlib import Path
@functools.lru_cache(maxsize=None)
def get_raw_svg(path: str) -> str:
"""Get and cache SVG XML."""
return Path(path).read_text() | 60f85b0ff8f1d4478b7093acddef5255230197f9 | 349,653 |
def read_counts(counts_filename):
"""Reads counts file into dict of counts"""
with open(counts_filename) as f:
lines = f.read().splitlines()
lines.pop(0)
lines_proc = dict( (line.split()[1] , float(line.split()[8])) for line in lines if '*' not in line.split()[1] )
return lines_proc | f7b0ae671dd129470d3d8bd1ee44543519adde25 | 521,951 |
import re
def _include_exclude(
dictionary: dict,
include_pattern: str,
exclude_pattern: str,
) -> bool:
"""
Filters the items of a dictionary based on a include / exclude regexp pair.
Returns `True` if the size of the dictionary changed.
"""
incl, excl = re.compile(include_pattern), re.compile(exclude_pattern)
keys = list(dictionary.keys())
for k in keys:
if excl.match(k) or not incl.match(k):
del dictionary[k]
return len(dictionary) != len(keys) | c7b06f84463c7d003a6516d5fc943da12ac4968e | 680,224 |
import random
import math
def get_random_percents(number):
"""
    Returns a list of n random percents that will sum to 100.
"""
tanks = []
total = 0
for _ in range(number):
num = random.randint(1, 10)
total += num
tanks.append(num)
percents = []
for tank in tanks:
percents.append(math.floor(100 * tank / total))
dif = 100 - sum(percents)
if dif != 0:
percents[random.randint(0, len(percents)-1)] += dif
return percents | 1a8675448a7be1397c3993581feaf415b380604f | 291,092 |
def get_bbox(x_start, y_start, x_end, y_end):
"""
This method returns the bounding box of a face.
Parameters:
-------------
x_start: the x value of top-left corner of bounding box
y_start: the y value of top-left corner of bounding box
    x_end: the x value of bottom-right corner of bounding box
    y_end: the y value of bottom-right corner of bounding box
returns:
--------------
[x1, y1, x2, y2, x3, y3, x4, y4]
the list of x and y values starting from the top-left corner and going clock, or counter-clock wise
"""
x1 = x_start
y1 = y_start
x2 = x_end
y2 = y_start
x3 = x_end
y3 = y_end
x4 = x_start
y4 = y_end
return [x1, y1, x2, y2, x3, y3, x4, y4] | 79e440d4875f1e32d5f678d6715777ab525f4c69 | 46,572 |
def get_action_value(mdp, state_values, state, action, gamma):
""" Computes Q(s,a) as in formula above """
result = 0
for to_state in mdp.get_all_states():
transition_probability = mdp.get_transition_prob(state, action, to_state)
reward = mdp.get_reward(state, action, to_state)
result += transition_probability * (reward + gamma * state_values[to_state])
return result | 226d8e01054552ae1108d3d83e0e438ddc821df9 | 28,702 |
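# The "formula above" referenced in the docstring is the standard Bellman backup:
#   Q(s, a) = sum over s' of P(s' | s, a) * (R(s, a, s') + gamma * V(s'))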
def _get_required_data(description=False):
"""
Provides a list of the required inputs for all possible ensembles.
Parameters
----------
description : bool, default = False.
If True, it prints the descriptions of the input_variables (i.e. dict),
If False, it only prints the input_variables without the descriptions (i.e. list)
Returns
---------
required_data : dict or list, default = list.
If the description = True then a dict is provided with the key and value.
if the description = False then a list of the dict keys is provided.
Note:
Variables and text extracted with permission from the GOMC manual version 2.60.
Some of the text was modified from its original version.
Cite: Potoff, Jeffrey; Schwiebert, Loren; et al. GOMC Documentation.
https://raw.githubusercontent.com/GOMC-WSU/GOMC/master/GOMC_Manual.pdf, 2021.
"""
required_data = {
"charmm_object": "Charmm object, "
"A Charmm object, which by definition has been parameterized "
"from the selected force field.",
"ensemble_type": "Required files or System Info (all ensembles): str, "
"(valid strings are 'NVT', 'NPT', 'GEMC_NPT', 'GCMC-NVT', or 'GCMC'), "
"the ensemble type for the simulation.",
"RunSteps": "Required files or System Info (all ensembles): int (> 0), "
"The number or run steps for the simulation.",
"Temperature": "Required files or System Info (all ensembles): float or integer (> 0), "
"Temperature of system in Kelvin (K)",
}
if description:
return required_data
else:
return list(required_data.keys()) | 0a37e425e1b9efc426c53a19522b2d0e9fc2edc1 | 155,079 |
def dim_col(d: int) -> str:
"""
    Name of a dimension column.
Parameters
----------
d
Dimension number.
Returns
-------
name: str
Dimension name.
Example
-------
>>> from rle_array.testing import dim_col
>>> dim_col(1)
'dim_1'
"""
return f"dim_{d}" | e4f76bdba041535993aaea1ba613f1596d8e32e9 | 652,019 |
def get_image_batch(generator,batch_size):
"""keras generators may generate an incomplete batch for the last batch"""
img_batch = generator.next()
if len(img_batch) != batch_size:
img_batch = generator.next()
assert len(img_batch) == batch_size
return img_batch | c9c9b61e1775b275157f4d979eb7117d8fbef0d2 | 425,011 |
def split_folds(X, y, fold_series, test_fold):
"""Take a dataset whose observations have been grouped into folds,
then perform a train-test split.
X, y: feature and target DataFrames.
fold_series: Series containing the fold numbers of the observations.
test_fold: Integer, the fold number that will be used as the test fold.
Returns: tuple of four DataFrames"""
if fold_series.dtype != "int64":
raise AttributeError("The fold list does not purely contain integers.")
test_mask = (fold_series == test_fold)
X_train = X.loc[~test_mask].copy()
y_train = y.loc[~test_mask].copy()
X_test = X.loc[test_mask].copy()
y_test = y.loc[test_mask].copy()
return X_train, X_test, y_train, y_test | 38051e584c427ffe77273fbbcdd764b6fe432b2f | 45,296 |
import torch
def resort_points(points, idx):
"""
Resort Set of points along G dim
:param points: [N, G, 3]
:param idx: [N, G]
:return: [N, G, 3]
"""
device = points.device
N, G, _ = points.shape
n_indices = torch.arange(N, dtype=torch.long).to(device).view([N, 1]).repeat([1, G])
new_points = points[n_indices, idx, :]
return new_points | a886b1c694a2c8c1ef5a3c2bdccf201aa7a53812 | 654,095 |
def ansiSameText(s1:str,s2:str)->bool:
"""Compare two strings case insensitive"""
return (s1.lower()==s2.lower()) | 2faad19d93fc60377418b9f6f0b56778f07b46be | 579,495 |
import hashlib
def md5sum(file_path):
""" Calculate md5 checksum of single file
Args:
file_path (str): Path to file
Returns:
str: md5 checksum
"""
md5sum = hashlib.md5()
with open(file_path, 'rb') as file_object:
for block in iter(lambda: file_object.read(md5sum.block_size), b''):
md5sum.update(block)
return md5sum.hexdigest() | 7bf5542334a752690bd8e7298ae67c9c14020a21 | 406,603 |
def get_readable_file_size(size):
"""
Returns a human-readable file size given a file size in bytes:
>>> get_readable_file_size(100)
'100.0 bytes'
>>> get_readable_file_size(1024)
'1.0 KB'
>>> get_readable_file_size(1024 * 1024 * 3)
'3.0 MB'
>>> get_readable_file_size(1024**3)
'1.0 GB'
"""
    for t in ('bytes', 'KB', 'MB', 'GB', 'TB'):
        if size < 1024.0:
            return "%3.1f %s" % (size, t)
        size /= 1024.0
    # fall through for petabyte-scale sizes instead of returning None
    return "%3.1f %s" % (size, 'PB') | aa103e6c41195dbbcf37d83397570ac19410fdcb | 332,180 |
def decode_model_info_proto(model_info_proto):
"""Decodes the model_info_proto created by create_model_info_proto
Arguments:
model_info_proto (ModelInfo proto): model_info_proto created by create_model_info_proto
Returns:
list_of_model_info_dict (list): A list containing model_info_dicts
Note:
model_info_dict contains the following keys:
```python
{
"name": "model name as string"
"version": "version as string"
"status": "status string"
"misc": "string with miscellaneous info"
}
```
"""
list_of_model_info_dict = []
for model_info in model_info_proto.info:
model_info_dict = {}
model_info_dict["name"] = model_info.name
model_info_dict["version"] = model_info.version
model_info_dict["misc"] = model_info.misc
model_info_dict["status"] = model_info.status
list_of_model_info_dict.append(model_info_dict)
return list_of_model_info_dict | 34ce3043463dc8cf2b3716f208ab9758a6215a74 | 283,718 |
import random
def generate_addends(n_addend, minimum=1, maximum=9):
"""
n_addend : number of addends
minimum : minimum of an addend. 1 by default
maximum : maximum of an addend. 9 by default
"""
# generate addends
addends_list = []
for _ in range(n_addend):
        addends_list.append(random.randint(minimum, maximum))
return addends_list | 40af83dcc94092e7e0834fef3d22394385ff8d96 | 439,256 |
def render_warning(s) :
""" Make rst code for a warning. Nothing if empty """
return """
.. warning::
%s"""%(s) if s else '' | 2ba32b16e9f81b85ce51d4b2e7c9e34d2bd5c9e7 | 316,575 |
def compose_redis_key(vim_name, identifier, identifier_type="vdu"):
"""Compose the key for redis given vim name and vdu uuid
Args:
vim_name (str): The VIM name
identifier (str): The VDU or VNF uuid (NFVI based)
identifier_type (str): the identifier type. Default type is vdu. Also vnf is supported.
Returns:
str: the key for redis
"""
if identifier_type == "vnf":
return "{}:vnf#{}".format(vim_name.lower(), identifier)
else:
return "{}:{}".format(vim_name.lower(), identifier) | e9a03cf9ff704fea8b9cdf75c59695568e366649 | 709,026 |
from typing import List
from typing import Dict
from typing import ChainMap
def concat_stocks(stock_lists: List[Dict]) -> Dict[str, str]:
"""Concats a list of small dictionaries of `stock`:`price` into one dictionary"""
return dict(ChainMap(*stock_lists)) | b7e050f36b1ee42c969afe4c21b22bf43548e80c | 324,800 |
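# Example: with ChainMap, earlier dicts win on duplicate tickers.
assert concat_stocks([{'AAPL': '150'}, {'AAPL': '149', 'MSFT': '300'}]) == \
    {'AAPL': '150', 'MSFT': '300'}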
import re
def pick_address_string(src):
"""
Pick address string from input string with regex
Args:
        src (str) : Text you would like to get an address from
Return:
str: Address what you picked
"""
match = re.match(r'.*?[로(지하)?|길동리]\s?(\d+-*)+\s?((번*길)\s?(\d+-*)+)?', src)
return match.group().strip() if match else None | 1748ada790610e9904b758cc32c48fd059f9d51b | 529,929 |
def signed_leb128_decode(data) -> int:
"""Read variable length encoded 128 bits signed integer.
.. doctest::
>>> from ppci.utils.leb128 import signed_leb128_decode
>>> signed_leb128_decode(iter(bytes([0x9b, 0xf1, 0x59])))
-624485
"""
result = 0
shift = 0
while True:
byte = next(data)
result |= (byte & 0x7F) << shift
shift += 7
# Detect last byte:
if byte & 0x80 == 0:
break
if byte & 0x40:
# We have sign bit set!
mask = (1 << shift) - 1
result = result ^ mask
result = -result - 1
return result | 43419df92be9a1bc32f51c7b1dde13d6e1599424 | 536,037 |
def group_weight_decay(net, decay_factor, skip_list):
"""Set up weight decay groups.
skip_list is a list of module names to not apply decay to.
"""
decay, no_decay = [], []
for name, param in net.named_parameters():
if not param.requires_grad:
continue
if any([pattern in name for pattern in skip_list]):
no_decay.append(param)
else:
decay.append(param)
return [{'params': no_decay, 'weight_decay': 0.0},
{'params': decay, 'weight_decay': decay_factor}] | a3a9a0b3aa45963b0d0fb0aafc3be850e05060b4 | 591,696 |
def l_min(s, m):
""" Minimum allowed value of l for a given s, m.
The formula is l_min = max(\|m\|,\|s\|).
Parameters
----------
s: int
Spin-weight of interest
m: int
Magnetic quantum number
Returns
-------
int
l_min
"""
return max(abs(s), abs(m)) | 9eec996df3b8e026c8b58649fddbbbcc05c38372 | 11,672 |
import re
def generate_unique_name(base: str, items: list):
"""
Generates a unique name for a new item using a common base string
:param base: The generated name will be the base string, followed by a number if required
:param items: The named items to avoid generating a matching name with. Each must have a 'name' attribute
"""
regex = f"^{re.escape(base)}\\d*$"
similar_names = [item.name for item in items if re.match(regex, item.name)]
if len(similar_names) == 0 or base not in similar_names:
return base
# find the highest number in use, and go one higher
tailing_numbers = [int(name[len(base) :]) for name in similar_names if name != base]
return base + str(max(tailing_numbers) + 1) if tailing_numbers else base + "1" | 9c7093c59825862c1e15f8385e7207e277b6088d | 516,644 |
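# Example (hypothetical items): any objects with a .name attribute work.
from types import SimpleNamespace
items = [SimpleNamespace(name='layer'), SimpleNamespace(name='layer1')]
assert generate_unique_name('layer', items) == 'layer2'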
from datetime import datetime
def ymd2date(y, m, d):
"""
Convert (y, m, d) tuple to datetime.datetime object
where y, m, d are year, mont and date integers
Example (2013, 12, 20)
:param tpl: Tuple inf (y, m, d) format
:return: datetime.datetime object
Example:
>>> import dtime as d
>>> d.tdate_ymd((2013,12,20))
datetime.datetime(2013, 12, 20, 0, 0)
>>>
"""
#y, m, d = tpl
return datetime(y, m, d) | e4a2fa6a9534c10f887b8ae71e0eae0b13599ce4 | 138,372 |
def setup_with_context_manager(testcase, cm):
"""Use a contextmanager to setUp a test case."""
val = cm.__enter__()
testcase.addCleanup(cm.__exit__, None, None, None)
return val | ef60ebfe6ce00ea2a4784a61241dc22dd292b81d | 624,366 |
def make_get_request(client, endpoint):
"""
Makes a get request to the given endpoint from the client.
"""
return client.get(endpoint) | d64376e5e7b0ad42b3b093af48bfa97e3a137744 | 57,790 |
import re
def get_urls(string):
"""Get all url matching strings from string"""
regex = re.compile(r"""(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|
(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))""",
re.VERBOSE)
try:
url = regex.findall(string)
return [u[0] for u in url]
except TypeError:
return list() | 3b9f085e99f540d65d86e500f73c6c6607297b96 | 218,631 |
import inspect
def bound_args(func, args, kwargs):
"""Return a dictionary mapping parameter name to value.
Parameters
----------
func : Callable
this must be inspectable; mocks will require a spec
args : List
args list
kwargs : Dict
kwargs Dict
Returns
-------
Dict[str, Any] :
mapping of string name of function parameter to the value it would
be bound to
"""
sig = inspect.Signature.from_callable(func)
bound = sig.bind(*args, **kwargs)
return bound.arguments | 32abe2cdd621b52b0bb306830e542c92430962b9 | 454,704 |
def get_X_Y(lockin):
"""
Get X and Y (Measure)
args:
lockin (pyvisa.resources.gpib.GPIBInstrument): SRS830
returns:
(tuple): X, Y
"""
X, Y = lockin.query("SNAP? 1,2").split("\n")[0].split(",")
X, Y = float(X), float(Y)
return X, Y | 3d56151042682f86350a499ab639852fc6387887 | 15,198 |
def _adjmatType(adjMat):
"""(helper function) retruns <class 'int'> if the adjacency matrix is a (0,1)-matrix, and
returns <class 'list'> if the adjacency matrix contains edge weights, and returns None if
neither of the cases occurs.
Args:
adjMat (2D - nested - list): the adjacency matrix.
Returns:
(type) the type of the adjacency matrix as explained above.
"""
    checktype = {all(isinstance(entry, list) for entry in row) for row in adjMat}
    if len(checktype) == 1 and checktype.pop():
        return list
    checktype = {all(isinstance(entry, int) for entry in row) for row in adjMat}
    if len(checktype) == 1 and checktype.pop():
        return int
    return None | f57e3a97c6d759a564bcf0e546c67ee975caf4c7 | 669,943 |
def get_square_centre_crop(img):
"""
This function calculates a centered square crop bounding box
as big as the image's smaller dimension allows.
:param img: PIL Image object.
:return: Tuple with bounding box of the square crop.
"""
width, height = img.size
if width > height:
x_min = int((width - height) / 2)
x_max = int(width - (width - height) / 2)
y_min = 0
y_max = height
else:
x_min = 0
x_max = width
y_min = int((height - width) / 2)
y_max = int(height - (height - width) / 2)
return x_min, y_min, x_max, y_max | 141691ea388d783bd51a85f55b9a03519231196a | 264,801 |
def fn2Test(pStrings, s, outputFile):
"""
Function concatenates the strings in pStrings and s, in that order, and writes the result to
the output file. Returns s.
"""
with open(outputFile, 'w') as fH:
fH.write(" ".join(pStrings) + " " + s)
return s | 1c1e811edd80c461c5173774f7bb0014b4ad9f92 | 399,280 |
def meta2idx(current_idxs, meta_per_dataset):
"""
Map meta information of a dataset to the indexes of its samples in the full data.
Parameters
----------
current_idxs: list of list of integers
each sublist corresponds to list of index of cells in each dataset
    meta_per_dataset: list of tuples of strings
        each tuple in the form of ('animal_id', 'bregma') for each dataset (tuples, so they can serve as dict keys)
Returns
-------
meta2idx: dictionary
key as ['animal_id', 'bregma'], value as the start and end index + 1 of the samples in the full data
"""
meta2idx = dict.fromkeys(meta_per_dataset)
idx_start = 0
idx_end = 0
for i in range(len(current_idxs)):
current_idx = current_idxs[i]
current_meta = meta_per_dataset[i]
idx_end += len(current_idx)
meta2idx[current_meta] = [idx_start, idx_end]
idx_start = idx_end
return meta2idx | 9c74a6b778f04a8ff4035bf5601f3bc69cb81180 | 366,629 |
import re
def sentence_segmenter(paragr):
"""
Function to break a string 'paragraph' into a list of sentences based on
the following rules:
1. Look for terminal [.,?,!] followed by a space and [A-Z]
2. If ., check against abbreviation list ABBREV_LIST: Get the string
between the . and the previous blank, lower-case it, and see if it is in
the list. Also check for single-letter initials. If true, continue search
for terminal punctuation
3. Extend selection to balance (...) and "...". Reapply termination rules
4. Add to sentlist if the length of the string is between MIN_SENTLENGTH
and MAX_SENTLENGTH
5. Returns sentlist
Parameters
----------
paragr: String.
Content that will be split into constituent sentences.
Returns
-------
sentlist: List.
List of sentences.
"""
# this is relatively high because we are only looking for sentences that
# will have subject and object
MIN_SENTLENGTH = 100
MAX_SENTLENGTH = 512
# sentence termination pattern used in sentence_segmenter(paragr)
    terpat = re.compile(r'[.?!]\s+[A-Z"]')
# source: LbjNerTagger1.11.release/Data/KnownLists/known_title.lst from
# University of Illinois with editing
ABBREV_LIST = ['mrs.', 'ms.', 'mr.', 'dr.', 'gov.', 'sr.', 'rev.', 'r.n.',
'pres.', 'treas.', 'sect.', 'maj.', 'ph.d.', 'ed. psy.',
'proc.', 'fr.', 'asst.', 'p.f.c.', 'prof.', 'admr.',
'engr.', 'mgr.', 'supt.', 'admin.', 'assoc.', 'voc.',
'hon.', 'm.d.', 'dpty.', 'sec.', 'capt.', 'c.e.o.',
'c.f.o.', 'c.i.o.', 'c.o.o.', 'c.p.a.', 'c.n.a.', 'acct.',
'llc.', 'inc.', 'dir.', 'esq.', 'lt.', 'd.d.', 'ed.',
'revd.', 'psy.d.', 'v.p.', 'senr.', 'gen.', 'prov.',
'cmdr.', 'sgt.', 'sen.', 'col.', 'lieut.', 'cpl.', 'pfc.',
'k.p.h.', 'cent.', 'deg.', 'doz.', 'Fahr.', 'Cel.', 'F.',
'C.', 'K.', 'ft.', 'fur.', 'gal.', 'gr.', 'in.', 'kg.',
'km.', 'kw.', 'l.', 'lat.', 'lb.', 'lb per sq in.', 'long.',
                   'mg.', 'mm.', 'm.p.g.', 'm.p.h.', 'cc.', 'qr.', 'qt.', 'sq.',
't.', 'vol.', 'w.', 'wt.']
sentlist = []
# controls skipping over non-terminal conditions
searchstart = 0
terloc = terpat.search(paragr)
while terloc:
isok = True
if paragr[terloc.start()] == '.':
if (paragr[terloc.start() - 1].isupper() and
paragr[terloc.start() - 2] == ' '):
isok = False # single initials
else:
# check abbreviations
loc = paragr.rfind(' ', 0, terloc.start() - 1)
if loc > 0:
if paragr[loc + 1:terloc.start() + 1].lower() in ABBREV_LIST:
isok = False
if paragr[:terloc.start()].count('(') != paragr[:terloc.start()].count(')'):
isok = False
if paragr[:terloc.start()].count('"') % 2 != 0:
isok = False
if isok:
if (len(paragr[:terloc.start()]) > MIN_SENTLENGTH and
len(paragr[:terloc.start()]) < MAX_SENTLENGTH):
sentlist.append(paragr[:terloc.start() + 2])
paragr = paragr[terloc.end() - 1:]
searchstart = 0
else:
searchstart = terloc.start() + 2
terloc = terpat.search(paragr, searchstart)
# add final sentence
if (len(paragr) > MIN_SENTLENGTH and len(paragr) < MAX_SENTLENGTH):
sentlist.append(paragr)
return sentlist | 127467d8128ee9e47d325ca20905c508fbd17d18 | 143,479 |
def _PyComplex_FromCComplex(space, v):
"""Create a new Python complex number object from a C Py_complex value."""
return space.newcomplex(v.c_real, v.c_imag) | fd980468f0df584ea2672edab40b7108798cd535 | 469,804 |
import re
def template_pattern_match(results, template_pattern):
"""
    Return templates which match the template pattern. If no pattern is specified, all templates are returned.
    :param results - list of all templates
    :param template_pattern - optional wildcard (*) name of template to match
    :return: list of matching templates
"""
if template_pattern:
# convert wildcard "*" to regex format ".*"
pattern = template_pattern.replace("*", ".*")
compiled = re.compile(pattern)
return [template for template in results if compiled.match(template['name'])]
return results | 2ad1c1d6b8700f9c2ca4bf67afd8c501b53825b7 | 481,982 |
from typing import List
from typing import Tuple
def group(lst: List, n: int) -> List[Tuple]:
"""
Splits a list into tuples of length n
https://stackoverflow.com/a/15480610/36061
eg
    a = 'Moscow|city|London|city|Royston Vasey|village'
    list(group(a.split('|'), 2))
    gives
    [('Moscow', 'city'), ('London', 'city'), ('Royston Vasey', 'village')]
Parameters
----------
lst : a list
n : int, number of entries per tuple
Returns
------
list of tuples, each of length n
"""
r = []
for i in range(0, len(lst), n):
val = lst[i:i + n]
if len(val) == n:
r.append(tuple(val))
return r | 61de96a908434e63423529a55194f7a7fa027d54 | 591,917 |
def mod_family_accession(family_accession):
"""Reduces family accession to everything prior to '.'."""
return family_accession[:family_accession.index('.')] | ec033cd33fccd8fbd7a0c4407d706ca32fb87fb2 | 658,789 |
def bollinger_band_generator(dataframe_name, closing_price_column_name = 'close', bollinger_band_window = 20, num_standard_deviation = 2):
"""Creates Bollinger Band function
Args:
dataframe_name (dict): Single security dataframe containing at least closing prices
closing_price_column_name (str): Name of column in dataframe containing closing prices
bollinger_band_window (int): Desired timeframe window used for rolling calculations
num_standard_deviation (int): Desired number of standard deviations to calculate
Returns:
A dataframe of:
original data passed to function,
bollinger_band_middle (flt): Column of values for middle band,
bollinger_band_std (flt): Column of values to calculate standard deviation,
bollinger_band_upper (flt): Column of values for upper band,
bollinger_band_lower (flt): Column of values for lower band,
"""
# Calculate mean and standard deviation
dataframe_name['bollinger_band_middle'] = dataframe_name[closing_price_column_name].rolling(window=bollinger_band_window).mean()
dataframe_name['bollinger_band_std'] = dataframe_name[closing_price_column_name].rolling(window=bollinger_band_window).std()
# Calculate upper bollinger band and lower bollinger band
dataframe_name['bollinger_band_upper'] = dataframe_name['bollinger_band_middle'] + (dataframe_name['bollinger_band_std'] * num_standard_deviation)
dataframe_name['bollinger_band_lower'] = dataframe_name['bollinger_band_middle'] - (dataframe_name['bollinger_band_std'] * num_standard_deviation)
# Drop NaN values
dataframe_name.dropna(inplace=True)
# Return dataframe with features and target
return dataframe_name | 43d585c72338fb050f1f595b0659de689b4b8c18 | 470,328 |
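# A minimal usage sketch (assumed 'close' column and synthetic prices; the numpy/pandas imports are ours):
import numpy as np
import pandas as pd
prices = pd.DataFrame({'close': 100 + np.cumsum(np.random.default_rng(0).normal(0, 1, 60))})
bands = bollinger_band_generator(prices)
# bands now carries bollinger_band_middle / _std / _upper / _lower columns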
def levels_to_graph(levels):
"""
Convert an array of levels into a unicode string graph.
Each level in the levels array is an integer 0-3. Those levels will be
represented in the graph by 1-4 dots each.
The returned string will contain two levels per rune.
"""
if len(levels) % 2 == 1:
# Left pad uneven-length arrays with an empty column
levels = [-1] + levels
# From: http://stackoverflow.com/a/19177754/473672
unicodify = chr
try:
# Python 2
unicodify = unichr # type: ignore
except NameError:
# Python 3
pass
# https://en.wikipedia.org/wiki/Braille_Patterns#Identifying.2C_naming_and_ordering
LEFT_BAR = [0x00, 0x40, 0x44, 0x46, 0x47]
RIGHT_BAR = [0x00, 0x80, 0xA0, 0xB0, 0xB8]
graph = ""
for index in range(0, len(levels) - 1, 2):
left_level = levels[index] + 1
right_level = levels[index + 1] + 1
code = 0x2800 + LEFT_BAR[left_level] + RIGHT_BAR[right_level]
graph += unicodify(code)
return graph | 8994d6d6eea5c741f706b78e6754bab7850909b2 | 73,365 |
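# Example (levels are integers 0-3; each output rune encodes two columns):
print(levels_to_graph([0, 1, 2, 3]))  # prints two braille characters with bars rising left to right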
def delete_session_mock(mocker):
"""Mock for patching DELETE request"""
return mocker.patch("hydra_agent.agent.Session.delete") | 26c3073e2d51dec1f3dcd9bfee3db311fdcf6a66 | 76,284 |
import torch
def log_initial_entropy(log_probs: torch.Tensor):
"""
Calculates the log of the initial state posterior entropy from log_probs, with dimensions
-1: s_k, -2: s_1
:param log_probs:
:return:
"""
inits = log_probs.logsumexp(-1)
return ((-inits).log() + inits).logsumexp(-1) | d38a4359aef1f52117a33c806d04a26f4aee572e | 166,291 |
def enable_show_options(options):
"""Sets all show options to True."""
options.show_nagios = True
options.show_rrdfile = True
options.show_rawmetric = True
options.show_metric = True
options.show_skipped = True
return options | 6da8a5d4c1b381d5ab343c2035ff5262fbf69f23 | 127,027 |
def _bytes(packet):
"""
Returns a human-friendly representation of the bytes in a bytestring.
>>> _bytes('\x12\x34\x56')
'123456'
"""
return ''.join('%02x' % ord(c) for c in packet) | c3877c4081e191bcdcb95dd958beb0fd8356699e | 304,651 |
def profit_in_percentage(value, arg):
"""Profit Percentage with % sign
>>> profit_in_percentage(2, 1)
'100.0 %'
"""
    val = value - arg
    # multiply by a float so integer inputs still render as e.g. '100.0 %'
    return str(round(val * 100.0, 2)) + " %" | cf22a48b5d9b2819883b47d1db558a6c1a3f8707 | 212,813 |
def getDestBinFormatByOS(destOS):
"""
Return the binary format based on the platform name.
Returns 'elf' if nothing is found.
"""
if destOS == 'darwin':
return 'mac-o'
if destOS in ('win32', 'cygwin', 'uwin', 'msys', 'windows'):
return 'pe'
return 'elf' | 99665fdbc0b83cde6c04d68591602699997746b8 | 361,870 |
from typing import Union
def wmi_object_2_list_of_dict(wmi_objects, depth: int = 1, root: bool = True) -> Union[dict, list]:
"""
Return a WMI object as a list of dicts, accepts multiple depth
Example for Win32_LoggedOnUser().Antecedent.AccountType return is [{'Antecedent': {'AccountType': 512}}]
Hence
wmi_handle.Win32_LoggedOnUser()[0].Antecedent.AccountType is equivalent of
res = wmi_object_2_list_of_dict(wmi_handle.Win32_LoggedOnUser(), 2)
res[0]['Antecedent']['AccountType']
"""
result = []
if root is False:
dictionary = {}
try:
for attribute in wmi_objects.properties:
try:
if depth > 1:
dictionary[attribute] = wmi_object_2_list_of_dict(
getattr(wmi_objects, attribute), (depth - 1), root=False)
else:
dictionary[attribute] = getattr(wmi_objects, attribute)
except TypeError:
dictionary[attribute] = None
return dictionary
# wmi_object.properties might just be a string depending on the depth. Just return as is in that case
except AttributeError:
return wmi_objects
for wmi_object in wmi_objects:
dictionary = {}
for key in wmi_object.properties.keys():
if depth <= 1:
try:
dictionary[key] = wmi_object.Properties_(key).Value
except TypeError:
dictionary[key] = None
else:
# noinspection PyBroadException
try:
dictionary[key] = wmi_object_2_list_of_dict(getattr(wmi_object, key), (depth - 1), root=False)
# Some keys won't have attributes and trigger pywintypes.com_error and others. Need for bare except
except Exception:
pass
result.append(dictionary)
return result | 6b54a7b75ee41c7bd3cedb7b38a2e2fe5a44cb15 | 310,152 |
from typing import Sequence
def prepend_parts(prefix_parts: Sequence[str], parts: Sequence[str]) -> Sequence[str]:
"""
Prepend `prefix_parts` before given `parts` (unless they are rooted).
Both `parts` & `prefix_parts` must have been produced by :meth:`.json_path()`
so that any root(``""``) must come first, and must not be empty
(except `prefix-parts`).
**Examples:**
>>> prepend_parts(["prefix"], ["b"])
['prefix', 'b']
>>> prepend_parts(("", "prefix"), ["b"])
['', 'prefix', 'b']
>>> prepend_parts(["prefix ignored due to rooted"], ("", "b"))
('', 'b')
>>> prepend_parts([], ["b"])
['b']
>>> prepend_parts(["prefix irrelevant"], [])
Traceback (most recent call last):
IndexError: list index out of range
"""
if "" != parts[0]:
parts = [*prefix_parts, *parts]
return parts | ced228055e64b0ef7877856ce536ed8884ce6819 | 181,817 |
def get_weekdays(first_weekday=0):
"""Get weekdays as numbers [0..6], starting with first_weekday"""
return list(list(range(0, 7)) * 2)[first_weekday: first_weekday + 7] | 8773a6b35f1104ca298074d90e639919353d357d | 133,303 |
def HOUR(expression):
"""
Returns the hour portion of a date as a number between 0 and 23.
See https://docs.mongodb.com/manual/reference/operator/aggregation/hour/
for more details
:param expression: expression or variable of a Date, a Timestamp, or an ObjectID
:return: Aggregation operator
"""
return {'$hour': expression} | f34b9d923ee9416174d3d2871cface57e539363e | 476,289 |
def map_per_image(label, predictions):
"""Computes the precision score of one image.
Parameters
----------
label : string
The true label of the image
predictions : list
A list of predicted elements (order does matter, 5 predictions allowed per image)
Returns
-------
score : double
"""
try:
return 1 / (predictions[:5].index(label) + 1)
except ValueError:
return 0.0 | 5874d669d83143a6e4dd5f32d15fb58da70fe9aa | 564,595 |
def seqcomp(s1, s2):
"""
Compares 2 sequences and returns a value with
how many differents elements they have.
"""
p = len(s1)
for x,y in zip(s1, s2): # Walk through 2 sequences.
if x==y:
p -= 1
return p | 1ef0e7fddc751225ba2030804b360daf8bf10abd | 670,035 |
import six
def to_string(s, encoding='utf-8'):
"""
Accept unicode(py2) or bytes(py3)
Returns:
py2 type: str
py3 type: str
"""
if six.PY2:
return s.encode(encoding)
if isinstance(s, bytes):
return s.decode(encoding)
return s | 3b2c5ea3e1de9724cb705a79c54f3eb700b0f16d | 319,054 |
def min_length(word, thresh):
""" Predicate for the length of a word """
return len(word) >= thresh | 5f6eff3e06726294a64ef7705e3797ab188202f3 | 625,640 |
def distance(rgb1, rgb2):
"""Return quasi-distance in 3D space of RGB colors."""
return (rgb1[0]-rgb2[0])**2 + (rgb1[1]-rgb2[1])**2 + (rgb1[2]-rgb2[2])**2 | d2db02322d355a0c86c279e5fb38af4438d8d754 | 353,469 |
def combination(a: int, b: int) -> int:
"""
Choose b from a. a >= b
"""
    b = min(b, a - b)
    numerator = 1
    denominator = 1
    for i in range(b):
        numerator *= (a - i)
        denominator *= (b - i)
    # integer division is exact and avoids float precision loss for large inputs
    return numerator // denominator | 9e31462423b998b6a3f10b497ebe5a0a8fb0c0fb | 457,124 |