content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
import operator
def sort_values(values: list[tuple]) -> list[tuple]:
    """Return a new list of the tuples ordered ascending by each tuple's
    first element (the x value)."""
    return sorted(values, key=lambda item: item[0])
from datetime import datetime
def get_datetime_object(datetime_string):
    """Interpret an UltraSuite prompt date/time string as a datetime.

    :param datetime_string: string formatted as '%d/%m/%Y %H:%M:%S'
    :return: the corresponding datetime object
    """
    prompt_format = '%d/%m/%Y %H:%M:%S'
    return datetime.strptime(datetime_string, prompt_format)
def remove_dups(linked_list):
    """Remove nodes with duplicate values from a linked list.

    Walks the list once, remembering previously seen values in a set and
    unlinking any node whose value has been seen before.  O(n) time and
    space.  (The original used a dict with dummy True values as a set.)

    Parameters
    ----------
    linked_list : LinkedList
        Linked list to remove duplicates from.  Nodes must expose
        ``data`` and ``next_node`` attributes and values must be hashable.

    Returns
    -------
    linked_list : LinkedList
        The same list instance, with duplicate-valued nodes unlinked.
    """
    # Handle empty linked list.
    if linked_list.head is None:
        return linked_list
    current_node = linked_list.head
    # The head's value is seen by definition.
    seen = {current_node.data}
    while current_node.next_node is not None:
        next_node = current_node.next_node
        if next_node.data in seen:
            # Unlink the duplicate; stay on current_node because its
            # successor has just changed.
            current_node.next_node = next_node.next_node
        else:
            seen.add(next_node.data)
            current_node = next_node
    return linked_list
def call_reply(msg, call_return):
    """Build the message CtrlServer uses when replying to API calls.

    :param msg: Description of API call.
    :type msg: string
    :param call_return: Return value of API call.
    :type call_return: string
    :returns: call_reply dict, ready to be sent over the wire.
    """
    reply = {"type": "call_reply"}
    reply["msg"] = msg
    reply["call_return"] = call_return
    return reply
import re
def _get_git_ref_from_chartpress_based_version(version):
"""
Get a git ref from a chartpress set version of format like
1.2.3-beta.1.n123.h1234567, 1.2.3-n123.h1234567, or 1.2.3.
"""
tag_hash_split = re.split("[\.|-]n\d\d\d\.h", version)
if len(tag_hash_split) == 2:
return tag_hash_split[1]
else:
return tag_hash_split[0] | 4eb00071157403f905bbeafd1b32449cbb48848d | 546,939 |
def co2Stom(co2: float, params: dict) -> float:
    """
    CO2 concentration in the stomata.

    Parameters
    ----------
    co2 : float
        Carbon dioxide concentration of the air [μmol {CO2} mol-1 {air}]
    params : dict
        n_convCO2: float
            Conversion factor from the CO2-concentration of the greenhouse air

    Returns
    -------
    float
        CO2 concentration in the stomata [μmol {CO2} mol-1 {air}]
    """
    conversion_factor = params['n_convCO2']
    return conversion_factor * co2
import binascii
def hex(b) -> str:
    """
    Return the hexadecimal string representation of the binary data *b*.

    :param b: binary data
    :return: the hex string
    """
    hexlified = binascii.hexlify(b)
    return hexlified.decode('utf8')
import math
def computeRMS(y_true, y_pred, distances):
    """Compute the RMS label distance of a multiclass problem.

    Args:
        y_true: The ground truth labels
        y_pred: The predicted labels
        distances: A symmetric matrix defining the distance between two labels.
    """
    squared_sum = sum(
        distances[y_true[i]][y_pred[i]] ** 2 for i in range(len(y_true))
    )
    return math.sqrt(squared_sum / len(y_true))
def sg_access_map(ip_perm_list):
    """Convert the ip_perm_list into a dictionary where the
    key is the tuple (protocol, port-range) and the value is
    the set of CIDRs that are allowed access for that protocol/port(s).
    """
    # NOTE(review): ip_perm looks like a boto IpPermission object
    # (from_port/to_port/ip_protocol/grants attributes) -- confirm.
    access_map = {}
    for ip_perm in ip_perm_list:
        # Ports may be None (e.g. all-traffic rules); keep None rather
        # than coercing to int in that case.
        if ip_perm.from_port is not None:
            from_port = int(ip_perm.from_port)
        else:
            from_port = None
        if ip_perm.to_port is not None:
            to_port = int(ip_perm.to_port)
        else:
            to_port = None
        # Map key: (PROTOCOL, from_port, to_port), protocol upper-cased.
        target = (ip_perm.ip_protocol.upper(), from_port, to_port)
        for grant in ip_perm.grants:
            if grant.cidr_ip:
                if grant.cidr_ip.endswith("/32"):
                    # Single-host CIDR: record just the bare IP address.
                    grant_id = grant.cidr_ip.split('/', 1)[0]
                else:
                    grant_id = grant.cidr_ip
            elif grant.group_id:
                # Grant referencing another security group by id.
                grant_id = grant.group_id
            else:
                # Neither a CIDR nor a group id -- skip this grant.
                continue
            if target not in access_map:
                access_map[target] = set()
            access_map[target].add(grant_id)
    return access_map
def _get_neighbor_species(m):
"""
Args:
m: A dictionary representing a molecule entry in LIBE
Returns:
A list of tuple (atom species, bonded atom species),
where `bonded_atom_species` is a list.
Each tuple represents an atom and its bonds.
"""
res = [(s, list()) for s in m["species"]]
for a1, a2 in m["bonds"]:
s1 = m["species"][a1]
s2 = m["species"][a2]
res[a1][1].append(s2)
res[a2][1].append(s1)
return res | d3015d9efef6137515e9196cb3806a295546b89b | 423,462 |
def function_3_args(first_argument, second_argument, third_argument):
    """Return the three given arguments as a tuple, in order."""
    result = (first_argument, second_argument, third_argument)
    return result
import requests
def auth(gurl, beid, wskey):
    """Obtain a TDX bearer token and build the HTTP headers for later calls.

    Args:
        gurl (str): the API URL of the TeamDynamix environment
        beid (str): TeamDynamix environment BEID
        wskey (str): TeamDynamix environment Web Services Key

    Returns:
        dict: content-type and bearer-token headers for future API calls
        to TDX; the bearer token is only valid for this session.
    """
    payload = {"BEID": beid, "WebServicesKey": wskey}
    bearer = requests.post(url=gurl + "/api/auth/loginadmin", json=payload)
    token = str(bearer.content, "utf-8")
    return {
        "Content-Type": "application/json",
        "Authorization": "Bearer " + token,
    }
def extend_schema_field(field):
    """
    Decorator for the "field" kind.  Usable with ``SerializerMethodField``
    (annotate the actual method) or with custom ``serializers.Field``
    implementations.  If your custom serializer field base class is already
    the desired type, decoration is not necessary.  To override the
    discovered base class type, decorate your custom field class.  Always
    takes precedence over other mechanisms (e.g. type hints,
    auto-discovery).

    :param field: accepts a ``Serializer`` or :class:`~.types.OpenApiTypes`
    """
    def decorator(f):
        # Lazily create the annotation store on first decoration.
        if not hasattr(f, '_spectacular_annotation'):
            f._spectacular_annotation = {}
        f._spectacular_annotation['field'] = field
        return f

    return decorator
import torch
def box_corners_to_center(corners):
    """
    Arguments:
        corners (Tensor[N, 8]): boxes to be converted. They are
        expected to be in (x0, y0, ..., x3, y3) format, where the corners are sorted counterclockwise.
    Returns:
        b (Tensor[N, 6]): converted boxes in centered
        (x_c, y_c, w, h, c, s) format.
        * c, s: sin, cos before sigmoid
    """
    x0, y0, x1, y1, x2, y2, x3, y3 = corners.unbind(-1)
    # Center: midpoint of the (corner0, corner2) diagonal.
    x_c = (x0 + x2) / 2
    y_c = (y0 + y2) / 2
    # wsin/wcos are the components of the corner0 -> corner1 edge.
    # NOTE(review): hsin/hcos are SUMS (x0 + x1, y2 + y3), not
    # differences -- only meaningful if boxes are origin-centered or
    # this is intentional; confirm against the caller.
    wsin, wcos, hsin, hcos = (y1 - y0, x1 - x0, x0 + x1, y2 + y3)
    # Box rotation angle from the width edge; c/s are its cos/sin.
    theta = torch.atan2(wsin, wcos)
    c = torch.cos(theta)
    s = torch.sin(theta)
    # w and h are the Euclidean norms of the respective component pairs.
    b = [x_c, y_c, (wsin ** 2 + wcos ** 2).sqrt(), (hsin ** 2 + hcos ** 2).sqrt(), c, s]
    return torch.stack(b, dim=-1)
import torch
def autograd_range(name):
    """Create a named autograd range for pytorch autograd profiling
    (usable as a context manager around the profiled region)."""
    profiler = torch.autograd.profiler
    return profiler.record_function(name)
from typing import AnyStr
def want_str(s: AnyStr) -> str:
    """Return *s* as ``str``, decoding it first when it is ``bytes``."""
    return s.decode() if isinstance(s, bytes) else s
from pathlib import Path
import click
def validate_existing_dir(ctx, param, value: Path) -> Path:
    """
    Click callback that checks that the given path is a directory.

    Returns the path unchanged, or raises ``click.BadParameter``.
    """
    if value.is_dir():
        return value
    raise click.BadParameter(f"{value} is not a directory")
import re
def normalize_whitespace(text):
    """
    Normalize whitespace. Strips the string of whitespace at both ends,
    and replaces all consecutive whitespace characters (including tabs,
    newlines and non-breaking space) with a single space.
    """
    # Raw string fixes the invalid "\s" escape of the original literal;
    # a character class replaces the redundant alternation ("\s" already
    # matches "\xa0" in Unicode mode, kept explicitly for clarity).
    return re.sub(r'[\s\xa0]+', ' ', text).strip()
def overlap(indices1, indices2):
    """Return a boolean indicating whether two pairs of indices overlap.

    Each pair is treated as a closed interval (order-insensitive).
    """
    assert (len(indices1) == 2 and len(indices2) == 2)
    lo1, hi1 = sorted(indices1)
    lo2, hi2 = sorted(indices2)
    # Closed intervals overlap iff each starts before the other ends.
    return lo1 <= hi2 and lo2 <= hi1
def _out_of_str(n1: int, n2: int) -> str:
"""
:return A string in the format [n1 / n2], where "n1" and "n2" are the passed integers padded to the same length
"""
width = len(str(max(n1, n2)))
return '[%s / %s]' % (str(n1).rjust(width), str(n2).rjust(width)) | 70398ec476405122ff799e3924d6488cea372706 | 406,426 |
def compare_two_model_index(index_1, index_2):
    """
    Two model indexes are considered equal for testing purposes when their
    rows, columns and parents all match.
    """
    # Short-circuits left-to-right, like the original chained "and".
    return (index_1.row() == index_2.row()
            and index_1.column() == index_2.column()
            and index_1.parent() == index_2.parent())
from datetime import datetime
def GrADStime_to_datetime(gradsTime):
    """
    Convert a GrADS time string, e.g. 00:00z01Jan2000, to a datetime.

    Parameters
    ----------
    gradsTime : str
        GrADS time in str format, e.g. 00:00z01Jan2000.

    Returns
    ----------
    re : datetime
    """
    # The string's length determines which fields are present.
    formats = {
        15: "%H:%Mz%d%b%Y", 14: "%H:%Mz%d%b%Y",
        12: "%Hz%d%b%Y", 11: "%Hz%d%b%Y",
        9: "%d%b%Y", 8: "%d%b%Y",
        7: "%b%Y",
    }
    fmt = formats.get(len(gradsTime))
    if fmt is None:
        raise Exception('invalid length of GrADS date/time string')
    return datetime.strptime(gradsTime, fmt)
def attrib_basic(_sample, class_id):
    """
    Build the basic attribute dict for a data sample.

    Args:
        _sample: data sample (unused here)
        class_id: class label associated with the data
            (sometimes indicating from which subset the data are drawn)
    """
    attributes = {}
    attributes['class_id'] = class_id
    return attributes
import random
def create_random_graph(nodes):
    """Create a random directed graph with the given number of nodes.

    Entry [i][j] is a random even weight in [1, 100] when the edge i->j
    exists, and -1 otherwise (self loops never exist).
    """
    graph = []
    for src in range(nodes):
        row = []
        for dst in range(nodes):
            # One randint call per (src, dst) pair, same as the original.
            weight = random.randint(1, 100)
            if weight % 2 == 0 and src != dst:
                row.append(weight)
            else:
                row.append(-1)
        graph.append(row)
    return graph
def question_node_shallow_copy(node):
    """
    Create a shallow copy of a question tree node, normalising a missing
    'value_inputs' entry to an empty list.
    """
    return {
        'type': node['type'],
        'inputs': node['inputs'],
        'value_inputs': node.get('value_inputs', []),
    }
def apply_update(content: dict, scenario_data: dict) -> dict:
    """
    Sample update that looks at each ADT dataset and counts the number of
    entities/relations of a given type.

    :param content: Contains all datasets and parameters downloaded from the API
    :param scenario_data: The data associated to your scenario downloaded from the API
    :return: dict (str -> int) containing the number of entities/relations for each type in all ADT datasets
    """
    ret = dict()
    for dataset in content['datasets'].values():
        # The original used `is not "adt"`, which compares string
        # identity (implementation-dependent); use equality instead.
        if dataset['type'] != "adt":
            continue
        data = dataset['content']
        for item_type, list_items in data.items():
            ret.setdefault(item_type, 0)
            ret[item_type] += len(list_items)
    return ret
def pos_to_float(pos):
    """
    Convert a lat or long string such as "45 30.0N" to a signed float.

    :param pos: A lat or a long, "<degrees> <minutes><hemisphere>"
    :type pos: str
    :return: The float version (N and E positive, S and W negative)
    :rtype: float
    """
    # N & E are positive.
    signs = {'N': 1.0, 'S': -1.0, 'E': 1.0, 'W': -1.0}
    parts = pos.split(' ')
    degrees = float(parts[0]) + float(parts[1][:-1]) / 60
    # Multiply by the hemisphere sign instead of the original fragile
    # round-trip through string concatenation and float() re-parsing.
    return signs[pos[-1]] * degrees
def get_status(runs):
    """Get the most recent status of the workflow for the current PR.

    Parameters
    ----------
    runs : list
        List of comment objects sorted by the time of creation in
        decreasing order.

    Returns
    -------
    status : string
        The most recent status of the workflow.
        Can be 'success', 'failure' or 'in-progress'.
    """
    for run in runs:
        body = run['body']
        if "Status: " not in body:
            continue
        if "Status: skipped" in body:
            continue
        if "Status: failure" in body:
            return 'failure'
        if "Status: success" in body:
            return 'success'
        # A status line we don't recognise means the run is still going.
        return 'in-progress'
    # No status comment found at all: treat as success.
    return 'success'
def is_file_available(data, filename):
    """Return True if the given file is available in the input data file, otherwise return False"""
    # "True if x else False" was redundant: `in` already yields a bool.
    return filename in data
def reduce_aet_if_dry(aet, wat_lev, fc):
    """ Reduce actual evapotranspiration if the soil is dry.

    If the water level in a cell is less than 0.7*fc, the rate of
    evapo-transpiration is reduced by a factor which is 1 when
    wat_lev = 0.7*fc and decreases linearly to 0 at wat_lev = 0, i.e.
    where wat_lev < 0.7*fc, a correction factor of wat_lev/(0.7*fc)
    is applied to the aet grid.

    Args:
        aet: "Raw" actual evapotranspiration grid.
        wat_lev: Water level grid
        fc: Soil field capacity grid.

    Returns:
        Array (modified AET grid with AET reduced where necessary).
    """
    threshold = 0.7 * fc
    # Boolean mask of cells that need correcting.
    needs_correction = wat_lev < threshold
    # (correction factor - 1) everywhere; multiplying by the mask zeroes
    # it for unaffected cells, and adding 1 restores factors of 1 there.
    correction_minus_one = (wat_lev / threshold) - 1
    correction = (needs_correction * correction_minus_one) + 1
    return aet * correction
def remove_dupes(car_list):
    """Remove dicts with duplicate 'title' values, keeping the first
    occurrence of each title."""
    seen_titles = set()
    unique_cars = []
    for car in car_list:
        title = car['title']
        if title in seen_titles:
            continue
        seen_titles.add(title)
        unique_cars.append(car)
    return unique_cars
def time_str_from_datetime_str(date_string: str) -> str:
    """
    Extract the time part of a datetime string.

    Example:
        2019-12-03T09:00:00.12345 will be converted to:
        09:00:00.12345

    :param date_string: datetime string containing a 'T' separator
    :return: the part after the 'T'
    """
    parts = date_string.split('T')
    return parts[1]
def make_dic(doc_id=None, root=None, date=None,
             author=None, text=None, favorite=None):
    """Assemble document fields into a dictionary and return it.

    Keyword Arguments:
        doc_id {int} -- document ID (default: {None})
        root {string} -- xml file name (default: {None})
        date {string} -- date of document (default: {None})
        author {string} -- author of document (default: {None})
        text {string} -- text of document (default: {None})
        favorite {string} -- favorite of document (default: {None})

    Returns:
        dictionary -- document data in dictionary
    """
    document = dict(
        docID=doc_id,
        root=root,
        date=date,
        author=author,
        text=text,
        favorite=favorite,
    )
    return document
def get_short_labeler(prefix):
    """
    Return a labeling function that prepends *prefix* to a two-digit,
    zero-padded assignment index.
    """
    def labeler(index):
        return "{} {:02d}".format(prefix, index)
    return labeler
from typing import Tuple
def get_namespace_and_name_from_role(role: str) -> Tuple[str, str]:
    """
    Extract namespace and name from a role string.

    Args:
        role (str): role in the form {{role_namespace}}.{{role_name}}

    Returns:
        Tuple[str, str]: namespace, name
    """
    parts = role.split(".")
    return parts[0], parts[1]
def DSER(results):
    """DA Segmentation Rate: percentage of reference segments that were
    segmented incorrectly (incorrect / total * 100).
    """
    assert len(results) == 2
    correct_segs, total_segs = results
    return (total_segs - correct_segs) / total_segs * 100
def get_filenames_in_folder(folder):
    """
    Return the list of filenames (stem + suffix) of files directly in
    *folder* (a Path-like object).
    """
    return [entry.stem + entry.suffix
            for entry in folder.iterdir()
            if entry.is_file()]
def get_gps_data(content):
    """ Parse latitude and longitude out of raw html.

    :param content: raw html
    :return: list with geographical coordinates or None if can't find
    :rtype: list
    """
    try:
        inside_call = str(content).split('showMapDialog(')[1].split(')')[0]
        return inside_call.split(', ')[:2]
    except IndexError:
        return None
def raise_diacritic(some_diacritic: str) -> str:
    """
    When given the below-base ring diacritic (U+0325), return the
    equivalent above-base ring (U+030A); any other diacritic is
    returned unchanged.
    """
    # Escapes keep the combining characters visible in source.
    return "\u030a" if some_diacritic == "\u0325" else some_diacritic
def context_mod_args(parser, dsparsification_reg_type='l1',
                     dsparsification_reg_strength=1., dcontext_mod_init='constant'):
    """This is a helper function of function :func:`parse_cmd_arguments` to add
    an argument group for options regarding the context modulation.

    Arguments specified in this function:
        - `use_context_mod`
        - `no_context_mod_outputs`
        - `context_mod_inputs`
        - `context_mod_post_activation`
        - `context_mod_last_step`
        - `checkpoint_context_mod`
        - `offset_gains`
        - `dont_softplus_gains`
        - `sparsify_context_mod`
        - `sparsification_reg_strength`

    Args:
        parser: Object of class :class:`argparse.ArgumentParser`.
        dsparsification_reg_type (str): Default value of option
            `sparsification_reg_type`.
        dsparsification_reg_strength (float): Default value of option
            `sparsification_reg_strength`.
        dcontext_mod_init (str): Default value of option `context_mod_init`.

    Returns:
        The created argument group, in case more options should be added.
    """
    heading = 'Context modulation options'
    agroup = parser.add_argument_group(heading)
    # Here we don't differentiate between the usage of hnet and context-mod,
    # since we assume we only use the context-mod case.
    agroup.add_argument('--use_context_mod', action='store_true',
                        help='If True, hnet-based context-mod will be used.')
    agroup.add_argument('--no_context_mod_outputs', action='store_true',
                        help='If True, context modulation will not be ' +
                             'applied to the output layer.')
    agroup.add_argument('--context_mod_inputs', action='store_true',
                        help='If True, context modulation will be applied ' +
                             'to the input layer.')
    agroup.add_argument('--context_mod_post_activation', action='store_true',
                        help='If True, context modulation will be applied ' +
                             'after computing the activation function, ' +
                             'else, it will be applied before.')
    agroup.add_argument('--context_mod_last_step', action='store_true',
                        help='If True, context modulation will only be ' +
                             'applied in the last timestep of the sequence. ' +
                             'Else, it is applied at every timestep.')
    agroup.add_argument('--checkpoint_context_mod', action='store_true',
                        help='Train context-modulation without a ' +
                             'hypernetwork. Instead, context-mod weights ' +
                             'will be part of the main network and will be ' +
                             'checkpointed after every task (linear memory ' +
                             'growth).')
    # Only meaningful together with --checkpoint_context_mod (see help text).
    agroup.add_argument('--context_mod_init', type=str,
                        default=dcontext_mod_init,
                        help='What method to use to initialize context-' +
                             'modulation weights. Note, this option is only ' +
                             'applicable in combination with ' +
                             '"checkpoint_context_mod". Reinitialization ' +
                             'will be performed after every task. If ' +
                             '"sparse" is used, then the option ' +
                             '"mask_fraction" is reused to determine the ' +
                             'sparsity level. Default: %(default)s.',
                        choices=['constant', 'normal', 'uniform', 'sparse'])
    # NOTE(review): help text says this requires "dont_softplus_gain" to be
    # set, but that dependency is not enforced here -- confirm elsewhere.
    agroup.add_argument('--offset_gains', action='store_true',
                        help='If this option is activated, the modulatory ' +
                             'gains produced by the hypernetwork will be ' +
                             'shifted by 1. Note, requires ' +
                             '"dont_softplus_gain" to be set.')
    agroup.add_argument('--dont_softplus_gains', action='store_true',
                        help='If this option is activated, the modulatory ' +
                             'gains produced by the hypernetwork will not ' +
                             'be send through a softplus. Therefore, they ' +
                             'might be positive and negative.')
    agroup.add_argument('--context_mod_per_ts', action='store_true',
                        help='If True, a different context-mod pattern per ' +
                             'timestep will be learned.')
    agroup.add_argument('--sparsify_context_mod', action='store_true',
                        help='If this option is activated, the modulatory ' +
                             'gains are pushed towards zero to sparsify the ' +
                             'backpropagation through neurons in order to ' +
                             'restrict the absolute model capacity used per ' +
                             'task as well to ensure that there are ' +
                             '"unimportant" weights reserved for future tasks.')
    agroup.add_argument('--sparsification_reg_strength', type=float,
                        default=dsparsification_reg_strength,
                        help='The strength of the gain sparsification ' +
                             'regularizer. Default: %(default)s.')
    agroup.add_argument('--sparsification_reg_type', type=str,
                        default=dsparsification_reg_type,
                        help='The type of regularizer to be used in order to ' +
                             'obtain sparse gain patterns. ' +
                             'Default: %(default)s.',
                        choices=['l1', 'log'])
    return agroup
def slice_sparse(mat, slice0, slice1):
    """
    Slice the block mat[slice0, slice1] out of a sparse matrix.

    Scipy sparse matrices do not support this two-mask slicing directly
    (unlike numpy), so the column mask is applied in CSC form and the row
    mask in CSR form.

    Args:
        mat (sparse): of any form
        slice0 (bool,array): mask to apply on axis 0 (rows)
        slice1 (bool,array): mask to apply on axis 1 (columns)

    Returns:
        CSR matrix
    """
    cols_selected = mat.tocsc()[:, slice1].tocsr()
    return cols_selected[slice0, :]
def process_img_paths(input):
    """Processes image paths.

    If one path is entered, return a list of length 1 containing it.
    If multiple (comma-separated) paths are entered, return a list
    containing all paths.

    Args:
        input (str): string containing image path(s)

    Returns:
        paths (list): list containing the image path, or separated image paths
    """
    # Filter out every empty entry: the original `paths.remove("")` only
    # removed the first one, so inputs like "a,,b," kept stray ''.
    cleaned = input.replace(" ", "")
    return [path for path in cleaned.split(",") if path]
import torch
def _distance_matrix(x: torch.Tensor, eps_factor=1e2) -> torch.Tensor:
"""Naive dense distance matrix calculation.
Parameters
----------
x : torch.Tensor
Dense [n, d] tensor containing the node attributes/embeddings.
eps_factor : [type], optional
Factor to be multiplied by `torch.finfo(x.dtype).eps` for "safe" sqrt, by default 1e2.
Returns
-------
torch.Tensor
n by n distance matrix.
"""
x_norm = (x ** 2).sum(1).view(-1, 1)
x_norm_t = x_norm.transpose(0, 1)
squared = x_norm + x_norm_t - (2 * (x @ x.transpose(0, 1)))
# For "save" sqrt
eps = eps_factor * torch.finfo(x.dtype).eps
return torch.sqrt(torch.abs(squared) + eps) | bafe4c182101bbf161745aba11de549412bbbbfa | 634,449 |
def dict_failed_keys(table):
    """Return the sorted tuple of all failed keys in a dict match
    comparison result table (rows of 5-tuples with key at index 1 and
    result at index 2)."""
    failed = [key for _, key, result, _, _ in table
              if key and result == 'Failed']
    return tuple(sorted(failed))
def get_str_comparison_term(args, db_name, arg_name=None):
    """
    Return an SQL WHERE term for the named argument.
    :param str db_name: database column name
    :param str arg_name: command-line argument name
    :return: (sql_term, value).
    :rtype: (str, str)
    """
    arg_name = arg_name or db_name
    val = getattr(args, arg_name)
    # Per-argument exact-match flag; NOTE(review): assumes `args` carries
    # `_eq_<arg>`, `eq`, `like`, `regex` and `exact` attributes -- confirm
    # against the CLI parser that builds `args`.
    eq = getattr(args, '_eq_' + arg_name, False)
    if eq or args.eq:
        op = '=='
        wildcard = ''
    elif args.like:
        op = 'LIKE'
        wildcard = '%'
    elif args.regex:
        op = 'REGEXP'
        wildcard = '.*'
    else:
        # Default: SQLite glob matching.
        op = 'glob'
        wildcard = '*'
    where = ('{} {} ?'.format(db_name, op))
    # Unless an exact match was requested, surround the value with the
    # operator's wildcard so it matches anywhere in the column value.
    val = val if args.exact else '{1}{0}{1}'.format(val, wildcard)
    if args.regex:
        # Anchor the regex at the end; the '.*' above handles the start.
        val += '$'
    return (where, val)
def checkdms(dms):
    """Verify a sexagesimal "D:M:S" string; returns True if valid, False if not
    """
    assert isinstance(dms, str)
    parts = dms.split(':')
    try:
        for i in range(3):
            float(parts[i])
    except (ValueError, IndexError):
        # The original bare `except:` also swallowed KeyboardInterrupt
        # and SystemExit; only a missing field (IndexError) or a
        # non-numeric field (ValueError) means "invalid".
        return False
    return True
def find_duplicated(list1):
    """
    Returns a list of the values that occur at least twice in *list1*,
    in order of first appearance.  Elements must be hashable.
    """
    from collections import Counter
    # Counter makes this O(n) instead of the original O(n^2)
    # (list.count plus a membership scan inside the loop); Counter
    # preserves first-occurrence order, matching the original output.
    counts = Counter(list1)
    return [value for value, count in counts.items() if count >= 2]
def beta_exp(a, b):
    """
    Expected value of the Beta(a, b) distribution.
    https://en.wikipedia.org/wiki/Beta_distribution
    """
    total = a + b
    return a / total
def show_prop(df, target_col='target'):
    """Show proportion of classes in target column"""
    # Group by the target column, count rows per class, derive each
    # class's share of the total, then render counts and percentages via
    # the pandas Styler (note: this returns a Styler, not a DataFrame).
    return df \
        .groupby(target_col) \
        .agg(num=(target_col, 'size')) \
        .assign(prop=lambda x: x.num / x.num.sum()) \
        .style \
        .format(dict(
            num='{:,.0f}',
            prop='{:.2%}'))
def duns_screener(duns):
    """
    Normalise a DUNS number string to the DUNS+4 format.

    Common DUNS errors:
    * 9 digits -> append '0000'
    * 8 digits (leading zero removed) -> restore leading zero + '0000' trailing
    * anything else -> the string 'error'
    """
    length = len(duns)
    if length == 9:
        return duns + '0000'
    if length == 8:
        return '0' + duns + '0000'
    return 'error'
def _get_server_info(metadata=None, created=None):
"""
Creates a fake server config to be used when testing creating servers
(either as the config to use when creating, or as the config to return as
a response).
:param ``dict`` metadata: metadata to include in the server config
:param ``created``: this is only used in server responses, but gives an
extra field to distinguish one server config from another
"""
config = {
'name': 'abcd',
'imageRef': '123',
'flavorRef': 'xyz',
'metadata': metadata or {}
}
if created is not None:
config['created'] = created
return config | 3d96678702dd88173ed069f81e7eda84b8dff358 | 550,369 |
import math
def api_psv_release_rate(P1, Pback, k, CD, T1, Z, MW, area):
    """
    PSV vapour relief rate calculated according to API 520 Part I 2014
    Eq. 5, 9, 15, 18.

    Parameters
    ----------
    P1 : float
        Upstream pressure
    Pback : float
        Downstream / backpressure
    k : float
        Ideal gas k (Cp/Cv)
    CD : float
        Coefficient of discharge
    T1 : float
        Upstream temperature
    Z : float
        Compressibility
    MW : float
        Molecular weight of the gas relieved
    area : float
        PSV orifice area

    Returns
    ----------
    : float
        Relief rate / mass flow
    """
    # Convert to the mixed unit system used by the API 520 equations:
    # pressures kPa, area mm2, molecular weight g/mol.
    P1 = P1 / 1000
    Pback = Pback / 1000
    area = area * 1e6
    MW = MW * 1000
    C = 0.03948 * (k * (2 / (k + 1)) ** ((k + 1) / (k - 1))) ** 0.5
    if P1 / Pback > ((k + 1) / 2) ** ((k) / (k - 1)):
        # Critical (choked) flow.
        w = CD * area * C * P1 / math.sqrt(T1 * Z / MW)
    else:
        # Subcritical flow.  (The stray debug print of f2 was removed.)
        r = Pback / P1
        f2 = ((k / (k - 1)) * r ** (2 / k) * (1 - r ** ((k - 1) / k)) / (1 - r)) ** 0.5
        w = CD * area * f2 / (T1 * Z / (MW * P1 * (P1 - Pback))) ** 0.5 / 17.9
    return w / 3600
def count_encrypted_layers(encrypted_layers: dict):
    """
    Count homomorphic-encryption (HE) encrypted layers/variables.

    :param encrypted_layers: maps layer name -> truthy when encrypted
    :return: tuple (number of encrypted layers, total number of layers)
    """
    n_total = len(encrypted_layers)
    # Iterate values directly instead of indexing by key in a loop.
    n_encrypted = sum(1 for flag in encrypted_layers.values() if flag)
    return n_encrypted, n_total
def collect_runtime_flags(properties):
    """Return a sorted list of the unique runtime flags used by the
    properties (falsy flags are skipped)."""
    flags = set()
    for prop in properties:
        flag = prop['runtime_flag']
        if flag:
            flags.add(flag)
    return sorted(flags)
def angle_to_azimuth_scalar(angle):
    """
    Helper function for angle_to_azimuth, for scalar inputs.

    Converts an angle in unit-circle coordinates to an azimuth in compass
    coordinates, wrapped into [0, 360].
    """
    azimuth = 90 - angle
    # Wrap with loops rather than modulo so that an exact 360 stays 360,
    # matching the original behaviour.
    while azimuth < 0:
        azimuth += 360
    while azimuth > 360:
        azimuth -= 360
    return azimuth
import torch
def denormalize(tensor, mean, std):
    """Undo channel-wise normalisation on a batch of images.

    Args:
        tensor: 4D image tensor (N, 3, H, W)
        mean: per-channel dataset mean (length 3)
        std: per-channel dataset standard deviation (length 3)

    Returns:
        The denormalised tensor (tensor * std + mean).

    Raises:
        TypeError: when *tensor* is not 4-dimensional.
    """
    if tensor.ndimension() != 4:
        raise TypeError('tensor should be 4D')
    channel_shape = (1, 3, 1, 1)
    mean_t = torch.FloatTensor(mean).view(*channel_shape).expand_as(tensor).to(tensor.device)
    std_t = torch.FloatTensor(std).view(*channel_shape).expand_as(tensor).to(tensor.device)
    return tensor.mul(std_t).add(mean_t)
import posixpath
def get_fuzzer_benchmark_covered_regions_filestore_path(
        fuzzer: str, benchmark: str, exp_filestore_path: str) -> str:
    """Return the path of the covered-regions json file in the filestore
    for |fuzzer| on |benchmark|."""
    parts = (exp_filestore_path, 'coverage', 'data', benchmark, fuzzer,
             'covered_regions.json')
    return posixpath.join(*parts)
def open_file(filename):
    """Return the non-empty lines of a text file, each stripped of
    surrounding whitespace (including the trailing newline)."""
    with open(filename, 'r') as infile:
        return [line.strip() for line in infile if line.strip()]
import math
def calculateDeterminant(termA, termB, termC):
    """
    Return the square root of the discriminant b^2 - 4ac, or -1 when the
    discriminant is negative.
    """
    discriminant = math.pow(termB, 2) - (4 * termA * termC)
    if discriminant < 0:
        return -1
    return math.sqrt(discriminant)
import math
def get_conv_output_sizes(lengths, num_layers=4):
    """
    Return the sequence lengths after the conv block: *num_layers* rounds
    of conv1d (kernel size 3) followed by maxpool1d (kernel size 3,
    stride 2).
    """
    kernel_size = 3
    max_pool_stride = 2
    out = list(lengths)
    for _ in range(num_layers):
        out = [
            math.floor(((length - (kernel_size - 1)) - kernel_size + max_pool_stride)
                       / max_pool_stride)
            for length in out
        ]
    return out
from typing import Dict
def prettify_wildfire_rule(rule: Dict) -> Dict:
    """
    Args:
        rule: The profile security rule to prettify.

    Returns: The rule dict compatible with our standard.
    """
    pretty_rule = {'Name': rule['@name']}
    application = rule.get('application')
    if isinstance(application, dict) and 'member' in application:
        pretty_rule['Application'] = application['member']
    file_type = rule.get('file-type')
    if isinstance(file_type, dict) and 'member' in file_type:
        pretty_rule['File-type'] = file_type['member']
    if 'analysis' in rule:
        pretty_rule['Analysis'] = rule['analysis']
    return pretty_rule
def is_prime(number: int):
    """
    Determine whether *number* is prime.

    Returns:
        True if prime, False otherwise.
    """
    # Numbers below 2 (0, 1 and negatives) are not prime; the original
    # wrongly reported them as prime because the loop never ran.
    if number < 2:
        return False
    for candidate in range(2, (number // 2) + 1):
        if number % candidate == 0:
            return False
    return True
def nested_update(this, that):
    """Recursively merge *that* into *this* (an in-place nested dict.update).

    For matching keys, values from *that* win, with one exception: a nested
    dict in *this* is never replaced wholesale by a non-dict value — when
    both sides hold dicts they are merged recursively instead.

    Examples
    --------
    >>> nested_update({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
    {'a': 1, 'b': 3, 'c': 4}
    >>> nested_update(
    ...     {'x': {'a': 1, 'b': 2}, 'y': 5, 'z': 6},
    ...     {'x': {'b': 3, 'c': 4}, 'z': 7, '0': 8},
    ... )
    {'x': {'a': 1, 'b': 3, 'c': 4}, 'y': 5, 'z': 7, '0': 8}
    """
    for key in this:
        if key not in that:
            continue
        if isinstance(this[key], dict):
            if isinstance(that[key], dict):
                nested_update(this[key], that[key])
            # A non-dict value in `that` does not overwrite a nested dict.
        else:
            this[key] = that[key]
    for key, value in that.items():
        this.setdefault(key, value)
    return this
def extract_query(message, cmdlength=1):
    """Strip the command (and group trigger, if present) from a message body
    and return the remaining text."""
    tokens = message.getBody().split()
    # Group messages (those with a participant) carry one extra leading token.
    skip = cmdlength + 1 if message.participant else cmdlength
    return ' '.join(tokens[skip:])
def run_sixs_job(args):
    """Run 6S for a specific wavelength.

    Parameters
    ----------
    args : tuple
        First element is an initialized SixS instance; any further elements
        are passed through untouched.

    Returns
    -------
    tuple
        (correction-coefficient dict, adjacency-correction parameter list,
        the pass-through arguments)
    """
    sixs_obj = args[0]
    passthrough = args[1:]
    sixs_obj.run()
    outputs = sixs_obj.outputs
    coeffs = {
        'xa': outputs.coef_xa,  # inverse of transmitance
        'xb': outputs.coef_xb,  # scattering term of the atmosphere
        'xc': outputs.coef_xc,  # reflectance of atmosphere for isotropic light (albedo)
    }
    adjacency = [
        sixs_obj.geometry.view_z,
        outputs.optical_depth_total.total,
        outputs.transmittance_global_gas.upward,
        outputs.transmittance_total_scattering.upward,
    ]
    return (coeffs, adjacency, passthrough)
def flatten(items):
    """Recursively flatten nested lists into one flat list.

    Only ``list`` instances are descended into; any other element
    (including tuples and dicts) is kept as-is. The original docstring
    claimed dict/tuple support the code never had.
    """
    flat = []
    for element in items:
        if isinstance(element, list):
            flat.extend(flatten(element))
        else:
            flat.append(element)
    return flat
def _is_passing_grade(course_grade):
"""
Check if the grade is a passing grade
"""
if course_grade:
return course_grade.passed
return False | 0c25fca3629433a2ae1956cd0e14a212b2881639 | 535,004 |
def get_non_link_props(props):
    """Return subset of iterable props that are not links (contain no '.')."""
    non_links = []
    for candidate in props:
        if '.' not in candidate:
            non_links.append(candidate)
    return non_links
def update_bookmark(connection, bookmark_id, body, error_msg=None):
    """Update a bookmark.

    Args:
        connection: MicroStrategy REST API connection object
        bookmark_id (string): Bookmark ID
        body: JSON-formatted information used to format the document
        error_msg (string, optional): Custom Error Message for Error Handling

    Returns:
        Complete HTTP response object.
    """
    endpoint = f"{connection.base_url}/api/bookmarks/{bookmark_id}"
    return connection.put(url=endpoint, json=body)
import mimetypes
def guess_mimetype(filename: str, fallback: bool = False) -> str:
    """Guess a mimetype based on a filename.

    Args:
        filename: The filename to check.
        fallback: Fall back to application/octet-stream if unknown.

    Raises:
        ValueError: If the type is unknown and *fallback* is False.
    """
    guessed, _ = mimetypes.guess_type(filename)
    if guessed is not None:
        return guessed
    if fallback:
        return 'application/octet-stream'
    raise ValueError("Got None mimetype for {}".format(filename))
from datetime import datetime
import click
def validate_optional_timestamp(ctx, param, value):
    """Click callback: ensure *value* is a valid UTC timestamp string.

    Parses ``%Y-%m-%dT%H:%M:%SZ`` and returns the parsed datetime with the
    time-of-day zeroed out; falsy values pass through unchanged.
    """
    if not value:
        return value
    try:
        parsed = datetime.strptime(value, "%Y-%m-%dT%H:%M:%SZ")
    except ValueError:
        raise click.BadParameter(
            "{} must be a valid utc timestamp formatted as `%Y-%m-%dT%H:%M:%SZ` "
            "e.g. `2020-12-31T00:00:00Z`".format(param.name),
            param=param,
        )
    return parsed.replace(hour=0, minute=0, second=0)
def get_cart_location(env, screen_width=600):
    """
    Map the cart's x position to a pixel column on the rendered screen.

    :param env: environment, in this file means cartpole-v0
    :param screen_width: screen width defined in gym
    :return: middle position of cart, in pixels
    """
    pixels_per_unit = screen_width / (env.x_threshold * 2)
    return int(env.state[0] * pixels_per_unit + screen_width / 2.0)
def median(arr):
    """Determine the median of a list of numbers.

    Args:
        arr: a list of ints and/or floats

    Returns:
        int or float: the median value, or None for an empty list
    """
    if not arr:
        return None
    ordered = sorted(arr)
    count = len(ordered)
    mid = count // 2
    if count % 2:
        return ordered[mid]
    # Even count: average the two central values.
    return (ordered[mid - 1] + ordered[mid]) / 2
def allocate_available_excess(region):
    """
    Allocate available excess capital (if any), mutating and returning *region*.
    A revenue surplus becomes cross-subsidy; a shortfall becomes deficit.
    """
    surplus = region['total_mno_revenue'] - region['total_mno_cost']
    if surplus > 0:
        region['available_cross_subsidy'] = surplus
        region['deficit'] = 0
    else:
        region['available_cross_subsidy'] = 0
        region['deficit'] = -surplus
    return region
def GetIdFromArgs(args):
    """Returns the id to be used for constructing resource paths.

    Args:
        args: command line args.

    Returns:
        The id to be used.
    """
    # First truthy flag wins; projects is the default.
    resource_ids = (
        ('organization', 'organizationsId'),
        ('folder', 'foldersId'),
        ('billing_account', 'billingAccountsId'),
    )
    for flag, resource_id in resource_ids:
        if getattr(args, flag):
            return resource_id
    return 'projectsId'
def number_equal(element, value, score):
    """Return *score* when *element* equals the config *value*, else None.

    Args:
        element (float): Usually vcf record
        value (float): Config value
        score (integer): config score

    Returns:
        The score on a match, otherwise None.
    """
    return score if element == value else None
import torch
def position_embeddings(position_tensor: torch.LongTensor, b: int, E: int,
                        dtype: torch.dtype = torch.float) \
        -> torch.Tensor:
    """
    Returns the embeddings for the b-th Transformer block.

    Parameters
    ----------
    position_tensor: torch.LongTensor
        (L, N) where L is the sequence length, N is the batch size.
    b: int
        The index of the Transformer block, added as a positional offset.
    E: int
        The embedding dimension.
    dtype: torch.dtype

    Returns
    -------
    embeddings: torch.Tensor
        (L, N, E);
        embeddings[i, n, j] =
            sin((position_tensor[i, n] + b) / (10000 ** (2 * (j // 2) / E)))
        Note: sin is used for both even and odd channels (no cos), matching
        the original implementation.
    """
    device = position_tensor.device
    seq_len, batch = position_tensor.shape
    # Channels 2j and 2j+1 share the frequency divisor 10000**(2j/E).
    exponents = torch.arange(0, E) // 2
    divisor = torch.pow(10000, 2 * exponents.to(dtype=dtype) / E).to(device=device)
    # Broadcast positions to (L, N, E), shift by the block index, then scale.
    # (The original had a dead `embeddings = embeddings` statement and a
    # redundant .to(dtype).to(device) chain, removed here.)
    embeddings = position_tensor.view(seq_len, batch, 1).expand(seq_len, batch, E) \
        .to(device=device, dtype=dtype)
    embeddings = (embeddings + b) / divisor
    return torch.sin(embeddings)
import jinja2
def render(data, template):
    """Render a Jinja2 template with *data*.

    Args:
        data(obj): dict with data to pass to the jinja2 template
        template(str): path to the jinja2 template, relative to ./

    Returns: the rendered output as a string.
    """
    # NOTE: the original opened the template file here without ever using
    # the handle; the FileSystemLoader reads the file itself, so that
    # pointless open() was removed.
    loader = jinja2.FileSystemLoader(searchpath="./")
    env = jinja2.Environment(loader=loader)
    return env.get_template(template).render(data=data)
import collections
def _longest(d):
"""
Returns an OrderedDict with the contents of the input dictionary ``d``
sorted by the length of the keys, in descending order.
This is useful for performing substring matching via ``str.startswith``,
as it ensures the most complete match will be found.
>>> data = {'a': 1, 'bb': 2, 'ccc': 3}
>>> pwnlib.context._longest(data) == data
True
>>> for i in pwnlib.context._longest(data):
... print i
ccc
bb
a
"""
return collections.OrderedDict((k,d[k]) for k in sorted(d, key=len, reverse=True)) | 56b6efe5ae544f4026c462893d54fb626b7009b4 | 96,503 |
import re
def cert_arn_lookup(session, domain_name):
    """Looks up the ARN for a SSL Certificate

    Args:
        session (Session|None) : Boto3 session used to lookup information in AWS
                                 If session is None no lookup is performed
        domain_name (string) : Domain Name that the Certificate was issued for

    Returns:
        (string|None) : Certificate ARN or None if the Certificate could not be located
    """
    if session is None:
        return None

    client = session.client('acm')
    response = client.list_certificates()
    for cert in response['CertificateSummaryList']:
        cert_domain = cert['DomainName']
        if cert_domain == domain_name:
            return cert['CertificateArn']
        if cert_domain.startswith('*'):
            # Wildcard cert like "*.thebossdev.io": match any domain ending
            # with the literal suffix. (The original used an unescaped regex,
            # so '.' matched any character and e.g. "apixdevxio" could match
            # "*.dev.io"; endswith is both correct and simpler.)
            if domain_name.endswith(cert_domain[1:]):
                return cert['CertificateArn']
    return None
async def ping() -> str:
    """Liveness probe: always answers with the literal string ``"pong"``."""
    return "pong"
def str_cellarray2str_list(str_cellarray):
    """Convert a str cell array loaded via ``scipy.io.loadmat`` to a str list.

    loadmat returns MATLAB str cell arrays with an awkward nested shape; this
    unwraps them.

    Args:
        str_cellarray (ndarray): corresponding to a str cell array in MATLAB

    Returns:
        For a 2-D input, a list of the strings in row 0; for a 1-D input,
        its first element; anything else is returned unchanged.
        # NOTE(review): the 1-D branch returns only element [0] — presumably
        # matching how loadmat wraps a single string; confirm against callers.
    """
    if str_cellarray.ndim == 2:
        first_row = str_cellarray[0, :]
        return [cell[0] for cell in first_row]
    if str_cellarray.ndim == 1:
        return str_cellarray[0]
    return str_cellarray
def mean_squared_error(original_img, resoluted_img):
    """
    Compute the mean squared error between original and resoluted images.

    Args:
    -----
    original_img: batch of torch Tensors
        batch containing the input images
    resoluted_img: batch of torch Tensors
        batch containing the tensors at the output of the network

    Returns:
    --------
    mse: 0-dim torch Tensor
        mean squared error over all elements
    """
    residual = original_img - resoluted_img
    return residual.pow(2).mean()
def plane(xy_array, a, b, c):
    """
    Evaluate the plane z = a*x + b*y + c over an array of points.

    :param xy_array: a (2xN) numpy array, x values being the first row and
        y values the second row
    :param a: coefficient for x values
    :param b: coefficient for y values
    :param c: constant offset
    :return: an array of length N containing the z values
    """
    x_vals = xy_array[0, :]
    y_vals = xy_array[1, :]
    return a * x_vals + b * y_vals + c
def zoom_fit(screen, bounds, balanced=True):
    """What affine transform will zoom-fit the given items?

    screen: (w,h) of the viewing region
    bounds: (x,y,w,h) of the items to fit
    balanced: Should the x and y scales match?
    returns: [translate x, translate y, scale x, scale y]
    """
    screen_w, screen_h = screen
    item_x, item_y, item_w, item_h = bounds
    scale_x = screen_w / item_w
    scale_y = screen_h / item_h
    if balanced:
        # Use the smaller scale on both axes so everything stays visible.
        scale_x = scale_y = min(scale_x, scale_y)
    return [-item_x * scale_x, -item_y * scale_y, scale_x, scale_y]
from typing import List
from typing import Tuple
from typing import Optional
def match_initial(string: str, candidates: List[str]) -> Tuple[str, Optional[str]]:
    """
    Return the longest candidate matching the start of *string*.

    Candidates are tried longest-first so the most complete match wins
    ("abc" is tested before "ab"); empty strings among the candidates are
    ignored.

    @param string: The string to be matched at the beginning.
    @param candidates: Candidate prefixes, in any order.
    @return: A tuple of (the string with the matched prefix removed — or an
        unchanged copy when nothing matched) and (the matched candidate, or
        `None` when no candidate matched).
    """
    viable = sorted((cand for cand in candidates if cand), key=len, reverse=True)
    for candidate in viable:
        if string.startswith(candidate):
            return string[len(candidate):], candidate
    return string, None
import csv
def parse_line(value, quotechar=None, **kwargs):
    """
    Parse a single CSV-formatted string into a list of fields.

    Returns None when the input yields no rows; extra keyword arguments are
    forwarded to ``csv.reader``.
    """
    reader = csv.reader([value], quotechar=quotechar or '"', **kwargs)
    return next(reader, None)
import torch
def determine_device(gpu_selected):
    """Determine the torch device from the user's request and availability.

    :param gpu_selected: boolean value provided by the user
    :return: "cuda" when requested and available, otherwise "cpu"
    """
    if gpu_selected and torch.cuda.is_available():
        return "cuda"
    if gpu_selected:
        print("WARN: CUDA is not available. Using CPU instead")
    return "cpu"
def reverse_image(image):
    """
    Reverse the order of an image's entries, returned as a tuple.

    :param [int] image: Image to reverse
    :return [int]: Reversed image
    """
    return tuple(image[::-1])
import hashlib
def munch_abbreviation_string(m):
    """
    Summarize the k,v pairs of a Munch (or any dict) as a concise string.

    Each key is abbreviated to the initials of its underscore-separated
    words; list values are joined with '.'; any value whose string form is
    longer than 32 characters is replaced by its md5 hexdigest. E.g.
    ``Munch(my_special_value=0.1, a_list=['joe','bob'])`` becomes
    ``msv_0.1-al_joe.bob``.
    """
    pieces = []
    for key, val in m.items():
        short = "".join(word[0] for word in key.split("_"))
        text = ".".join(str(item) for item in val) if isinstance(val, list) else str(val)
        if len(text) > 32:
            text = hashlib.md5(text.encode()).hexdigest()
        pieces.append(f"{short}_{text}")
    return "-".join(pieces)
def page_replace(context, value):
    """Replaces or adds the page number to form the pagination url.

    Example:
        if you're on the page ``/company/list/?sorted=created&page=5``,
        then
        <a href="/company/list/?{% page_replace 3 %}">Page 3</a>
        would expand to
        <a href="/company/list/?sorted=created&page=3">Page 3</a>
    """
    params = context['request'].GET.copy()
    params['page'] = value
    return params.urlencode()
def is_passed_counting_roi(current_point, first_point, roi_polygon, roi_outside, is_enter):
    """
    Check whether a tracked point crossed the counting ROI boundary.

    A crossing happened when exactly one of (first detected location,
    current location) is inside the ROI; which direction counts depends on
    whether the ROI is the outside region and whether we are counting
    entries or exits.

    :param current_point: Point object for the current location
    :param first_point: Point object for the first detected location
    :param roi_polygon: Polygon object for the counting ROI
    :param roi_outside: True if the ROI is the outside region
    :param is_enter: True to test for entering the ROI, False for leaving it
    :return: True if the point passed the ROI, False otherwise
    """
    in_now = roi_polygon.contains(current_point)
    in_start = roi_polygon.contains(first_point)
    if roi_outside ^ is_enter:
        return in_now and not in_start
    return in_start and not in_now
def roe(net_income, average_equity):
    """Compute return on equity: net income divided by average total equity.

    Parameters
    ----------
    net_income : int or float
        Net income
    average_equity : int or float
        Average total equity

    Returns
    -------
    out : int or float
        Return on equity
    """
    return net_income / average_equity
def gen_query_string(params):
    """Generate a query string ("k=v&k2=v2") from the parameter dict."""
    pairs = [f"{key}={value}" for key, value in params.items()]
    return '&'.join(pairs)
def calculate_loan_to_value_ratio(loan_amount, home_value):
    """
    Calculates the loan to value ratio.

    Converts the loan amount and home value parameters to
    float values and divides the loan amount by the home value
    to produce the loan to value ratio.

    Parameters:
        loan_amount (float): The loan amount.
        home_value (float): The value of the home.

    Returns:
        loan_to_value_ratio (float): The loan to value ratio.
    """
    # float() (not int()) so documented fractional amounts are not silently
    # truncated before dividing; the original docstring also wrongly claimed
    # an int return — true division always yields a float.
    return float(loan_amount) / float(home_value)
def is_none(value):
    """Convert a string indicating 'None' or missing value ('NA') to None;
    return any other value unchanged."""
    return None if value in ('NA', 'None') else value
def asini_c(pb, mf):
    """
    Return the orbital projected semi-major axis (lt-sec) given:
        'pb' is the binary period in sec.
        'mf' is the mass function of the orbit.

    # NOTE(review): 8015123.37129 is an undocumented unit-conversion
    # constant inherited from the original — confirm its provenance.
    """
    cubed = mf * pb * pb / 8015123.37129
    return cubed ** (1.0 / 3.0)
def tile_bounds(tsinfo, z, x, y, width=1, height=1):
    """
    Get the coordinate boundaries for the given tile.

    Parameters:
    -----------
    tsinfo: { min_pos: [], max_pos [] }
        Tileset info containing the bounds of the dataset
    z: int
        The zoom level
    x: int
        The x position
    y: int
        The y position
    width: int
        Return bounds for a region encompassing multiple tiles
    height: int
        Return bounds for a region encompassing multiple tiles

    Returns [from_x, from_y, to_x, to_y].
    """
    min_pos = tsinfo["min_pos"]
    max_pos = tsinfo["max_pos"]
    # Tiles are square, sized from the larger of the two dataset extents.
    max_extent = max(max_pos[0] - min_pos[0], max_pos[1] - min_pos[1])
    tile_size = max_extent / 2 ** z
    return [
        min_pos[0] + x * tile_size,
        min_pos[1] + y * tile_size,
        min_pos[0] + (x + width) * tile_size,
        min_pos[1] + (y + height) * tile_size,
    ]
def thread(read=20, write=20):
    """Return the kwargs for creating the DynamoDB ``Thread`` table.

    ForumName is the hash key and Subject the range key; *read*/*write*
    set the provisioned throughput.
    """
    key_attrs = (('ForumName', 'HASH'), ('Subject', 'RANGE'))
    return {
        'AttributeDefinitions': [
            {'AttributeName': name, 'AttributeType': 'S'}
            for name, _ in key_attrs
        ],
        'TableName': 'Thread',
        'KeySchema': [
            {'AttributeName': name, 'KeyType': key_type}
            for name, key_type in key_attrs
        ],
        'ProvisionedThroughput': {
            'ReadCapacityUnits': read,
            'WriteCapacityUnits': write,
        },
    }
def follow_path(schema_root, path):
    """Walk attribute names in *path* from *schema_root* to a nested schema.

    An empty path returns *schema_root* itself.
    """
    node = schema_root
    for attribute_name in path:
        node = getattr(node, attribute_name)
    return node
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.