content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64, 8–710k) |
---|---|---|
def toroman(number: int) -> str:
"""
Takes an integer number, returns a string with the roman numeral.
Parameters:
number (int): A positive integer less than 4000
Returns:
numeral (str): The roman numeral of the number.
"""
# Exception handling
if(number <= 0): raise ValueError("Number is less than 1")
if(number >= 4000): raise ValueError("Number is greater than 3999")
# Start of algorithm
# The algorithm starts with a long string of I's
# and replaces rows of smaller letters with one greater letter
# until the numeral is optimized
letters = "IVXLCDM"
# Create a long string of I's
numeral = letters[0]*number
for index, letter in enumerate(letters[:-1]):
# Letters which are related to 5 (V, L, and D)
if index % 2:
# Row of VV is replaced with X
numeral = numeral.replace(letter*2, letters[index + 1])
# Row of VIV is replaced with IX
numeral = numeral.replace(
letter+letters[index - 1]+letter, letters[index - 1] + letters[index + 1])
# Letters which are related to 10 (I, X, C and M)
else:
# Row of IIIII is replaced with V
numeral = numeral.replace(letter*5, letters[index + 1])
# Row of IIII is replaced with IV
numeral = numeral.replace(letter*4, letter + letters[index + 1])
return numeral | 755419a02f876ab8bb30c891e2c244bc8f7e6807 | 116,756 |
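A minimal usage sketch for the toroman record above, assuming the function is in scope as shown; the replacement-based algorithm produces the canonical numerals:
print(toroman(4))     # IV
print(toroman(9))     # IX
print(toroman(1987))  # MCMLXXXVII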
def is_stack_region(name):
"""Checks whether memory region is stack"""
return name == '[stack]' | 83c8db5e587d018d2cd5234d58741d2a17e0fd18 | 397,097 |
from typing import Any
from typing import Dict
def metadata(user_model: Any) -> Dict:
"""
Call the user model to get the model metadata
Parameters
----------
user_model
User defined class instance
Returns
-------
Model Metadata
"""
if hasattr(user_model, "metadata"):
return user_model.metadata()
else:
return {} | 6fa8df5a8d842c8fbccfa6d8447732da4263a124 | 28,848 |
def get_user_data_usage_limit(connection, id, project_id):
"""Get the data usage limit for users, either all users or a specific user,
in a specific project. A typical use case would be that an administrator
has set a project-level limit of data, such as 10GB, for all users in a
specific project and has also limited the data usage for specific users,
for example to 5GB.
Args:
connection (object): MicroStrategy connection object returned by
`connection.Connection()`.
id (str): User ID
project_id (str): Project ID
Returns:
HTTP response object returned by the MicroStrategy REST server.
"""
url = f'{connection.base_url}/api/users/{id}/projects/{project_id}/quotas'
return connection.session.get(url=url) | 281e82d5c188a7f17145014619874d22d20f1153 | 551,023 |
def get_shape_points(cur, shape_id):
"""
Given a shape_id, return its shape-sequence.
Parameters
----------
cur: sqlite3.Cursor
cursor to a GTFS database
shape_id: str
id of the shape
Returns
-------
shape_points: list
elements are dictionaries containing the 'seq', 'lat', 'lon', and 'd' of the shape points
"""
cur.execute('''SELECT seq, lat, lon, d FROM shapes where shape_id=?
ORDER BY seq''', (shape_id,))
shape_points = [dict(seq=row[0], lat=row[1], lon=row[2], d=row[3])
for row in cur]
return shape_points | 35dba0d08c6f3e1fc53e3301f8fc9b0337fa2caf | 675,705 |
from typing import Any
from typing import Optional
from typing import Tuple
def is_iterable(obj: Any, exclude: Optional[Tuple[type, ...]] = None) -> bool:
"""
Receives an object and checks if it is iterable by
looking for the __iter__ method on the object.
If exclude is provided, is_iterable will evaluate
to False if the object is an instance of any of the types
in exclude.
>>> is_iterable([1,2,3,])
True
>>> is_iterable([1,2,3,], exclude=(list,))
False
>>> is_iterable(1)
False
"""
exclude = exclude if exclude is not None else ()
if hasattr(obj, '__iter__'):
return not isinstance(obj, exclude)
else:
return False | 7116b46f727ba2fda1e293b382469249f4359c54 | 383,717 |
def control_valve(P1, P2, T, Z, MW, gamma, Cv, xT=0.75, FP=1):
"""
Flow calculated from ANSI/ISA control valve equations for single phase gas flow.
Equation 19 pp. 132 in
Control Valves / Guy Borden, editor; Paul Friedmann, style editor
Parameters
----------
P1 : float
Upstream pressure
P2 : float
Downstream / backpressure
T : float
Upstream temperature
Z : float
Upstream compressibility
MW : float
Molecular weight of the gas relieved
gamma : float
Upstream Ideal gas k (Cp/Cv)
Cv : float
Valve coefficient
xT : float
Value of xT for valve fitting assembly, default value
FP : float
Piping geometry factor
Returns
----------
: float
Mass flow
"""
P1 = P1 / 1e5
P2 = P2 / 1e5
MW = MW * 1000
N8 = 94.8
Fk = gamma / 1.4
x = (P1 - P2) / P1
if x < 0:
x = 0
Y = 1.0 - min(x, Fk * xT) / (3.0 * Fk * xT)
mass_flow = N8 * FP * Cv * P1 * Y * (MW * min(x, xT * Fk) / T / Z) ** 0.5
return mass_flow / 3600 | 989506f23d74e74b1edd2945366d2ada3b2e6f59 | 339,373 |
def sort(lst):
"""Standard merge sort.
Args:
lst: List to sort
Returns:
Sorted copy of the list
"""
if len(lst) <= 1:
return lst
mid = len(lst) // 2
low = sort(lst[:mid])
high = sort(lst[mid:])
res = []
i = j = 0
while i < len(low) and j < len(high):
if low[i] < high[j]:
res.append(low[i])
i += 1
else:
res.append(high[j])
j += 1
res.extend(low[i:])
res.extend(high[j:])
return res | c77a24bb8b09715c0b29686c02921fe223e4c0ba | 200,201 |
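A short, illustrative check of the merge sort above, assuming sort is in scope; it returns a new sorted copy and leaves the input list untouched:
data = [5, 2, 4, 7, 1, 3, 2, 6]
print(sort(data))  # [1, 2, 2, 3, 4, 5, 6, 7]
print(data)        # [5, 2, 4, 7, 1, 3, 2, 6], the original list is unchanged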
def reverse(text: str) -> str:
"""
:param text: str: Input string
:return: str: Reversed string
"""
return text[::-1] | 0272f456a6d1991b53df80069e2c68324a90fb17 | 246,810 |
import re
def first_item_grabber(the_str: str, re_separator_ptn=";|\-|–|,|\|", def_return=None):
"""
From a string containing more than one item separated by separators, grab the first.
>>> first_item_grabber("1987, A1899")
'1987'
>>> first_item_grabber("1987;A1899")
'1987'
>>> first_item_grabber("1916–1917[1915–1917]")
'1916'
"""
ret_val = re.split(re_separator_ptn, the_str)
if ret_val != []:
ret_val = ret_val[0]
else:
ret_val = def_return
return ret_val | 1b332b28eed5043d0890e862fad884ab72bdf8c7 | 691,782 |
def isbra(qob):
"""Checks if ``qob`` is in bra form -- an array row.
"""
return qob.shape[0] == 1 and qob.shape[1] > 1 | abf2f2a41b469d4224f32b86b17a163ba2bc7b38 | 363,839 |
from datetime import datetime
def parse_time(argument):
""" Time parser to be used as type argument for argparser options. """
return datetime.strptime(argument, "%H:%M") | e6b8204f906f3ea2076058a2877e8f09a002319e | 693,265 |
def get_total_assam_bhuttan(classification_training_data):
"""
calculates the total number of Assam and Bhuttan people in the given dataset
:param classification_training_data: data set that contains records for both Assam and Bhuttan people
:return: number of Bhuttan people, number of Assam people
"""
# count of Bhuttan people in the data frame, using the "Class" column in pandas
total_bhuttan = len(classification_training_data[classification_training_data["Class"] == "Bhuttan"].index)
# count of Assam people in the data frame, using the "Class" column in pandas
total_assam = len(classification_training_data[classification_training_data["Class"] == "Assam"].index)
return total_bhuttan, total_assam | bfda27b7335f740a8d302b434abc702eb65f5cea | 573,119 |
def _blob_and_weights(net, layer_name):
"""Get the activation blob and the weights blob for the named layer
in the Caffe network.
"""
# Get the activation blob for this layer and its parameters
# (weights).
blob = net.blobs[net.top_names[layer_name][0]]
weights = net.params[layer_name][0]
return blob, weights | 07fb1a2c363f710d6f53691facb3a07ef33a3475 | 679,780 |
def file_header(prog_name):
"""Returns the header to use on generated files."""
return """// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This is a generated file. Do not edit it! It was generated by:
// {prog_name}
""".format(prog_name=prog_name) | 90c1bbce830f9f21860678632d6f02c6471f0be2 | 409,305 |
async def order(bot, user, order=None, *args):
"""
usage: !order
Display play order, restricted to distributed_roles.
"""
if not bot.game:
return
available_roles = []
for role in bot.game.initial_roles.values():
elt = {"name": role}
elt.update(bot.role[role])
available_roles.append(elt)
available_roles.sort(key=lambda x: str(x.get("order", "100")))
lines = ["L'ordre dans lequel les rôles sont appelés est :"]
for num, role in enumerate(available_roles):
lines.append("{}. {}".format(num, role["name"]))
return "\n".join(lines) | ddb59c9f2f92693d0c7a019773b5f1ef270fe30a | 204,397 |
def median(series, n):
"""
Median: the middle value of series over n periods.
Notes:
1. If n is a valid value but the series currently has fewer than n elements, the function returns a NaN series.
2. After sorting all values of series within the n periods: if n is odd, the ((n + 1) / 2)-th value is taken as the median; if n is even, the median is the average of the (n / 2)-th and (n / 2 + 1)-th values.
Args:
series (pandas.Series): data series
n (int): period
Returns:
pandas.Series: series of medians
Example::
Example 1:
# Assume the closing prices of the last 3 days are 2727, 2754, 2748; then median(df["close"], 3) currently returns 2748
median3 = tafunc.median(df["close"], 3)
Example 2:
# Assume the opening prices of the last 4 days are 2752, 2743, 2730, 2728; then median(df["open"], 4) currently returns 2736.5
median4 = tafunc.median(df["open"], 4)
"""
median_data = series.rolling(n).median()
return median_data | 65bdcf08276b68a9bbe92d6f4dfefab5ec205e74 | 68,649 |
def convert_coordinates(value: str) -> float:
"""Convert coordinates to lat/long."""
if len(value) < 8:
return float(value[0] + "." + value[1:])
return float(value[0:2] + "." + value[2:]) | 3923c3aa85c0944e0d49b029122a3b6ba30492f6 | 685,318 |
import time
def JQL_TIMER(func, *args):
"""计时函数
Args:
func: 函数名
args: 可变参数
Return:
函数执行结果, 函数执行时间
"""
start = time.time()
result = func(*args)
end = time.time()
return result, end - start | a0d318f67ecb4f48782ada983e004f409747e03a | 152,217 |
def ref_mode(mode):
"""
Defines reference pixels for different imaging modes.
Parameters:
mode - string containing imaging mode.
Returns:
xref, yref - Floating point reference pixel coordinates
"""
xref, yref = 692.5, 511.5
xref_slit, yref_slit = 325.13, 299.7
xref_slitless, yref_slitless = 37.5, 300.
BRIGHTSKY_x, BRIGHTSKY_y = 711.5, 305.5
SUB256_x, SUB256_y = 539.5, 177.5
SUB128_x, SUB128_y = 69.5, 951.5
SUB64_x, SUB64_y = 37.5, 809.5
if "SLITLESS" in mode:
xref = xref_slitless
yref = yref_slitless
elif "SLIT" in mode:
xref = xref_slit
yref = yref_slit
elif "BRIGHTSKY" in mode:
xref = BRIGHTSKY_x
yref = BRIGHTSKY_y
elif "256" in mode:
xref = SUB256_x
yref = SUB256_y
elif "128" in mode:
xref = SUB128_x
yref = SUB128_y
elif "64" in mode:
xref = SUB64_x
yref = SUB64_y
else:
xref = xref
yref = yref
return xref, yref | 812f6d8cfe0b5a4b07df8b6c9f79f2708838cec1 | 346,653 |
def split_byte(x):
"""Split byte into groups of bits: (2 bits, 3 bits, 3 bits)"""
return x >> 6, x >> 3 & 7, x & 7 | 7743fdf78c201dce66803ae0eb62cbdf58cccc7d | 12,183 |
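An illustrative check of split_byte above, assuming the function is in scope; because the split is 2+3+3 bits, the three groups line up with the octal digits of the byte:
print(split_byte(0b11010110))  # (3, 2, 6)
print(split_byte(0o123))       # (1, 2, 3), matching the octal digits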
def triangle_number(n):
"""
Return the nth triangle number; i.e., the value of
``1+2+3+...+(n-1)+n``.
"""
return n*(n+1)/2 | 84ecc4da3c3e70148471132783828583f2d117a0 | 64,078 |
import glob
def get_file_list(file_type):
"""
Returns a list of all files to be processed
:param file_type: string - The type to be used: crash, charges, person, primaryperson, unit
:return: array
"""
return glob.glob("/data/extract_*_%s_*.csv" % file_type) | f8d4d63348b1abc61e566698fa70b04da63fab89 | 676,144 |
def rational_quadratic(alpha, lengthscale, kernel_variance, r):
"""
The rational quadratic kernel as featured in equation 4.19 on pg. 86 of Rasmussen and Williams. The rational quadratic
kernel can be seen as a scale mixture (an infinite sum) of squared exponential kernels with different characteristic lengthscales.
:param alpha: as alpha goes to infinity the RQ kernel becomes the SQE kernel.
:param lengthscale: the lengthscale
:param kernel_variance: the kernel variance
:param r: The absolute distance in input space
:return: The kernel function evaluated at a list of values r.
"""
fract = (r/lengthscale)**2 * 1/(2*alpha)
k_rq = (1 + fract)**(-alpha)
k_rq *= kernel_variance
return k_rq | a308c464d22e516dae20bc066f3ff8cb2bf7a8f6 | 185,711 |
def _build_string(tokens):
"""Builds string from token list."""
return ' '.join(tokens) | 53119717844fb8c0ea0efb1dccc303a4b1b0129b | 171,344 |
from typing import Set
from typing import Dict
def make_indices_to_labels(labels: Set[str]) -> Dict[int, str]:
""" Creates a mapping from indices to labels. """
return {index: label for index, label in
enumerate(["pad"] + sorted(list(labels)))} | def903371ae37f33db1a0e4db064c4f9d68c9531 | 685,747 |
def _GetExceptionName(cls):
"""Returns the exception name used as index into _KNOWN_ERRORS from type."""
return cls.__module__ + '.' + cls.__name__ | a0b03c28d61609161052120d8db4e124545396e9 | 246,420 |
def remove_domain(hn):
"""Removes domain suffix from provided hostname string
Args:
hn (str): fully qualified dns hostname
Returns:
str: hostname left by removing domain suffix
"""
return hn.split(".")[0] | 07e5136d06f4206f7cd071cda14d3db677f7a37b | 38,387 |
def _get_req_span(request):
""" Return the datadog span from the given request. """
return getattr(request, "_datadog_request_span", None) | 13adcc6519d3beb2ea9900c13b00d0bbba2243d3 | 171,157 |
def is_s3_path(file_path):
""" Return true if file_path is an S3 path, else false.
"""
schema, _, rest = file_path.partition('://')
return schema == 's3' | 6b637eb8e032780ce682a7d7656bef7f5579d350 | 55,726 |
from pathlib import Path
from typing import Dict
from typing import Any
import yaml
def yml_load(path: Path) -> Dict[str, Any]:
"""
Loads a .yml file into a Python dictionary
"""
with open(path, "rt") as source:
content = yaml.safe_load(source)
return content | b13918df90687cffcbb9595c3687c531d2b12b27 | 427,395 |
def decdeg2dms(dd):
""" Convert decimal degrees to deg,min,sec """
is_positive = dd >= 0
dd = abs(dd)
minutes,seconds = divmod(dd*3600,60)
degrees,minutes = divmod(minutes,60)
degrees = degrees if is_positive else -degrees
return (degrees,minutes,seconds) | 16bc0cae0ea634686e78b1443da5ab4ed19cc093 | 151,786 |
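A quick sketch of decdeg2dms above; the sign is carried on the degrees component only (values chosen to avoid floating-point noise):
print(decdeg2dms(30.5))   # (30.0, 30.0, 0.0)
print(decdeg2dms(-30.5))  # (-30.0, 30.0, 0.0)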
def imshape(mip):
"""Compute image size for different mip levels"""
return 4000 // 2 **mip, 6000 // 2 **mip | cab9476d15ff42b29ab3909225ffddc46c989a89 | 512,752 |
def _get_property_dict(property_list):
"""
Helper method to build a dictionary from a list of property dicts,
each having 'key' and 'value' entries
"""
property_dict = {}
for property in property_list:
property_dict[property['key']] = property['value']
return property_dict | 510f95042febffeb00d7c1a19fc89d062f7eeb8f | 197,800 |
def fb_lookup(dic, keys, default):
"""Do dict-lookup for multiple key, returning the first hit.
"""
for key in keys:
if key in dic:
return dic[key]
else:
return default | 762374f19e2487b9025a7233739ef5e5e715c739 | 368,493 |
from functools import reduce
def left_arc(stack, queue, graph, deprel=False):
"""
Creates an arc from the first in the queue to the top of the stack
and reduces it.
The deprel argument is either read from the manually-annotated corpus
(deprel=False) or assigned by the parser, in which case the deprel
argument has a value
:param stack:
:param queue:
:param graph:
:param deprel: either read from the manually-annotated corpus (value false)
or assigned by the parser
:return:
"""
graph['heads'][stack[0]['id']] = queue[0]['id']
if deprel:
graph['deprels'][stack[0]['id']] = deprel
else:
graph['deprels'][stack[0]['id']] = stack[0]['deprel']
return reduce(stack, queue, graph) | 3fc0beaa5759be512c764b5171fa5a4398d48bc3 | 371,138 |
import torch
def get_device(inputs):
""" Get used device of a tensor or list of tensors. """
if isinstance(inputs, torch.Tensor):
device = inputs.device
elif isinstance(inputs, (tuple, list)):
device = inputs[0].device
else:
raise TypeError(f'Inputs can be a tensor or list of tensors, got {type(inputs)} instead!')
return device | eb67cb3a5ae226c4136bc172d37225ba6a64b45f | 14,157 |
from unittest.mock import Mock
def create_mock_Popen(x_success='', x_error=''):
"""Return mock subprocess.Popen class.
stdout and stderr will be returned by communicate() of its returned
instance.
"""
mock_Popen_instance = Mock()
mock_Popen_instance.communicate = Mock(return_value=(x_success, x_error))
mock_Popen = Mock(return_value=mock_Popen_instance)
return mock_Popen | 65258edc5ff4b6a0c4fc4f17ca152f4c80180d4c | 417,822 |
import re
def replace_token(line, str1, str2):
"""Replace token str1 with new str2 in line."""
# pull out what you want to keep on left and right
# rather than capture what you want and replace it
if str1 not in line:
return line
pattern = r'(^|.*[^a-zA-Z_]+)' + str1 + r'($|[^a-zA-Z0-9_]+.*)'
line = re.sub(pattern, r'\1' + str2 + r'\2', line)
return line | 2e1baa83afb135574af0d0fc5e3acf56cb42bfb7 | 427,096 |
def get_lang_from_corpus_name(corpus_name):
"""
Determines the language of a corpus based on its name.
"""
if corpus_name.startswith("conll03_en") or corpus_name.startswith("ud_en"):
return "en"
elif corpus_name in ["conll03_de", "germeval"]:
return "de"
else:
return None | 76c2954fa3c474f5e788a0662fea84dede196c00 | 610,520 |
def getServer(networkDict):
"""
Get Server information in a specific network
"""
return networkDict['WGNet']['Server'] | 9c68c7109d2155025e1e4147e649e0eb38eebc56 | 123,371 |
def is_python_boolean(var):
"""Tests whether var is a Boolean."""
return var is True or var is False | 7e0fddf5675575f355827c95a468fd3e81eaba07 | 523,749 |
def combine_biluo(tokens: list[str], tags: list[str]) -> tuple[list[str], list[str]]:
"""
Combines multi-token BILUO tags into single entities.
Parameters
----------
tokens : list[str]
Input tokenized string.
tags : list[str]
Tags corresponding with each token with BILUO format.
Returns
-------
tuple[list[str], list[str]]:
Tokens and tags with BILUO removed.
Example
-------
>>> tokens = ['New', 'York', 'City', 'is', 'big', '.']
>>> tags = ['B-PLACE', 'I-PLACE', 'L-City', 'O', 'O', 'O']
>>> tokens, tags = combine_biluo(tokens, tags)
>>> tokens
['New York City', 'is', 'big', '.']
>>> tags
['PLACE', 'O', 'O', 'O']
"""
tokens_biluo = tokens.copy()
tags_biluo = tags.copy()
for idx, tag in enumerate(tags_biluo):
if idx + 1 < len(tags_biluo) and tag[0] == "B":
i = 1
while tags_biluo[idx + i][0] not in ["B", "O"]:
tokens_biluo[idx] = tokens_biluo[idx] + " " + tokens_biluo[idx + i]
i += 1
if idx + i == len(tokens_biluo):
break
zipped = [
(token, tag)
for (token, tag) in zip(tokens_biluo, tags_biluo)
if tag[0] not in ["I", "L"]
]
if list(zipped):
tokens_biluo, tags_biluo = zip(*zipped)
tags_biluo = [tag[2:] if tag != "O" else tag for tag in tags_biluo]
return list(tokens_biluo), tags_biluo
else:
return [], [] | 98a54b02dd458dfa2b0e4b2bc71d2a9f52f27310 | 538,258 |
def element_text(member_elt, elt_name):
"""Extract all `para` text from (`elt_name` in) `member_elt`."""
text = []
if elt_name:
elt = member_elt.find(elt_name)
else:
elt = member_elt
if elt:
paras = elt.findAll('para')
for p in paras:
text.append(p.getText(separator=u' ').strip())
return '\n\n'.join(text) | 13ff356e1a584bcaa9c905c93dcafaa787ca936f | 11,114 |
def flatten_columns(columns):
""" Transforms hierarchical column names to concatenated single-level
columns
"""
return ['_'.join(col).strip() for col in columns] | dbd5fa1a8c04abec8eba71c44698bf682caaf5be | 155,598 |
def audit_link(linkText, uri):
"""Generate link "markdown" from URI."""
return '{{{}|{}}}'.format(linkText, uri) | 5655eedffd51e92d426083f84c160256c236b47d | 170,414 |
def compute_energy_from_density(density, x_dimension, y_dimension, chemical_potential):
"""Computes energy from energy density."""
return (x_dimension * y_dimension) * (density - chemical_potential) | d3b43b513ef57aa78f1b483ea598c974313f3fcc | 604,171 |
def _create_mmio_property(addr):
"""Returns a python property.
Create mmio getter and setters for hardware registers.
"""
def _get(self):
return self.read(addr)
def _set(self, value):
self.write(addr, value)
return property(_get, _set) | 2b557a642e2989b9980e5e1bdc2e6d45f76dba7b | 456,359 |
import logging
def _find_internal_ip_interface(iface_list):
"""Find an IP address that is internal from GCE.
Args:
iface_list: a list of GCE networkInterface.
Returns:
IP address string or None.
"""
logger = logging.getLogger(__name__)
for iface in iface_list:
try:
ip_addr = iface['networkIP']
if ip_addr != '127.0.0.1':
return ip_addr
except KeyError:
logger.warning('Network Interface has no "networkIP": %s', iface)
logger.warning('Could not find internal IP interface in %s:', iface_list)
return None | 32cac2db32ec98ef736861c7178710855612bcfa | 66,041 |
import shutil
def command_exists(command: str) -> bool:
"""
Checks whether some external utility is installed and accessible to this script.
Args:
command: The name of the binary/command to look for. It must be a single name; arguments and shell command lines
are not accepted.
Returns:
True if the command exists and is accessible (as per `which`).
"""
return shutil.which(command) is not None | f9160163289f75af6a602641fc357addf0fc18bc | 26,690 |
from typing import List
from pathlib import Path
def do_single_check(command: str, relevant_files: List[Path],
command_check_mapping) -> bool:
"""Do a single check requested by a command."""
check = dict(command_check_mapping)[command]
if command == 'format':
success = check(relevant_files, False)
else:
success = check(relevant_files)
if not success:
print('ERROR: %s failed, see errors above.' % check.__name__)
return success | f02c5befefe6b8bbecf95509d1d0c813ad67544a | 81,209 |
import uuid
def _parse_legacy_uuid(doc):
"""Decode a JSON legacy $uuid to Python UUID."""
if len(doc) != 1:
raise TypeError('Bad $uuid, extra field(s): %s' % (doc,))
return uuid.UUID(doc["$uuid"]) | 6a238a637f98a5f0a7c79374f09bb72fc7b17ebe | 48,696 |
from datetime import datetime
def validate_date_string(date_string):
"""
Validates the date string is a valid date in the YYYY-MM-DD format.
If the string is valid, it returns the string. Otherwise, it raises either
a ValueError or TypeError.
:param date_string: The string to validate
:return: a valid date string
:raises: A ValueError if the date string is not a valid date or
doesn't conform to the specified format.
"""
datetime.strptime(date_string, '%Y-%m-%d')
return date_string | c3609e65075cfce89bd052a82907f8ae126dd2b5 | 487,014 |
import torch
def _gram_matrix(outputs):
"""Computes the gram matrix, the inner product of a matrix with itself
"""
# Apply operation batch wise
G = []
for output in outputs:
# View output in two dimensions
c, *_ = output.size()
X = output.view(c, -1)
# Compute inner product and normalise by size of output
G_ = torch.mm(X, X.t())
# Normalise w.r.t number of elements
G_ /= X.numel()
G.append(G_)
return torch.stack(G) | 001467f1c8364343b68ba06ecc44300721730c89 | 351,786 |
def get_number_default_boxes(aspect_ratios, extra_box_for_ar_1=True):
""" Get the number of default boxes for each grid cell based on the number of aspect ratios
and whether to add an extra box for aspect ratio 1
Args:
- aspect_ratios: A list containing the different aspect ratios of default boxes.
- extra_box_for_ar_1: Whether to add an extra box for aspect ratio 1.
Returns:
- An integer for the number of default boxes.
"""
num_aspect_ratios = len(aspect_ratios)
return num_aspect_ratios + 1 if (1.0 in aspect_ratios) and extra_box_for_ar_1 else num_aspect_ratios | f902c32cb72e646a67926019064beee4427f482e | 521,310 |
def _get_other_dims(ds, dim):
"""Return all dimensions other than the given dimension."""
all_dims = list(ds.dims)
return [d for d in all_dims if d != dim] | 4c8fc614442cbf4ee1a22aa8113532dcd2905e3c | 696,479 |
def returning(data, fn, *args, **kwargs):
"""
Call function `fn` with `data` and return the unchanged data.
The return value of `fn` is ignored.
`returning` is useful if `fn` does not return anything but has side effects,
e.g., printing `data` in a `pipe` call.
Args:
data (anything): argument for fn
fn (callable): a function that is going to be called
*args: optional arguments
**kwargs: optional key word arguments
Returns:
`data`
Examples:
>>> pipe(
... "Some text",
... (returning, print),
... lambda data: data.upper(),
... )
Some text
'SOME TEXT'
"""
fn(data, *args, **kwargs)
return data | 6e2673e8e65401823b6d3447ecb6308852de350f | 370,241 |
from datetime import datetime
import itertools
def satisfies(query, **kwargs):
"""Check whether a given datetime object satisfies day and time predicates
Keyword Args:
month: The month predicate (January, February, ...)
day: The day of month predicate [1 31)
weekday: The day of week predicate (Sunday, Monday, ...)
hour: The hour of day predicate [0 24)
minute: The minute of hour predicate [0 60)
"""
formatters = {
'month': lambda: datetime.strftime(query, '%B'),
'weekday': lambda: datetime.strftime(query, '%A'),
'day': lambda: query.day,
'hour': lambda: query.hour,
'minute': lambda: query.minute
}
attributes = kwargs.keys()
predicates = itertools.product(*kwargs.values())
for values in predicates:
if all([formatters[attr]() == value for attr,value in zip(attributes,values)]):
return True
return False | e36ff908a31e71eab242b9ff6cf4df6721a1fbf7 | 11,604 |
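A minimal sketch of satisfies above, assuming it is in scope: each keyword takes an iterable of allowed values, and the call is True only if some combination matches the given datetime. The example date was chosen so that it falls on a Sunday in March:
from datetime import datetime
moment = datetime(2021, 3, 14, 9, 30)
print(satisfies(moment, month=["March"], weekday=["Sunday"], hour=[9]))  # True
print(satisfies(moment, weekday=["Monday", "Tuesday"]))                  # False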
def is_constant_fill(bytes, fill_byte):
"""Check a range of bytes for a constant fill"""
return all(b == fill_byte for b in bytes) | 11106cc2a3d82fe71ca6daf712992d846ee24c96 | 694,342 |
def optional(s, missing="-"):
"""Maps empty/None strings to the missing parameter
"""
return s if s else missing | f05ce271b0cc9239d654a48bce0dc678150f0754 | 404,370 |
def psi3(ctx, z):
"""Shortcut for psi(3,z) (the pentagamma function)"""
return ctx.psi(3, z) | 5419ecfa51f0a808318ad8c6117ed0b9f4a0f73c | 532,952 |
def check_time_frame(accu_block,rel_pos_frame,actual_frame_time,seconds_ref,seconds_duration):
"""
Check if the actual timestamp of this frame is inside the experiment time window.
Parameters
----------
accu_block
accumulation period id (-1 if outside scan window).
rel_pos_frame
relative frame position in accumulation period.
actual_frame_time
actual timestamp (considering delays) for the first sample of the frame.
seconds_ref
start of the scan [s].
seconds_duration
duration of the scan [s].
Returns
-------
process_frame : int
1 if frame inside scan.
after_end_time : int
1 if frame after end of defined window.
"""
process_frame=1
after_end_time=0
if accu_block<0:
process_frame=0
elif actual_frame_time<seconds_ref:
if rel_pos_frame!=0: # if zero then split frame
process_frame=0
elif actual_frame_time>(seconds_ref+seconds_duration):
process_frame=0
after_end_time=1
return([process_frame,after_end_time]) | 293fb65f56089da8e92efe2f54f7ba8b6012415b | 563,456 |
def expose(class_method):
"""
Decorator which exposes given method into interface
:param class_method: method to expose
:return: given method with modifications
"""
class_method.is_exposed = True
return class_method | ee234bd7535f29c39fc80643997b89aeb3c0f533 | 8,524 |
def _get_name(f):
"""Gets the name of underlying objects."""
if hasattr(f, '__name__'):
return f.__name__
# Next clause handles functools.partial objects.
if hasattr(f, 'func') and hasattr(f.func, '__name__'):
return f.func.__name__
return repr(f) | c6f5c35b004afea321d981b5db9dd7be52b5efa6 | 38,036 |
def binary_search(n,arr,ele):
"""
Function to perform iterative binary search
params:
n : Size of input array
arr : input array
ele : element to be searched in array
returns:
index : index of element if found else -1.
"""
left = 0
right = n-1
while left<=right:
mid = left + int((right-left)/2)
if arr[mid] == ele:
return mid
elif ele > arr[mid]:
left = mid+1
else:
right = mid-1
return -1 | 1707c7b39cadd04371cfe8b609784e07434a60fc | 137,406 |
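An illustrative run of binary_search above on a sorted array, assuming the function is in scope:
arr = [2, 3, 5, 7, 11]
print(binary_search(len(arr), arr, 7))  # 3
print(binary_search(len(arr), arr, 4))  # -1 (not present)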
import math
def _critical_size_index(config):
"""computes the index of the critical cluster size
(assuming that these sizes are sorted)"""
pthr = config["pthr"]
nsize = config["niter"]
idx = math.ceil((1.0 - pthr) * nsize) # index of critical cluster size
if idx >= nsize or idx == 0:
raise ValueError(
"Illegal critical index (p=%s): %s; "
"consider increasing --niter" % (pthr, idx)
)
return int(idx) | 8fbc0a1a91cd80e93de4d37f7951fbcaf6a66572 | 449,952 |
import itertools
def merge_to_uniques(l):
"""Returns a unique list of elements from a list of lists."""
combined = list(itertools.chain.from_iterable(l))
return list(set(combined)) | 34ac0a0bcf8d81f325f16d897153e6261cc3e85e | 346,094 |
import bisect
def find_closest(timestamps, t):
"""
Function that finds the nearest timestamp to t in timestamps
:param timestamps: List of timestamps
:param t: Timestamp to find nearest to
:return: Index in the list of timestamps
"""
# https://stackoverflow.com/questions/29525050/nearest-timestamp-price-ready-data-structure-in-python
idx = bisect.bisect_left(timestamps, t) # Find insertion point
# Check which timestamp with idx or idx - 1 is closer
if idx > 0 and abs(timestamps[idx] - t) > abs(timestamps[idx - 1] - t):
idx -= 1
return idx | 2968f7371b2dac7db464e29d403a14e647a430af | 524,185 |
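A small sketch of find_closest above, assuming it is in scope. Note that if t is larger than every timestamp, bisect_left returns len(timestamps) and the subsequent indexing would raise an IndexError, so the example stays within the range of the list:
timestamps = [10, 20, 30]
print(find_closest(timestamps, 24))  # 1 (20 is closer than 30)
print(find_closest(timestamps, 27))  # 2 (30 is closer than 20)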
def stripDrive(s):
"""
Remove a Windows drive letter specification from a path.
"""
if len(s) > 1 and s[1] == ':':
return s[2:]
else:
return s | 0ad7fc5396decd0db88b993e6d48d6af86b99e3b | 227,635 |
def nD0_thermal(N, kT, gamma, L):
"""
Returns the product of the particle density n = N/L^2 and the diffusion
constant D_0 = 2*kT/gamma, i.e. n*D_0 = 2*kT*N/(gamma*L^2).
Parameters
----------
N : int or float
Number of particles.
kT : float
Dimensionless temperature.
gamma : float
Brownian dumping coefficient.
L : float
Characteristic system length.
Returns
-------
product : float
n D_0
"""
return (2*kT*N)/(gamma*(L**2)) | b6560fb5682a8f4f85212f6cfe665595a0876766 | 125,542 |
def match_ot(gold_ote_sequence, pred_ote_sequence):
"""
calculate the number of correctly predicted opinion targets
:param gold_ote_sequence: gold standard opinion target sequence
:param pred_ote_sequence: predicted opinion target sequence
:return: matched number
"""
n_hit = 0
for t in pred_ote_sequence:
if t in gold_ote_sequence:
n_hit += 1
return n_hit | 7af349a13348b0110fb2d288dd3128c0f4e8632d | 598,818 |
def mean(x):
"""
리스트 x의 모든 아이템들의 평균을 계산해서 리턴
x = [x1, x2, ..., xn]
mean = (x1 + x2 + ... + xn) / n
:param x: 원소 n개인 (1차원) 리스트
:return: 평균
"""
return sum(x) / len(x) | 4818b01db37cf77733b37ddb73cf3fba21c02f0b | 550,103 |
import torch
def one_hot_conversion(batch, input_size, use_cuda):
"""
Convert tensor, batch, to a one hot encoding tensor with length input_size.
Dimensions:
batch: batch size * sequence length
result: batch size * sequence length * input_size
"""
result = torch.ByteTensor(batch.size()[0], batch.size()[1], input_size).zero_()
if use_cuda:
result = result.cuda()
result.scatter_(2, batch.unsqueeze(2), 1)
return result | 5bf7d394bd0019d0070d8a695839f7daf20e7c1a | 530,888 |
def get_FTPdetect_coordinates(FTPdetect_file_content, ff_bin, meteor_no = 1):
""" Returns a list of FF*.bin coordinates of a specific bin file and a meteor on that image as a list of tuples e.g. [(15, 20), (16, 21), (17, 22)] and the rotation angle of the meteor.
"""
if int(FTPdetect_file_content[0].split('=')[1]) == 0: #Solving issue when no meteors are in the file
return [], 0, 0
skip = 0
skip_to_end = False
coord_list = []
HT_rho = 0
HT_phi = 0
found_bin = False
found_meteor = False
read_angle = False
for line in FTPdetect_file_content[12:]:
if skip_to_end:
if ("-------------------------------------------------------" in line):
skip_to_end = False
continue
continue
if skip>0:
skip -= 1
continue
if ff_bin in line:
found_bin = True
skip = 1
continue
if found_bin and not found_meteor:
line = line.split()
if int(float(line[1])) == meteor_no:
found_meteor = True
else:
skip_to_end = True
if found_bin and found_meteor:
if read_angle == False:
HT_phi = float(line[-1])
HT_rho = float(line[-2])
read_angle = True
continue
if ("-------------------------------------------------------" in line):
break
line = line.split()
coord_list.append((float(line[0]), int(round(float(line[1]), 0)), int(round(float(line[2]), 0))))
return (coord_list, HT_rho, HT_phi) | 3a5adf21d545848ac8807d3bd03e0e2ef11eda77 | 657,419 |
def min_index(l):
"""Returns the index of the min value"""
m = float("inf")
index = -1
for i in range(len(l)):
if l[i] < m:
index = i
m = l[i]
return index | bf17741f10045a93f2db8546d169d7c641c1212c | 475,467 |
import random
def make_random_tree(size):
"""Make a random binary tree with size nodes."""
if size <= 1:
return 0
r = random.randint(1, size-1)
return (make_random_tree(r), make_random_tree(size-r)) | f950e58a22f6b615c0ae9c940c93f0174244178d | 600,916 |
def scenario(request, test_env):
"""Configuration data for the scenario
Find out the name of the scenario (anything that follows "test_"),
and return the corresponding configuration.
This is a convenience fixture to serve as a shortcut to access
scenario configuration.
"""
scenario_name = request.function.__name__.split("test_", 1)[1]
return test_env["testdata"][scenario_name] | 035cd1924fbf2c105875a358cb0fa16eca0a3767 | 299,645 |
import socket
def getHostIP(url='www.google.com'):
"""Returns the (external) host ip for this machine"""
s = socket.socket()
try:
s.connect((url, 80))
return s.getsockname()[0]
except Exception:
return socket.gethostbyname(socket.gethostname()) | a2a92e545404e3bc43df64aa6d663e51d36819eb | 524,847 |
def get_fast_weights(updates, initialization):
"""Compute task-specific weights
Takes initialization parameters and task-specific weight updates
and creates the corresponding, new (fast) weights.
Parameters
----------
updates : torch.Tensor
Updates proposed for parameters. Shape = (#params,1)
initialization : list
List of initialization parameters. Every element in the list
is a torch.Tensor.
Returns
----------
fast_weights
Task-specific weights obtained by computing initialization + updates
"""
fast_weights = []
lb = 0
ub = 0
for lid, l in enumerate(initialization):
num_els = l.numel()
ub += num_els
fast_weights.append(l + updates[lb:ub].reshape(l.size()))
lb += num_els
return fast_weights | 08ec9441332050282da3dee5653c1e10442a1fa0 | 72,383 |
def page_not_found(e):
"""
Page not Found Error Handler
Args:
e (error): 404 error obtained
Returns:
template (html): HTML content with a paragraph content
"""
return "<h1>404</h1><p>The resource could not be found.</p>", 404 | c4f5f58654a8a73a808309d32273d8f74c30a3aa | 548,877 |
def chance_to_hit(accuracy, evasion):
"""
Calculates the chance to hit for the given accuracy and evasion.
Parameters
----------
accuracy : int
Accuracy rating of the attacker
evasion : int
Evasion rating of the defender
Returns
-------
float
chance to hit
"""
return accuracy / (accuracy + (evasion * 0.25) ** 0.8) | 21bdf70dc1d26a73cf3fd33f451745e642838d33 | 508,358 |
def calculate_score(lsh, minhash, total_num_events):
"""Calculate a score based on Jaccard distance.
The score is calculated based on how many similar events that there are
for the event being scored. This is called neighbours and we simply
calculate how many neighbours the event has divided by the total events
in the LSH.
Args:
lsh: instance of datasketch.lsh.MinHashLSH
minhash: instance of datasketch.minhash.MinHash
total_num_events: integer of how many events in the LSH
Returns:
A float between 0 and 1.
"""
neighbours = lsh.query(minhash)
return float(len(neighbours)) / float(total_num_events) | 042af870a599d6da83b405ae05ddcd4b88676cbe | 527,610 |
def format_output(output,format):
""" Return a string for 'output' with the specified format. If output is
None, it returns 'NA'."""
if output!=None:
return format%output
else:
return 'NA' | c06f9c1123be8b632c221165be8ddffa3a5ec438 | 565,988 |
import six
def find(inventories, res_class):
"""Return the inventory record from the list of Inventory records that
matches the supplied resource class, or None.
:param inventories: A list of Inventory objects.
:param res_class: An integer or string representing a resource
class. If the value is a string, the method first
looks up the resource class identifier from the
string.
"""
if not isinstance(res_class, six.string_types):
raise ValueError
for inv_rec in inventories:
if inv_rec.resource_class == res_class:
return inv_rec | a8fc41ab5e31693525088e93acc4a279c9a5e668 | 494,785 |
def get_windows(doc, hws=5):
"""
For each word in a document get a window around it.
Arguments:
doc: a list of words.
hws: an integer, half window size.
Returns:
a list of tuples, each tuple looks like this
(word w, window around w),
window around w equals to
[hws words that come before w] + [hws words that come after w],
size of the window around w is 2*hws.
Number of the tuples = len(doc).
"""
length = len(doc)
assert length > 2*hws, 'doc is too short!'
inside = [(w, doc[(i - hws):i] + doc[(i + 1):(i + hws + 1)])
for i, w in enumerate(doc[hws:-hws], hws)]
# for words that are near the beginning or
# the end of a doc tuples are slightly different
beginning = [(w, doc[:i] + doc[(i + 1):(2*hws + 1)])
for i, w in enumerate(doc[:hws], 0)]
end = [(w, doc[-(2*hws + 1):i] + doc[(i + 1):])
for i, w in enumerate(doc[-hws:], length - hws)]
return beginning + inside + end | 50b16a8f3564a373e5f1f8bfe0816d3bbfc24ff7 | 491,170 |
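A tiny worked example of get_windows above with hws=1, assuming the function is in scope (the doc must be longer than 2*hws); boundary words get asymmetric windows drawn from the available neighbours:
doc = ["a", "b", "c", "d", "e"]
for word, window in get_windows(doc, hws=1):
    print(word, window)
# a ['b', 'c']
# b ['a', 'c']
# c ['b', 'd']
# d ['c', 'e']
# e ['c', 'd']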
import inspect
def get_model_name(instance):
"""
Converts a `Model` instance or `class` to a model name.
:param instance:
A `Model` instance or `class`.
:return:
The stringified model name in the form `{app label}.{model name}`.
"""
return '{}.{}'.format(
instance._meta.app_label,
instance.__name__ if inspect.isclass(instance) else instance.__class__.__name__) | 965868083ed1d55daf3995ec6aa8e78e5a953aa7 | 486,009 |
def euclidean_gcd_recursive(a: int, b: int) -> int:
"""
Recursive method for the Euclidean GCD algorithm
Examples:
>>> euclidean_gcd_recursive(3, 5)
1
>>> euclidean_gcd_recursive(6, 3)
3
"""
return a if b == 0 else euclidean_gcd_recursive(b, a % b) | 1f1ad517d0ba274b97e742a40e7190c4ee9f1504 | 517,360 |
import pickle
def load_model(file):
"""
load (unpickle) a saved model
Parameters
----------
file : `string`
file name
Returns
-------
a :class:`yadll.model.Model`
Examples
--------
>>> my_model = load_model('my_best_model.ym')
"""
with open(file, 'rb') as f:
model = pickle.load(f)
return model | 99a45a6a15d96604c2c92fcc13b4b21cb164fdaf | 406,396 |
def max_clamp(val: int, limit: int) -> int:
"""Clamp int to limit."""
return min(val, limit) | 3b0ed35ad5c99391070c9d81436f90190a80a11f | 191,665 |
import typing
def read_name(file: typing.IO[bytes]) -> str:
"""Read a 4 byte name from a RIFF file"""
return file.read(4).decode() | 066c0f320a56cbfefe96701d6f9fc7e2c1a915af | 412,756 |
def jaccard(list_a, list_b):
"""
Calculates the jaccard index from two python lists.
:param list_a: First list
:param list_b: Other list
:return: The jaccard index
"""
union_count = len(set(list_a + list_b))
intersection_count = len(set(list_a).intersection(set(list_b)))
return intersection_count/union_count | 7e6a5b0d01f9183873f3023459a46ed2235dc405 | 493,630 |
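A one-line check of jaccard above, assuming it is in scope: two lists sharing 2 of 4 distinct elements give an index of 0.5:
print(jaccard([1, 2, 3], [2, 3, 4]))  # 0.5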
def get_qt_labeled_progress_bar(prog, viewer):
"""Given viewer and progress, finds associated QtLabeledProgressBar"""
activity_dialog = viewer.window._qt_viewer.window()._activity_dialog
pbar = activity_dialog.get_pbar_from_prog(prog)
return pbar | 91f2f9906a4448fb34ee596fe6c55ea44140f95f | 296,443 |
import importlib
def get_library_version(lib):
"""Try to get the version of a library if it has been installed.
Args:
lib (str): The name of library.
Returns:
None | str: If the library has been installed, return version.
"""
try:
lib = importlib.import_module(lib)
except Exception:
version = None
else:
version = lib.__version__
return version | 56ea113f8ed03c131de8e0f45523e41d8956e6a8 | 187,895 |
def sanitize_string(string: str) -> str:
"""
Sanitize the filename to be used in the file system.
"""
output = string
# this is windows specific (disallowed chars)
output = "".join(char for char in output if char not in "/?\\*|<>")
# double quotes (") and semi-colons (:) are also disallowed characters but we would
# like to retain their equivalents, so they aren't removed in the prior loop
output = output.replace('"', "'").replace(":", "-")
return output | 8b346d3644d2c7d3f625f93cde5b39929ad19c63 | 539,229 |
def str_to_sec(time_str):
"""Convert string to seconds.
:param str time_str: time in string format
:returns: seconds
:rtype: float
"""
time_str = time_str.split(".")
sec = sum(x * int(t) for x, t in zip([1, 60, 3600], reversed(time_str[0].split(":"))))
msec = int(time_str[1])/1000 if len(time_str) > 1 else 0.0
return sec + msec | 945857bf822f5fdef895f59c084ab91e3ab7fb76 | 249,136 |
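A brief sketch of str_to_sec above, assuming it is in scope; the part after the dot is interpreted as milliseconds:
print(str_to_sec("1:02:03.500"))  # 3723.5
print(str_to_sec("02:30"))        # 150.0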
def pretty_format_args(*args, **kwargs):
"""
Take the args and kwargs that are passed in and format them in a
prototype style.
"""
args = list([repr(a) for a in args])
for key, value in kwargs.items():
args.append("%s=%s" % (key, repr(value)))
return "(%s)" % ", ".join([a for a in args]) | d3326bf2a36dc2eb002a77aec257682757f7bca5 | 648,633 |
def phoneme_set(transcriptions):
"""Reduce list of lists of phonemes to a set of phonemes."""
transcription_phonemes = set()
for transcription in transcriptions:
for phoneme in transcription:
transcription_phonemes.add(phoneme)
return transcription_phonemes | f288abeb52701e0c30f3ef377c3a175e3239dd2e | 190,797 |
def term_to_integer(term):
"""
Return an integer corresponding to the base-2 digits given by ``term``.
Parameters
==========
term : a string or list of ones and zeros
Examples
========
>>> from sympy.logic.boolalg import term_to_integer
>>> term_to_integer([1, 0, 0])
4
>>> term_to_integer('100')
4
"""
return int(''.join(list(map(str, list(term)))), 2) | 95c014e0cd3c4e0f5b1f3df45278404ce972dafc | 531,947 |
from typing import Any
from typing import Optional
def format_strinfo_error(error: Any, filename: Optional[str] = None) -> str:
"""Format a strinfo error as a string."""
# srcinfo is on major version 0 and the error format is ad-hoc and undocumented so
# this is flexible in case the format changes.
# Current format: {'line': Int, 'error': [Str]}
# with only a single error message in the error list.
filename_prefix = "" if filename is None else f"{filename}:"
try:
line = error["line"]
messages = error["error"]
except KeyError:
return filename_prefix + str(error)
if isinstance(messages, str):
message = messages
else:
try:
message = "; ".join(messages)
except TypeError:
message = str(messages)
return f"{filename_prefix}{line}: {message}" | a4b8db457bc7dec0fb3076f6a11650a7038ddaba | 558,383 |
def bonded(self, atom_a, atom_b):
"""
Check if atom_a is bonded to atom_b given the bonding settings
Parameters
----------
atom_a, atom_b : Atom objects
The atoms to be compared
Returns
-------
bonded_bool : bool
True if the atoms are bonded and False if not
"""
bonded_bool = atom_a.dist(atom_b, ref=self.bonding) <= self.thresh
return bonded_bool | b836c15d72c648921c3343bf813d2ab397acc689 | 330,534 |