content | sha1 | id
---|---|---|
def is_grease(int_value):
    """
    Return whether a value is a GREASE value.
    See https://tools.ietf.org/html/draft-ietf-tls-grease-01
    """
    # Zero-pad so that values like 0x0a0a (hex 'a0a') keep their leading zero.
    hex_str = hex(int_value)[2:].lower().zfill(4)
    if len(hex_str) < 4:
        return False
    first_byte = hex_str[0:2]
    last_byte = hex_str[-2:]
    return (
        first_byte[1] == 'a' and
        last_byte[1] == 'a' and
        first_byte == last_byte
    )
|
4f220e269fc1d2ca9091f401bca12119cebd3969
| 553,748 |
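A quick sanity check for is_grease above (illustrative only, not part of the dataset; assumes the zero-padded hex fix):
# The 16 GREASE values all have the form 0xRARA for R in 0..15.
assert is_grease(0x0a0a)
assert is_grease(0xfafa)
assert not is_grease(0x0a1a)  # bytes differ
assert not is_grease(0x0b0b)  # low nibble is not 0xA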
def check_parentality(object_a, object_b):
    """Check recursively whether object 'a' is a parent (ancestor) of object 'b'"""
    if object_b.parent:
        if object_a == object_b.parent:
            return True
        else:
            return check_parentality(object_a, object_b.parent)
    else:
        return False
|
0ec90527f80b87910e4f284dacef95ca83c5c513
| 89,560 |
def specialty_grain_to_liquid_malt_weight(grain):
    """
    Specialty Grain to LME Weight
    :param float grain: Weight of Specialty Grain
    :return: LME Weight
    :rtype: float
    """
    return grain * 0.89
|
0d21b984e1ec86bf841750fb862c56daa93eae97
| 84,330 |
from typing import Iterable
def create_tooltip_formatter_code(groups: Iterable[str]) -> str:
    """Return tooltip formatter code.
    Parameters
    ----------
    groups : iterable of str
        Groups.
    Returns
    -------
    str
    """
    map_str = ', '.join(
        [f'[{index}, {group!r}]' for index, group in enumerate(groups)])
    return f"""
    function(params){{
        const convert = new Map([{map_str}]);
        if (isNaN(params.value)) {{
            return params.name;
        }}
        return params.name + ': ' + convert.get(params.value);
    }}
    """
|
af31a3a2fa6ec59b5407b04f4ab7e27701e96753
| 550,940 |
import torch
def validating(network, loader):
    """
    Validating the network during and after training
    :param network: trained network
    :param loader: dataloader
    :return: accuracy
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    correct_num = 0
    total_num = 0
    for i, data in enumerate(loader, 0):
        digits, labels = data
        total_num += labels.size(0)
        digits, labels = digits.to(device), labels.to(device)
        outputs = network(digits)
        _, predicted = torch.max(outputs, 1)
        correct_num += ((predicted == labels).sum().to("cpu")).item()
    accuracy = correct_num / total_num
    return accuracy
|
ec8bdb219776bb612e68c49513f048e88c8a5457
| 576,101 |
def test_create_dataframe(df, column_names):
    """
    Test if the DataFrame contains only the columns that you specified as the second argument.
    Test if the values in each column have the same python type.
    Test if there are at least 10 rows in the DataFrame.
    :param df: pandas DataFrame to validate
    :param column_names: expected column names
    :return: True or False
    """
    # The DataFrame contains only the columns that you specified as the second argument.
    df_column_names = df.columns.tolist()
    set_df_column = set(df_column_names)
    set_column = set(column_names)
    if set_df_column != set_column:
        return False
    # The values in each column have the same python type.
    for i in range(len(list(df.dtypes))):
        for j in range(list(df.count())[i]):
            if type(df.iloc[j, i]) != type(df.iloc[0, i]):
                return False
    # There are at least 10 rows in the DataFrame.
    if df.shape[0] < 10:
        return False
    return True
|
cacd0b1a6766f0440edec5ac6198337c56e7d365
| 65,206 |
def checkAlive(health):
    """ check_alive == PEP8 (forced by Codewars) """
    return health > 0
|
35136f3bd7bd6235823e0d1b5d1b75cca8a72bdf
| 182,861 |
import random
def get_random_cluster_service_component(client, cluster, service) -> dict:
    """
    Get a random cluster service component
    Args:
        client: ADCM client API objects
        cluster: some cluster object
        service: some service object in cluster
    Raises:
        :py:class:`ValueError`
            If the service has no components
    """
    components = client.cluster.service.component.list(cluster_id=cluster['id'], service_id=service['id'])
    if components:
        return random.choice(components)
    raise ValueError('Service has no components')
|
93ccbc71b7ff4ad88a0de0d08f222ea8d90b88fe
| 634,249 |
def compare_locations(loc1, loc2):
    """Checks whether locations are within 1kb of each other"""
    if loc1[0] == loc2[0] and loc1[2] == loc2[2] and abs(int(loc1[1]) - int(loc2[1])) < 1000:
        return 'close'
    else:
        return 'far'
|
be29c4e7004d5c4b45463e38e65d22c277002b9f
| 38,513 |
def _split(rect):
    """
    Split a rectangle into two parts.
    'rect' is a rectangle, containing a tuple (x, y, w, h).
    """
    x, y, w, h = rect
    return ((x, y, int(w * 0.57), h),
            (x + w - int(w * 0.57), y, int(w * 0.57), h))
|
8a6886a4334125dbb4e589b69bb0e447e2de889a
| 329,265 |
def _find_logging_config(config):
    """
    Look for the dictionary containing logging-specific configurations,
    starting in the 'distributed' dictionary and then trying the top-level
    """
    logging_keys = {"logging-file-config", "logging"}
    if logging_keys & config.get("distributed", {}).keys():
        return config["distributed"]
    else:
        return config
|
7ba5b108a3a2e71bab9d7ba4261bec9a20e70f5e
| 434,001 |
def _prepare_bill(pdf, page):
    """Prepare the PDF page to be parsed.
    Due to the nature of the document, we need to preserve some whitespace,
    thus we split on returns and remove the last item in the list, which is
    an empty string.
    """
    raw_page = pdf.getPage(page)
    raw_text = raw_page.extractText()
    prepared_page = raw_text.split('\n')[:-1]
    return prepared_page
|
35ba9950e87f2c927a29c402ed7c095c0874864f
| 260,548 |
def block_format(pieces, width=79):
    """Concatenate bytestrings, adding newlines.
    pieces -- iterable of bytestrings
    width -- int (default 79)
    Returns the concatenated pieces (bytes), with newlines added between
    pieces as necessary to avoid lines longer than 'width'.
    Leaves newlines inside 'pieces' untouched, and ignores them in its width
    calculation. If a single piece is longer than 'width', it will become a
    single long line in the output.
    """
    lines = []
    line = b""
    for s in pieces:
        if len(line) + len(s) > width:
            lines.append(line)
            line = b""
        line += s
    if line:
        lines.append(line)
    return b"\n".join(lines)
|
a96785e83b0d756b3b83b61ac8cb7629084cb19e
| 610,710 |
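A usage sketch for block_format above (hypothetical pieces):
pieces = [b"alpha", b"beta", b"gamma"]
assert block_format(pieces, width=10) == b"alphabeta\ngamma"
# b"alpha" + b"beta" is 9 bytes; adding b"gamma" would exceed width=10,
# so a newline is inserted before it.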
def get_test_config(config, test):
    """Get a single test module's config"""
    return config["modules"].get(test)
|
6a8cb61faad7b325a49f0c4e1ce1cea7d6ff5ffa
| 146,351 |
def determine_host(environ):
    """
    Extract the current HTTP host from the environment.
    Return that plus the server_host from config. This is
    used to help calculate what space we are in when HTTP
    requests are made.
    """
    server_host = environ['tiddlyweb.config']['server_host']
    port = int(server_host['port'])
    if port == 80 or port == 443:
        host_url = server_host['host']
    else:
        host_url = '%s:%s' % (server_host['host'], port)
    http_host = environ.get('HTTP_HOST', host_url)
    if ':' in http_host:
        for port in [':80', ':443']:
            if http_host.endswith(port):
                http_host = http_host.replace(port, '')
                break
    return http_host, host_url
|
6e91f93d5854600fe4942f093de593e53aaf2aa0
| 703,680 |
import torch
def get_optimizer(model_params, lr, opt_name):
    """
    Calls the chosen optimizer from the pytorch library.
    Args:
        model_params (generator): model parameters
        lr (float): learning rate
        opt_name (string): optimizer name used for training
    Returns:
        opt (pytorch object): optimizer from the pytorch library
    """
    if opt_name == 'Adam':
        opt = torch.optim.Adam(params=model_params, lr=lr)
    elif opt_name == 'AdamW':
        opt = torch.optim.AdamW(params=model_params, lr=lr)
    elif opt_name == 'RAdam':
        opt = torch.optim.RAdam(params=model_params, lr=lr)
    else:
        raise ValueError("Wrong optimizer name")
    return opt
|
b27b399e400c9a18d3c7fb2bcf78c823d292631c
| 145,029 |
from collections import OrderedDict
def object_attributes_to_ordered_dict(obj, attributes):
    """Returns the specified attributes from the object in an OrderedDict."""
    result = OrderedDict()
    object_vars = vars(obj)
    for attribute in attributes:
        result[attribute] = object_vars[attribute]
    return result
|
2aa1e75669bbe13f8d3fa238dc0c2bb681aa8b72
| 25,232 |
def dumps_double(obj):
    """
    Convert a dbus.types.Double into a floating point representation.
    """
    return "f%r;" % float(obj)
|
80cc1ad91de7846a111f241c8fd4600a7d26dd6f
| 236,184 |
import types
def add_ipython_key_completions(store):
    """Add tab completion that shows you the keys of the store.
    Note: ipython already adds local path listing automatically,
    so you'll still get those along with your valid store keys.
    """
    def _ipython_key_completions_(self):
        return self.keys()
    if isinstance(store, type):
        store._ipython_key_completions_ = _ipython_key_completions_
    else:
        setattr(
            store,
            "_ipython_key_completions_",
            types.MethodType(_ipython_key_completions_, store),
        )
    return store
|
719c329e9a051c718c2738933a0c75a5efc03cac
| 331,534 |
import re
def get_indentation(line):
    """
    Take a line, and return its indentation as a string
    """
    pattern = re.compile(r"(.*?)(\w|\})")
    match = pattern.search(line)
    if not match:
        return
    indentation = match.group(1)
    add_extra_indent = ('public', '}')
    if any(s in line for s in add_extra_indent):
        return indentation + indentation
    return indentation
|
31e17cb1c223dd2051fc08519c4bd51c9b696fb0
| 116,953 |
def _clamp(value: float, v_min: float, v_max: float) -> float:
    """Clamps the value into the range [v_min, v_max].
    e.g., _clamp(50, 20, 40) returns 40.
    v_min should be less than or equal to v_max. (v_min <= v_max)
    """
    if not v_min <= v_max:
        raise ValueError("v_min is the lower bound, which should not exceed v_max")
    if value > v_max:
        value = v_max
    elif value < v_min:
        value = v_min
    return value
|
2b4aac0def0732501ba6155125e59a8f1a6eadca
| 164,342 |
import random
def choice(hosts):
    """
    Select a host at random
    :param hosts: List of hosts
    :return: A randomly selected host.
    """
    return random.choice(hosts)
|
0bfc82a8d85ccf08068b24593d0393dcd974cde0
| 212,250 |
def _skip_sql(sql, options):
    """Check to see if we skip this SQL statement
    sql[in]      SQL statement to evaluate
    options[in]  Option dictionary containing the --skip_* options
    Returns (bool) True - skip the statement, False - do not skip
    """
    prefix = sql[0:100].upper().strip()
    if prefix[0:len("CREATE")] == "CREATE":
        # need to test for tables, views, events, triggers, proc, func, db
        index = sql.find(" TABLE ")
        if index > 0:
            return options.get("skip_tables", False)
        index = sql.find(" VIEW ")
        if index > 0:
            return options.get("skip_views", False)
        index = sql.find(" TRIGGER ")
        if index > 0:
            return options.get("skip_triggers", False)
        index = sql.find(" PROCEDURE ")
        if index > 0:
            return options.get("skip_procs", False)
        index = sql.find(" FUNCTION ")
        if index > 0:
            return options.get("skip_funcs", False)
        index = sql.find(" EVENT ")
        if index > 0:
            return options.get("skip_events", False)
        index = sql.find(" DATABASE ")
        if index > 0:
            return options.get("skip_create", False)
        return False
    # If we skip create_db, need to skip the drop too
    elif prefix[0:len("DROP")] == "DROP":
        return options.get("skip_create", False)
    elif prefix[0:len("GRANT")] == "GRANT":
        return options.get("skip_grants", False)
    elif prefix[0:len("INSERT")] == "INSERT":
        return options.get("skip_data", False)
    elif prefix[0:len("UPDATE")] == "UPDATE":
        return options.get("skip_blobs", False)
    elif prefix[0:len("USE")] == "USE":
        return options.get("skip_create", False)
    return False
|
56a11f151424dc52aff57c54ddef93c71ced50fa
| 516,419 |
from typing import Any
from typing import List
def rec_load(obj: Any, mod: List[str], count: int = 0) -> Any:
    """
    Load recursively JavaPackages and JavaClasses residing inside the JVM:
    python world -> gateway -> JVM -> my_scala_packages
    There is no guarantee your package exists though!
    See the example below for the syntax.
    Parameters
    ----------
    obj : SparkContext instance or Any
        Initial call must take a SparkContext (`pyspark.context.SparkContext`).
        Then obj will represent subsequently:
        - `py4j.java_gateway.JavaGateway`
        - `py4j.java_gateway.JVMView`
        - `py4j.java_gateway.JavaPackage`
        - `py4j.java_gateway.JavaPackage`
        - ...
    mod : List[str]
        List of packages from the SparkContext to your class in the JVM
    count : int, optional
        Counter used for the recursion. Must be 0 at the initial call.
    Returns
    ----------
    obj : Any
        obj is an instance of a JVM object and will represent subsequently:
        - `py4j.java_gateway.JavaGateway`
        - `py4j.java_gateway.JVMView`
        - `py4j.java_gateway.JavaPackage`
        - `py4j.java_gateway.JavaPackage`
        - ...
    Example
    ----------
    >>> pysc = get_spark_context()
    >>> mod = "_gateway.jvm.com.astrolabsoftware.spark3d.geometryObjects"
    >>> jvm_obj = rec_load(pysc, mod.split("."))
    >>> print(type(jvm_obj))
    <class 'py4j.java_gateway.JavaPackage'>
    """
    if count == len(mod):
        return obj
    else:
        return rec_load(getattr(obj, mod[count]), mod, count + 1)
|
433a310c02393c04d943921bb85486e497fa9579
| 92,927 |
def pochhammer(x, k):
    """Compute the Pochhammer symbol (x)_k.
    (x)_k = x * (x+1) * (x+2) * ... * (x+k-1)
    Args:
        x: positive int
        k: non-negative int, the number of factors
    Returns:
        float for (x)_k
    """
    xf = float(x)
    for n in range(x + 1, x + k):
        xf *= n
    return xf
|
9f4641a9deb5b7c7862811baec325c1423d34838
| 218,495 |
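A worked example for pochhammer above: (5)_3 = 5 * 6 * 7 = 210.
assert pochhammer(5, 3) == 210.0
assert pochhammer(3, 1) == 3.0  # (x)_1 == x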
def format_metrics(metrics, mode):
    """Format metrics for logging."""
    result = ''
    for metric in metrics:
        result += '{}_{} = {:.4f} | '.format(mode, metric, float(metrics[metric]))
    return result
|
b53c2d74dcd01b08d3976c3eea656d12b2428351
| 323,458 |
import collections
def _get_output_map_from_op(varmap, op):
    """Returns a dict from op output name to the vars in varmap."""
    iomap = collections.OrderedDict()
    for key in op.output_names:
        vars = []
        for varname in op.output(key):
            vars.append(varmap[varname])
        if len(vars) == 1:
            iomap[key] = vars[0]
        else:
            iomap[key] = vars
    return iomap
|
33d32952c9077bf78c6e4c5672880786ed28075d
| 672,651 |
def get_valid_regs(returned_reg, regs):
    """
    Return the valid registers that can be used to store data.
    """
    valid_regs = []
    for reg in regs:
        if (reg[0] == 'v') and (reg != returned_reg) and (int(reg[1:]) < 16):
            valid_regs.append(reg)
    return valid_regs
|
ba98f970b7b331a2f87939110b611fcc4c1e3b31
| 218,138 |
def recursive_merge_dicts(a, b):
    """Recursively merge two dictionaries.
    Entries in b override entries in a. The built-in update function cannot be
    used for hierarchical dicts, see:
    http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth/3233356#3233356
    Parameters
    ----------
    a : dict
        dictionary to be merged
    b : dict
        dictionary to be merged
    Returns
    -------
    c : dict
        merged dict
    Examples
    --------
    >>> from gammapy.utils.scripts import recursive_merge_dicts
    >>> a = dict(a=42, b=dict(c=43, e=44))
    >>> b = dict(d=99, b=dict(c=50, g=98))
    >>> c = recursive_merge_dicts(a, b)
    >>> print(c)
    {'a': 42, 'b': {'c': 50, 'e': 44, 'g': 98}, 'd': 99}
    """
    c = a.copy()
    for k, v in b.items():
        if k in c and isinstance(c[k], dict):
            c[k] = recursive_merge_dicts(c[k], v)
        else:
            c[k] = v
    return c
|
c4ed405ae2de4b7c7567a68cb25fb2d62cc5a196
| 44,682 |
def choose_robots(exclude_bimanual=False):
    """
    Prints out robot options, and returns the requested robot. Restricts options to single-armed robots if
    @exclude_bimanual is set to True (False by default)
    Args:
        exclude_bimanual (bool): If set, excludes bimanual robots from the robot options
    Returns:
        str: Requested robot name
    """
    # Get the list of robots
    robots = {
        "Panda",
        "Jaco",
        "Kinova3",
        "IIWA",
    }
    # Add Baxter if bimanual robots are not excluded
    if not exclude_bimanual:
        robots.add("Baxter")
    # Make sure set is deterministically sorted
    robots = sorted(robots)
    # Select robot
    print("Here is a list of available robots:\n")
    for k, robot in enumerate(robots):
        print("[{}] {}".format(k, robot))
    print()
    try:
        s = input(
            "Choose a robot "
            + "(enter a number from 0 to {}): ".format(len(robots) - 1)
        )
        # parse input into a number within range; the upper bound must be
        # len(robots) - 1, otherwise an out-of-range index could slip through
        k = min(max(int(s), 0), len(robots) - 1)
    except (ValueError, EOFError):
        k = 0
        print("Input is not valid. Use {} by default.".format(robots[k]))
    # Return requested robot
    return robots[k]
|
c090639380ca301a51973e235ff6bbbbe848c1b6
| 637,144 |
from typing import List
from typing import Tuple
import numpy as np
import torch
def pad_batch(
    batch: List[torch.Tensor],
    max_len: int = 0,
    pad_value: int = 0,
    left_padded: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Convert the batch into a padded tensor and mask tensor.
    Parameters
    ----------
    batch
        The data for padding
    max_len
        Max length of sequence of padding
    pad_value
        The value to use for padding
    left_padded
        If True, pad on the left, otherwise on the right
    Returns
    -------
    Tuple[torch.Tensor, torch.Tensor]
        The padded matrix and corresponding mask matrix.
    """
    batch_size = len(batch)
    max_seq_len = int(np.max([len(item) for item in batch]))  # type: ignore
    if max_len > 0 and max_len < max_seq_len:
        max_seq_len = max_len
    padded_batch = batch[0].new_full((batch_size, max_seq_len), pad_value)
    for i, item in enumerate(batch):
        length = min(len(item), max_seq_len)  # type: ignore
        if left_padded:
            padded_batch[i, -length:] = item[-length:]
        else:
            padded_batch[i, :length] = item[:length]
    mask_batch = torch.eq(padded_batch.clone().detach(), pad_value).type_as(
        padded_batch
    )
    return padded_batch, mask_batch
|
bb542537ebf18f91b59ea10b322b17e4017db9da
| 560,836 |
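A usage sketch for pad_batch above (illustrative; assumes torch is installed and the numpy import added above):
batch = [torch.tensor([1, 2, 3]), torch.tensor([4, 5])]
padded, mask = pad_batch(batch)
# padded: tensor([[1, 2, 3], [4, 5, 0]])
# mask:   tensor([[0, 0, 0], [0, 0, 1]])  -- 1 marks padding positions
Note that the mask is derived by comparing against pad_value, so a genuine 0 in the data would also be flagged as padding.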
def bdev_ocf_set_cache_mode(client, name, mode):
    """Set cache mode of OCF block device
    Args:
        name: name of OCF bdev
        mode: OCF cache mode: {'wb', 'wt', 'pt', 'wa', 'wi', 'wo'}
    Returns:
        New cache mode name
    """
    params = {
        'name': name,
        'mode': mode,
    }
    return client.call('bdev_ocf_set_cache_mode', params)
|
3a7a3d1c8609403f5910c0084d51763d81af416e
| 212,629 |
from typing import Iterable
def check_wires(wires):
    """Checks that ``wires`` is either a non-negative integer or a list of non-negative integers.
    If ``wires`` is an integer, wrap it in a list.
    Args:
        wires (int or list[int]): (subset of) wires of a quantum node
    Return:
        list: list of wire indices
    Raises:
        ValueError: if the wires argument is invalid
    """
    if isinstance(wires, int):
        wires = [wires]
    msg = "wires must be a non-negative integer or a list of non-negative integers; got {}.".format(
        wires
    )
    if not isinstance(wires, Iterable):
        raise ValueError(msg)
    if not all([isinstance(w, int) for w in wires]):
        raise ValueError(msg)
    if any([w < 0 for w in wires]):
        raise ValueError(msg)
    return wires
|
af08f4e0a9081c9f8d0285448275a6939781a48c
| 431,981 |
def format_data(data):
    """
    Convert data into a list of lists of tokens.
    Parameters
    -----------
    :param data: list of str
    :return: list of lists of tokens
    """
    formatted_data = []
    for line in data:
        formatted_data.append(line.split())
    return formatted_data
|
8125dfaddc6c6c57af1aa1b97fb4b68910d185f8
| 455,460 |
def reg_indicator(letter):
    """Return a regional indicator character from the corresponding capital letter.
    """
    return 0x1F1E6 + ord(letter) - ord('A')
|
347bc0191d9604ab2a1eb656b7e7d395f845afee
| 336,960 |
import unicodedata
def normalize_caseless(s):
    """
    Shifts the string into a uniform case (lowercase),
    but also accounting for unicode characters. Used
    for case-insensitive comparisons.
    """
    return unicodedata.normalize("NFKD", s.casefold())
|
4ad36296d6aca14b6691ab38dbd029ecd8f6951e
| 417,835 |
import hashlib
def persistent_hash(s: str):
    """Compute a hash that persists across multiple Python sessions for a
    string."""
    return int(hashlib.sha224(s.encode()).hexdigest(), 16)
|
62bc73d28118e73aa0ce8a731be0247998938482
| 600,833 |
def humanize_name(name):
    """
    "Humanize" a camel case or Python variable name.
    Usage:
    >>> humanize_name('SomeName')
    'Some Name'
    """
    name = name.replace("_", " ")
    parts = []
    buffer = []
    is_last_lower = False
    for chr in name:
        if chr.isupper():
            if is_last_lower:
                parts.append("".join(buffer))
                buffer = [chr]
            else:
                buffer.append(chr)
            is_last_lower = False
        else:
            is_last_lower = True
            buffer.append(chr)
    parts.append("".join(buffer))
    return " ".join(parts)
|
0325b45a2fae1efb3566b591fafe03b3bf2b5a87
| 590,398 |
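Additional illustrative calls for humanize_name above (derived from the code, not from the source):
assert humanize_name("SomeName") == "Some Name"
assert humanize_name("parse_http_response") == "parse http response"
assert humanize_name("HTTPServer") == "HTTPServer"  # runs of capitals stay together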
def split_blocks(bytes_):
    """Split a byte sequence into 16-byte blocks."""
    return [bytes_[start:start+16] for start in range(0, len(bytes_), 16)]
|
cdcc2f359f48c641be6dfaa3731ab7ccefd983dc
| 309,172 |
def fraction_edge(r):
    """Calculate the fraction of coins that landed on edge"""
    total = r['Edge'] + r['Tails'] + r['Heads']
    return r['Edge'] / total
|
0153fc983bc9c2ae3f3d7b3cd1940c11671c71ca
| 26,004 |
def int_to_words(int_val, num_words=4, word_size=32):
    """
    @param int_val: an arbitrary length Python integer to be split up.
        Network byte order is assumed. Raises an IndexError if width of
        integer (in bits) exceeds word_size * num_words.
    @param num_words: number of words expected in return value tuple.
    @param word_size: size/width of individual words (in bits).
    @return: a list of fixed width words based on provided parameters.
    """
    max_int = 2 ** (word_size * num_words) - 1
    max_word_size = 2 ** word_size - 1
    if not 0 <= int_val <= max_int:
        raise IndexError('integer %r is out of bounds!' % hex(int_val))
    words = []
    for _ in range(num_words):
        word = int_val & max_word_size
        words.append(int(word))
        int_val >>= word_size
    words.reverse()
    return words
|
424e55b75bfae2b4815412c7b2ab6c63e5c25730
| 397,291 |
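A worked example for int_to_words above, splitting a 64-bit value into four 16-bit words:
words = int_to_words(0x1122334455667788, num_words=4, word_size=16)
assert words == [0x1122, 0x3344, 0x5566, 0x7788]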
def rgbToHexColor(r, g, b):
    """Convert r, g, b to #RRGGBB."""
    return f'#{int(r):02X}{int(g):02X}{int(b):02X}'
|
5cff9abc67c235a4f0fdf258ea555f018d80d1ad
| 698,311 |
def render_calendar(context):
    """Render venue calendar."""
    return {"request": context.get("request")}
|
d3829238d29273264857f5291a7b32847e40df0d
| 234,826 |
from typing import Any
from typing import List
from typing import Optional
import random
def get_random_item(list_of_items: List[Any], exclude: Optional[List[Any]] = None) -> Any:
    """
    Gets a random item from the given list, excluding the provided items if any
    Args:
        list_of_items: list of items of any data type
        exclude: items that must not be picked
    Returns:
        Randomly picked item
    Raises:
        Exception: if there is no item left to pick
    """
    # Filter first, then pick: the original retry loop could give up (or keep
    # re-drawing excluded items) even while a valid item was still available.
    candidates = [item for item in list_of_items if item not in (exclude or [])]
    if not candidates:
        raise Exception("There is no item to select randomly")
    return random.SystemRandom().choice(candidates)
|
83885121888dea2d56bd16de19952587e8382825
| 543,090 |
import hashlib
def compute_md5_hash(string):
    """Gets md5 digest from a string."""
    md5_hash = hashlib.md5()  # noqa: S303, W291
    md5_hash.update(string.encode('utf-8'))
    return md5_hash.hexdigest()
|
5a7aa538078d937a80cd27c25e131454be6f494d
| 625,151 |
def index(value, i):
    """Returns the item of the given value at index i."""
    return value[i]
|
0f444b3a4c3c545a967a49093f903be8225fa392
| 577,249 |
def get_job_name(table_name: str, incremental_load: bool) -> str:
    """Creates the job name for the beam pipeline.
    Pipelines with the same name cannot run simultaneously.
    Args:
        table_name: a dataset.table name like 'base.scan_echo'
        incremental_load: boolean. whether the job is incremental.
    Returns:
        A string like 'write-base-scan-echo'
    """
    # no underscores or periods are allowed in beam job names
    fixed_table_name = table_name.replace('_', '-').replace('.', '-')
    if incremental_load:
        return 'append-' + fixed_table_name
    return 'write-' + fixed_table_name
|
37509b2165c597426e8264ad7cb3f0ccc0dc020f
| 580,332 |
from typing import List
def _parse_foreign_key_rule(rule: dict, name: str, key: List[str]) -> List[dict]:
    """Parse foreign key rule from resource descriptor.
    Args:
        rule: Foreign key rule from the resource descriptor.
        name: Resource name.
        key: Resource primary key.
    Returns:
        Parsed foreign key rules:
        * `fields` (List[str]): Local fields.
        * `reference['resource']` (str): Reference resource name.
        * `reference['fields']` (List[str]): Reference primary key fields.
        * `exclude` (List[str]): Names of resources to exclude, including `name`.
    """
    rules = []
    for fields in rule["fields"]:
        exclude = rule.get("exclude", [])
        rules.append(
            {
                "fields": fields,
                "reference": {"resource": name, "fields": key},
                "exclude": [name] + exclude,
            }
        )
    return rules
|
d6c465258d7836284093ff38904e28f3b35baf88
| 134,058 |
def nf(stage, fmap_base=8192, fmap_decay=1.7, fmap_max=32, fmap_min: int = 32):
    """Computes the number of feature maps for the current stage.
    This function is inspired by the following Github repo:
    https://github.com/tkarras/progressive_growing_of_gans/blob/master/networks.py
    Arguments:
        stage:
            Integer
    Returns:
        Integer, the number of feature maps wanted at this stage.
    """
    return min(
        max(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_min), fmap_max
    )
|
4b952198276781980757d932b14c98740f109e98
| 650,561 |
def sort_pancake_list(list_of_people):
    """Return sorted list (high to low) of pancake eaters."""
    return sorted(list_of_people, key=lambda t: t[1], reverse=True)
|
19255f2fb058df6f0e42a1ea4b6d8ed64e30f07f
| 293,606 |
def _get_charset(message):
    """Return charset, given a http.client.HTTPMessage"""
    if not message["content-type"] or "charset=" not in message["content-type"]:
        # utter guesswork
        return "utf-8"
    charset = message["content-type"].split("charset=")[1]
    return charset.split(";")[0]
|
2a4d208f57b2e67f51f8ff579ca1a9505b2d41f3
| 487,042 |
def name_to_entity(name: str):
    """Transform an entity name into an entity id."""
    return name.lower().replace(" ", "_")
|
62730076236d9af5632848a7bd9da5fbb2427a0a
| 140,983 |
def get_number_alien_rows(game_settings, starship_height, alien_height):
    """Returns the number of alien rows."""
    available_space_y = game_settings.screen_height - (3 * alien_height) - 3 * starship_height
    number_alien_rows = int(available_space_y / (2 * alien_height))
    return number_alien_rows
|
c99145704a7cf4bfc6474b9ca078f2f1dbf82915
| 649,600 |
def read_until_whitespace(stream, maxchars=None):
    """
    Reads non-whitespace characters and returns them.
    Stops upon encountering whitespace or when maxchars is reached.
    """
    if maxchars == 0:
        return b''
    def _build():
        stop_at = None if maxchars is None else stream.tell() + maxchars
        while maxchars is None or stream.tell() < stop_at:
            tok = stream.read(1)
            if tok.isspace() or not tok:
                break
            yield tok
    return b''.join(_build())
|
b245f3abc8ec0d9267c9faf970d8d722db0636b9
| 572,773 |
def crop(img, top, left, height, width):
    """Crop the given cv2 Image.
    Args:
        img (cv2 Image): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.
    Returns:
        cv2 Image: Cropped image.
    """
    return img[top:top + height, left:left + width]
|
8b1e142f2debcfba94290f14964af54ef84f8983
| 422,326 |
def floor_lg(n: int) -> int:
    """Return floor(log_2(n)) for a positive integer `n`"""
    assert n > 0
    r = 0
    t = 1
    while 2 * t <= n:
        t = 2 * t
        r = r + 1
    return r
|
631cdd7238da5875ecc7b9fbe14715bd6cf7ffa0
| 298,866 |
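Quick checks for floor_lg above (illustrative):
assert floor_lg(1) == 0
assert floor_lg(8) == 3
assert floor_lg(9) == 3  # floor, not ceiling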
from typing import Tuple
def split_group_tag(exiftag: str) -> Tuple[str, str]:
    """split the group and tag from an exiftool tag in format Group:Tag or Tag"""
    if ":" not in exiftag:
        return "", exiftag
    group, tag = exiftag.split(":", 1)
    return group, tag
|
0ff7b37cd002286bc0beb87eb13a2124caf397f2
| 439,529 |
def is_hit(x, y):
    """Return whether given coords hit a circular target of r=1."""
    return x*x + y*y <= 1
|
4f71afa458ad0a891010e1f5a2be3049b0818c71
| 8,149 |
from pathlib import Path
def _load(exclude_past_answers=False):
    """
    Load the full corpus from file.
    """
    pkg = Path(__name__).resolve()
    path_data = pkg.parent / "mordle" / "data" / "wordle-corpus.txt"
    with open(path_data, "r") as f:
        lines = f.readlines()
    result = [line.strip() for line in lines]
    if not exclude_past_answers:
        return result
    path_answers = pkg.parent / "mordle" / "data" / "past-answers.txt"
    with open(path_answers, "r") as f:
        past = f.readlines()
    for word in past:
        result.remove(word.strip())
    return result
|
12baa4eb4eaec03b8fbff460d7588517effe6321
| 110,531 |
def _set_wavefield_save_strategy(requires_grad, dt, inner_dt, scalar_wrapper):
    """Decides which of the source wavefield saving strategies to use.
    The source wavefield must be saved for backpropagation if model gradients
    are required. The C code provides multiple ways of doing this, which are
    applicable in different situations.
    Args:
        requires_grad: Boolean specifying whether model gradients are required
        dt: The time interval between source samples
        inner_dt: The time interval between time steps of the wave propagator
        scalar_wrapper: The object that contains enum values for the strategies
    Returns:
        An enum value specifying which strategy to use
    """
    if requires_grad:
        wavefield_save_strategy = scalar_wrapper.STRATEGY_COPY
    else:
        wavefield_save_strategy = scalar_wrapper.STRATEGY_NONE
    return wavefield_save_strategy
|
0d884c768827f409545fa855470f618e3a1ce711
| 115,503 |
def lettersInString(s):
    """return the number of characters in a string that are neither whitespace nor hyphens"""
    ret = 0
    for c in s:
        if c != ' ' and c != '-':
            ret += 1
    return ret
|
2fdaa35ca1067ea5a2ee81c9351749f67e91c7e9
| 169,688 |
from typing import Any
def target_present(test_result: dict) -> Any:
    """
    Returns a value for ``warehouse.presence_absence.present``
    for the given received *test_result*, or ``...``
    (``Ellipsis``) if the test should be skipped.
    Raises a :py:class:`ValueError` if a value cannot be determined.
    """
    status = (
        test_result.get("targetStatus")
        or test_result.get("sampleState")
    )
    mapping = {
        "Detected": True,
        "NotDetected": False,
        "Positive": True,
        "PositiveControlPass": True,
        "Negative": False,
        "Indeterminate": None,
        "Inconclusive": None,
        # These are valid _workflow_ statuses, but they're not really test
        # results; they describe the circumstances around performing the test,
        # not the result of the test itself. We skip ingesting them for now as
        # there is no place for them in our current data model.
        #
        # I did consider making these map to None/null like Indeterminate.
        # That would make "present is null" results in the database mean "this
        # test was run, but the result is unknown due to circumstances left
        # unspecified". I ultimately decided against it as the goal with ID3C
        # is to aim for simpler data models which are easier to reckon about,
        # not track everything that's performed like a LIMS/LIS does.
        # -trs, 20 Mar 2020
        "Fail": ...,
        "Repeat": ...,
        "Review": ...,
    }
    if not status or status not in mapping:
        raise ValueError(f"Unable to determine target presence given «{test_result}»")
    return mapping[status]
|
65028846a9e66ffd455796742469e3b84a125b75
| 241,366 |
def subtract(value, arg):
    """Number subtraction filter for django template language.
    """
    try:
        return value - arg
    except (ValueError, TypeError):
        return None
|
c4978250bf5c44336f04aaab64a6cbd2f294f4f1
| 231,219 |
def host_available(compute_host):
    """:returns: `True` if compute host is enabled in nova, `False`
    otherwise"""
    return compute_host.state == 'up' and compute_host.status == 'enabled'
|
0ec053887a0e3b67c53c634c5e93babf12677c47
| 115,637 |
def collect_required_arguments(req_args, all_kwargs):
    """Extract a dictionary with the required keys from a larger dictionary.
    Args:
        req_args: List of keys to extract
        all_kwargs: Dictionary with a superset of the required keys.
    Returns:
        req_args_dict: Dictionary with all the required arguments.
    """
    return {key: all_kwargs[key] for key in req_args}
|
c6d741e7e473cbfb5a2158aa9e4397814299a21c
| 240,500 |
import hashlib
def valid_proof(last_proof, proof, last_hash, difficulty):
    """
    Validates the Proof
    :param last_proof: <int> Previous Proof
    :param proof: <int> Current Proof
    :param last_hash: <str> The hash of the Previous Block
    :param difficulty: <int> Number of leading zero bits required
    :return: <bool> True if correct, False if not.
    """
    guess = f'{last_proof}{proof}{last_hash}'.encode()
    guess_hash = hashlib.sha256(guess)
    binary_hash = ''.join(format(n, '08b') for n in guess_hash.digest())
    return binary_hash[:difficulty] == '0' * difficulty
|
3667e7171a65f088974f954de37c0b6e11048412
| 101,627 |
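A minimal proof-of-work loop built on valid_proof above (a sketch with made-up inputs, not from the source):
last_proof, last_hash, difficulty = 100, 'abc', 8
proof = 0
while not valid_proof(last_proof, proof, last_hash, difficulty):
    proof += 1
# 'proof' now yields a SHA-256 digest whose first 8 bits are zero
# (roughly 2**8 = 256 attempts on average).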
def getItemsFromDefaults(defaults, params):
    """Converts defaults to specific items for this view.
    Args:
        defaults: the defaults to instantiate
        params: a dict with params for this View
    """
    result = []
    for url, menu_text, access_type in defaults:
        url = url % params['url_name'].lower()
        item = (url, menu_text % params, access_type)
        result.append(item)
    return result
|
67a68448ff71ec045a7dc444f62d39da8e3d72dc
| 149,981 |
import torch
def convert_text_ids_to_count_vector(text_ids, vector_size):
    """
    Args:
        text_ids: list[int], numericalized text
        vector_size: int, size of the CountVector
    Returns:
        torch.FloatTensor[vector_size]
    """
    count_vector = torch.bincount(text_ids, minlength=vector_size)
    count_vector = count_vector.float()
    return count_vector
|
c7f2d08a15160d4c51f23d31eb38a68b5c338da4
| 192,819 |
def abort_multipart_upload(resource, bucket_name, object_name, upload_id):
    """Abort an in-progress multipart upload"""
    mpupload = resource.MultipartUpload(bucket_name, object_name, upload_id)
    return mpupload.abort()
|
93535c2404db98e30bd29b2abbda1444ae4d0e8a
| 443 |
import re
def extract_sample_name(has_a_sample, sample_names):
    """
    Useful for matching sample names in larger strings such as fastq file names.
    Note that we must sort the sample names by length in order to return the longest match:
    e.g. sample_abc123-IGO-XXX, [sample_abc123, sample_abc12] --> sample_abc123
    :param: has_a_sample String that has a Sample ID inside (usually a file path)
    :param: sample_names String[] that contains all possible sample IDs to be found in `has_a_sample`
        Note that if the target sample ID is not in this list, the wrong sample ID may be returned.
    """
    sample_names = sorted(sample_names, key=len, reverse=True)
    sample_name_search = r"|".join(sample_names)
    sample_name_search = r".*(" + sample_name_search + ").*"
    return re.sub(sample_name_search, r"\1", has_a_sample)
|
abee7e40d88a4778a2e8f6b20e603f0c9f3a1319
| 267,472 |
def strip_prefix(prefix, key):
    """
    Strip the prefix of baggage items.
    :param prefix: Prefix to be stripped.
    :type prefix: str
    :param key: Baggage item to be stripped
    :type key: str
    :return: Stripped baggage item
    :rtype: str
    """
    return key[len(prefix):]
|
9eacbc8d4e93c6a65a898f6a01e59316355766d6
| 558,246 |
def get_tweets(api, t_user):
    """Get tweets from api.
    Parameters
    ----------
    api : tweepy.API
        twitter api object
    t_user : str
        The username of twitter you want to get.
    Returns
    -------
    list
        A list of tweets.
    """
    # test authentication
    try:
        api.verify_credentials()
        print('Authentication OK')
    except Exception:
        print('Error during authentication')
        exit()
    user = api.get_user(t_user)
    tweets = api.user_timeline(screen_name=user.screen_name, count=10,
                               include_rts=False, exclude_replies=True,
                               tweet_mode='extended')
    return tweets[:10]
|
27a3221ee1a7bf52a404c5cc286c9f4d8756363f
| 167,206 |
def get_nonref_alleles(GT_string):
    """Take a VCF genotype (GT) string and return a set containing all non-reference alleles"""
    alleles = set(GT_string.split('/'))
    try:
        alleles.remove('.')  # remove missing genotypes
    except KeyError:
        pass
    try:
        alleles.remove('0')  # remove reference alleles
    except KeyError:
        pass
    return alleles
|
31f296b9bfd5f979de1d8145d360276d86fc71ad
| 421,325 |
import fnmatch
import re
def convertPatternsToRegexp(patterns):
    """Converts multiple file name patterns to a single regular expression"""
    if not patterns:
        return None
    fullRe = ""
    for pattern in patterns:
        reStr = fnmatch.translate(pattern)
        if fullRe:
            fullRe += "|"
        fullRe += "(" + reStr + ")"
    return re.compile(fullRe)
|
0bdd23718c104dc1c6ab3c77f4ddd90979feca7b
| 157,116 |
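A usage sketch for convertPatternsToRegexp above (hypothetical patterns):
regex = convertPatternsToRegexp(["*.py", "*.txt"])
assert regex.match("script.py")
assert regex.match("notes.txt")
assert not regex.match("image.png")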
def round_fracs(amts):
    """
    Rounds an array, ensuring that sum(result) == sum(amts).
    Uses even rounding (0.5 rounds to the nearest even number).
    All fractions are closed (accumulated) to the last element.
    """
    if len(amts) == 0:
        return []
    if len(amts) == 1:
        return [round(amts[0])]
    first = [round(amt) for amt in amts[:-1]]
    return first + [int(sum(amts) - sum(first))]
|
488831b8e71215c965e08e12f4777112ff09ac84
| 521,091 |
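A worked example for round_fracs above, showing both banker's rounding and total preservation:
amts = [0.5, 0.5, 1.0]                 # sums to 2.0
assert round_fracs(amts) == [0, 0, 2]  # round(0.5) == 0; the remainder accumulates into the last element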
def row2dict(row):
    """Takes sqlite3.Row objects and converts them to dictionaries.
    This is important for JSON serialization because otherwise
    Python has no idea how to turn a sqlite3.Row into JSON."""
    x = {}
    for col in row.keys():
        x[col] = row[col]
    return x
|
1a4f8ac13a0116a37d1b1c6b85834c4a463fbefb
| 132,363 |
import re
def replace_double_inline_code_marks(text: str) -> str:
    """Finds and replaces cases where we have `` to `."""
    # A raw string is required here: in a plain string "\b" is a backspace
    # character, not a regex word boundary.
    return re.sub(r"(`+\b)|(\b`+)", "`", text)
|
29978384deb490e3c7553242592f9b774370ff2e
| 118,305 |
def _get_buildlogger_handler_info(logger_info):
    """
    Returns the buildlogger handler information if it exists, and None
    otherwise.
    """
    for handler_info in logger_info["handlers"]:
        handler_info = handler_info.copy()
        if handler_info.pop("class") == "buildlogger":
            return handler_info
    return None
|
47b08a27b833fae7546fb81547f8bf6796a57fb0
| 482,100 |
def get_MAP(chain_name):
    """Read maximum posterior parameters from a stats file of multinest.
    Parameters
    ----------
    chain_name : str
        Root for the chain files
    Returns
    -------
    list-like
        Best-fitting parameters
    """
    stats_file = chain_name + 'stats.dat'
    with open(stats_file, 'r') as fl:
        lines = fl.readlines()
    ind = [i for i in range(len(lines)) if 'MAP' in lines[i]][0]
    params = [float(l.split()[1]) for l in lines[ind + 2:]]
    return params
|
15481f9c966ae0ef166b3f80cfcc769a6f7e3e14
| 446,207 |
import hashlib
import json
def get_python_dict_hash_sha256(python_dict):
    """Function to get a sha256 hash from a Dict
    :type python_dict: Dict
    :param python_dict: Any python Dict
    :rtype: String
    :returns: Hash Value
    """
    return hashlib.sha256(json.dumps(python_dict).encode('utf-8')).hexdigest()
|
1c5e8694b51e9f103bb7366848ede97acdf6da3d
| 373,512 |
def convert_str_to_list(input_string):
    """
    Convert a comma-separated string to a list
    """
    parts = input_string.split(",")
    return [item.strip(' ') for item in parts]
|
6e183c275d37950679cc3fa63fd7f9a0998c85a2
| 410,262 |
def generate_shared_public_key(my_private_key, their_public_pair, generator):
    """
    Two parties each generate a private key and share their public key with the
    other party over an insecure channel. The shared public key can be generated by
    either side, but not by eavesdroppers. You can then use the entropy from the
    shared public key to create a common symmetric key for encryption. (This
    is beyond the scope of pycoin.)
    See also <https://en.wikipedia.org/wiki/Key_exchange>
    :param my_private_key: an integer private key
    :param their_public_pair: a pair ``(x, y)`` representing a public key for the ``generator``
    :param generator: a :class:`Generator <pycoin.ecdsa.Generator.Generator>`
    :returns: a :class:`Point <pycoin.ecdsa.Point.Point>`, which can be used as a shared
        public key.
    """
    p = generator.Point(*their_public_pair)
    return my_private_key * p
|
9b4468cd0be73ae662d4a3f6952e06a561dd8319
| 387,295 |
def is_around_angle(test, angle, offset):
    """
    Checks if a test angle is close to the angle or not.
    Parameters
    ----------
    test : float
        Angle to test in Degrees.
    angle : float
        Angle to compare in Degrees.
    offset : float
        Tolerance around 'angle' in degrees.
    Returns
    -------
    bool
        True if it is in the range [angle-offset, angle+offset].
    """
    return (angle - offset) <= test <= (angle + offset)
|
d4e3e0e8014243ef4d96c3bf330fb539dfaf0490
| 196,198 |
import json
def get_value_of_key_from_json(json_obj, key):
    """return the value of key in the provided json object"""
    value = json.loads(json_obj)[key]
    return value
|
ed3d2d8edeb0caf151bafb6e08b5d9250382ec6e
| 375,924 |
def add(bowl_a, bowl_b):
    """Return bowl_a and bowl_b added together"""
    return bowl_a + bowl_b
|
e12bb4d5f4d21f4ae113f064d62d0db2ea6f8014
| 35,396 |
def lexical_diversity(tokens):
    """Returns a *case-sensitive* lexical diversity measure. We want to keep case forms
    of the same word as these are considered different tokens in this corpus. `tokens`
    is a list of token strings."""
    return len(set(tokens)) / len(tokens)
|
b0ea9d1a1b921f3175cb6617dd540cd8d8eba596
| 455,272 |
def beta_mean(x, y):
    """Calculates the mean of the Beta(x,y) distribution
    Args:
        x (float): alpha (shape) parameter
        y (float): beta (scale) parameter
    Returns:
        float: x / (x+y)
    """
    output = x / (x + y)
    return output
|
24cbb908cff98886b11443fb5a6b8603731ecc3b
| 623,552 |
def _NormalizeResourceFormat(resource_format):
    """Translate Resource Format from gcloud values to config-connector values."""
    if resource_format == 'terraform':
        return 'hcl'
    return resource_format
|
463b98ec9cbb8f41944d9a5217838bcef2f7bb0b
| 51,749 |
import re
def replace_aea_add_statements(
    content: str, old_string: str, new_string: str, type_: str
) -> str:
    """Replace statements of the type: 'aea add <type> <old_string>'."""
    if type_ != "agents":
        content = re.sub(
            fr"aea +add +{type_} +{old_string}",
            f"aea add {type_} {new_string}",
            content,
        )
    return content
|
b0670f2ffc759266c2609be21efda86ca83a3ea1
| 19,921 |
def odml_tuple_export(odml_tuples):
    """
    Converts odml style tuples to a parsable string representation.
    Every tuple is represented by brackets '()'. The individual elements of a tuple are
    separated by a semicolon ';'. The individual tuples are separated by a comma ','.
    An odml 3-tuple list of 2 tuples would be serialized to: "[(11;12;13),(21;22;23)]".
    :param odml_tuples: List of odml style tuples.
    :return: string
    """
    str_tuples = ""
    for val in odml_tuples:
        str_val = ";".join(val)
        if str_tuples:
            str_tuples = "%s,(%s)" % (str_tuples, str_val)
        else:
            str_tuples = "(%s)" % str_val
    return "[%s]" % str_tuples
|
24fb4342f4dd6183f3579cda6f23884e4bc07f04
| 257,609 |
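A worked example for odml_tuple_export above (values chosen for illustration):
tuples = [("11", "12", "13"), ("21", "22", "23")]
assert odml_tuple_export(tuples) == "[(11;12;13),(21;22;23)]"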
def find_outlier_bounds(df, col_to_check, groupby=None):
    """Calculate upper and lower outlier bounds.
    Outliers are defined as data points 1.5x the interquartile range
    for all other data with matching ParseKey parameters,
    NOT for the total dataset.
    Parameters
    ----------
    df : a pandas DataFrame
    col_to_check : str
        the name of a column in df in which to check for outliers
    groupby : str or list, optional
        a column name or list of column names by which to group df before
        calculating outliers separately
    Returns
    -------
    a tuple (lower_outlier_bound, upper_outlier_bound) where each position
    contains a pandas Series, indexed by groupby column(s) if provided (multiple
    columns will result in a multiindex)
    """
    if groupby is None:
        grouped = df
    else:
        grouped = df.groupby(groupby)
    percentile_75 = grouped[col_to_check].quantile(0.75)
    percentile_25 = grouped[col_to_check].quantile(0.25)
    interquartile_range = percentile_75 - percentile_25
    lower_outlier_bound = percentile_25 - 1.5 * interquartile_range
    upper_outlier_bound = percentile_75 + 1.5 * interquartile_range
    return lower_outlier_bound, upper_outlier_bound
|
75631ce5efb4e32290d9fc61840cdb204d7de4b2
| 501,916 |
def default(base, deft):
    """Return the deft value if base is not set.
    Otherwise, return base."""
    if base == 0.0:
        return base
    return base or deft
|
98dd697f762acb056cb491d31c54e2d8ad47475e
| 686,243 |
def _nearest_mult_of_8(number, up=True):
    """ Find the nearest multiple of 8, rounding up or down """
    if up:
        return ((number + 7) // 8) * 8
    else:
        return (number // 8) * 8
|
9b3c80123d9f560e788218429b4dc0a2ebda8d8d
| 393,275 |
def calc_checksum(input):
    """
    calculate the checksum
    :param input: number as string, e.g. "12345"
    :return: int: checksum (1*z1 + 2*z2 + 3*z3 + 4*z4 + ... + n*zn) % 10
    """
    checksum = 0
    for i, val in enumerate(input):
        checksum += (i + 1) * int(val)
    return checksum % 10
|
6389a0e4acd7e371f5e44e3ae32cd45bc32b586d
| 360,901 |
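A worked example for calc_checksum above:
# "12345" -> (1*1 + 2*2 + 3*3 + 4*4 + 5*5) % 10 = 55 % 10 = 5
assert calc_checksum("12345") == 5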
import torch
def to_tensor(im, dims=3):
    """ Converts a given ndarray image to a torch tensor image.
    Args:
        im: ndarray image (height x width x channel x [sample]).
        dims: dimension number of the given image. If dims = 3, the image should
            be in (height x width x channel) format; while if dims = 4, the image
            should be in (height x width x channel x sample) format; default is 3.
    Returns:
        torch tensor in the format (channel x height x width) or (sample x
        channel x height x width).
    """
    assert dims == 3 or dims == 4
    if dims == 3:
        im = im.transpose((2, 0, 1))
    elif dims == 4:
        im = im.transpose((3, 2, 0, 1))
    else:
        raise NotImplementedError
    return torch.from_numpy(im)
|
d19a0c0104f4dc9401f70235cadb7266ffd01332
| 17,103 |
def mean_coord_by_values(df, coordinates_vars, var2agg):
    """Compute the average positions for the values of a variable.
    Parameters
    ----------
    df: pd.DataFrame
        the data in dataframe form.
    coordinates_vars: list
        the list of the coordinates variables.
    var2agg: str
        the id of the categorical variable which we want to use to collapse
        locations.
    Returns
    -------
    table: pd.DataFrame
        the data of the mean average locations.
    """
    #table = df.pivot_table(index=var2agg, values=coordinates_vars)
    table = df[coordinates_vars+[var2agg]].groupby(var2agg).mean()
    return table
|
d373fd79e83de47736eb8ade7c688d97783e15ca
| 172,263 |
def get_numbered_link_label(link_label: str, number: int) -> str:
    """Number a link"""
    return "%s:_%d" % (link_label, number)
|
a6522cf5187d4776eddcf8ede9b699ce8b34c131
| 146,788 |
def index_is_sorted(series, ascending=True, exception=True):
    """
    check if the (datetime-) index of a pandas.Series is sorted
    :param pandas.Series series: the series holding the index to check
    :param bool ascending: if true, check for ascending order, if false for
        descending order
    :param bool exception: if True, raise an exception in case of unsorted index
    :return: True if index is sorted, False otherwise.
    :rtype: bool
    """
    # honour the 'ascending' flag when sorting for the comparison
    if not all(series.index.sort_values(ascending=ascending) == series.index):
        if exception:
            order = 'ascending' if ascending else 'descending'
            raise ValueError('time series index is not sorted (%s)' % order)
        else:
            return False
    return True
|
7bdc8160cca4ae18d625566f7a8438fea40522f5
| 300,317 |
def tls_params(mqtt_config):
    """Return the TLS configuration parameters from a :class:`.MQTTConfig`
    object.
    Args:
        mqtt_config (:class:`.MQTTConfig`): The MQTT connection settings.
    Returns:
        dict: A dict {'ca_certs': ca_certs, 'certfile': certfile,
        'keyfile': keyfile} with the TLS configuration parameters, or None if
        no TLS connection is used.
    .. versionadded:: 0.6.0
    """
    # Set up a dict containing TLS configuration parameters for the MQTT
    # client.
    if mqtt_config.tls.hostname:
        return {'ca_certs': mqtt_config.tls.ca_file,
                'certfile': mqtt_config.tls.client_cert,
                'keyfile': mqtt_config.tls.client_key}
    # Or don't use TLS.
    else:
        return None
|
4b5d214a50fea60f5cb325fc7a0c93dfa9cb3c02
| 707,787 |
import gzip
def gunzip_bytes_obj(bytes_obj: bytes) -> str:
    """Decompress a gzip-compatible binary string."""
    return gzip.decompress(bytes_obj).decode()
|
6200b44d8bdb6cdf2eef82ee9606771fac9b8a33
| 269,928 |