content (string, 39-9.28k chars) | sha1 (string, 40 chars) | id (int64, 8-710k)
---|---|---|
def TruncateString(raw_string, max_length):
"""Returns truncated raw_string based on max length.
Args:
raw_string: String to be truncated.
max_length: max length of string.
Returns:
string: The truncated string.
"""
if len(raw_string) > max_length:
return raw_string[:max_length]
return raw_string | 3cd270e30d66199350169bef50db72a1e7c5ce49 | 392,725 |
import six
def _MergeDicts(a, b):
"""Shallow merge two dicts.
    If both dicts have values for the same key and both values are lists, they are concatenated.
Args:
a: a dict object. This will be updated with merged items.
b: a dict object.
Returns:
a merged dict object.
"""
for key, value in six.iteritems(b):
if key not in a:
a[key] = value
continue
value_a = a[key]
if not isinstance(value_a, list) or not isinstance(value, list):
raise ValueError(
'values for key %s are not lists: value1 = %s, value2 = %s' %
(key, value_a, value))
a[key] += value
return a | 4bf7a5ba20806003e7cd92fc6f9aa415301d8bc6 | 636,116 |
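# Hypothetical usage sketch for _MergeDicts (not part of the original record):
# keys only in `b` are copied over, and list values under a shared key are
# concatenated in place into `a`.
a = {'x': [1, 2], 'y': 'kept'}
b = {'x': [3], 'z': 'new'}
merged = _MergeDicts(a, b)
assert merged == {'x': [1, 2, 3], 'y': 'kept', 'z': 'new'}
assert merged is a  # `a` is mutated and returned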
def calc_metrics(tp, p, t, percent=True):
"""
compute overall precision, recall and FB1 (default values are 0.0)
if percent is True, return 100 * original decimal value
"""
precision = tp / p if p else 0
recall = tp / t if t else 0
fb1 = 2 * precision * recall / (precision + recall) if precision + recall else 0
if percent:
return 100 * precision, 100 * recall, 100 * fb1
else:
return precision, recall, fb1 | 27a17e41d6234ae1bbd2e0dcca3231b858625699 | 475,128 |
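# Hypothetical usage sketch for calc_metrics: 8 true positives out of
# 10 predicted and 16 gold entities.
p, r, f = calc_metrics(tp=8, p=10, t=16)
assert (p, r) == (80.0, 50.0)              # percentages by default
assert abs(f - 61.538461538461537) < 1e-9  # harmonic mean of 80 and 50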
def remove_doubles(lst):
"""given a sorted list returns a new sorted list with duplicates removed"""
    if not lst:
        return []
    newlist = [lst[0]]
for i in range(1,len(lst)):
if newlist[-1] != lst[i]:
newlist.append(lst[i])
return newlist | a1721b8b0975166a9239e09b6a40153d73214e8f | 497,739 |
def month_to_season(monthnum):
"""
    Converts a zero-based month number (0 = January ... 11 = December) to a season number.
0 - Winter
1 - Spring
2 - Summer
3 - Fall
"""
if (0 <= monthnum <= 1) or (monthnum == 11):
return 0
elif 2 <= monthnum <= 4:
return 1
elif 5 <= monthnum <= 7:
return 2
elif 8 <= monthnum <= 10:
return 3 | ab8401da76bf9719036f9ec3243d447295da7511 | 155,600 |
def _calc_type_bit_size(bit_size: int) -> int:
"""Calculate the bit length of a data type which can express the given bit length"""
if bit_size <= 8:
return 8
elif bit_size <= 16:
return 16
else:
return 32 | 00c5f91cf7ae96a12659efd4805558e043fc81d9 | 357,688 |
import random
def check_or_get_value(value, valid_value_set, is_continuous=False):
"""
    Check that the given value of the specified property is a valid one, or randomly
    select one from the valid value set if value is None, and return the value.
    is_continuous denotes whether the value is continuous (True) or discrete (False).
"""
if not is_continuous:
if value is None:
assert len(valid_value_set) > 0, \
"invalid value set %s is provided" % (valid_value_set, )
return random.choice(valid_value_set)
else:
assert value in valid_value_set, \
"input value %s is not in valid value set %s" % (value, valid_value_set)
return value
else:
if value is None:
assert len(valid_value_set) == 2 and valid_value_set[0] < valid_value_set[1], \
"invalid value range $s is provided" % (valid_value_set, )
return random.uniform(*valid_value_set)
else:
assert value >= valid_value_set[0] and value <= valid_value_set[1], \
"input value %s is not in valid value set %s" % (value, valid_value_set)
return value | d3a7554ecd150a71b95278e40927a34107526795 | 351,945 |
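# Hypothetical usage sketch for check_or_get_value.
# Discrete: None picks a random valid value; an explicit value is validated.
assert check_or_get_value('relu', ['relu', 'tanh']) == 'relu'
assert check_or_get_value(None, ['relu', 'tanh']) in ('relu', 'tanh')
# Continuous: the valid set is a (low, high) range sampled uniformly.
x = check_or_get_value(None, [0.0, 1.0], is_continuous=True)
assert 0.0 <= x <= 1.0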
from typing import Dict
from typing import OrderedDict
def _get_required_files(logdir: str, load_map: Dict[str, str]) -> Dict[str, str]:
"""
    Generate the files required to load the model, criterion,
    scheduler, and optimizer specified in ``load_map``.
Expected that ``load_map`` contains keys:
``"model"``, ``"criterion"``, ``"optimizer"``, ``"scheduler"``.
Otherwise an empty dict will be generated.
Arguments:
logdir: directory with logs
load_map (Dict[str, str]): dict with specification what should be loaded
Returns:
Mapping from file to parts required from this file.
"""
if load_map is None:
return OrderedDict()
default_states = {"best", "best_full", "last", "last_full"}
required_full_checkpoint = ["criterion", "optimizer", "scheduler"]
steps = ["global_epoch_step", "global_batch_step", "global_sample_step"]
experiment_parts = ["model"] + required_full_checkpoint + steps
# keep required parts
experiment_parts = list(filter(lambda part: part in load_map, experiment_parts))
# avoid unnecessary loading
if "model" in experiment_parts and len(experiment_parts) > 1:
required_full_checkpoint.append("model")
# mapping - <filename>: <list of parts to load from this file>
required_files = OrderedDict()
for part in experiment_parts:
fname = load_map[part]
required_full = fname.endswith("_full")
# specified default state
if fname in default_states:
if part in required_full_checkpoint and not required_full:
fname = fname + "_full"
fname = f"{logdir}/{fname}.pth"
# in other case specified path to checkpoint
required_files[fname] = required_files.get(fname, []) + [part]
return required_files | acabfd9e52a1e272889d218ad0b0ed5bbf284e44 | 208,999 |
def conslice(sim_mat, sep):
"""Slices a confusion matrix out of similarity matrix based on sep"""
images = sim_mat[:sep]
slices = []
for i in range(len(images)):
slices.append(images[i][sep:])
return slices | 62e7591015beb317fab694089f5253f97cda7f34 | 667,087 |
def get_pokemon_id(species_ids):
"""Asks the user for a pokemon number for which they'd like the stats.
If the user enters something invalid (ie. anything that's not a valid
number of a pokemon), warns the user and repeats until they enter a valid
pokemon number.
Args:
species_ids: a list of species ids (ints) sorted in increasing order
Returns:
the id chosen by the user as an int
"""
max_pokemon_id = max(species_ids)
# This is the prompt you'll display to the user to ask for their choice.
# Don't modify it!
prompt = 'Enter a pokemon number (1 - {0}): '.format(max_pokemon_id)
# This is the prompt you'll display to the user if they enter something
# invalid. Don't modify it! However, you'll be required to fill in the
# placeholders with the appropriate values.
warning = 'Please enter a number between 1 and {0}'.format(max_pokemon_id)
while True:
species_id = input(prompt)
if species_id.isdigit():
species_id = int(species_id)
if species_id in species_ids:
break
print(warning)
return species_id | dea5a07df0dcb8c1be71a7f3083b2c7c3cced7ab | 347,761 |
from typing import Dict
from typing import Tuple
def create_indexed_ngrams(ngrams: Dict[str, int]) -> Dict[int, Tuple[int, str]]:
"""
    Create an indexed version of an n-gram mapping. This adds a unique identifier to every
    (ngram, frequency) pair, stored as ``identifier -> (frequency, ngram)``.
:param ngrams:
:return:
"""
identifier = 1
output = {}
for (ngram, freq) in ngrams.items():
output[identifier] = (freq, ngram)
identifier += 1
return output | 41b58a74809973b6497a83600f1b973ac0a2986c | 373,053 |
def _get_atom_coord(component, at_id, conformer_type):
"""Retrieve 3D coordinates for the particular atom in a given
conformer.
Args:
component (Component): component to be processed.
at_id (int): atom id to be retrieved.
conformer_type (ConformerType): conformer type
Returns:
rdkit.Geometry.rdGeometry.Point3D: 3D coordinates of the atom.
"""
conf_id = component.conformers_mapping[conformer_type]
return component.mol.GetConformer(conf_id).GetAtomPosition(at_id) | 1af192df7b6ba24cafb771f9d91506508352dcfe | 252,867 |
def extend_set(list_to_extend: list, new_elements: list) -> list:
"""
    Helper function to extend a list while maintaining the set property.
    The elements are stored as lists, so this function preserves order while
    skipping items that are already present.
    Returns:
        list of elements that were actually added to the set
"""
set_repr = set(list_to_extend)
added_elements = [s for s in new_elements if s not in set_repr]
list_to_extend.extend(added_elements)
return added_elements | c48fed17ed962c4f2f72bc06839d78445da28eb0 | 549,036 |
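# Hypothetical usage sketch for extend_set: only unseen items are appended,
# and the function returns exactly those items. Note that duplicates *within*
# new_elements survive, since the membership set is built before the loop.
items = ['a', 'b']
added = extend_set(items, ['b', 'c', 'c'])
assert items == ['a', 'b', 'c', 'c']
assert added == ['c', 'c']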
def get_content_latin1(thepath: str):
"""Returns the file content by interpreting the encoding as "LATIN-1" instead of UTF-8. LATIN-1 has exactly 255 possible characters with no gaps, so it is perfect for reading text and binary data without errors.
Examples:
>>> test = get_content_latin1('test_text.txt')\n
>>> test.splitlines()\n
['ok', 'so here is some', "'blah blah'", 'We are planning', 'To use this ', 'in not only', 'normal testing...', 'but also for "gzip compressed ', 'text searching"', 'I know... sounds cool', "Let's see if it works!"]
>>> binary_test = get_content_latin1('/bin/bash')\n
>>> binary_test[:15]\n
'\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00'
Args:
thepath (str): Reference the file to read.
Returns:
str: Returns the content of the file as one long string
"""
with open(thepath, encoding="latin-1") as f:
content = f.read()
return content | b2ce67345800acc34d43433c65c1fb80132f2761 | 283,828 |
import types
def module_local_public_functions(module):
"""Get a list of function names defined in a module.
Ignores functions that start with `_` and functions
imported from other modules.
"""
local_functions = []
module_name = module.__name__
for name in dir(module):
if not name.startswith('_'):
attr = getattr(module, name)
if isinstance(attr, types.FunctionType) and attr.__module__ == module_name:
local_functions.append(name)
return local_functions | 6a39f1d1f06cabcab5b4e1599d2066d9e2d62da8 | 379,107 |
from typing import Any
def any2str(value: Any) -> str:
"""Converts value to string using `str(value)`.
"""
return str(value) | 5d291c0af31820cb4c7ea0924ff6f1b478ec2c9c | 371,178 |
def get_number_of_pieces_and_kings(spots, player_id=None):
"""
Gets the number of pieces and the number of kings that each player has on the current
board configuration represented in the given spots. The format of the function with defaults is:
[P1_pieces, P2_pieces, P1_kings, P2_kings]
and if given a player_id:
[player_pieces, player_kings]
"""
piece_counter = [0,0,0,0]
for row in spots:
for element in row:
if element != 0:
piece_counter[element-1] = piece_counter[element-1] + 1
    if player_id is True:
        return [piece_counter[0], piece_counter[2]]
    elif player_id is False:
return [piece_counter[1], piece_counter[3]]
else:
return piece_counter | bdf0b12e7eac46247146b5c5112c817900933223 | 599,881 |
from typing import Any
def get_logging_id(obj: Any) -> Any:
"""Returns logging ID for object.
Arguments:
obj: Any object
Returns:
1. `logging_id` attribute if `obj` does have it, or..
2. `__qualname__` attribute if `obj` does have it, or..
3. `str(obj)`
"""
return getattr(obj, 'logging_id', getattr(obj, '__qualname__', str(obj))) | 9e441a232462ff577b2aaef4ac075fdd5a56a85d | 601,059 |
def matches_any(cf_stack_name: str, stack_refs: list):
"""
Checks if the stack name matches any of the stack references
"""
cf_stack_name = cf_stack_name or "" # ensure cf_stack_name is a str
try:
name, version = cf_stack_name.rsplit("-", 1)
except ValueError:
name = cf_stack_name
version = ""
return any(ref.matches(name, version) for ref in stack_refs) | 17f33283983aa04e6162c86c837093f9c5dfddf5 | 70,323 |
def interval_to_errors(value, low_bound, hi_bound):
"""
Convert error intervals to errors
:param value: central value
:param low_bound: interval low bound
:param hi_bound: interval high bound
:return: (error minus, error plus)
"""
error_plus = hi_bound - value
error_minus = value - low_bound
return error_minus, error_plus | ffee403968ddf5fd976df79a90bdbb62474ede11 | 706,297 |
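# Hypothetical usage sketch for interval_to_errors: an interval of
# [4.2, 5.9] around a central value of 5.0 becomes asymmetric errors.
minus, plus = interval_to_errors(5.0, 4.2, 5.9)
assert abs(minus - 0.8) < 1e-12 and abs(plus - 0.9) < 1e-12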
import multiprocessing
def _run_in_process(target, *args, **kwargs):
"""Runs target in process and returns its exitcode after 10s (None if still alive)."""
process = multiprocessing.Process(target=target, args=args, kwargs=kwargs)
process.daemon = True
try:
process.start()
# Do not need to wait much, 10s should be more than enough.
process.join(timeout=10)
return process.exitcode
finally:
if process.is_alive():
process.terminate() | 4ac57947c821a02b8e0ffe55f2f577a7847f248b | 653,458 |
def Ch(x, y, z):
"""Choose function
x chooses if value comes from y or z 1 means the bit comes from y
and 0 means the bit comes from z
    Ch(x, y, z) = (x ∧ y) ⊕ (¬x ∧ z)
"""
return (x & y) ^ ((x ^ 0xffffffff) & z) | f1b474aa2f7083f48b977b7dd34d9c393298e86a | 207,850 |
def find_biggest_pattern_in_patterns(patterns):
    """
    patterns: dictionary of translation vector -> pattern
    Returns the biggest pattern and its corresponding translation vector.
    """
    max_length = -1
    pattern = None
    trans_vector = None
    for key, value in patterns.items():
        if len(value) > max_length:
            max_length = len(value)
            trans_vector = key
            pattern = value
return pattern, trans_vector | 34e658369145ccb30d25ba5d1d80085a61f68413 | 44,931 |
import requests
import json
def get_status_from_api(url):
"""
Get the current API status.
    Parameters
----------
url (str): The API url.
Return
------
status (dict): The API status.
"""
url += "ping"
r = requests.get(url)
return json.loads(r.content) | ca01e8952aaf67508eac2d22bfe3ba9a88790c2e | 326,941 |
def validateEmail(email, planb):
"""Do a basic quality check on email address, but return planb if email doesn't appear to be well-formed"""
email_parts = email.split('@')
if len(email_parts) != 2:
return planb
return email | 59e20e925257c9cbe753fd205de673506447cf31 | 173,631 |
def str_to_sha256(obj):
"""Convert a hexidecimal string to a bytearray.
Args
----
obj (str): Must be a hexadecimal string.
Returns
-------
(bytearray): bytearray representation of the string.
"""
if isinstance(obj, str):
return bytearray.fromhex(obj)
    if isinstance(obj, (memoryview, bytearray, bytes)):
return obj
if obj is None:
return None
raise TypeError("Un-encodeable type '{}': Expected 'str' or byte type.".format(type(obj))) | 5ce010ccc9a37e716dac17ebf4e4109a56cd85cc | 466,000 |
def get_ordered_values_from_table_by_key(table, reverse=False):
"""
Get value list where the value orders are determined by their keys.
Args:
table: a table of data
reverse: value list in a reversed order
Returns:
- an ordered list of values
"""
    keys = sorted(table, reverse=reverse)
values = [table[k] for k in keys]
return values | 21c3dcef91b3a8e5fb7d9a36aa2acba87e508432 | 618,914 |
from typing import Optional
import torch
def get_n_byte_tensor(n: int, device: Optional[torch.device] = None) -> torch.Tensor:
"""Returns a torch.int8 tensor of size n.
Args:
n: size of the tensor to allocate
device: torch.device to allocate tensor on
Returns:
torch.int8 tensor of size n on device
Notes: Though 1 int8 = 1 byte, memory is allocated in blocks, such that the size
of the tensor in bytes >= n.
"""
return torch.zeros(n, dtype=torch.int8, device=device) | b1923b766ffdb7c82850f317bfc2c8755231ada0 | 569,985 |
def get_vaac(root):
"""Returns the VAAC region of the alert"""
vaac = root.alert.vaac_region.attrib.get('value')
return vaac | ec70f68697688d3c1c1b617c4d18b22362a37f0f | 458,207 |
def pov_2d_array(array_list):
"""Convert a mxn 2d array to POV-Ray format, e.g.
    [(1, 2), (3, 4), (5, 6)] --> array[3][2] {{1, 2}, {3, 4}, {5, 6}}.
"""
return "array[{}][{}] {{{}}}".format(
len(array_list),
len(array_list[0]),
", ".join("{{{}}}".format(", ".join(str(x) for x in arr)) for arr in array_list)) | 58b9117a0c21e262a48d9e3e606956952f029ca0 | 561,914 |
import logging
def predict(testing_corpus,
instance_extractor,
perceptron,
coref_extractor):
""" According to a learned model, predict coreference information.
Args:
testing_corpus (Corpus): The corpus to predict coreference on.
        instance_extractor (InstanceExtractor): The instance extractor that
            defines the features and the structure of instances that are
            extracted during testing.
        perceptron (Perceptron): A perceptron learned from training data.
coref_extractor (function): An extractor for consolidating pairwise
predictions into coreference clusters.
Returns:
A tuple containing two dicts. The components are
- **mention_entity_mapping** (*dict(Mention, int)*): A mapping of
mentions to entity identifiers.
- **antecedent_mapping** (*dict(Mention, Mention)*): A mapping of
mentions to their antecedent (as determined by the
``coref_extractor``).
"""
logging.info("Predicting.")
logging.info("\tRemoving coreference annotations from corpus.")
for doc in testing_corpus:
doc.antecedent_decisions = {}
for mention in doc.system_mentions:
mention.attributes["antecedent"] = None
mention.attributes["set_id"] = None
logging.info("\tExtracting instances and features.")
substructures, arc_information = instance_extractor.extract(testing_corpus)
logging.info("\tDoing predictions.")
arcs, labels, scores = perceptron.predict(substructures, arc_information)
logging.info("\tClustering results.")
return coref_extractor(arcs, labels, scores, perceptron.get_coref_labels()) | ca5007178b9c1f9805bf8faa09d72ecbd6d8b967 | 243,941 |
def _try_call(fn, args, kwargs):
"""Convenience function for evaluating argument `fn`."""
if fn is None:
return args[0]
try:
return fn(*args, **kwargs)
except TypeError:
return fn(*args) | f74618ca9892d2da5695143d5864eb50a348211d | 640,803 |
def steps(current, target, max_steps):
""" Steps between two values.
:param current: Current value (0.0-1.0).
:param target: Target value (0.0-1.0).
:param max_steps: Maximum number of steps.
"""
    if current < 0 or current > 1.0:
        raise ValueError("current value %s is out of bounds (0.0-1.0)" % current)
    if target < 0 or target > 1.0:
        raise ValueError("target value %s is out of bounds (0.0-1.0)" % target)
return int(abs((current * max_steps) - (target * max_steps))) | 0287efec583bfb8c37907a34ca5adf7c0aa61886 | 17,519 |
async def statistics_covid(conn, size, sort, offset):
"""Statistics on cases by country
    List the number of cases by country; the cases will be split into three states: 'infected', 'treated' or 'dead'
:param size: The number of countries to fetch.
:type size: int
:param sort: Sort order: * 'asc' - Ascending, from the least affected country to the most affected country. * 'desc' - Descending, from the most affected country to the least affected country.
:type sort: str
:param offset: An offset to allow pagination.
:type offset: int
:rtype: List[{ country: str, total: int, treated: int, infected: int, dead: int}]
"""
query = "SELECT COUNT(*) as count,country, health FROM detected_cases GROUP BY country,health"
proxy = await conn.execute(query)
records = await proxy.fetchall()
# format the response
country_hash = {}
for record in records:
count = record[0]
country = record[1]
health = record[2]
value = country_hash.get(country, {'country': country, 'total': 0, 'infected': 0, 'treated': 0, 'dead': 0})
value[health] = count
value['total'] += count
country_hash[country] = value
response = list(country_hash.values())
if sort == 'asc':
return sorted(response, key=lambda value: value['total'])[offset:(offset+size)]
else:
return sorted(response, key=lambda value: -value['total'])[offset:(offset+size)] | 97255a50f225a6d76072726aaf8c6ee207f61ea9 | 559,222 |
def my_sqrt(x):
"""
Compute integer part of the square root of given number
:param x: given integer
:type x: int
:return: integer part of the square root
:rtype: int
"""
if x < 0:
return -1
elif x == 0:
return 0
left, right = 1, x
while left + 1 < right:
mid = (left + right) // 2
mid_2 = mid * mid
if mid_2 == x:
return mid
elif mid_2 > x:
right = mid
else:
left = mid
return left | d80599b9adcb0b0d6a91d4d30a4d92a904e874c3 | 511,152 |
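# Hypothetical usage sketch for my_sqrt: integer square roots via binary search.
assert my_sqrt(0) == 0
assert my_sqrt(8) == 2    # floor(sqrt(8)) = 2
assert my_sqrt(16) == 4   # exact square
assert my_sqrt(-4) == -1  # negative input is flagged with -1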
from typing import Counter
def counter_to_string(counter: Counter, howmany: int = 10) -> str:
"""Turn counter object into a string showing the top howmany items."""
return "\n".join(
[
f"{cnt:5d}: {key}"
for key, cnt in sorted(counter.items(), key=lambda x: x[1], reverse=True)[
:howmany
]
]
) | 1042951c6dffca75ae6d10c007e0f08320e8172b | 538,051 |
from typing import Any
def is_null(value: Any) -> bool:
"""Check if a value is equivalent to null in Dynamo"""
return value is None or (isinstance(value, (set, frozenset)) and len(value) == 0) | 4ad43961673b7e8ae7c9eff8739299fedaae9a4c | 349,791 |
import codecs
def read_file(filename, encoding="UTF-8"):
"""
:param filename: string
    :param encoding: string: the encoding of the file to read (default: `UTF-8`)
    :return: string: the entire content of the file
"""
f1 = codecs.open(filename, encoding=encoding)
lines = f1.read()
f1.close()
return lines | 81966b9633b7ac2548b092373bdd7cab92278aba | 86,731 |
def _compute_hms(tot_time):
"""
Computes hours, minutes, seconds from total time in seconds.
"""
hrs = tot_time // 3600
mins = (tot_time - hrs * 3600) // 60
secs = (tot_time - hrs * 3600 - mins * 60)
return hrs, mins, secs | 4f55bbe5b70982f2ab8066adf9f14ef96f20182d | 198,842 |
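# Hypothetical usage sketch for _compute_hms: 12345 seconds is 3h 25m 45s.
assert _compute_hms(12345) == (3, 25, 45)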
def _CheckNoScopedAllowIO(input_api, output_api):
"""Make sure that ScopedAllowIO is not used."""
problems = []
file_filter = lambda f: f.LocalPath().endswith(('.cc', '.h'))
for f in input_api.AffectedFiles(file_filter=file_filter):
for line_num, line in f.ChangedContents():
if 'ScopedAllowIO' in line:
problems.append(' %s:%d' % (f.LocalPath(), line_num))
if not problems:
return []
return [output_api.PresubmitPromptWarning('New code should not use '
'ScopedAllowIO. Post a task to the blocking pool or the FILE thread '
'instead.\n' + '\n'.join(problems))] | 6f59c11bedf15d15afb1301fa55b4396f62b5a3d | 401,988 |
def host_to_ldap_url(hostfqdn):
"""Converts a host fqdn into an appropriate default
LDAP URL.
"""
return "ldap://%s" % hostfqdn | b2715578b4da24bb19eeb4924ba17d6f9f924638 | 279,228 |
def load_dict_from_file(path):
"""Loads key=value pairs from |path| and returns a dict."""
d = {}
with open(path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if not line or line.startswith('#'):
continue
if '=' in line:
name, value = line.split('=', 1)
d[name] = value
return d | ba4e945decb18f2885b02549538d0b269b97a045 | 293,054 |
from pathlib import Path
def find_next_available_file(fname_pattern, max_n=1000, start=1):
"""
:param str fname_pattern: File name pattern using "%d" style formatting e.g. "result-%03d.png"
:param int max_n: Check at most that many files before giving up and returning None
:param int start: Where to start counting from, default is 1
"""
for i in range(start, max_n):
fname = fname_pattern % i
if not Path(fname).exists():
return fname
return None | 4ea7c49fedc66532146ce31a4e28bdb492f64194 | 289,406 |
import json
def strval(value):
"""JSON serialize value as appropriate.
This function should only be used internally.
:param dict|list|string|number value: An input value.
:returns string: The output value, suitable for saving by Redis. If
``value`` is a ``dict`` or ``list``, it will be JSON-serialized.
Otherwise it will be left as-is. Note that while Redis only takes
string values, numbers have their string values be themselves in
strings, and the conversion will be done by Redis automatically.
"""
return json.dumps(value) if isinstance(value, (list, dict)) else value | 98be03cbd0593dddd12157e8b73d9cda59954ec6 | 183,562 |
def is_balanced(node):
"""Check if a BST is balanced; returns (True/False, height of tree)."""
# First we ensure the left subtree is balanced; then ensure the right subtree
# is balanced too; and ensure the diff betw heights of left & right subtree <=1
if node is None:
return True, 0
balanced_l, height_l = is_balanced(node.left)
balanced_r, height_r = is_balanced(node.right)
balanced = balanced_l and balanced_r and abs(height_l - height_r) <= 1
height = 1 + max(height_l, height_r)
return balanced, height | f4db8d2d0ce7e773118a30ce19c19d41097f10b7 | 654,662 |
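# Hypothetical usage sketch for is_balanced, assuming a minimal Node class
# (not part of the original snippet) with `left` and `right` attributes.
class Node:
    def __init__(self, left=None, right=None):
        self.left, self.right = left, right

leaf = Node()
assert is_balanced(Node(leaf, leaf)) == (True, 2)
assert is_balanced(Node(Node(Node()))) == (False, 3)  # left chain of height 3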
import re
def user_data(image: str, timeout: int, S3_results_bucket: str, flags: str):
"""Generates EC2 instance user data script which is basically
a bash script which will be executed as soon as the machine is
up and running
    Args:
        image (str): docker image as `repository:tag`
        timeout (int): timeout in minutes
        S3_results_bucket (str): S3 bucket where the job results are stored
        flags (str): Will be appended to docker run command e.g. for `--foo=bar --baz=boo`
            it will result in `docker run IMAGE --foo=bar --baz=boo`
Returns:
user_data (str): user_data script for EC2 instance returned as in ascii encoding
"""
data = [
"#!/bin/bash",
"set -x",
# Set automatic shutdown after 5 hours
f"sudo shutdown -P {timeout}",
# Login into ECR. This only works because we assigned
# the right IAM instance profile to the EC2 instance
"$(aws ecr get-login --region eu-central-1 --no-include-email)",
# Pull docker job
f"docker pull {image}",
# Start docker job
"mkdir -p /opt/app/output",
"cd /opt/app/",
f"docker run \
-e S3_RESULTS_BUCKET={S3_results_bucket} \
-v $(pwd)/output:/opt/app/output \
{image} python -m xain.benchmark.exec {flags} >& $(pwd)/output/training.log",
# Cancel previous shutdown and shutdown 1m after the job finishes
# The machine is setup to terminate on shutdown
"shutdown -c",
"shutdown -P 1",
]
# Replace multiple whitespaces with single whitespaces to allow
# the use of line breaks in data to increase readability while
# keeping the final output also readable
data = [re.sub(r"\s+", " ", s) for s in data]
return "\n".join(data) | a1600298522526545470d9c05b3702126206c4dd | 228,391 |
def input_tape_string(turing_machine):
"""
Get the input tape's string from the user.
:param turing_machine: The Turing's machine. Used to check if the inserted tape is valid.
:return: The given tape.
"""
tape = list(input("Insert an input for the machine: "))
fault = False
for char in tape:
if not turing_machine.is_symbol(char):
fault = True
while fault:
tape = input("Error. Insert a valid input for the machine: ").split()
fault = False
for char in tape:
if not turing_machine.is_symbol(char):
fault = True
return tape | 6babe2e5691e4a22040954bc0d250f3990b3c5a5 | 502,863 |
def dominance(solution_1, solution_2):
"""
Function that analyze solutions dominance.
Parameters
-----------
:param solution_1: Solution
:param solution_2: Solution
Returns
---------
:return int
If solution_1 dominates solution_2 -> return 1
:return -1
If solution_2 dominates solution_1 -> return -1
:return 0
If neither solution dominates the other -> return 0
"""
dominance_1, dominance_2 = False, False
for i, value in enumerate(solution_1.values):
if value > solution_2.values[i]:
            # Solution 1 at least greater in one value
dominance_1 = True
elif value < solution_2.values[i]:
# Solution 2 at least greater in one value
dominance_2 = True
# Solution 1 dominates solution 2
if dominance_1 and not dominance_2:
return 1
# Solution 2 dominates solution 1
if not dominance_1 and dominance_2:
return -1
return 0 | 2b6b39e12afed85a9064cb36d0ecb9c3473bac61 | 681,666 |
import io
def isfile(f):
"""
Returns True if the given object represents an OS-level file (that is,
``isinstance(f, file)``).
On Python 3 this also returns True if the given object is higher level
wrapper on top of a FileIO object, such as a TextIOWrapper.
"""
if isinstance(f, io.FileIO):
return True
elif hasattr(f, 'buffer'):
return isfile(f.buffer)
elif hasattr(f, 'raw'):
return isfile(f.raw)
return False | ac8f746c047f7ca616fd1c8d87d72f75fbfa0f9f | 106,978 |
def is_visible(self, y):
"""Checks whether a given point is within the currently visible area of the markdown area.
The function is used to handle text which is longer than the specified height of the markdown area and
during scrolling.
:param self: MarkdownRenderer
:param y: y-coordinate
:return: boolean
"""
return not self.is_above_area(y) and not self.is_below_area(y) | aa982d8fadf70f970e084ead9be07916d2599217 | 699,709 |
import csv
def write_excess_gates(excess_dict, fname):
"""
Writes the position and values of gates that have a frequency of
occurrence higher than a particular threshold
Parameters
----------
excess_dict : dict
dictionary containing the gates parameters
fname : str
file name where to store the data
Returns
-------
fname : str
the name of the file where data has written
"""
ngates = len(excess_dict['ray_ind'])
with open(fname, 'w', newline='') as csvfile:
csvfile.write('# Gates exceeding '+str(excess_dict['quant_min']) +
' percentile data file\n')
csvfile.write('# Comment lines are preceded by "#"\n')
csvfile.write(
'# Data collection start time: ' +
excess_dict['starttime'].strftime('%Y-%m-%d %H:%M:%S UTC')+'\n')
csvfile.write(
'# Data collection end time: ' +
excess_dict['endtime'].strftime('%Y-%m-%d %H:%M:%S UTC')+'\n')
csvfile.write('# Number of gates in file: '+str(ngates)+'\n')
csvfile.write('#\n')
fieldnames = [
'ray_ind', 'rng_ind', 'ele', 'azi', 'rng', 'nsamples',
'occurrence', 'freq_occu']
writer = csv.DictWriter(csvfile, fieldnames)
writer.writeheader()
for i, ray_ind in enumerate(excess_dict['ray_ind']):
writer.writerow({
'ray_ind': ray_ind,
'rng_ind': excess_dict['rng_ind'][i],
'ele': excess_dict['ele'][i],
'azi': excess_dict['azi'][i],
'rng': excess_dict['rng'][i],
'nsamples': excess_dict['nsamples'][i],
'occurrence': excess_dict['occurrence'][i],
'freq_occu': excess_dict['freq_occu'][i]})
return fname | 4d7c2fb547237b36d0d263dfd536349fbde47b50 | 139,721 |
def kmin(l1, l2, k):
"""
Return the k smaller elements of two lists of tuples, sorted by their first
element. If there are not enough elements, return all of them, sorted.
Params:
l1 (list of tuples): first list. Must be sorted.
l2 (list of tuples): second list. Must be sorted.
k (integer): number of elements to return.
Return:
(list): k smaller elements
"""
results = []
while len(results) < k:
if l1 == []:
if l2 == []:
return results
else:
results.append(l2[0])
l2 = l2[1:]
else:
if l2 == []:
results.append(l1[0])
l1 = l1[1:]
else:
if l1[0][0] > l2[0][0]:
results.append(l2[0])
l2 = l2[1:]
else:
results.append(l1[0])
l1 = l1[1:]
return results | 753fdc838183218db451cdfee403365006c5c50f | 441,696 |
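# Hypothetical usage sketch for kmin: merge the heads of two sorted tuple lists.
l1 = [(1, 'a'), (4, 'd')]
l2 = [(2, 'b'), (3, 'c')]
assert kmin(l1, l2, 3) == [(1, 'a'), (2, 'b'), (3, 'c')]
assert kmin(l1, l2, 10) == [(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')]  # fewer than k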
def _ref(name):
"""Return workflow reference name."""
return 'workflows/{0}'.format(name) | 2e0df25dfb0fbbab45042022328f06ec7ec4bb90 | 518,261 |
def worker_exploration(worker_index, num_workers):
"""
Computes an exploration value for a worker
Args:
worker_index (int): This worker's integer index.
num_workers (int): Total number of workers.
Returns:
float: Constant epsilon value to use.
"""
    if num_workers <= 1:
        return 0.4  # single worker: fall back to the base epsilon
    exponent = 1.0 + worker_index / float(num_workers - 1) * 7
    return 0.4 ** exponent | 3d530bbbcf4c1bd42a7149bbbd06dc52c561d7c2 | 217,365 |
def trigrid(tripts):
"""
Return a grid of 4 points inside given 3 points as a list.
INPUT:
- ``tripts`` -- A list of 3 lists of the form [x,y] where x and y are the
Cartesian coordinates of a point.
OUTPUT:
A list of lists containing 4 points in following order:
- 1. Barycenter of 3 input points.
- 2,3,4. Barycenters of 1. with 3 different 2-subsets of input points
respectively.
EXAMPLES::
sage: from sage.matroids import matroids_plot_helpers
sage: points = matroids_plot_helpers.trigrid([[2,1],[4,5],[5,2]])
sage: points
[[3.6666666666666665, 2.6666666666666665],
[3.222222222222222, 2.888888888888889],
[4.222222222222222, 3.222222222222222],
[3.5555555555555554, 1.8888888888888886]]
.. NOTE::
This method does NOT do any checks.
"""
pairs = [[0, 1], [1, 2], [0, 2]]
cpt = list((float(tripts[0][0]+tripts[1][0]+tripts[2][0])/3,
float(tripts[0][1]+tripts[1][1]+tripts[2][1])/3))
grid = [cpt]
for p in pairs:
pt = list((float(tripts[p[0]][0]+tripts[p[1]][0]+cpt[0])/3,
float(tripts[p[0]][1]+tripts[p[1]][1]+cpt[1])/3))
grid.append(pt)
return grid | 2f968aca1f49bdfa60badbe3db29a2143f9693a9 | 153,372 |
async def ping():
""" A quick check to see if the system is running """
return True | 4f569b665106c7b3b571e3718e38832c50cc3541 | 484,370 |
def add_request_headers(headers):
"""Add headers for 3rd party providers which we access data from."""
# Pass our abuse policy in request headers for third-party site admins.
headers["X-Abuse-Policy"] = "https://web.hypothes.is/abuse-policy/"
headers["X-Complaints-To"] = "https://web.hypothes.is/report-abuse/"
return headers | 5c0c1d40fb0d14c62efee51e8dc49a97737d0f8b | 392,591 |
def gcd(a,b):
"""Calculate the Greatest Common Divisor of a and b.
e.g. 8 , 12
becomes 12, 8
becomes 8, 4
becomes 4, 0
returns 4"""
while b:
a, b = b, a%b
return a | 0fe289a79400514c98d5b932cb77122597bffac6 | 411,861 |
from pathlib import Path
def GetRendererLabelFromFilename(file_path: str) -> str:
"""Gets the renderer label from the given file name by removing the '_renderer.py' suffix."""
file_name = Path(file_path).stem
return file_name.rstrip("_renderer.py") | 11e97b9712046840103b7bb5910f2b82109f0545 | 682,529 |
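# Hypothetical usage sketch: the suffix is removed without eating trailing
# characters (the old rstrip version would turn "pie_renderer" into "pi",
# since rstrip removes any trailing characters in the set "_renderer.py").
assert GetRendererLabelFromFilename("plugins/pie_renderer.py") == "pie"
assert GetRendererLabelFromFilename("plugins/table_renderer.py") == "table"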
def len_ignore_leading_ansi(s: str) -> int:
"""Returns the length of the string or 0 if it starts with `\033[`"""
return 0 if s.startswith("\033[") else len(s) | 9a01323a3bd5a840760c076f6beddb435d373791 | 87,205 |
def min_max_mean_total_feature(feature_list):
"""
This method calculates the min, max, mean and sum value of a characteristic list.
Used in conjunction with packet count and packet length base netflow features
:param feature_list: List of netflow characteristics values
:return: A list containing min, max, mean and sum value of characteristic list
"""
    if not feature_list:
feature_list = [0]
feature_min = min(feature_list)
feature_max = max(feature_list)
feature_total = sum(feature_list)
feature_mean = feature_total / len(feature_list)
return [feature_min, feature_max, feature_mean, feature_total] | d9ce07297cd463fca4a908f2eb0e4798f69d5323 | 435,309 |
def parse_slice(token):
"""Parse a single slice string
:param token: A string containing a number [3], a range [3:7] or a colon [:]
    :returns: An integer for simple numbers, Ellipsis for a bare colon, or a slice object
"""
try:
return int(token)
except ValueError:
if token == ':':
return ...
elif ':' in token:
rng = [int(s) for s in token.split(':')]
# The DAP protocol uses slicing including the last index.
# [0:20] in DAP translates to [0:21] in Python.
rng[1] += 1
return slice(*rng) | c93d3182335d6db6a51fac4dd5f0d015e189def4 | 247,181 |
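# Hypothetical usage sketch for parse_slice (DAP-style inclusive ranges).
assert parse_slice('5') == 5
assert parse_slice(':') is Ellipsis
assert parse_slice('0:20') == slice(0, 21)  # DAP [0:20] -> Python [0:21]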
def _prob_mass_function(message: str, word_length: int) -> dict:
"""
    Compute probability mass function for a one-dim discrete rv, Snippet 18.1, page 266.
:param message: (str or array) encoded message
:param word_length: (int) approximate word length
:return: (dict) of pmf for each word from message
"""
lib = {}
if not isinstance(message, str):
message = ''.join(map(str, message))
for i in range(word_length, len(message)):
message_ = message[i - word_length:i]
if message_ not in lib:
lib[message_] = [i - word_length]
else:
lib[message_] = lib[message_] + [i - word_length]
pmf = float(len(message) - word_length)
pmf = {i: len(lib[i]) / pmf for i in lib}
return pmf | 6be381a441123124583d9b681ace8134252e3d87 | 160,857 |
def _hyperparams_text_to_dict(cfg_text):
"""Converts hyperparams config text to a dictionary of key-value pairs."""
txt_list = cfg_text.split("\n")
pair_list = []
for v in txt_list:
if not v:
continue
vals = v.split(" : ")
if len(vals) != 2:
raise ValueError(v)
pair_list.append(vals)
return dict(pair_list) | 9139817dfbb13c916f7d099a29c0b2b06412e5c1 | 489,895 |
def get_address_string_from_bytes(address_raw, delimiter=":"):
"""
Convert a Bluetooth address in bytes form into a delimited string representation.
:param address_raw: Bluetooth address, as a bytes instance.
:param delimiter: Delimiter to use between octets.
:return: A string representing `address_raw` delimited by `delimiter`.
"""
    # Iterating a Python 3 bytes object yields ints, so no ord() call is needed.
    return delimiter.join("{:02x}".format(octet) for octet in address_raw[::-1]) | 88d52596d71d446f6e37e94b36cb29ba00ce6dc9 | 513,753 |
def get_songs_from_playlist(player, playlist_name):
"""Returns a list of songs from the given playlist"""
lists = player.get_sonos_playlists()
for playlist in lists:
if playlist.title == playlist_name:
return player.music_library.browse(playlist) | 381c664664071f0a788e0b0a76f1d1779fde1821 | 317,805 |
import shutil
def rm_directory_tree(directory):
""" Recursively removes a directory and its subdirectories and files."""
try:
shutil.rmtree(directory)
    except OSError:
        raise
return True | 2f1e5ff66657f90350e2e06ab60f96308901b278 | 183,225 |
import codecs
def open_write(file_path):
"""
Opens a file for writing in UTF-8
:rtype: file
:param file_path: Path to file
:type file_path: string
:return: A file opened for writing
"""
return codecs.open(file_path, "w", encoding="utf-8") | 33d07324e88c649cbb7ec48b58d65904430f4fee | 345,915 |
def _minor(x, i, j):
"""The minor matrix of x
    :param i: the row to eliminate
    :param j: the column to eliminate
    :returns: x without row i and column j
"""
return [[x[n][m] for m in range(len(x[0])) if m != j]
for n in range(len(x)) if n != i] | 8168b56914109ff52f6e5572dfa533db462d5d0c | 579,963 |
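# Hypothetical usage sketch for _minor: drop row 0 and column 1 of a 3x3 matrix.
m = [[1, 2, 3],
     [4, 5, 6],
     [7, 8, 9]]
assert _minor(m, 0, 1) == [[4, 6], [7, 9]]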
def is_external_plugin(appname):
"""
Returns true when the given app is an external plugin.
Implementation note: does a simple check on the name to see if it's
prefixed with "kolibri_". If so, we think it's a plugin.
"""
return not appname.startswith("kolibri.") | 969b6650a2c6917dd40786444c83d88d80236e5c | 377,029 |
from typing import Any
def is_netlist(netlist: Any) -> bool:
"""check if a dictionary is a netlist"""
if not isinstance(netlist, dict):
return False
if not "instances" in netlist:
return False
if not "connections" in netlist:
return False
if not "ports" in netlist:
return False
return True | abe571d3aeba950f9d818751cf44824efa69db1a | 179,430 |
def _serialize(node):
"""
Serialize a xml node tag and its attribute dict in a reproducible way.
@param node: xml node to serialize
"""
tokens = [node.tag]
for k, v in node.attrib.items():
tokens.extend([k, v])
return ' '.join(tokens) | 644e274c3b23f6605d90ede0628c13d4f25981a7 | 308,016 |
import hashlib
def _ntowfv1(password):
"""
[MS-NLMP] v28.0 2016-07-14
3.3.1 NTLM v1 Authentication
Same function as NTOWFv1 in document to create a one way hash of the password. Only
used in NTLMv1 auth without session security
:param password: The password of the user we are trying to authenticate with
:return digest: An NT hash of the password supplied
"""
digest = hashlib.new('md4', password.encode('utf-16le')).digest()
return digest | 901373ba06589b0a8ac7089e7b3421a3afdec745 | 225,947 |
import csv
def get_email_addresses() -> dict:
"""
    Function that reads the emails.csv file and returns a dictionary that can serve as a one-way mapping based on the data from the file.
    The first column is the name of the person and the second column is the email address.
Leading and trailing spaces are removed
:return: the dictionary with the mapping
"""
mapping = {}
with open("emails.csv", mode="r", newline="") as csvfile:
reader = csv.reader(csvfile, delimiter=",", quotechar="|")
for row in reader:
if len(row) < 2:
continue
mapping[row[0].lower().strip()] = row[1].strip()
return mapping | 74b2401d6ef5505d21e84e6b82573586d5619c06 | 431,376 |
from datetime import datetime
def midnightify(date: datetime) -> datetime:
"""Return midnightified datetime."""
return date.replace(hour=0, minute=0, second=0, microsecond=0) | f46fc92c415081cbda62ac5f20b140049529060f | 532,698 |
def to_var(field):
"""Converts a field name int a variable (snake_case)
Args:
string: a string to be converted to a var
Returns:
a string in lower snake case
"""
return field.replace(' ', '_').lower() | 09833cba0e7c06a4353875ba1beb28530691822e | 573,960 |
def get_two_by_two_edges(*edges):
""" create the list of edges
Parameters
----------
* edges : list or tuple
each consecutive elements will be an edge
Returns
-------
    list of 2-tuples for the edges
"""
# Examples :
# G = test_graph_from_edges((1,2,3),(4,3))
# 1 -> 2 -> 3, and 3 -> 4
two_by_two_edges = []
for list_of_edge in edges:
if len(list_of_edge) <= 1:
raise ValueError("edges must be of length at least 2")
if not isinstance(list_of_edge, (tuple, list)):
raise TypeError("argument should be tuple or list, instead i got : '%s' " % type(list_of_edge))
for e1, e2 in zip(list_of_edge[:-1], list_of_edge[1:]):
two_by_two_edges.append((e1, e2))
return two_by_two_edges | 2de744f1dc4c6cfc2a03f7044cbc8648504d5ae4 | 195,800 |
def generate_neighbours(coordinates):
"""
Returns the coordinates of potential neighbours of a given cell
:param coordinates: (tuple) the coordinates of the cell
:return: (list(tuples(int, int))) the list of the coordinates of the potential neighbours of a cell
Examples:
>>> generate_neighbours((0, 0))
[(0, -1), (-1, -1), (-1, 0), (0, 1), (1, 0), (1, -1)]
>>> generate_neighbours((4, 2))
[(4, 1), (3, 1), (3, 2), (4, 3), (5, 2), (5, 1)]
"""
x = coordinates[1]
y = coordinates[0]
if y % 2 == 0: # If the number of the line is even
return [(y, x-1), (y-1, x-1), (y-1, x), (y, x+1), (y+1, x), (y+1, x-1)]
else:
return [(y, x-1), (y-1, x), (y-1, x+1), (y, x+1), (y+1, x+1), (y+1, x)] | ed56f84d5701ca40fda6a7543e031df9210f7de8 | 469,777 |
def get_dnum(det, caps=False, prefix=True):
""" Convert a detector index into a string used by the settings dictionary
or other bits of code. Best to keep at two digits
Parameters
----------
det : int
Detector index
caps : bool, optional
Return all caps?
prefix : bool, optional
Include the prefix?
Returns
-------
dnum : str
A string used by the settings dictionary
"""
dnum = '{0:02d}'.format(det)
if prefix:
if caps:
dnum = 'DET'+dnum
else:
dnum = 'det'+dnum
# Return
return dnum | 9cc819da7c3015df993477169643d46b08edd784 | 427,381 |
def GetFileWithReplace(path, replace):
"""Return content of file after replacing any keys in replace with values."""
fp = open(path)
content = fp.read()
fp.close()
  for key, value in replace.items():
    content = content.replace(key, value)
return content | 1dacb099da3c26dbb03b8fd6b4913e49c15ae96d | 523,487 |
import re
def str_list_to_tuple_str_series(col, regex_pattern=r'[A-Z]\d+'):
"""
Convert string of lists into tuples of strings,
for each row in column.
regex_pattern determines string tokens.
"""
if not isinstance(col[0], str):
print("error: str expected, instead {} found.".format(type(col[0])))
return col
else:
p = re.compile(regex_pattern)
return col.apply(p.findall).apply(tuple) | 0c29b31614f037675733d6018d9b666fff195200 | 86,745 |
def get_resource_type_name(resource_values):
"""Gets resource type name from resource values."""
resource_type = resource_values['Type']
return resource_type.split('::')[-1] | 62944170fe9885ac632e2048f89b9336efdcd979 | 571,180 |
import importlib
def class_for_name(module_name, class_name):
"""
Import a class dynamically
:param module_name: the name of a module
:param class_name: the name of a class
:type module_name: string
:type class_name: string
:return: Return the value of the named attribute of object.
:rtype: object
"""
# load the module, will raise ImportError if module cannot be loaded
m = importlib.import_module(module_name)
# get the class, will raise AttributeError if class cannot be found
c = getattr(m, class_name)
return c | 4192581999812d103f2e15f757e6592bc68661a6 | 232,042 |
def correct_date(date):
"""
Converts the date format to one accepted by SWA
SWA form cannot accept slashes for dates and is in the format YYYY-MM-DD
:param date: Date string to correct
:return: Corrected date string
"""
if date is None:
return ""
else:
a, b, c = date.split("/")
if len(a) == 4:
# Assumed format is year, month, day
return "%s-%s-%s"%(a, b, c)
else:
# Assumed format is month, day, year
return "%s-%s-%s" % (c, a, b) | 6b17124b4b5e4a3740b56210fc88ea79ca816434 | 686,459 |
def get_projection_name(srs):
"""
Extract the projection name from a
spatial reference system
Parameters
----------
srs : object
OSR spatial reference system
Returns
-------
: string
The projection name
"""
proj_name = srs.GetAttrValue("PROJECTION", 0)
return proj_name | 092991989fdf9b09445941ad6e79380ec155d1ed | 435,382 |
def thermal_time_constant_at_rated_load(C, P, dTOr):
""" Returns the average oil time constant in minutes (for rated load)
As per IEEE C57.91-2011
C = Thermal capacity of oil
P = Supplied losses (in W) at the load considered
    dTOr = The average oil temperature rise above ambient temperature
in K at the load considered
"""
tau = (C * dTOr * 60) / P
return tau | f64eaba59b7b2faee1cb30d9b7b8cdf0a61797b4 | 540,626 |
def progress_bar(current_work, total_work):
"""์งํ ์ํ๋ฅผ ๋ํ๋ด๋ ๋ฌธ์์ด์ ๋ฐํํฉ๋๋ค.
Args:
current_work: ํ์ฌ์ ์์
๋จ๊ฒ๋ฅผ ๋ํ๋ด๋ ์ ์
total_work: ์ด ์์
์ ๊ฐ์
"""
length = 50 # ํ๋ก๊ทธ๋ ์ค ๋ฐ์ ๊ธธ์ด
done = round(current_work / total_work * length)
progress_str = [
"[",
done * "โ",
(length - done) * "-",
"]",
f" {round(current_work / total_work * 100)}%",
]
return "".join(progress_str) | 8dce5a869f83521a69b29c13b31a44d9b4c77b2e | 388,957 |
def _is_camel_case_ab(s, index):
"""Determine if the index is at 'aB', which is the start of a camel token.
For example, with 'workAt', this function detects 'kA'."""
return index >= 1 and s[index - 1].islower() and s[index].isupper() | c21ec7d8aa7e786d1ea523106af6f9426fea01d8 | 2,600 |
import requests
def feedstock_name(package):
"""
Check to see if a package has a conda-forge feedstock
Parameters
------------
package : str
Name of a package to check
Returns
-------------
name : str or None
None if it doesn't exist
"""
# base url to check
base = 'https://github.com/conda-forge/{}-feedstock'
#check_yes = requests.get(base.format('triangle'))
#check_no = requests.get(base.format('blahahshdrraaa1123'))
#assert check_no.status_code == 404
#assert check_yes.status_code == 200
# make sure name is clean
package = package.lower().strip()
# map packages to different name if known here
name_map = {'msgpack': 'msgpack-python'}
if package in name_map:
package = name_map[package]
# check the feedstock on github
fetch = requests.get(base.format(package))
exists = fetch.status_code == 200
print(f'{package} exists={exists}')
if exists:
return package
return None | c22ce99e165abb83ce718d55f5351f9983f61720 | 116,469 |
def bbox_area(bbox):
"""Compute the area of the annotation bounding box."""
return (bbox[:, 2] - bbox[:, 0]) * (bbox[:, 3] - bbox[:, 1]) | deeb10e7ed56f8b417a2c83014a218993d961884 | 334,009 |
def lenum(l, start=0):
"""
Indexes a list from the left side.
Examples
--------
>>> lenum(['a', 'b', 'c'])
[(0, 'a'), (1, 'b'), (2, 'c')]
"""
return list(enumerate(l, start=start)) | a78bc54867684859f354c53680cd7c36460e3a3a | 246,247 |
import filecmp
def issamefile(file1, file2):
"""Tests if the two files have the same contents."""
return filecmp.cmp(file1, file2, False) | 6f590c3c459063eada095113b7cece99597dcf05 | 261,276 |
def average_precision(gt, pred):
"""
Computes the average precision.
    This function computes the average precision at k between two lists of
items.
Parameters
----------
gt: set
A set of ground-truth elements (order doesn't matter)
pred: list
A list of predicted elements (order does matter)
Returns
-------
score: double
The average precision over the input lists
"""
if not gt:
return 0.0
score = 0.0
num_hits = 0.0
for i,p in enumerate(pred):
if p in gt and p not in pred[:i]:
num_hits += 1.0
score += num_hits / (i + 1.0)
return score / max(1.0, len(gt)) | ca265471d073b6a0c7543e24ef0ba4f872737997 | 709,784 |
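# Hypothetical usage sketch for average_precision: hits at ranks 1 and 3.
gt = {'a', 'c'}
pred = ['a', 'b', 'c']
# precision@1 = 1/1, precision@3 = 2/3 -> AP = (1 + 2/3) / 2 = 5/6
assert abs(average_precision(gt, pred) - 5 / 6) < 1e-12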
import torch
from typing import Optional
from typing import Tuple
def assign_labels(
spikes: torch.Tensor,
labels: torch.Tensor,
n_labels: int,
rates: Optional[torch.Tensor] = None,
alpha: float = 1.0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
# language=rst
"""
Assign labels to the neurons based on highest average spiking activity.
:param spikes: Binary tensor of shape ``(n_samples, time, n_neurons)`` of a single
layer's spiking activity.
:param labels: Vector of shape ``(n_samples,)`` with data labels corresponding to
spiking activity.
:param n_labels: The number of target labels in the data.
:param rates: If passed, these represent spike rates from a previous
``assign_labels()`` call.
:param alpha: Rate of decay of label assignments.
:return: Tuple of class assignments, per-class spike proportions, and per-class
firing rates.
"""
n_neurons = spikes.size(2)
if rates is None:
rates = torch.zeros((n_neurons, n_labels), device=spikes.device)
# Sum over time dimension (spike ordering doesn't matter).
spikes = spikes.sum(1)
for i in range(n_labels):
# Count the number of samples with this label.
n_labeled = torch.sum(labels == i).float()
if n_labeled > 0:
# Get indices of samples with this label.
indices = torch.nonzero(labels == i).view(-1)
# Compute average firing rates for this label.
rates[:, i] = alpha * rates[:, i] + (
torch.sum(spikes[indices], 0) / n_labeled
)
# Compute proportions of spike activity per class.
proportions = rates / rates.sum(1, keepdim=True)
proportions[proportions != proportions] = 0 # Set NaNs to 0
# Neuron assignments are the labels they fire most for.
assignments = torch.max(proportions, 1)[1]
return assignments, proportions, rates | fa8752ef9ef5263d384d6841720c24b22c56067e | 95,824 |
def _GetClearedFieldsForHeaderAction(header_action, field_prefix):
"""Gets a list of fields cleared by the user for HeaderAction."""
cleared_fields = []
if not header_action.requestHeadersToRemove:
cleared_fields.append(field_prefix + 'requestHeadersToRemove')
if not header_action.requestHeadersToAdd:
cleared_fields.append(field_prefix + 'requestHeadersToAdd')
if not header_action.responseHeadersToRemove:
cleared_fields.append(field_prefix + 'responseHeadersToRemove')
if not header_action.responseHeadersToAdd:
cleared_fields.append(field_prefix + 'responseHeadersToAdd')
return cleared_fields | 1e54b3da8b0c8de2983d7762f68e8c1765db9378 | 237,131 |
def api_repo_url(org_name):
"""
With the supplied organization name, constructs a GitHub API URL
:param org_name: GitHub organization name
:return: URL to GitHub API to query org's repos
"""
return 'https://api.github.com/orgs/{}/repos'.format(org_name) | e602303d680bef850e7053ff589a3a76382d4833 | 89,142 |
def get_fields(fields):
"""
From the last column of a GTF, return a dictionary mapping each value.
Parameters:
fields (str): The last column of a GTF
Returns:
attributes (dict): Dictionary created from fields.
"""
attributes = {}
description = fields.strip()
description = [x.strip() for x in description.split(";")]
for pair in description:
if pair == "": continue
pair = pair.replace('"', '')
key, val = pair.split()
attributes[key] = val
# put in placeholders for important attributes (such as gene_id) if they
# are absent
if 'gene_id' not in attributes:
attributes['gene_id'] = 'NULL'
return attributes | 30777838934b18a0046017f3da6b3a111a911a9c | 709,555 |
import itertools
def chunked(iterable, size):
"""Split iterable `iter` into one or more `size` sized tuples"""
it = iter(iterable)
return iter(lambda: tuple(itertools.islice(it, size)), ()) | 474e121727f4c29a8d98a50c1e3694cc6e04d906 | 281,967 |
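# Hypothetical usage sketch for chunked: the final partial tuple is kept.
assert list(chunked(range(7), 3)) == [(0, 1, 2), (3, 4, 5), (6,)]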
def _is_tarfile(filename):
"""Returns true if 'filename' is TAR file."""
return (filename.endswith(".tar") or filename.endswith(".tar.gz") or
filename.endswith(".tgz")) | 761b776e0e8078ddd4bee694e0a9d853dd2e31fd | 9,098 |
def _format_type(json_value):
"""Helper to determine the Python type of the provided value from CLI.
Args:
json_value: (str) Value providing from CLI.
Returns:
json_value: The provided input coerced into proper Python Type.
"""
if (json_value.startswith('-') and json_value[1:].isdigit()) or (
json_value.isdigit()):
return int(json_value)
if (json_value.startswith('-') and json_value[1].isdigit()) or (
json_value[0].isdigit()):
return float(json_value)
if json_value.capitalize() == 'True':
return True
if json_value.capitalize() == 'False':
return False
return json_value # The value is a string. | 0f6dd0750ec8cd42c8741898687b4c1adb42a687 | 570,370 |