content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64, 8–710k) |
---|---|---|
def leavesToString(leaves):
"""Return the leaves as concatenated string with whitespace separation."""
retstr = ""
for e in leaves:
retstr = retstr + e[0] + " "
return retstr
|
a43fdfebbd000e4948c38557276b1fbba2aced57
| 121,757 |
def replay_args(parser, show_all_task_softmax=True):
"""This is a helper function of function :func:`parse_cmd_arguments` to add
an argument group for options regarding Generative Replay.
Args:
parser (argparse.ArgumentParser): The argument parser to which the
group should be added.
show_all_task_softmax (bool): Whether option `all_task_softmax` should
be shown.
Note:
Only sensible for classification tasks.
Returns:
The created argument group, in case more options should be added.
"""
agroup = parser.add_argument_group('Replay options')
agroup.add_argument('--use_replay', action='store_true',
help='Whether generative replay should be used as a ' +
'method to prevent catastrophic forgetting. ' +
'If option "hnet_all" is enabled, a replay ' +
'model per task is learned within one task-' +
'conditioned hypernetwork. Otherwise, ' +
'forgetting within the replay model is ' +
'prevented by replaying data from a ' +
'checkpointed replay model before starting to ' +
'learn a new task.')
if show_all_task_softmax:
# This enables what we call CL3.
agroup.add_argument('--all_task_softmax', action='store_true',
help='If enabled, the replay model is used to ' +
'train a multi-head classifier whose ' +
'softmax predictions are computed across ' +
'all output heads.')
agroup.add_argument('--replay_pm_strength', type=float, default=1.,
help='The strength of the prior-matching term if ' +
'replay is used. Default: %(default)s.')
agroup.add_argument('--replay_rec_strength', type=float, default=1.,
help='The strength of the reconstruction term if ' +
'replay is used. Default: %(default)s.')
agroup.add_argument('--replay_distill_reg', type=float, default=1.,
help='The strength of the soft-target distillation ' +
'loss if replay is used. Default: %(default)s.')
agroup.add_argument('--replay_true_data', action='store_true',
help='This is a sanity check. If enabled, actual ' +
'data from previous tasks will be replayed. ' +
'The autoencoder is still trained, even though ' +
'the decoder has no influence in training the ' +
'target model.')
agroup.add_argument('--coreset_size', type=int, default=-1, metavar='N',
help='This option is only valid in combination with ' +
'option "use_replay". If "-1", then coresets ' +
'are deactivated. Otherwise, a positive integer ' +
'is expected denoting the size of a coreset per ' +
'task. In this case, no decoder (VAE) will be ' +
'trained. Instead, data from the coreset is ' +
'replayed. Default: %(default)s.')
return agroup
|
ecc69b700a0221a9be7400d203f6dab2a7745bdd
| 663,911 |
from typing import Tuple
def grammar_resource(location: str) -> Tuple[str, str]:
"""
Given a module-like location, return the package and resource of a grammar
:param location: a module-like location, such as ``"bootpeg.grammar.peg"``
:return: a package and resource, such as ``"bootpeg.grammar", "peg.bpeg"``
"""
if location == "__main__":
location = __import__("__main__").__spec__.name
package, _, name = location.rpartition(".")
return package, f"{name}.bpeg"
|
2581f2cc7115f6871ebd7974be99f22e3835d9d1
| 154,768 |
def conflicting(a, b):
"""
Totals the number of conflicting characters between two strings
Parameters
----------
a: str
b: str
Returns
-------
int
"""
comp = zip(a, b)
diff = abs(len(a)-len(b))
d = sum(1 for x,y in comp if x != y)
return d + diff
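A quick usage sketch (inputs made up for illustration):
conflicting("kitten", "sitten")   # one mismatched position -> 1
conflicting("abc", "abcdef")      # overlap matches, 3 extra chars -> 3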
|
65e111786e56da9610e290fd5a56c41039690f68
| 301,810 |
import json
def convert_json(data):
"""Convert string to JSON if possible or return None otherwise.
Args:
data (str): Data.
Returns:
dict: Loaded dict.
"""
try:
return json.loads(data)
except ValueError:
return None
|
668c1fa29ec48e3f311ee93b2e5d0c64c6708518
| 201,928 |
def get_grid_sizes(image_shape, base_grid_size):
"""utility function that compute the sizes of the grid system
Arguments:
image_shape {tuple} -- the shape of the image
base_grid_size {int} -- the size of the cell in base grid
Returns:
list -- a list of the grid sizes
"""
if base_grid_size == 32:
return [int(image_shape[0] / x) for x in [32, 16, 8]]
else:
values = []
for x in [base_grid_size, base_grid_size / 2, base_grid_size / 4]:
# if image_shape[0] % x != 0:
# raise ValueError(
# f'the base grid size {base_grid_size} is not divisible per the image size'
# )
values.append(int(image_shape[0] / x))
return values
|
ebae894942d4d09010e8819b12178dcf59d0ac79
| 436,251 |
def remove_seq_duplicates(question):
""" Removes consequent sbj placeholders"""
i = 0
while i < len(question) - 1:
if question[i] == question[i + 1] and question[i] == "sbj":
del question[i]
else:
i = i + 1
return question
|
08445451ae9f9258607d24eb202471a24d588d2e
| 243,924 |
import pkg_resources
def entrypoints(section):
"""
    Returns the loaded entry points for a given entry point section.
:param str section: The section name in the entry point collection
:returns: A dictionary of (Name, Class) pairs stored in the entry point collection.
"""
return {ep.name: ep.load() for ep in pkg_resources.iter_entry_points(section)}
|
55c82839129a3aa6a321a812d93795594ee9d333
| 305,551 |
def userpar_override(pname, args, upars):
"""
Implement user parameter overrides. In this implementation, user
parameters *always* take precedence. Any user parameters passed to the
Primitive class constructor, usually via -p on the 'reduce' command
line, *must* override any specified recipe parameters.
Note: user parameters may be primitive-specific, i.e. passed as
-p makeFringeFrame:reject_method='jilt'
in which case, the parameter will only be overridden for that primitive.
Other primitives with the same parameter (e.g. stackFlats reject_method)
will not be affected.
This returns a dict of the overridden parameters and their values.
"""
parset = {}
for key, val in list(upars.items()):
if ':' in key:
prim, par = key.split(':')
if prim == pname:
parset.update({par: val})
elif key in args:
parset.update({key: val})
return parset
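A usage sketch with hypothetical parameter names (not from the original source):
upars = {'makeFringeFrame:reject_method': 'jilt', 'nhigh': 3}
userpar_override('makeFringeFrame', ['nhigh'], upars)
# -> {'reject_method': 'jilt', 'nhigh': 3}
userpar_override('stackFlats', ['nhigh'], upars)
# -> {'nhigh': 3}  (the primitive-specific override does not apply here)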
|
4634d73b332f85c68507ededcc85913a080b970c
| 463,446 |
def is_bit_set(number: int, position: int) -> bool:
"""
Is the bit at position set?
Details: Shift the bit at position to be the first (smallest) bit.
Then check if the first bit is set by anding the shifted number with 1.
>>> is_bit_set(0b1010, 0)
False
>>> is_bit_set(0b1010, 1)
True
>>> is_bit_set(0b1010, 2)
False
>>> is_bit_set(0b1010, 3)
True
>>> is_bit_set(0b0, 17)
False
"""
return ((number >> position) & 1) == 1
|
638df8c7ab0976d31dd268a1496f8d210d9fd27e
| 147,486 |
import torch
def per_face_normals(mesh : torch.Tensor):
"""Compute normals per face.
Args:
mesh (torch.Tensor): #F, 3, 3 array of vertices
"""
vec_a = mesh[:, 0] - mesh[:, 1]
vec_b = mesh[:, 1] - mesh[:, 2]
    # pass dim explicitly; torch.cross without dim is deprecated
    normals = torch.cross(vec_a, vec_b, dim=1)
return normals
|
0f6c51125ec33dc5aa8faa913b5fefba03bdd1f5
| 64,497 |
from typing import Collection
from typing import Tuple
import hashlib
def triple_set_hash(
mapped_triples: Collection[Tuple[int, int, int]],
) -> str:
"""
Compute an order-invariant hash value for a set of triples given as list of triples.
:param mapped_triples:
The ID-based triples.
:return:
The hash digest as hex-value string.
"""
# sort first, for triple order invariance
return hashlib.sha512("".join(map(str, sorted(mapped_triples))).encode("utf8")).hexdigest()
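A small check of the order invariance (illustrative values):
h1 = triple_set_hash([(1, 2, 3), (0, 1, 2)])
h2 = triple_set_hash([(0, 1, 2), (1, 2, 3)])
assert h1 == h2  # sorting makes the digest independent of triple order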
|
f66b409ca6f5ad586ecb33e4d1f7eef8e5a5c3e4
| 262,295 |
from typing import List
def read_tour(path: str) -> List[int]:
"""Reads a TSPLIB-formatted TSP tour file, and returns it in the form of an ordered list of point IDs. """
tour = []
with open(path, "r") as f:
for line in f:
if "TOUR_SECTION" in line:
break
for line in f:
line = line.strip()
if "-1" in line or "EOF" in line or not line:
break
fields = line.strip().split()
point_id = int(fields[0])
tour.append(point_id)
return tour
|
9ac341a6343804a59e48130f764d91b39dcc6f2c
| 292,584 |
import getpass
def get_default_db_name() -> str:
"""Uses the username to generate the db name
Returns:
str: The default database name
"""
user: str = getpass.getuser()
return f"{user}_antares_launcher_db.json"
|
3d906383849f648125747d4d9a7f586bfaed0a12
| 276,539 |
import string
def text_template(text, variables):
"""Substitutes '${PLACEHOLDER}'s within the text with the
corresponding values from variables."""
template = string.Template(text)
return template.safe_substitute(variables)
|
e1397376fe225f875c67d883954549bc774294fb
| 555,927 |
def removecommongaps(s1, s2):
"""Remove common gap characters between the two sequences.
Return s1, s2 with these characters removed.
"""
if len(s1) != len(s2):
raise ValueError('Sequences must be same length')
return (
''.join(b1 for b1, b2 in zip(s1, s2) if b1 != '-' or b2 != '-'),
''.join(b2 for b1, b2 in zip(s1, s2) if b1 != '-' or b2 != '-'),
)
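For illustration (made-up aligned sequences):
removecommongaps("A-C-", "AG--")
# -> ('A-C', 'AG-')  only the last column, gapped in both sequences, is dropped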
|
a73c4227b00203033d0aacebc5d8f378fd5ce009
| 75,664 |
import torch
def _make_aligned_labels(inputs):
"""Uses the shape of inputs to infer batch_size, n_classes, and n_sample_per_class. From this, we build the one-hot
encoding label tensor aligned with inputs. This is used to keep the lable information when tensors are flatenned
across the n_class and n_sample_per_class.
Args:
inputs: tensor of shape (batch, n_classes, n_sample_per_class, z_dim) containing encoded examples for a task.
Returns:
tensor of shape (batch, n_classes, n_sample_pc, n_classes) containing the one-hot encoding label of each example
"""
batch, n_classes, n_sample_pc, z_dim = inputs.shape
identity = torch.eye(n_classes, dtype=inputs.dtype, device=inputs.device)
return identity[None, :, None, :].expand(batch, -1, n_sample_pc, -1).contiguous()
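A shape-level usage sketch (random inputs, for illustration only):
inputs = torch.randn(2, 3, 4, 16)      # batch=2, 3 classes, 4 samples each, z_dim=16
labels = _make_aligned_labels(inputs)
labels.shape                           # torch.Size([2, 3, 4, 3])
labels[0, 1, 0]                        # tensor([0., 1., 0.]) -- one-hot for class 1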
|
7eaeb9f627b31f1b58ef58be3858dde13e3e7d4a
| 482,607 |
def load(fl, normalise=False):
"""
load airfoil
args:
fl (str): filename
kwargs:
normalise (bool): flag determining whether the airfoil is normalised to
unit length
returns:
[(x,y),...] airfoil point coordinates
"""
    d = []
    print("loading airfoil %s" % fl)
    with open(fl, "r") as fh:
        for line in fh:
            try:
                xy = [float(j) for j in line.split()]
            except ValueError:
                # skip header or comment lines that do not parse as numbers
                continue
            if len(xy) in [2, 3]:
                # keep only the x, y columns so the zip below unpacks cleanly
                d.append(xy[:2])
x, y = list(zip(*d))
if normalise:
mx = min(x)
dx = max(x) - min(x)
print("normalise factor %f" % dx)
x = [(i - mx) / dx for i in x]
y = [i / dx for i in y]
return list(zip(x, y))
|
4032a703a66465799aadfa08dd7888682b0a767d
| 666,156 |
def guess_num() -> str:
"""
049
Create a variable called comp_num and set the value to 50. Ask the user to enter a number. While their guess
is not the same as the comp_num value, tell them if their guess is too low or too high and ask them to have
another guess. If they enter the same value as comp_num, display the message “Well done, you took [count] attempts”.
"""
comp_num = 50
guess = int(input("Entrer une chifre: "))
attempts = 1
while guess != comp_num:
guess = int(input("Was your guess too high or too low? Enter another number: "))
attempts += 1
return f"Well done you took {attempts} attempts."
|
3467316d23498cca220987b3e0004973010f8901
| 623,708 |
import re
def cond_add_quotes(s):
"""If there are spaces in the input string s, put quotes around the
string and return it... if there are not already quotes in the
string.
@type s: string
@param s: path name
@rtype: string
@return: string with quotes, if necessary
"""
if re.search(r'[ ]',s) and not ( re.search(r'["].*["]',s) or
re.search(r"['].*[']",s) ):
return '\"' + s + '\"'
return s
|
a476e27fea96ef5c26600ccf2a73b43dbfafb21d
| 591,805 |
def _get_or_create_plot_data(data, plotdata):
"""Create a new name for `data` if necessary, or check it is a valid name.
"""
valid_names = plotdata.list_data()
if not isinstance(data, str):
name = plotdata.set_data("", data, generate_name=True)
else:
if data not in valid_names:
msg = '{} is not an existing name for plot data'
raise ValueError(msg.format(data))
name = data
return name
|
6afd6549939f19a561e932f0db297fa532fee898
| 241,485 |
def determine_trial_result(CI_lower, CI_upper, value):
"""
:param CI_lower: confidence interval lower bound
:param CI_upper: confidence interval upper bound
:param value: value to check (does the confidence interval include the value?)
    :return: -1 if the value is below the CI, 1 if it is above, and 0 if the CI covers the value
"""
if value < CI_lower:
return -1
elif value > CI_upper:
return 1
else:
return 0
|
536bbbeb1c9b1f1911cfd22eb3e56dc7bef3bc54
| 275,764 |
def compute_hash(func, string):
"""compute hash of string using given hash function"""
h = func()
h.update(string)
return h.hexdigest()
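A usage sketch; note the input must be bytes, not str:
import hashlib
compute_hash(hashlib.sha256, b"hello")
# -> '2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824'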
|
903bad55afccaf5c186172be066250e7227b26b4
| 553,879 |
def get_field_name(data, original_key, alternative_value):
"""
check which column name used in the BioSamples record, if both provided column names not found, return ''
:param data: one BioSamples record in JSON format retrieved from API
:param original_key: field name to be checked
:param alternative_value: alternative field name to be checked
:return: either original key or alternative value if found in the data, if not found return ''
"""
# biosamples record always has characteristics section
if original_key not in data['characteristics']:
if alternative_value in data['characteristics']:
return alternative_value
else:
return ''
return original_key
|
07b9f01e5d1e0fe58a654c4ea55287a744cd291f
| 33,290 |
def get_default_replacement_func(placeholders):
"""
Get a default function for use by perform_placeholder_replacements()
:param dict placeholders: a dict of placeholders for the replacements
    :returns: a function that searches the placeholder dict for a replacement
:rtype: function
"""
return lambda m: placeholders.get(m.group(1), m.group(0))
|
567bbe4ad1726325d8005c0ba3409351fc0e45f5
| 158,310 |
def is_collection(obj):
"""
    Check if an object is iterable (strings are excluded).
:return: Result of check.
:rtype: bool
"""
return hasattr(obj, '__iter__') and not isinstance(obj, str)
|
70fa0262ea7bf91a202aade2a1151d467001071e
| 5,054 |
def dup_neg(f, K):
"""
Negate a polynomial in ``K[x]``.
**Examples**
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densearith import dup_neg
>>> f = ZZ.map([1, 0, -1])
>>> dup_neg(f, ZZ)
[-1, 0, 1]
"""
return [ -coeff for coeff in f ]
|
28c2eedd3865726ac25275a570b3354a40601b2e
| 500,438 |
def create_dict(breadcrumbs, value=None):
"""
    Creates a dict out of the breadcrumbs in a recursive manner.
    Each entry in the breadcrumbs should be a valid dictionary key.
    If value is None, the last string within the breadcrumbs becomes the
    final value.
:param breadcrumbs:
:param value:
:return:
"""
if value is not None:
if not breadcrumbs:
return value
elif len(breadcrumbs) == 1:
return breadcrumbs[0]
return {breadcrumbs[0]: create_dict(breadcrumbs[1:], value)}
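Illustrative calls (made-up breadcrumbs):
create_dict(['a', 'b', 'c'])        # -> {'a': {'b': 'c'}}  last crumb becomes the value
create_dict(['a', 'b'], value=5)    # -> {'a': {'b': 5}}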
|
f2f4d2be32aa96b31703cc548edff3999bd42bd3
| 49,525 |
def binary(i, width):
"""
>>> binary(0, 5)
[0, 0, 0, 0, 0]
>>> binary(15, 4)
[1, 1, 1, 1]
>>> binary(14, 4)
[1, 1, 1, 0]
"""
bs = bin(i)[2:]
bs = ("0" * width + bs)[-width:]
b = [int(c) for c in bs]
return b
|
0a9ee440d14cc0fccc8b3d7c83c36582a4583749
| 37,280 |
import math
def convert_unit_names(output_unit_list):
"""
A helper function to convert units to text representation.
Parameters
----------
output_unit_list : list of float
A list of the output units to parse.
Returns
-------
conc_unit_names : list of str
A list of concentration unit names.
flux_unit_names : list of str
A list of flux unit names.
"""
conc_unit_names = []
flux_unit_names = []
for output_unit in output_unit_list:
if math.isclose(output_unit, 1e-12):
conc_unit = 'pmol mol$^{-1}$'
flux_unit = 'pmol m$^{-2}$ s$^{-1}$'
elif math.isclose(output_unit, 1e-9):
conc_unit = 'nmol mol$^{-1}$'
flux_unit = 'nmol m$^{-2}$ s$^{-1}$'
elif math.isclose(output_unit, 1e-6):
        conc_unit = r'$\mu$mol mol$^{-1}$'
        flux_unit = r'$\mu$mol m$^{-2}$ s$^{-1}$'
elif math.isclose(output_unit, 1e-3):
conc_unit = 'mmol mol$^{-1}$'
flux_unit = 'mmol m$^{-2}$ s$^{-1}$'
elif math.isclose(output_unit, 1e-2):
conc_unit = '%'
flux_unit = '% m$^{-2}$ s$^{-1}$'
elif math.isclose(output_unit, 1.):
conc_unit = 'mol mol$^{-1}$'
flux_unit = 'mol m$^{-2}$ s$^{-1}$'
else:
conc_unit = 'undefined unit'
flux_unit = 'undefined unit'
conc_unit_names.append(conc_unit)
flux_unit_names.append(flux_unit)
return conc_unit_names, flux_unit_names
|
8a84ed26b2341bfa161becec3ebf395cda3f5d61
| 480,787 |
def dist(v1, v2):
""" distance between two vectors. """
d = ((v2.x - v1.x)**2 + (v2.y - v1.y)**2) ** 0.5
return d
|
d06b3fb6543a1531d71db7345912153008e7e7d1
| 40,990 |
from typing import Set
def possible_types(name: str) -> Set[str]:
"""Build a set of all possible types from a fully qualified name."""
labels = name.split('.')
label_count = len(labels)
types = set()
for count in range(label_count):
parts = labels[label_count - count - 4 :]
if not parts[0].startswith('_'):
break
types.add('.'.join(parts))
return types
|
214533e94c3eadbcdfe9f1462e4490a211894a9e
| 131,776 |
def format_tc_rate(rate):
"""
Formats a bits/second rate into a tc rate string
"""
if rate >= 1000000000 and (rate % 1000000000) == 0:
return "%.0fgbit" % (rate / 1000000000.)
elif rate >= 1000000 and (rate % 1000000) == 0:
return "%.0fmbit" % (rate / 1000000.)
elif rate >= 1000:
return "%.0fkbit" % (rate / 1000.)
else:
return "%.0fbit" % rate
|
df96b0139a1303e2d8be6cbda4afa22fa1d63049
| 208,546 |
def best_match(names, i):
"""
Find matches starting from index i.
Matches have to be at least 3 letters, otherwise it's a mess for big sets.
Go with largest set of matches instead of longer matches.
Return (number of matches, matching chars)
"""
matchlen = 3
nmatches = 1
while (i+nmatches < len(names) and names[i+nmatches][:matchlen] ==
names[i][:matchlen]):
nmatches += 1
while matchlen < 8 and all([names[i+j][:matchlen+1] ==
names[i][:matchlen+1] for j in range(1,nmatches)]):
matchlen += 1
return nmatches, names[i][:matchlen]
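A usage sketch on a sorted list of made-up names:
names = ['apple', 'apply', 'apricot', 'banana']
best_match(names, 0)   # -> (2, 'appl'): 'apple' and 'apply' share a 4-char prefix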
|
150ac3b1169714af16b7930662ca0f3498b0cb2b
| 354,924 |
from typing import Optional
from typing import Iterable
from typing import Any
def iterable_to_fields(fields: Optional[Iterable[Any]]) -> str:
"""Convert iterable to query fields string"""
fields = f"{', '.join(i for i in fields)}" if fields else "*"
return fields
|
d11f3f984059033523fafbb781c1400b7b04f462
| 353,832 |
def _find_provider(hass, prov_type):
"""Return provider for type."""
for provider in hass.auth.auth_providers:
if provider.type == prov_type:
return provider
return None
|
e8cfa630f961f330785064c9b8e0d51d2231ef35
| 692,885 |
from pathlib import Path
def get_project_root() -> Path:
"""Return the path to project (i.e. the top level dat_analysis folder which contains src etc"""
return Path(__file__).parent.parent.parent
|
546015b3555ff7ace6109e064dd94ae7db9491de
| 32,980 |
def get_best_rssi_per_location(sensors):
"""Find the strongest signal in each location."""
best_rssi = {}
for sensor in sensors.values():
location = sensor["location"]
rssi = sensor["rssi"]
if rssi and (location not in best_rssi or rssi > best_rssi[location]):
best_rssi[location] = rssi
return best_rssi
|
0d4a51efe0468251aa47b0cd551c35c88e7f62b0
| 93,632 |
import random
def populate(num_rats, min_wt, max_wt, mode_wt):
"""Initialize a population with a triangular distribution of weights."""
return [int(random.triangular(min_wt, max_wt, mode_wt))\
for i in range(num_rats)]
|
e69809b25ea63fba0487794e6d0f4982897461c2
| 672,421 |
def get_manga_target(flag_id, bitmasks, header):
"""Get MANGA_TARGET[``flag_id``] flag.
Parameters:
flag_id (str):
Flag ID number (e.g., "1" for MANGA_TARGET1).
bitmasks (dict):
`Maskbit` objects.
header (`astropy.io.fits.header.Header`):
File header.
Returns:
`Maskbit`
"""
flag_id = str(int(flag_id))
manga_target = bitmasks['MANGA_TARGET{}'.format(flag_id)]
try:
manga_target.mask = int(header['MNGTRG{}'.format(flag_id)])
except KeyError:
manga_target.mask = int(header['MNGTARG{}'.format(flag_id)])
return manga_target
|
cbf678fed5000367bc3536bdd1d36ffe94ab1331
| 217,639 |
def parse_datafile(path, data_names, transform_funcs, cols_to_parse=None):
"""Parses a data file given by path and structured as a table where rows are separated by \n
and columns are separated by any of whitespaces. The first line in the file will be ignored.
Processed columns are given by cols_to_parse (all columns will be processed if it is empty).
Corresponding names and transformation functions for columns in cols_to_parse are given by
data_names and transform_funcs. Transformation function must be a mapping string -> type.
Returns a dictionary where a key corresponds to a column name (i.e., taken from data_names)
and a value corresponds to a list of the columns values taken from all rows.
"""
    if not cols_to_parse:
cols_to_parse = range(len(data_names))
if len(data_names) != len(transform_funcs) or len(data_names) != len(cols_to_parse):
raise Exception('Number of data names, transform functions and columns to be parsed is inconsistent')
data = {}
for data_name in data_names:
data[data_name] = []
    with open(path, 'r') as f:  # if the file is missing, the exception propagates anyway
        lines = f.readlines()
for line in lines[1:]: # skip the first line
tmp = line.split()
if len(tmp) < len(data_names):
raise Exception('Number of given data names is larger than number of columns we have in the data file.')
for i, data_name in enumerate(data_names):
val = tmp[cols_to_parse[i]]
data[data_name].append(transform_funcs[i](val))
return data
|
ad89308c50a293f8347c89fba19a7e19c9ce4008
| 536,174 |
import importlib
def import_(module_name, name):
"""Imports an object by a relative module path::
Profiler = import_('profiling.profiler', 'Profiler')
"""
module = importlib.import_module(module_name, __package__)
return getattr(module, name)
|
26346e3d51fc2683fd3d1680ad1a89bd3713fdc6
| 430,940 |
def momentum_update(update, learning_rate, mu, momentum, use_nesterov):
"""Computes a momentum update for a single parameter."""
momentum = (mu * momentum) + update
if use_nesterov:
update = learning_rate * ((mu * momentum) + update)
else:
update = learning_rate * momentum
return update, momentum
|
ce73d3a04616647daccd6646e32667d75b7a0195
| 553,076 |
def _capability(interface,
version=3,
supports_deactivation=None,
cap_type='AlexaInterface'):
"""Return a Smart Home API capability object.
https://developer.amazon.com/docs/device-apis/alexa-discovery.html#capability-object
There are some additional fields allowed but not implemented here since
we've no use case for them yet:
- properties.supported
- proactively_reported
- retrievable
`supports_deactivation` applies only to scenes.
"""
result = {
'type': cap_type,
'interface': interface,
'version': version,
}
if supports_deactivation is not None:
result['supportsDeactivation'] = supports_deactivation
return result
|
b12abc29c9fa1dd5bb7e8795be6dd6de8c865f40
| 664,814 |
def _find_subcommand(args):
"""Return the subcommand that has been passed, if any.
Parameters
----------
args : list
The argument list.
Returns
-------
Optional[:class:`str`]
If a subcommand is found, returns the string of its name. Returns None
otherwise.
Notes
-----
This assumes that "manim" is the first word in the argument list, and that
the subcommand will be the second word, if it exists.
"""
subcmd = args[1]
if subcmd in [
"cfg"
# , 'init',
]:
return subcmd
else:
return None
|
2921dbb86b73a28ef51fb7a74638a86da47a6fed
| 519,327 |
import re
def get_all_sections_of_type(rose_conf, pat):
"""
Extract all the sections matching a given regex pattern
rose_conf : original rose configuration
pat : regex pattern
"""
    res = []
    for section in rose_conf.sections():
        m = re.match(pat, section)
        if m:
            res.append(m.group(1))
    return res
|
d9951120e662b71cb566bcdaf40faae67f4f7b53
| 471,017 |
def urlify(input_string):
"""Urlify a string."""
return input_string.replace(' ', '%20')
|
f69b0f97c4a67370eaccdad4730fd5335410598f
| 286,056 |
import re
def natural_sort_key(sort_key, _nsre=re.compile('([0-9]+)')):
"""
Pass to ``key`` for ``str.sort`` to achieve natural sorting.
For example, ``["2", "11", "1"]`` will be
sorted to ``["1", "2", "11"]`` instead of ``["1", "11", "2"]``
:param sort_key: Original key to be processed
:return: A list of string and integers.
"""
keys = []
for text in _nsre.split(sort_key):
if text.isdigit():
keys.append(int(text))
else:
keys.append(text)
return keys
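For illustration:
sorted(['img12.png', 'img2.png', 'img1.png'], key=natural_sort_key)
# -> ['img1.png', 'img2.png', 'img12.png']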
|
69a2a18701fdcf72041cdac9037c19b3f2561c6c
| 657,965 |
def changeTwoStar(G, A, i):
"""
Change statistic for two-star
*--o
\
o
"""
return (G.degree(i) * (G.degree(i) - 1))/2.0 if G.degree(i) > 1 else 0
|
912a862f93c8331bb128c74de2c92ce6688e34eb
| 170,642 |
import re
def get_ns_dict(xml):
"""Take an xml string and return a dict of namespace prefixes to
namespaces mapping."""
nss = {}
def_cnt = 0
matches = re.findall(r'\s+xmlns:?(\w*?)\s*=\s*[\'"](.*?)[\'"]', xml)
for match in matches:
prefix = match[0]; ns = match[1]
if prefix == '':
def_cnt += 1
prefix = '_' * def_cnt
nss[prefix] = ns
return nss
|
065b67ac38fb52057c2f79377991c1d4ec53be77
| 403,888 |
import socket
def ip_check(serverip):
""" Taken from EARCIS code.
Check if the server IP passed in is a valid IPv4/v6 address, if not, return False; otherwise, return True.
Input: (serverip) % (str)
Output: Boolean
"""
try:
socket.inet_aton(serverip) #Would fail if it is not a valid IPv4 address.
except socket.error:
try:
socket.inet_pton(socket.AF_INET6, serverip) #Would fail if it is not a valid IPv6 address.
except socket.error:
return False
return True
|
202c6d7167d3dafafc922743ee3fe56fa14d5bb3
| 478,843 |
import re
def resolve_cpus(name, target):
"""
Returns a list of cpu numbers that corresponds to a passed name.
Allowed formats are:
- 'big'
- 'little'
- '<core_name> e.g. 'A15'
- 'cpuX'
- 'all' - returns all cpus
- '' - Empty name will also return all cpus
"""
cpu_list = list(range(target.number_of_cpus))
# Support for passing cpu no directly
if isinstance(name, int):
cpu = name
if cpu not in cpu_list:
message = 'CPU{} is not available, must be in {}'
raise ValueError(message.format(cpu, cpu_list))
return [cpu]
# Apply to all cpus
if not name or name.lower() == 'all':
return cpu_list
# Deal with big.little substitution
elif name.lower() == 'big':
name = target.big_core
if not name:
raise ValueError('big core name could not be retrieved')
elif name.lower() == 'little':
name = target.little_core
if not name:
raise ValueError('little core name could not be retrieved')
# Return all cores with specified name
if name in target.core_names:
return target.core_cpus(name)
# Check if core number has been supplied.
else:
core_no = re.match('cpu([0-9]+)', name, re.IGNORECASE)
if core_no:
cpu = int(core_no.group(1))
if cpu not in cpu_list:
message = 'CPU{} is not available, must be in {}'
raise ValueError(message.format(cpu, cpu_list))
return [cpu]
else:
msg = 'Unexpected core name "{}"'
raise ValueError(msg.format(name))
|
b4ababdbe1f89daa719909d0c202bba3bd6109d3
| 564,524 |
import torch
def flatten(params):
"""
    Turns a module's parameters (or gradients) into a flat torch tensor
params: the module's parameters (or gradients)
"""
with torch.no_grad():
return torch.cat([p.data.view(-1) for p in params])
|
1e278028f1878aa1cadc730fe03007cb6062f4c3
| 624,359 |
import math
def area(r):
"""
Function calculating area of circle of radius r
"""
return math.pi*r**2
|
14a1d36cad6789662d053b3d71a1ec8993ca225d
| 330,813 |
def dynamic_range_compression(db, threshold, ratio, method='downward'):
"""
Execute dynamic range compression(https://en.wikipedia.org/wiki/Dynamic_range_compression) to dB.
:param db: Decibel-scaled magnitudes
:param threshold: Threshold dB
:param ratio: Compression ratio.
:param method: Downward or upward.
:return: Range compressed dB-scaled magnitudes
"""
    if method == 'downward':
db[db > threshold] = (db[db > threshold] - threshold) / ratio + threshold
    elif method == 'upward':
db[db < threshold] = threshold - ((threshold - db[db < threshold]) / ratio)
return db
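A sketch of downward compression on made-up dB values (note db is also modified in place):
import numpy as np
db = np.array([-30.0, -10.0, 0.0])
dynamic_range_compression(db, threshold=-20.0, ratio=4.0)
# -> array([-30. , -17.5, -15. ])  values above -20 dB are pulled toward it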
|
1c5434a4bda28959bfa4b71a594e2cf6fbd71b79
| 670,801 |
def __splitTime(sec):
"""Splits a timestamp into hour, minute, and second components.
:type sec: int
:param sec: timestamp as an epoch
:rtype: tuple
:return: (hour, min, sec)
"""
minute, sec = divmod(sec, 60)
hour, minute = divmod(minute, 60)
return hour, minute, sec
|
cc2c80f1c5e9df8c0f4b374bfe31cc2132767cc0
| 571,299 |
import torch
def gram_matrix(input_tensor):
"""
Compute Gram matrix
:param input_tensor: input tensor with shape
(batch_size, nbr_channels, height, width)
:return: Gram matrix of y
"""
(b, ch, h, w) = input_tensor.size()
features = input_tensor.view(b, ch, w * h)
features_t = features.transpose(1, 2)
# more efficient and formal way to avoid underflow for mixed precision training
input = torch.zeros(b, ch, ch).type(features.type())
gram = torch.baddbmm(
input, features, features_t, beta=0, alpha=1.0 / (ch * h * w), out=None
)
# naive way to avoid underflow for mixed precision training
# features = features / (ch * h)
# gram = features.bmm(features_t) / w
# for fp32 training, it is also safe to use the following:
# gram = features.bmm(features_t) / (ch * h * w)
return gram
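A shape-level usage sketch (random input, for illustration):
x = torch.randn(2, 3, 8, 8)   # batch of 2 feature maps, 3 channels
g = gram_matrix(x)
g.shape                       # torch.Size([2, 3, 3])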
|
f79e037d7b827d6d8a1da53c7773d9b74f393732
| 452,377 |
def _EraseTombstone(device, tombstone_file):
"""Deletes a tombstone from the device.
Args:
device: An instance of DeviceUtils.
tombstone_file: the tombstone to delete.
"""
return device.RunShellCommand(
'rm /data/tombstones/' + tombstone_file, root=True)
|
00e6f316062785d7465f501ea743a2dc94864aef
| 9,419 |
def dec(x):
"""Decrements its argument"""
return x - 1
|
a7c5b7275d2837ccfdaf0f9c1197ad15d9dedf73
| 383,015 |
def make_url(action: str, team: str = 'authenticate') -> str:
""" Make an url based on team and action
:param team: Name of team
:param action: An Teamwork action
:return: An url based on url and action
"""
url = f'https://{team}.teamwork.com/{action}.json'
return url
|
96e0e965bf1f3886204bff38a448452ac1aab2eb
| 171,782 |
def distinct_series(df, keep="first"):
"""Verb: select distinct values from Series
Parameters
-----------
keep : which instance to keep in case of duplicates (see :meth:`pandas.Series.duplicated`)
"""
duplicated = df.duplicated(keep)
return df[~duplicated]
|
174c7eee3315d8b6e91704633351b3e124a4b273
| 567,193 |
def basename(name):
"""
Generate the basename from a ros name.
Args:
name (:obj:`str`): ros name
Returns:
:obj:`str`: name stripped up until the last slash or tilde character.
Examples:
.. code-block:: python
basename("~dude")
# 'dude'
basename("/gang/dude")
# 'dude'
"""
return name.rsplit('/', 1)[-1].rsplit('~', 1)[-1]
|
5738713b8c46f6b792d86ddde44c56ac44c1f230
| 458,853 |
def validate_fields(fields, about_file_path, running_inventory, base_dir,
reference_dir=None):
"""
Validate a sequence of Field objects. Return a list of errors.
Validation may update the Field objects as needed as a side effect.
"""
errors = []
for f in fields:
val_err = f.validate(
base_dir=base_dir,
about_file_path=about_file_path,
running_inventory=running_inventory,
reference_dir=reference_dir,
)
errors.extend(val_err)
return errors
|
3be183856a6c736d240ef9ea68126d4afa166846
| 131,497 |
def bonds2str(bonds):
"""
Convert list of bonds into TopoCIF string.
"""
bs = []
for b in bonds:
bs.append(" ".join(
[f"{x:<5}" for x in b[:3]] + [f"{1:>3}"] + \
["{:>2} {:>2} {:>2}".format(*b[3])] + [f"{1:>3}"] + \
["{:>2} {:>2} {:>2}".format(*b[4])] + [f"{'V':>2}{1:>2}"]
))
return "\n".join(bs)
|
1b2a9644ed3ff67ba78f2744d73c7b63460aebef
| 468,016 |
import collections
def duplicate_indices(iterable):
"""Make an iterator that returns duplicate items from the ``seq`` along
    with the indices of that item. ::
>>> seq = ['a', 'b', 'c', 'b', 'd']
>>> dups = duplicate_indices(seq)
>>> next(dups)
('b', [1, 3])
"""
tally = collections.defaultdict(list)
for i, item in enumerate(iterable):
tally[item].append(i)
dups = ((item, idxs) for item, idxs in tally.items() if len(idxs) > 1)
return dups
|
6b3b693a6f38e13b6015aea3376223338d47edd2
| 396,035 |
import unicodedata
import re
def friendly_filename_or_url(value: str) -> str:
"""
Normalize any string to be file name and URL friendly.
Convert to lowercase, remove non-alpha characters,
and convert spaces to hyphens.
"""
# Taken from:
# https://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename/295466#295466
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    value = re.sub(r'[-\s]+', '-', value)
return value
|
8245946e0aa82bba36b89791ae145e9e0c778c89
| 255,721 |
def icontains(header_value: str, rule_value: str) -> bool:
"""
Case insensitive implementation of str-in-str lookup.
Parameters
----------
header_value : str
String to look within.
rule_value : str
String to look for.
Returns
-------
bool
        Whether *rule_value* exists in *header_value*.
"""
return rule_value.lower() in header_value.lower()
|
00f56eea5d63f1f31fc2eeee1b0bd54fed6b6a56
| 363,918 |
def hot_to_smile(onehot_encoded, alphabet):
"""
Go from one-hot encoding to smile string
"""
# From one-hot to integer encoding
integer_encoded = onehot_encoded.argmax(1)
# print('integer_encoded ', integer_encoded)
int_to_char = dict((i, c) for i, c in enumerate(alphabet))
# print('DECODING: ', int_to_char)
# integer encoding to smile
regen_smile = "".join(int_to_char[x] for x in integer_encoded)
regen_smile = regen_smile.strip()
return regen_smile
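An illustrative round-trip with a toy alphabet (assumes a numpy-style array input):
import numpy as np
alphabet = ['C', 'N', 'O']
onehot = np.array([[1, 0, 0], [0, 0, 1], [0, 1, 0]])
hot_to_smile(onehot, alphabet)   # -> 'CON'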
|
7cb360754fa80b63428f4bb360237ab2efa0ace5
| 525,825 |
def Weight_In_Pounds(Weight):
"""Format Weight into Pounds String"""
if Weight < 0 :
raise ValueError("Number Cannot Be Negative")
Weight_Display = f"{Weight} lbs"
return Weight_Display
|
94d519f0f21bd607b7ee07d374db3e81bf553396
| 371,842 |
def nextmonth(yr, mo):
"""get next month from yr/mo pair"""
    return (yr + mo // 12, mo % 12 + 1)
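For illustration:
nextmonth(2023, 5)    # -> (2023, 6)
nextmonth(2023, 12)   # -> (2024, 1)  wraps into the next year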
|
149b203b7988980e1ab3f9ca02b3817874437a64
| 116,097 |
from typing import List
def get_canonical_label(num_list: List[dict], denom_list: List[dict], lang: str = "en") -> str:
"""Use the processed numerators and denominators from a unit input to create a label."""
if not denom_list:
# No denominators
return " ".join([n[f"label_{lang}"] for n in num_list])
elif not num_list:
# No numerators
result = ["reciprocal"]
result.extend([d[f"label_{lang}"] for d in denom_list])
return " ".join(result)
# Mix of numerators and denominators
result = []
result.extend([n[f"label_{lang}"] for n in num_list])
result.append("per")
result.extend([d[f"label_{lang}"] for d in denom_list])
return " ".join(result)
|
2b7e37d92ad9ae51f0857473a764d8d69ddf57fd
| 537,477 |
def differences_product(diff):
"""Product of count of differences 1 and 3."""
return diff.count(1) * diff.count(3)
|
3bd6c2f809a082e87a5523e30137f9161da7ace1
| 132,785 |
def get_coord_mapping_for_ref(aln):
"""
:param aln: sequence alignnment for the reference
:return: dict of (0-based aln pos, 0-based ref pos) and dict of (0-based ref pos, 0-based aln pos)
"""
aln_i = 0
ref_i = 0
map_aln_to_ref = {}
map_ref_to_aln = {}
for i,s in enumerate(aln):
if s != '-':
map_ref_to_aln[ref_i] = aln_i
map_aln_to_ref[aln_i] = ('M', ref_i)
ref_i += 1
else:
# insertion w.r.t reference
map_aln_to_ref[aln_i] = ('I', ref_i)
aln_i += 1
return map_aln_to_ref, map_ref_to_aln
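A small example with a made-up alignment (the '-' marks an insertion w.r.t. the reference):
a2r, r2a = get_coord_mapping_for_ref('A-CG')
a2r   # {0: ('M', 0), 1: ('I', 1), 2: ('M', 1), 3: ('M', 2)}
r2a   # {0: 0, 1: 2, 2: 3}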
|
ab25555a6386ea5bb662b954f63363292b0299b6
| 165,304 |
import re
def filter_paragraphs(paragraphs, contains=None):
"""Filter paragraphs to only those containing one of a list of strings
Parameters
----------
paragraphs : list of str
List of plaintext paragraphs from an article
contains : str or list of str
Exclude paragraphs not containing this string as a token, or
at least one of the strings in contains if it is a list
Returns
-------
str
Plaintext consisting of all input paragraphs containing at least
one of the supplied tokens.
"""
if contains is None:
pattern = ''
else:
if isinstance(contains, str):
contains = [contains]
pattern = '|'.join(r'[^\w]%s[^\w]' % shortform
for shortform in contains)
paragraphs = [p for p in paragraphs if re.search(pattern, p)]
return '\n'.join(paragraphs) + '\n'
|
be80e7e56b487fa4badad19eea10eb7159a5fa60
| 171,949 |
def zip_tasks_verbose_output(table, stdstreams):
"""Zip a list of strings (table) with a list of lists (stdstreams)
:param table: a formatted list of tasks
:param stdstreams: for each task, a list of lines from stdout/stderr tail
"""
if len(table) != len(stdstreams):
raise ValueError('Can only zip same-length lists')
output = []
for i in range(len(table)):
output.append(table[i])
        output.extend(stdstreams[i])
return output
|
33d74cd274ec39330cbc127a3088a430b80d234a
| 701,523 |
def calculate_route_cost(route_locations, route, jobs, cost_matrix):
"""
Calculates cost of each route.
:param route_locations: Job locations indexes in route.
:param route: Job ids in route.
:param jobs: Jobs information, which includes job ids, location indexes and delivery.
:param cost_matrix: Array of location to location travel cost.
:return: Route cost including traveling cost and service cost
"""
route_cost = 0
for node in range(len(route_locations) - 1):
traveling_cost = cost_matrix[route_locations[node]][route_locations[node + 1]]
service_cost = jobs[route[node]][2]
route_cost = route_cost + traveling_cost + service_cost
return route_cost
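A usage sketch with a toy problem (job tuples assumed to be (id, location, service_cost)):
jobs = {'j1': ('j1', 0, 5), 'j2': ('j2', 1, 3), 'j3': ('j3', 2, 0)}
cost_matrix = [[0, 2, 4], [2, 0, 3], [4, 3, 0]]
calculate_route_cost([0, 1, 2], ['j1', 'j2', 'j3'], jobs, cost_matrix)
# -> 13  (travel 2 + service 5, then travel 3 + service 3)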
|
683961e28cfe7cdd662a5e0dca877495280559ce
| 173,353 |
def formatted_time(t, sep=' '):
"""Return a number of seconds as a string in days, hours, mins and
maybe secs."""
t = int(t)
fmts = (('{:d}d', 86400), ('{:02d}h', 3600), ('{:02d}m', 60))
parts = []
for fmt, n in fmts:
val = t // n
if parts or val:
parts.append(fmt.format(val))
t %= n
if len(parts) < 3:
parts.append(f'{t:02d}s')
return sep.join(parts)
|
29d15c88289a8c23a3d1a6f4b95d6aaee253b655
| 304,428 |
def read_split_file(split_file):
"""
Read text file with pre-defined split, returning list of examples.
One example per row in text file.
"""
with open(split_file) as f:
# file may contain integer indices or string identifiers (e.g. PDB
# codes)
lines = f.readlines()
try:
split = [int(x.strip()) for x in lines]
except ValueError:
split = [x.strip() for x in lines]
return split
|
2430bbee9a80688749fea6ddd10b718eff7bcd0e
| 85,277 |
import socket
def get_node_ep(hostname, remote_addr):
"""
Return the endpoint to be used for the node based by trying to resolve the hostname provided
:param hostname: the provided hostname
:param remote_addr: the address the request came from
:returns: the node's location
"""
try:
socket.gethostbyname(hostname)
return hostname
except socket.gaierror:
return remote_addr
|
2c452149bbea8375abe09ac38096b3bb4e8342b2
| 169,103 |
def facet_processing(facets):
"""
Convert PySolr facet return to regular dictionary form
"""
facet_fields_dict = {}
for facet_field, facet_data in facets.items():
facet_pairs = zip(facet_data[::2], facet_data[1::2])
facet_dict = {}
for key, value in facet_pairs:
facet_dict[key] = value
facet_fields_dict[facet_field] = facet_dict
return facet_fields_dict
|
b4ecb556dc377ab1ac97fb516f3d00b47fb1f7ba
| 444,247 |
import torch
import math
def pure_tone(freq: float, sr: float = 128, dur: float = 4, device=None):
"""
Return a pure tone, i.e. cosine.
Args:
freq (float): frequency (in Hz)
sr (float): sample rate (in Hz)
dur (float): duration (in seconds)
"""
time = torch.arange(int(sr * dur), device=device).float() / sr
return torch.cos(2 * math.pi * freq * time)
|
9995fe5252096eb2a15658a6678c6cbd209972d1
| 217,517 |
def normcase(s):
"""Normalize case of pathname.
Makes all characters lowercase and all altseps into seps."""
return s.replace('\\', '/').lower()
|
d8c0b9c0e178fee092444f40d53e070138294169
| 302,962 |
import collections
def __label_units(units_dict, unit_id_dict):
"""Remaps the keys of dictionary to their label in another dictionary
Args:
units_dict (dict): Dictionary of unit_ids to steps_when_seen.
unit_id_dict (dict): Dictionary of unit_ids to unit_labels.
Returns:
Dictionary: { unit_id : step_when_seen } <int> : <int>
Example:
With a dictionary of {... 43 : "Viking" ... }
we would remap a units_dict of:
{ ... 43 : 800 ... } to { ... "Viking" : 800 ... }
"""
labeled_units_seen_dict = collections.OrderedDict()
for unit_id, step in units_dict.items():
label = unit_id_dict[int(unit_id)]
labeled_units_seen_dict[label] = step
return labeled_units_seen_dict
|
998a8eddaeba0e60252832940a100fe144177f02
| 244,458 |
def noop_parser(_name, _entity_type, _node):
"""Macro and component parser that does nothing and returns an empty dict.
Used as default parsers for Macro.
"""
return {}
|
ac1283b5864dd961d452a11cd2b8c8f9baee272d
| 526,992 |
import re
def get_space_normalized_segment(segment):
"""Remove leading/trailing as well as multiple spaces from a string.
:param str segment: a segment (sentence)
:return: a string with no leading/trailing spaces and only containing single whitespaces
:rtype: str
"""
return re.sub(r'\s{2,}', ' ', segment).strip()
|
5f1c04f28e5fda8fc501f979652846aacbb33cd8
| 650,305 |
import torch
def no_length_penalty(lengths):
"""A dummy function that returns a no penalty (1)."""
return torch.ones_like(lengths).to(torch.float).unsqueeze(-1)
|
d4aa6dbabdb06feaf10f5567950273b7d6d12700
| 276,543 |
import math
def get_mux_dimensions(I, max_height):
"""Finds the row and column count for the given mux.
Parameters
----------
I : int
Input count.
max_height : int
Maximum height in gate pitches.
Returns
-------
Tuple[int]
Row and column count.
"""
col_cnt = int(min(max_height / 2.0, math.ceil(I ** 0.5)))
row_cnt = int(math.ceil(I / float(col_cnt)))
return row_cnt, col_cnt
|
83075d72344d914d5e02566c4bdc270477c17e51
| 253,082 |
def fahrenheit_to_kelvin(temperature):
"""Convert from Fahrenheit to Kelvin
:param temperature: degree understood to be in Fahrenheit
NOTE:
Fahrenheit to Kelvin formula: K = 5/9(°F - 32) + 273.15
Subtract by 32, multiply by 5/9, then add 273.15.
"""
return (5.0 / 9.0) * (temperature - 32) + 273.15
|
85c9f76f2738a9c59d4785cd3c084f91dc6c4045
| 460,780 |
def _ReviewersFromChange(change):
"""Return the reviewers specified in the |change|, if any."""
reviewers = set()
reviewers.update(change.ReviewersFromDescription())
reviewers.update(change.TBRsFromDescription())
# Drop reviewers that aren't specified in email address format.
return set(reviewer for reviewer in reviewers if '@' in reviewer)
|
df35481850c25024dac205922291d1f257609dd7
| 470,121 |
import operator
def add_globals(env):
""" Adds built-in procedures and variables to env. """
env.update({
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.truediv,
'>': operator.gt,
'<': operator.lt,
'>=': operator.ge,
'<=': operator.le,
'=': operator.eq
})
env.update({'True': True, 'False': False})
return env
|
cb5653dd447e8d4992addaec616d1e5f7870b462
| 188,615 |
from typing import Sequence
from typing import Tuple
from typing import List
from typing import Dict
import csv
def parse(raw: Sequence[str]) -> Tuple[List[str], List[List[str]], List[List[float]], Dict[str, int]]:
"""Returns a tuple of (hdrs, data, timeseries, column idx map)"""
data = list(csv.reader(raw))
print(len(data))
print(len(data[0]))
# check that it's actually a matrix
print(set(len(r) for r in data))
hdrs, data = data[0], data[1:]
ts = [[float(x) for x in r[12:]] for r in data]
col = {l: i for i, l in enumerate(hdrs)}
return hdrs, data, ts, col
|
b420ca46d86670dc9f97fac35166ffa5c98fa1ab
| 83,440 |
def gather_file_lines(filename):
"""Read all file lines into a list in memory"""
with open(filename, 'r') as mdf:
return mdf.readlines()
|
4e1fcf629746fd70c0cf57daeb31344c95fdb8a0
| 579,496 |
def merge_dicts(inputs):
"""
Merge multiple input dicts into a single dict.
Parameters
----------
inputs : list
List of dictionaries.
"""
output = {}
for i in inputs:
output.update(i)
return output
|
77bd9906136bce7ff7b18f62f43ca4b8cba685a8
| 363,882 |
def annualize_ret(returns, periods):
"""
Annualizes a set of returns
"""
cum_ret = (1 + returns).prod()
n_periods = returns.shape[0]
return cum_ret ** (periods / n_periods) - 1
|
cca4d8f28b26a63b53cc0235fcba7b43f0c8a69d
| 195,589 |
import math
def secondsToPhraseTime(seconds):
"""
Converts seconds to a phrase time (ex: 65 = 1 minute 5 seconds).
"""
ONE_DAY = 60 * 60 * 24
ONE_HOUR = 60 * 60
ONE_MINUTE = 60
ONE_YEAR = 60 * 60 * 24 * 365
remaining_seconds = seconds
result_string = ''
if remaining_seconds > ONE_YEAR:
years = remaining_seconds / ONE_YEAR
years = math.floor(years)
remaining_seconds = remaining_seconds - years * ONE_YEAR
result_string += '1 year ' if 1 == years else str(int(years)) + ' years '
if ONE_DAY < remaining_seconds:
days = remaining_seconds / ONE_DAY
days = math.floor(days)
remaining_seconds = remaining_seconds - days * ONE_DAY
result_string += '1 day ' if 1 == days else str(int(days)) + ' days '
if ONE_HOUR < remaining_seconds:
hours = remaining_seconds / ONE_HOUR
hours = math.floor(hours)
remaining_seconds = remaining_seconds - hours * ONE_HOUR
result_string += '1 hour ' if 1 == hours else str(int(hours)) + ' hours '
if ONE_MINUTE < remaining_seconds:
minutes = remaining_seconds / ONE_MINUTE
minutes = math.floor(minutes)
remaining_seconds = remaining_seconds - minutes * ONE_MINUTE
result_string += '1 minute ' if 1 == minutes else str(int(minutes)) + ' minutes '
result_string += '1 second ' if 1 == remaining_seconds else str(int(remaining_seconds)) + ' seconds '
return str(result_string)
|
728eb5e377e4bf202d84231b1937921c9184fb31
| 178,983 |
def dictAsKvParray(data, keyName, valueName):
"""
Transforms the contents of a dictionary into a list of key-value tuples.
For the key-value tuples chosen names for the keys and the values will be used.
    :param data: The dictionary from which the data should be obtained. \t
:type data: Dict<mixed, mixed> \n
:param keyName: The name assigned to the key of the tuples. \t
:type keyName: string \n
:param valueName: The name assigned to the value of the tuples \t
:type valueName: string \n
:returns: A list of key-value tuples with the chosen names for the keys and values. \t
:rtype: [{keyName: mixed, valueName: mixed}] \n
"""
res = []
for key in data.keys():
res.append({keyName: key, valueName: data[key]})
return res
|
00aad99776eec512be1e510b9c783e4118b31cd9
| 70,364 |
def get_split_indices(word, curr_tokens, include_joiner_token, joiner):
"""Gets indices for valid substrings of word, for iterations > 0.
For iterations > 0, rather than considering every possible substring, we only
want to consider starting points corresponding to the start of wordpieces in
the current vocabulary.
Args:
word: string we want to split into substrings
curr_tokens: string to int dict of tokens in vocab (from previous iteration)
include_joiner_token: bool whether to include joiner token
joiner: string used to indicate suffixes
Returns:
list of ints containing valid starting indices for word
"""
indices = []
start = 0
while start < len(word):
end = len(word)
while end > start:
subtoken = word[start:end]
# Subtoken includes the joiner token.
if include_joiner_token and start > 0:
subtoken = joiner + subtoken
# If subtoken is part of vocab, "end" is a valid start index.
if subtoken in curr_tokens:
indices.append(end)
break
end -= 1
if end == start:
return None
start = end
return indices
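A toy wordpiece example (vocabulary made up for illustration):
vocab = {'un', '##aff', '##able'}
get_split_indices('unaffable', vocab, include_joiner_token=True, joiner='##')
# -> [2, 5, 9]  i.e. un / ##aff / ##able
get_split_indices('unknown', vocab, include_joiner_token=True, joiner='##')
# -> None  (no wordpiece covers the remainder)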
|
203543d4b46942a4aa6dc80918f27cadd0a6b05c
| 450,088 |
def existing_query(query):
"""
Returns:
Query: With a single "True" view (maps all rows) called "one".
"""
query.data = {
'views': {
'one': {
'map': 'function(doc, meta){emit(1, doc);}',
},
},
}
query.create_update()
return query
|
0843310c59ae4f45679d9f42a8005f87fbb4ac5c
| 366,063 |
import re
def _parse_s3_uri(s3_uri):
"""Parse an Amazon S3 URI into bucket and key entities
Parameters
----------
s3_uri : str
The Amazon S3 URI
Returns
-------
dict
dict with keys "bucket" and "key"
"""
# Parse s3_uri into bucket and key
pattern = r"(s3:\/\/)?(?P<bucket>[^\/]*)\/(?P<key>.*)"
m = re.match(pattern, s3_uri)
if m is None:
raise ValueError(
f"s3_uri is not a valid URI. It should match the regex "
f"pattern {pattern}. You provided {s3_uri}."
)
else:
return m.groupdict()
|
4f2e90a8b39ce38112e284a02564e743ecdd7a41
| 306,787 |
def prf(correct, pred_sum, gold_sum):
"""
Calculate precision, recall and f1 score
Parameters
----------
correct : int
number of correct predictions
pred_sum : int
number of predictions
gold_sum : int
number of gold answers
Returns
-------
tuple
(p, r, f)
"""
if pred_sum:
p = correct / pred_sum
else:
p = 0
if gold_sum:
r = correct / gold_sum
else:
r = 0
if p + r:
f = 2 * p * r / (p + r)
else:
f = 0
return p, r, f
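For illustration:
prf(correct=8, pred_sum=10, gold_sum=16)
# -> (0.8, 0.5, 0.6153846153846154)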
|
403e5149bca0740f879616367e555ac75cc95801
| 488,462 |