content | sha1 | id
---|---|---|
import torch
def pairwiseL2Dist(x1, x2):
""" Computes the pairwise L2 distance between batches of feature vector sets
res[..., i, j] = ||x1[..., i, :] - x2[..., j, :]||
since
||a - b||^2 = ||a||^2 + ||b||^2 - 2*a^T*b
Adapted to batch case from:
jacobrgardner
https://github.com/pytorch/pytorch/issues/15253#issuecomment-491467128
"""
x1_norm2 = x1.pow(2).sum(dim=-1, keepdim=True)
x2_norm2 = x2.pow(2).sum(dim=-1, keepdim=True)
res = torch.baddbmm(
x2_norm2.transpose(-2, -1),
x1,
x2.transpose(-2, -1),
alpha=-2
).add_(x1_norm2).clamp_min_(1e-30).sqrt_()
return res
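# Usage sketch (illustrative addition, not part of the original snippet):
# for x1 of shape (B, N, D) and x2 of shape (B, M, D) the result has shape
# (B, N, M); the concrete sizes below are arbitrary assumptions.
x1 = torch.randn(4, 10, 16)
x2 = torch.randn(4, 12, 16)
dists = pairwiseL2Dist(x1, x2)
assert dists.shape == (4, 10, 12)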
|
f87a9e07e09ea247d4747b161a8af28ac2648473
| 395,985 |
import markdown
def markdownify(value):
"""Converts Markdown string to HTML
"""
html = markdown.markdown(value)
return html
|
dd130325cfbc6f04deb539b3047817e41f3d17a8
| 124,666 |
def strep(s):
"""Format a string for display as a line context."""
return (s
.replace(' ', '␣')
.replace('\n', '⏎ ')
.replace('\t', '⇥')
)
|
6be32c63f2d88615552483fd7c8bd1dfe1a8e36d
| 287,705 |
from collections import Counter
def create_vocabulary(train_set, vocab_size):
"""Creates vocabulary out of the training set tokens.
Arguments:
train_set: CoNLL2003 train_set from HuggingFace
vocab_size (int): Maximum number of tokens in the vocab
Returns:
vocab (dict): Vocabulary of all tokens in the training set
key: token
value: ordinal number of token in the vocabulary
"""
all_tokens = []
for token_subseq in train_set["tokens"]:
all_tokens += token_subseq
# Perform some pre-processing of the tokens
all_tokens_lower = list(map(str.lower, all_tokens))
all_tokens_strip = list(map(str.strip, all_tokens_lower))
    # Count the occurrence of every word
counter = Counter(all_tokens_strip)
    # Take the vocab_size - 2 most frequent tokens since we reserve entries
    # for padding and for out-of-vocabulary words
most_frequent = counter.most_common(vocab_size - 2)
# Initialize the vocabulary
vocab = {
"UNK": 0,
"PADD": 1
}
ind = len(vocab)
# Populate the vocab
for token, _ in most_frequent:
vocab[token] = ind
ind += 1
print("\nCreated vocabulary of {} tokens.".format(ind))
return vocab
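# Usage sketch (illustrative addition, not part of the original snippet):
# any mapping with a "tokens" column of token lists works, not only the
# HuggingFace CoNLL2003 split; the toy data below is an assumption.
toy_train_set = {"tokens": [["EU", "rejects", "German", "call"], ["German", "call"]]}
toy_vocab = create_vocabulary(toy_train_set, vocab_size=4)
assert toy_vocab == {"UNK": 0, "PADD": 1, "german": 2, "call": 3}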
|
d7d87b703e8fe9a7682d75797db09f94efc56da5
| 264,177 |
def parse_fastq(fh):
""" Parse reads from a FASTQ filehandle. For each read, we
return a name, nucleotide-string, quality-string triple. """
reads = []
while True:
first_line = fh.readline()
if len(first_line) == 0:
break # end of file
name = first_line[1:].rstrip()
seq = fh.readline().rstrip()
fh.readline() # ignore line starting with +
qual = fh.readline().rstrip()
reads.append((name, seq, qual))
return reads
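# Usage sketch (illustrative addition, not part of the original snippet):
# any file-like object works, e.g. an in-memory StringIO.
import io
fastq_text = "@read1\nACGT\n+\nIIII\n"
assert parse_fastq(io.StringIO(fastq_text)) == [("read1", "ACGT", "IIII")]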
|
d33d3efebdd1c5f61e25397328c6b0412f1911dd
| 1,996 |
def minimum_grade(parsed_list, passing_grade, overall_min=True):
"""
This function calculates the minimum grade from the given grades.
:param parsed_list: the parsed list of the grades
:param passing_grade: the grade passing threshold
:param overall_min: True, when calculating the minimum of all the grades,
False, when calculating the minimum of the passing grades
:return: the minimum element of the given as input parsed list
"""
if overall_min is False:
passing_grades_list = (
[item for item in parsed_list if item >= passing_grade]
)
if len(passing_grades_list) > 0:
min_value = min(passing_grades_list)
else:
min_value = "-"
else:
min_value = min(parsed_list)
return min_value
|
2ffbecd3177964ae89be8bedf8ee7a0884affea0
| 634,844 |
from typing import Union
def expand_abnormality_questions(
answer: str, matched_word: str, modality: Union[None, str]
):
"""Create new questions for abnormality with given information.
Args:
answer: Original answer to the question
matched_word: The keyword which labeled the original question as abnormality
modality: name of the scan type present in original question.
Returns:
dict of generated questions including the original question
"""
binary, categorical = {}, {}
abnormal_keywords = [
"abnormal",
"abnormality",
"abnormalities",
"alarming",
"wrong",
]
if modality == "ct":
modality = "ct scan"
if modality == "pet":
modality = "pet scan"
    if modality is not None:
if answer in ["yes", "no"]:
if (answer == "no") and (matched_word not in abnormal_keywords):
flag = "abnormal"
elif (answer == "no") and (matched_word in abnormal_keywords):
flag = "normal"
elif (answer == "yes") and (matched_word not in abnormal_keywords):
flag = "normal"
elif (answer == "yes") and (matched_word in abnormal_keywords):
flag = "abnormal"
else:
raise ValueError(
"[Error @ `expand_abnormality_questions`] Something wrong with the conditions"
)
else:
flag = "abnormal"
categorical[f"what is most alarming about this {modality}?"] = answer
categorical[f"what is most alarming about the {modality}?"] = answer
categorical[f"what is abnormal in this {modality}?"] = answer
categorical[f"what is abnormal in the {modality}?"] = answer
categorical[f"what abnormality is seen in this {modality}?"] = answer
categorical[f"what abnormality is seen in the {modality}?"] = answer
categorical[f"what is the primary abnormality in this {modality}?"] = answer
categorical[f"what is the primary abnormality in the {modality}?"] = answer
binary[f"is this a normal {modality}?"] = "yes" if flag == "normal" else "no"
binary[f"is the {modality} normal?"] = "yes" if flag == "normal" else "no"
binary[f"is this {modality} normal?"] = "yes" if flag == "normal" else "no"
binary[f"does this {modality} look normal?"] = (
"yes" if flag == "normal" else "no"
)
binary[f"are there abnormalities in this {modality}?"] = (
"no" if flag == "normal" else "yes"
)
binary[f"is there an abnormality in the {modality}?"] = (
"no" if flag == "normal" else "yes"
)
binary[f"is there evidence of any abnormalities?"] = (
"no" if flag == "normal" else "yes"
)
binary[f"is there something wrong in the {modality}?"] = (
"no" if flag == "normal" else "yes"
)
else:
if answer in ["yes", "no"]:
if (answer == "no") and (matched_word not in abnormal_keywords):
flag = "abnormal"
elif (answer == "no") and (matched_word in abnormal_keywords):
flag = "normal"
elif (answer == "yes") and (matched_word not in abnormal_keywords):
flag = "normal"
elif (answer == "yes") and (matched_word in abnormal_keywords):
flag = "abnormal"
else:
raise ValueError(
"[Error @ `expand_abnormality_questions`] Something wrong with the conditions"
)
else:
flag = "abnormal"
categorical[f"what is most alarming about this image?"] = answer
categorical[f"what is most alarming about the image?"] = answer
categorical[f"what is abnormal in this image?"] = answer
categorical[f"what is abnormal in the image?"] = answer
categorical[f"what abnormality is seen in this image?"] = answer
categorical[f"what abnormality is seen in the image?"] = answer
categorical[f"what is the primary abnormality in this image?"] = answer
categorical[f"what is the primary abnormality in the image?"] = answer
binary[f"is this a normal image?"] = "yes" if flag == "normal" else "no"
binary[f"is the image normal?"] = "yes" if flag == "normal" else "no"
binary[f"is this image normal?"] = "yes" if flag == "normal" else "no"
binary[f"does this image look normal?"] = "yes" if flag == "normal" else "no"
binary[f"are there abnormalities in this image?"] = (
"no" if flag == "normal" else "yes"
)
binary[f"is there an abnormality in the image?"] = (
"no" if flag == "normal" else "yes"
)
binary[f"is there evidence of any abnormalities?"] = (
"no" if flag == "normal" else "yes"
)
binary[f"is there something wrong in the image?"] = (
"no" if flag == "normal" else "yes"
)
return {"binary": binary, "categorical": categorical}
|
34b58b9d87fcaab1df052fcf9069f6432d0fb006
| 400,986 |
def argv_to_module_arg_lists(args):
"""Converts module ldflags from argv format to per-module lists.
Flags are passed to us in the following format:
['global flag', '--module', 'flag1', 'flag2', '--module', 'flag 3']
These should be returned as a list for the global flags and a list of
per-module lists, i.e.:
    ['global flag'], [['flag1', 'flag2'], ['flag 3']]
"""
modules = [[]]
for arg in args:
if arg == '--module':
modules.append([])
else:
modules[-1].append(arg)
return modules[0], modules[1:]
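# Usage sketch (illustrative addition, not part of the original snippet):
global_flags, per_module = argv_to_module_arg_lists(
    ['global flag', '--module', 'flag1', 'flag2', '--module', 'flag 3'])
assert global_flags == ['global flag']
assert per_module == [['flag1', 'flag2'], ['flag 3']]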
|
847597d09e56af4221792a9a176bddfea334e622
| 17,132 |
import ast
def _convert_to_expression(node):
""" convert ast node to ast.Expression if possible, None if not """
node = ast.fix_missing_locations(node)
if isinstance(node, ast.Module):
if len(node.body) != 1:
return None
if isinstance(node.body[0], ast.Expr):
expr = node.body[0]
# an expression that was compiled with mode='exec'
return ast.Expression(lineno=0, col_offset=0, body=expr.value)
if isinstance(node, ast.Expression):
return node
if isinstance(node, ast.expr):
return ast.Expression(lineno=0, col_offset=0, body=node)
if isinstance(node, ast.Expr):
return ast.Expression(lineno=0, col_offset=0, body=node.value)
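# Usage sketch (illustrative addition, not part of the original snippet):
# a module produced by ast.parse with a single expression statement can be
# converted and then evaluated in 'eval' mode.
expression = _convert_to_expression(ast.parse("1 + 2"))
assert eval(compile(expression, "<ast>", "eval")) == 3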
|
a24c8c304b21d2a4f90e7946267620a853db1a4b
| 647,305 |
def read_lines(file_path):
""" Read lines from the file and return then as a list. """
lines = []
with open(file_path, 'r', encoding='utf8') as asm_file:
lines = asm_file.readlines()
return lines
|
a6f97f60edefac2207369624c4421829b8637fbe
| 677,239 |
def _ev(x, pdist):
"""
expectation value via x, p(x)
"""
ev = 0
for idx, p in enumerate(pdist):
        ev += x[idx] * p
return ev
|
b126c712d81f124e3140f84d44cf75c0ad46034e
| 150,855 |
def get_port_information(vips, matching_no):
"""
Get information on what sampling ports are used in a match, and check that
everything is set up correctly.
Return the number of the first port, and a boolean indicating if the second port is used.
"""
sample_port_1 = int(vips.getValue(f'Template matching {matching_no} - first sampling port'))
use_port_2 = vips.getValue(f'Template matching {matching_no} - match on two ports')
sample_port_2 = sample_port_1 + 1 if use_port_2 else 0
if sample_port_2 > 8:
raise ValueError(f'Cannot perform template matching on two ports if the first is port 8!')
# Matching can only happen on ports with sampling activated
if sample_port_1 not in vips.sampling_ports or sample_port_2 not in [0, *vips.sampling_ports]:
raise ValueError(f'Template matching {matching_no}: '
f'Sampling needs to be enabled on the ports set as sampling ports!')
return sample_port_1, use_port_2
|
b338c9413f0e9056c7c6cc7f3048b0318b59c8d9
| 565,376 |
def check_list(in_lst, dtype=str):
"""Helper function to ensure input is a list of correct data type."""
assert isinstance(in_lst, (list, dtype, tuple))
if isinstance(in_lst, list):
for itm in in_lst:
assert isinstance(itm, dtype)
else:
in_lst = [in_lst]
return in_lst
|
597c7cbde33dd0dd53ede958338b8bd71ca14458
| 123,930 |
def manifest_to_file_list(manifest_fn):
"""
Open a manifest file and read it into a list.
Entries in the list are relative, i.e. no leading
slash.
manifest_fn -- the manifest file to read
"""
image_manifest_list = []
with open(manifest_fn) as image:
image_manifest_list = [x[1:] for x in image.read().splitlines()]
return image_manifest_list
|
982f02e0b00fad20af8d50d44673d65d9bba5a37
| 7,438 |
def fp(v, g, x):
"""FP-neuron function, as described in Equation 7 in the Paper.
Parameters
----------
v : np.array
filter vector 1
g : np.array
filter vector 2
x : np.array
position
Returns
-------
float
function value
"""
sp_vx = (v * x).sum(-1)
sp_gx = (g * x).sum(-1)
return sp_vx * sp_gx
|
d24d445ab8238e4c78cfec9347bd66065245f5b4
| 178,861 |
def are_all_letter_in_grid(grid, text_box):
"""
Args:
grid (Grid): either the main grid or the temporary grid
text_box (TextBox): the text box where the player/user types the words/characters
Returns:
boolean: True if all letters are in the grid. Otherwise, False
"""
for char in text_box.text:
found = False
for row in grid.cubes:
if found: break
for cube in row:
if char == cube.text_str:
found = True
break
if not found: return False
return True
|
82559cb858aee8c34e93d606529d57f712c172bb
| 420,390 |
def integers_from_file(filename):
""" Return a list of the integers in FILENAME, assuming one per line. """
integers = []
with open(filename,"r") as f:
for line in f:
integers.append(int(line))
return integers
|
885406ff59a760bcfc7c2b800a34044da3855150
| 256,122 |
def dunder_get(_dict, key):
"""Returns value for a specified dunderkey
A "dunderkey" is just a fieldname that may or may not contain
    double underscores (dunderscores!) for referencing nested keys in
    a dict. eg::
    >>> data = {'a': {'b': 1}}
    >>> dunder_get(data, 'a__b')
    1
    key 'b' can be referenced as 'a__b'
:param _dict : (dict)
:param key : (str) that represents a first level or nested key in the dict
:rtype : (mixed) value corresponding to the key
"""
parts = key.split('__', 1)
key = parts[0]
try:
result = _dict[key]
except KeyError:
return None
except TypeError:
try:
result = getattr(_dict, key)
except AttributeError:
return None
return result if len(parts) == 1 else dunder_get(result, parts[1])
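# Usage sketch (illustrative addition, not part of the original snippet):
data = {'a': {'b': 1}}
assert dunder_get(data, 'a__b') == 1
assert dunder_get(data, 'a__c') is None  # missing keys resolve to None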
|
3cfdccd5cf78c3bcbd7942e836583fa68ef656b7
| 379,072 |
import calendar
def days_in_year(year: int) -> int:
"""Returns the number of days in the year
Args:
year (int): The year.
Returns:
int: The number of days in the year.
"""
return 366 if calendar.isleap(year) else 365
|
b8a03d7bca79c3513ac4d278028b6c65b4fc3329
| 651,255 |
from typing import Dict
def values_from_bucket(bucket: Dict[str, float]) -> set:
"""Get set of price formatted values specified by min, max and interval.
Args:
bucket: dict containing min, max and interval values
Returns:
Formatted set of values from min to max by interval
"""
rng = [int(100 * bucket[_k]) for _k in ('min', 'max', 'interval')]
rng[1] += rng[2] # make stop inclusive
return {_x / 100 for _x in range(*rng)}
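# Usage sketch (illustrative addition, not part of the original snippet):
bucket = {'min': 1.0, 'max': 2.0, 'interval': 0.5}
assert sorted(values_from_bucket(bucket)) == [1.0, 1.5, 2.0]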
|
848af64d3396cc77c3fa109821d72eb80961eec0
| 15,310 |
def formatfloat(x):
"""Convert x to a %.3f-format string."""
ret = "%.3f" % float(x)
if float(x) >= 0.0:
return f" {ret}"
return ret
|
a70e9550d09f44bff44d22987c35be2233e4af56
| 612,502 |
def isHappy(n):
"""
Is happy takes in a number and returns True if it is a happy number, False otherwise. A happy number
is a number defined by the following process: Starting with any positive integer, replace the number by the
sum of the squares of its digits, and repeat the process until the number equals 1 (where it will stay),
or it loops endlessly in a cycle which does not include 1.
"""
seen = {n: 1}
while True:
new_sq = sum([int(d) ** 2 for d in str(n)])
if n == 1:
return True
if new_sq in seen:
return False
else:
n = new_sq
seen[n] = 1
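# Usage sketch (illustrative addition, not part of the original snippet):
assert isHappy(19) is True   # 19 -> 82 -> 68 -> 100 -> 1
assert isHappy(4) is False   # falls into the 4 -> 16 -> 37 -> ... -> 4 cycle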
|
8c313a114d6c381d1a6ea05c520f75a8be368f8b
| 491,294 |
def isnpdot(val):
"""
check if the entry starts with 'np.'
Parameters
----------
val: str
a string entry
Returns
-------
bool
True if the entry is a string and the first three characters are 'np.',
False otherwise
"""
if isinstance(val, str):
if val[0:3] == 'np.':
return True
else:
return False
else:
msg = 'entry must be string'
raise ValueError(msg)
|
c0ad9f9f7847d9510aa38837d34e230388f03abc
| 233,741 |
import json
def get_best_hyperparams(hyperparams_dict, fit_params_dict, best, file_name=None):
"""
Helper function to extract the numerical values of best hyperparameters from hyperopt into a more easily usable format.
:param hyperparams_dict: Dictionary of hyperparameter values
:param fit_params_dict: Dictionary of fit parameter values
:param best: The best hyperparameters as returned by hyperopt
:param file_name: Directory plus name of the file you want to save the best parameters to. File name must end in .json as this is the expected output format
:return: Parameter dictionary. Contains both model hyperparameters and epochs parameter for model fit.
"""
# Extract hyperparameters for the model
best_params = {}
for key, val in best.items():
if key in hyperparams_dict:
input_ = hyperparams_dict[key]
if input_[0] == 'choice':
best_params[key] = input_[1][val]
else:
best_params[key] = val
# The only other parameter I need to get out is the number of epochs to train for.
# I'll put it all into the best_params dictionary, but I'll need to pop it out before defining the model
best_params['num_epochs'] = fit_params_dict['num_epochs'][1][best['num_epochs']]
    if file_name is not None:
        json_out = json.dumps(best_params)
        with open(file_name, "w") as f:
            f.write(json_out)
return best_params
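# Usage sketch (illustrative addition, not part of the original snippet):
# the ('choice', [options]) / ('uniform', lo, hi) tuple layout below is an
# assumed convention; hyperopt's `best` holds indices for 'choice' parameters
# and raw values otherwise.
hyperparams_dict = {'lr': ('uniform', 0.001, 0.1), 'units': ('choice', [32, 64, 128])}
fit_params_dict = {'num_epochs': ('choice', [10, 20, 50])}
best = {'lr': 0.01, 'units': 2, 'num_epochs': 1}
assert get_best_hyperparams(hyperparams_dict, fit_params_dict, best) == {
    'lr': 0.01, 'units': 128, 'num_epochs': 20}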
|
1bac0d463ba5cf5a69912673d1235794d9a448ff
| 26,226 |
import hashlib
def sha256_encode(text):
"""
Returns the digest of SHA-256 of the text
"""
_hash = hashlib.sha256
if type(text) is str:
return _hash(text.encode('utf8')).digest()
elif type(text) is bytes:
return _hash(text).digest()
elif not text:
# Generally for calls where the payload is empty. Eg: get calls
# Fix for AttributeError: 'NoneType' object has no attribute 'encode'
return _hash("".encode('utf8')).digest()
else:
return _hash(str(text).encode('utf-8')).digest()
|
88a39fb82beefa4f89abda7f995347ca16573d28
| 686,371 |
def redcap_event_to_vbr_protocol(event_name: str) -> int:
"""Map redcap event name to VBR protocol."""
# NOTE - this must be manually synced with src/scripts/data/protocol.csv
events = {
"informed_consent_arm_1": 2,
"baseline_visit_arm_1": 3,
"6wks_postop_arm_1": 30,
"3mo_postop_arm_1": 31,
"event_1_arm_1": 50,
}
try:
return events[event_name]
except KeyError:
raise ValueError("Unknown redcap event name: %s", event_name)
|
8c1198c7037d29123b86ac927fed7c931022a99b
| 276,910 |
def followers_count(user):
"""
Returns user followers count
:param user: An User instance
"""
if not user or user.is_anonymous():
return 0
return user.followers()
|
8c386bc2199661b2a771371a0e7c1833569a71c9
| 693,383 |
def divisors_list(n):
""" Returns the list of divisors of n """
return [d for d in range(1, n // 2 + 1) if n % d == 0] + [n]
|
b440cac93ce4ebc787d3dbc3760645a2a5d0d960
| 269,279 |
def try_clone(repo, path):
"""
Clone a repository and wait for it to finish
:param repo: repository
:param path:
:return: returncode
"""
return repo.aclone(path).wait()
|
d9cf7388fc9c2eae29cdb87a9f86264a1c32e284
| 149,618 |
import struct
def readu16(file):
""" Reads two bytes bytes from the file, treating them as a 16 bit unsigned integer."""
data = file.read(2)
return struct.unpack("H", data)[0]
|
19b3b476dbbfea96c5259c99ac8dee1191d6bc9d
| 464,900 |
import json
def load_json(input_file):
"""
    Load a JSON file into a Python object
    :param input_file: path to the JSON file
    :type input_file: str
    :return: loaded JSON object
    :rtype: dict or list
"""
with open(input_file, 'r') as f:
output_json = json.load(f)
return output_json
|
48a47abfafb99e3cf20935f1a2fc1419fd05d765
| 156,357 |
def diff(new, old):
"""
    Compute the difference in items of two revisioned collections. If only
    `new` is specified, it is assumed it is not an update and the whole `new`
    set is treated as updated. If both are set, the updated and removed items
    are computed against `old`.
    :param set new: Set of new objects
    :param set old: Set of old objects
    :return: A tuple consisting of `(updated, removed, is_update)`.
:rtype: tuple
"""
if old is not None:
is_update = True
removed = set(new.removed(old))
updated = set(new.updated(old))
else:
is_update = False
updated = new
removed = set()
return updated, removed, is_update
|
49aa2a4eb770ba812f5bf81131d7bad6fc01abde
| 211,765 |
import functools
import warnings
def deprecated(since=None, until=None, reason=None):
"""
Warns when a deprecated callable is used.
Examples:
```python
from flashback import deprecated
    @deprecated()
def func():
pass
func()
#=> func is deprecated.
@deprecated(since="v2", until="v3", reason="it has moved")
def func():
pass
func()
#=> func is deprecated since v2 and will be removed in v3 because it has moved.
```
Params:
since (str): the date/version the callable was deprecated
until (str): the date/version the callable will be removed
reason (str): the reason of the deprecation
Returns:
Callable: a wrapper used to decorate a callable
"""
def wrapper(func):
message = f"{func.__name__} is deprecated"
if since:
message += f" since {since}"
if until:
message += f" and will be removed in {until}"
if reason:
message += f" because {reason}."
else:
message += "."
doc = func.__doc__ or ""
if len(doc) > 0:
doc += "\n\n"
doc += f".. deprecated:: {message}"
func.__doc__ = doc
@functools.wraps(func)
def inner(*args, **kwargs):
warnings.warn(message, category=DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
return inner
return wrapper
|
82ceee1271ba6daa2a5217a0b914819eacb0aa05
| 220,825 |
import json
def mock_requests_get(*args, **kwargs):
""" Mock requests.get() function for testing meme parsing functions.
"""
class MockResponse(object):
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
url = args[0]
target_url = "http://version1.api.memegenerator.net/" \
"Instances_Select_ByPopular"
with open('./test/memegenerator_sample.json') as data_file:
mock_json_1 = json.load(data_file)
if url == target_url:
return MockResponse(mock_json_1, 200)
else:
return MockResponse('This is a mocking function.', 200)
|
fb5f03ba2bb30093b1565e2419f354c76d4b3ca8
| 210,269 |
def cast_byte_timeout(arg):
"""
    Convert a time value in seconds to a timeout code.
    >>> cast_byte_timeout(0.0)
    0
    >>> cast_byte_timeout(0.15)
    150
    >>> cast_byte_timeout(0.30)
    151
    >>> cast_byte_timeout(15.0)
    249
    >>> cast_byte_timeout(30.0)
    250
    >>> cast_byte_timeout(105.0)
    255
    >>> cast_byte_timeout(0.16) # doctest: +NORMALIZE_WHITESPACE
    Traceback (most recent call last):
    ...
    ValueError: Invalid timeout value - 0.16 s.
    The value must fall into one of the ranges: 0-0.15 s., 0.30-15 s., 30-105 s.
    >>> cast_byte_timeout(0.31) # doctest: +NORMALIZE_WHITESPACE
    Traceback (most recent call last):
    ...
    ValueError: Invalid timeout value - 0.31 s.
    A value in the range 0.30-15 s. must be a multiple of 0.15 s.
    >>> cast_byte_timeout(31.0) # doctest: +NORMALIZE_WHITESPACE
    Traceback (most recent call last):
    ...
    ValueError: Invalid timeout value - 31.0 s.
    A value in the range 30-105 s. must be a multiple of 15 s.
    :type arg: float
    :param arg: time in seconds
    :rtype: int
    :return: timeout code
    """
    if 0.0 <= arg <= 0.15:
        return int(arg * 1000)
    elif 0.30 <= arg <= 15:
        div = arg / 0.15
        if not div.is_integer():
            raise ValueError(
                'Invalid timeout value - {} s. '
                'A value in the range 0.30-15 s. must be a multiple of 0.15 s.'.format(arg)
            )
        return int(div) + 149
    elif 30 <= arg <= 105:
        div = arg / 15
        if not div.is_integer():
            raise ValueError(
                'Invalid timeout value - {} s. '
                'A value in the range 30-105 s. must be a multiple of 15 s.'.format(arg)
            )
        return int(div) + 248
    else:
        raise ValueError(
            'Invalid timeout value - {} s. '
            'The value must fall into one of the ranges: '
            '0-0.15 s., 0.30-15 s., 30-105 s.'.format(arg)
        )
|
7d73215777318414bd510721ef288b8b94b473e2
| 364,979 |
def player_last_awarded_submission(profile):
"""Returns the last awarded submission date for the profile."""
entry = profile.scoreboardentry_set.order_by("-last_awarded_submission")
if entry:
return entry[0].last_awarded_submission
else:
return None
|
15e3ec8b0b0fc218c89a3dbe0484b30191dd399f
| 640,276 |
def _hemi_direction(hemisphere):
"""Return `1` for 'north' and `-1` for 'south'"""
return {'north': 1, 'south': -1}[hemisphere]
|
8305bafc5b3d4fb6039d28a2c7aa9fbf3d62ecb7
| 581,796 |
import re
def nonschema_title_formatter(title):
"""
Formatting a path, that is absent in schema, to human-readable form
:param title: str
:return: formatted title
>>> nonschema_title_formatter('legalEntityTypeDetail')
'Legal Entity Type Detail'
>>> nonschema_title_formatter('fuenteFinanciamiento')
'Fuente Financiamiento'
>>> nonschema_title_formatter('Óóó-Ñññ_Úúú')
'Óóó Ñññ Úúú'
"""
title = title.replace("_", " ").replace("-", " ")
title = re.sub(r"(?<![A-Z])(?<!^)([A-Z])", r" \1", title)
title = title.replace(" ", " ").replace("/", ": ")
if title.startswith(": "):
title = title[2:]
title = title.title()
return title
|
b04d359280f4a8593a6f48081b29b0acd9e21641
| 463,325 |
def cloner_tableau(arr: list) -> list:
"""
    Description:
        Clone an array into a new array.
        The unpacking operator ( * ) spreads an iterable into a new one.
    Parameters:
        arr: {list} -- Array to clone.
    Returns:
        {list} -- the cloned array.
    Example:
        >>> arr = [0, 0, 0]
        >>> cloner_tableau(arr)
        [0, 0, 0, [0, 0, 0]]
"""
return [*arr, arr]
|
eae9b4c6f127827f377050f3328f2aeacc38bf05
| 291,876 |
import uuid
import base64
def cspvalues(ui):
"""Obtain the Content-Security-Policy header and nonce value.
Returns a 2-tuple of the CSP header value and the nonce value.
First value is ``None`` if CSP isn't enabled. Second value is ``None``
if CSP isn't enabled or if the CSP header doesn't need a nonce.
"""
# Without demandimport, "import uuid" could have an immediate side-effect
# running "ldconfig" on Linux trying to find libuuid.
# With Python <= 2.7.12, that "ldconfig" is run via a shell and the shell
# may pollute the terminal with:
#
# shell-init: error retrieving current directory: getcwd: cannot access
# parent directories: No such file or directory
#
# Python >= 2.7.13 has fixed it by running "ldconfig" directly without a
# shell (hg changeset a09ae70f3489).
#
# Moved "import uuid" from here so it's executed after we know we have
# a sane cwd (i.e. after dispatch.py cwd check).
#
# We can move it back once we no longer need Python <= 2.7.12 support.
    # Don't allow an untrusted CSP setting since it could disable protections
    # from a trusted/global source.
csp = ui.config(b'web', b'csp', untrusted=False)
nonce = None
if csp and b'%nonce%' in csp:
nonce = base64.urlsafe_b64encode(uuid.uuid4().bytes).rstrip(b'=')
csp = csp.replace(b'%nonce%', nonce)
return csp, nonce
|
cf967415321b31d6f9c802a794c5177942173f0c
| 210,715 |
import re
def remove_regex_pattern_from_keys(d: dict, pattern_to_remove: str, **regex_compile_kwargs) -> dict:
"""Remove `pattern_to_remove` from all keys in `d`.
Return a new dict with the same values as `d`, but where the key names
have had `pattern_to_remove` removed.
"""
new_dict = {}
regex = re.compile(pattern_to_remove, **regex_compile_kwargs)
for old_key, value in d.items():
new_key = regex.sub(string=old_key, repl="")
new_dict[new_key] = value
return new_dict
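# Usage sketch (illustrative addition, not part of the original snippet):
d = {"raw_price": 1, "raw_qty": 2}
assert remove_regex_pattern_from_keys(d, r"^raw_") == {"price": 1, "qty": 2}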
|
8ae1918dbbc4f6054e95606a2019d7409749c71f
| 387,931 |
def _stripBold(s):
"""Returns the string s, with bold removed."""
return s.replace('\x02', '')
|
da94e26846091ac3c4a501b86cd00a18bf06b1c1
| 689,640 |
import itertools
def get_all_combos(list_of_lists):
"""
Get all the position-specific combos of a list of lists.
This is taken directly from Stack Overflow:
http://stackoverflow.com/questions/798854/all-combinations-of-a-list-of-lists
:param list_of_lists: A list of lists we would like combos of.
:rtype: list[list]
"""
return list(itertools.product(*list_of_lists))
|
aee5c69aa2e86b6362fa885b7575ebf36c4c81ba
| 162,751 |
def convertToIndexList(vertList):
""" convert components given to a list of indices
:param vertList: list of components
:type vertList: list
:return: list of integers representing the components values
:rtype: list
"""
indices = []
for i in vertList:
index = int(i[i.index("[") + 1: -1])
indices.append(index)
return indices
|
ad45ce456bae4c4caba9a60d101a4f853121a08f
| 414,782 |
def sign(x):
"""
Objective: take the sign of x
e.g.,
    >>> sign(-1.2)
    -1
    >>> sign(5)
    1
"""
if x>0: return 1
elif x<0: return -1
else: return 0
|
795b8813b95321168de5b7fc5d84b5b474325062
| 586,096 |
def partition(seq, fn):
"""Partitions one sequence into two sequences, by testing whether each element
satisfies fn or not. """
pos, neg = [], []
for elt in seq:
if fn(elt):
pos.append(elt)
else:
neg.append(elt)
return pos, neg
|
45964faef26715d0edcd36b7dd102e642e9e7154
| 239,893 |
def with_precision(val, precision):
"""
Round `val` to the closest multiple of `precision`.
"""
return float(round(val / precision) * precision)
|
b83c1373147368ee35cf27db1926449c8fad37a2
| 363,766 |
def extract_code(lines, node, lstrip="", ljoin="\n", strip=""):
"""Get corresponding text in the code
Arguments:
lines -- code splitted by linebreak
node -- PyPosAST enhanced node
Keyword Arguments:
lstrip -- During extraction, strip lines with this arg (default="")
ljoin -- During extraction, join lines with this arg (default="\n")
strip -- After extraction, strip all code with this arg (default="")
"""
first_line, first_col = node.first_line - 1, node.first_col
last_line, last_col = node.last_line - 1, node.last_col
if first_line == last_line:
return lines[first_line][first_col:last_col].strip(strip)
result = []
# Add first line
result.append(lines[first_line][first_col:].strip(lstrip))
# Add middle lines
if first_line + 1 != last_line:
for line in range(first_line + 1, last_line):
result.append(lines[line].strip(lstrip))
# Add last line
result.append(lines[last_line][:last_col].strip(lstrip))
return ljoin.join(result).strip(strip)
|
2a468e136f822db96963247ea86877b1e3c872f9
| 165,890 |
def is_directory_automatically_created(folder: str):
"""
Verifies the name of the directory -> if it contains a month it returns True, otherwise False.
"""
months = [
"(01)Janvier",
"(02)Fevrier",
"(03)Mars",
"(04)Avril",
"(05)Mai",
"(06)Juin",
"(07)Juillet",
"(08)Aout",
"(09)Septembre",
"(10)Octobre",
"(11)Novembre",
"(12)Decembre",
]
return any(month in folder for month in months)
|
7e78466a662d7bcd899d1a15994bf03b2cf8c3da
| 101,869 |
def string_char(char):
"""Turn the character into one that can be part of a filename"""
return '_' if char in [' ', '~', '(', ')', '/', '\\'] else char
|
e318811d68858ff090447fa0e0e0aab9133461c8
| 60,823 |
def calculate_degrees_between_angle(norm_1, norm_2, angle):
"""
Takes two normalized values in a range of [0, 1] and calculates a proportion inside an angle.
:param norm_1: First normalized value.
:param norm_2: Second normalized value.
:param angle: Angle in which to calculate the proportion.
:return: Calculated angle.
"""
norm_total = norm_1 + norm_2
min_norm = min([norm_1, norm_2])
i = int(min_norm == norm_2)
x = angle * min_norm / norm_total
if i == 0:
return angle - x
else:
return x
|
cefdb4f09d24cf8db3274f07e04a63dba6555864
| 381,601 |
from typing import Dict
from typing import Any
from typing import Set
def scalar_training_metrics_names(exp: Dict[str, Any]) -> Set[str]:
"""
Given an experiment history, return the names of training metrics
that are associated with scalar, numeric values.
This function assumes that all batches in an experiment return
consistent training metric names and types. Therefore, the first
non-null batch metrics dictionary is used to extract names.
"""
for trial in exp["trials"]:
for step in trial["steps"]:
metrics = step.get("metrics")
if not metrics:
continue
return set(metrics.get("avg_metrics", {}).keys())
return set()
|
16290499e6f100fa7b81e4858f85accc60562922
| 377,578 |
import uuid
def create_id() -> str:
"""
Create a unique ID, UUIDv4 style
Returns:
str:
"""
return str(uuid.uuid4())
|
ab9e629d032e23f581dd0bf0602599059669a461
| 501,312 |
def v_first_group_option(group_options):
"""
Returns the first value in a menu-select that uses option_groups. This is
useful when you create an options structure and you want the first item as
the placeholder or the default selected value.
Parameters
----------
group_options : list[dict]
        The list of group options
Returns
-------
str
The text of the first option
"""
return group_options[0]['options'][0]['text']['text']
|
99250919c116c2a10a9df736122d7bafbc8bcc89
| 282,702 |
def calc_text_width(widget, text):
"""Estimate the width that the given text would take within the widget."""
return (widget.fontMetrics().width(text) +
widget.fontMetrics().width('M') +
widget.contentsMargins().left() +
widget.contentsMargins().right())
|
977914f060d4029f1b6c7d0ffe947fde256b9829
| 531,554 |
def docs_append_to_section(docstring, section, add):
"""Append extra information to a specified section of a docstring.
Parameters
----------
docstring : str
Docstring to update.
section : str
Name of the section within the docstring to add to.
add : str
Text to append to specified section of the docstring.
Returns
-------
str
Updated docstring.
Notes
-----
This function assumes numpydoc documentation standard.
"""
return '\n\n'.join([split + add if section in split else split \
for split in docstring.split('\n\n')])
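# Usage sketch (illustrative addition, not part of the original snippet):
doc = "Summary.\n\nParameters\n----------\nx : int\n\nReturns\n-------\nint"
updated = docs_append_to_section(doc, "Parameters", "\ny : int")
assert "x : int\ny : int" in updated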
|
c4c58d809aaeff2a9f68c193894dae155fa7a9dd
| 662,006 |
import re
def parse_show_interface_mgmt(raw_result):
"""
Parse the 'show interface mgmt' command raw output.
:param str raw_result: vtysh raw result string.
:rtype: dict
:return: The parsed result of the show interface mgmt command in a \
dictionary of the form:
::
{
'address_mode': 'dhcp',
'ipv4': '20.1.1.2/30',
'default_gateway_ipv4': '20.1.1.1',
'ipv6': '2011::2/64',
'ipv6_link_local': 'fe80::4a0f:cfff:feaf:6358/64',
'default_gateway_ipv6': '2011::1',
'primary_nameserver': '232.54.54.54',
'secondary_nameserver': '232.54.54.44'
}
"""
show_re = (
r'\s*Address Mode\s*:\s*(?P<address_mode>\S+)\s*'
r'\s*IPv4 address/subnet-mask\s*:\s*(?P<ipv4>[0-9./]+)?\s*'
r'\s*Default gateway IPv4\s*:\s*(?P<default_gateway_ipv4>[0-9.]+)?\s*'
r'\s*IPv6 address/prefix\s*:\s*(?P<ipv6>[0-9a-f:/]+)?\s*'
r'\s*IPv6 link local address/prefix\s*:\s*'
r'(?P<ipv6_link_local>[0-9a-f:/]+)?\s*'
r'\s*Default gateway IPv6\s*:\s*'
r'(?P<default_gateway_ipv6>[0-9a-f:]+)?\s*'
r'\s*Primary Nameserver\s*:\s*(?P<primary_nameserver>[0-9.:a-f]+)?\s*'
r'\s*Secondary Nameserver\s*:\s*(?P<secondary_nameserver>[0-9.:a-f]+)?\s*' # noqa
)
re_result = re.search(show_re, raw_result)
assert re_result
result = re_result.groupdict()
return result
|
6944b872a6672a51237645bf58bbeaece534a756
| 586,601 |
import string
def idx_to_label(n: int) -> str:
"""Convert a number to a corresponding letter in the alphabet.
In case the number is higher than the number of letters in the english alphabet, then
a second character is appended.
    For instance:
    >>> idx_to_label(0)
    'a'
    >>> idx_to_label(25)
    'z'
    >>> idx_to_label(26)
    'aa'
This function was inspired after:
https://stackoverflow.com/questions/2267362/how-to-convert-an-integer-to-a-string-in-any-base
:param n: the input number
:return: the corresponding string
"""
    alphabet_size = 26
    digits = []
    n += 1
    while n:
        n, remainder = divmod(n - 1, alphabet_size)
        digits.append(remainder)
    digits.reverse()
    return ''.join([string.ascii_lowercase[i] for i in digits])
|
67e4bba437016dddb6d90f286e1d65f2bfec8caf
| 16,728 |
def unescape(text):
"""
Returns the given text with ampersands, quotes and angle brackets decoded
for use in URLs.
This function undoes what django.utils.html.escape() does
"""
return text.replace(''', "'").replace('"', '"').replace('>', '>').replace('<', '<' ).replace('&', '&')
|
a2de8102996f220996b34ae9228ece9cc8424e97
| 512,463 |
from typing import Union
def listify(data: Union[str, int, float, list, set, tuple, bool]) -> list:
"""
A convenience function that converts data into a list unless it's already one.
Bool is a subset of int.
:param data: Data to turn into a list
:return: list
"""
if isinstance(data, list):
return data
elif isinstance(data, (set, tuple)):
return list(data)
else:
return [data]
|
b45b15ffcbbd5359cf4d11e6d1c9e619220f308c
| 419,840 |
from typing import Iterable
from typing import Generator
def listify(x, *args):
"""Convert `x` to a `list`."""
if args:
x = (x,) + args
if x is None:
result = []
elif isinstance(x, list): result = x
elif isinstance(x, str) or hasattr(x, "__array__") or hasattr(x, "iloc"):
result = [x]
elif isinstance(x, (Iterable, Generator)):
result = list(x)
else:
result = [x]
return result
|
532be9b805c8e503d0873eeaa6df5cd635317216
| 358,070 |
def createTable(values):
"""Creates a table from a list of one dimensional dicts. Keys of the first dict will be used as headings.
    :param values: list with dicts that only have one layer of dicts (subdicts lead to ugly results);
        each dict needs to have the same keys or they won't be displayed or an error will be raised
:returns: HTML table content that can be put into a HTML table tag
"""
# table body
tableMain = """
<tr>
%(heading)s
</tr>
%(content)s
"""
# get all the keys from
keys = [x for x in values[0].keys()]
headings = ""
for key in keys:
headings += "<th>%s</th>" % key
content = ""
try:
for dataset in values:
content += "<tr>\n"
for key in keys:
if key == "bbox":
content += " <td class='bbox'>%s</td>\n" % dataset[key]
else:
content += " <td>%s</td>\n" % dataset[key]
content += "</tr>\n"
    except KeyError as e:
        raise KeyError("dicts must have the same keys") from e
table = tableMain % {"heading": headings, "content": content}
return table
|
71bc2265363fc93a54928683a1d106c13f9efe1e
| 106,132 |
def parse_index_specification(spec):
"""Parses an index specification used as arguments for the -f
and -F options."""
result = []
for part in spec.split(","):
part = part.strip()
if "-" in part:
lo, hi = list(map(int, part.split("-", 1)))
result.extend(range(lo, hi + 1))
else:
result.append(int(part))
return result
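# Usage sketch (illustrative addition, not part of the original snippet):
assert parse_index_specification("1-3, 5, 8-9") == [1, 2, 3, 5, 8, 9]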
|
e61bf0ac9f5313106569f0ad1fe364ed5e47f1c1
| 431,430 |
def find_metadata_item(metadata_items, key_name):
""" Finds a metadata entry by the key name. """
for item in metadata_items:
if item['key'] == key_name:
return item
return None
|
3f5f9a7eb36825fb399588c222f651332abe2a2f
| 222,893 |
import re
def is_valid_zcs_container_id(zcs_container_id):
"""
Validates Zadara Container Services (ZCS) container IDs, also known as the
ZCS container "name". A valid ZCS container name should look like:
container-00000001 - It should always start with "container-" and end with
8 hexadecimal characters in lower case.
:type zcs_container_id: str
:param zcs_container_id: The ZCS container name to be validated.
:rtype: bool
:return: True or False depending on whether zcs_container_id passes
validation.
"""
if zcs_container_id is None:
return False
match = re.match(r'^container-[0-9a-f]{8}$', zcs_container_id)
if not match:
return False
return True
|
9b8ef0e6281def4d09e787123e9582c9c4be9c84
| 44,675 |
def get_cell_corners(cell:list, width:float, height:float, rel_loc='mid') -> list:
"""
    Returns the corners of a cell as [y, x] pairs.
    :param cell: A tuple of lat/Y,lon/X-coordinates of the cell
    :param width: Width of the cell
    :param height: Height of the cell
    :param rel_loc: Which position of the cell does `cell` indicate: center ('mid') or upper-left ('ul')
    :returns: list of the four corners [[y2, x1], [y2, x2], [y1, x2], [y1, x1]]
"""
y, x = cell
if rel_loc == 'mid':
x1 = x - width/2
x2 = x + width/2
y1 = y - height/2
y2 = y + height/2
elif rel_loc == 'ul':
x1 = x
x2 = x + width
y1 = y - height
y2 = y
else:
raise AttributeError('Invalid rel_loc. Supported: mid, ul')
corners = [
[y2, x1],
[y2, x2],
[y1, x2],
[y1, x1],
]
return corners
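# Usage sketch (illustrative addition, not part of the original snippet):
# a 2x2 cell centred at (y=10, x=20); corners come back as [y, x] pairs
# starting at the upper-left and going clockwise (assuming y grows upward).
assert get_cell_corners([10.0, 20.0], width=2.0, height=2.0) == [
    [11.0, 19.0], [11.0, 21.0], [9.0, 21.0], [9.0, 19.0]]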
|
32e041dbabb4b968e799bdb65e995a33ae69d55e
| 120,465 |
def point_distance_wrapper(line:list):
"""
wrapper function to return a function that finds the distance between point and the line
:param line: line to be compared to (gradient, y_intercept)
:return: function that takes in a point and finds the distance from the input point to the line
"""
def point_distance(point:list):
# calculates horizontal distance of point from line
return abs(point[0] - (point[1] - line[1]) / line[0])
return point_distance
|
0f08668efcb3107fd190169151580cd7874d1bce
| 434,153 |
def rpc_get_methods(client, current=None, include_aliases=None):
"""Get list of supported RPC methods.
Args:
current: Get list of RPC methods only callable in the current state.
include_aliases: Include aliases in the list with RPC methods.
"""
params = {}
if current:
params['current'] = current
if include_aliases:
params['include_aliases'] = include_aliases
return client.call('rpc_get_methods', params)
|
7473720d7bfe354300176a107660ad4bdd4d4a88
| 664,891 |
def calc_sums(variable):
"""
Aggregates the instance dimension of the variable tensor object (see documentation in ../process_raw_data.py)
by summing up the values of the instances.
note: this function is called after the database check -> all values are available and valid.
:param variable: data structure as defined (int or float as value type)
:return: sums_list (one sum for each iteration; float), n (int)
"""
any_instance = next(iter(variable))
iterations = len(variable[any_instance]['values'])
n = len(variable)
sums = [0.0] * iterations
for instance_name, instance in variable.items():
for i, value in enumerate(instance['values']):
sums[i] += float(value)
return sums, n
|
b88a4352926b23dd27ea598d746619e8c9911ada
| 620,083 |
def request_too_large(e):
"""Generates a valid ELG "failure" response if the request is too large"""
return {'failure':{ 'errors': [
{ 'code':'elg.request.too.large', 'text':'Request size too large' }
] } }, 400
|
eac64f3ab4316a59e23c60a0d29146de953a615d
| 83,439 |
import re
def create_rules(fileObj):
"""Creates a dictionary of key - values pairs. Used by replaceChars
function
Args:
fileObj: A file object with lines in the following format (no spaces)
old_char=new_char
Returns:
A python dictionary with key - value pairs
"""
rules={}
with open (fileObj, "r") as rules_file:
for line in rules_file:
rules_list = line.split('=')
rules[rules_list[0]] = re.sub(r'\n$', '', rules_list[1])
#print(rules)
return rules
|
bd20cad1022bc9691326f6e33acd3421755afb81
| 147,821 |
def remove_suffix(string: str, suffix: str) -> str:
"""Remove suffix from the end of the string if present."""
if suffix and string.endswith(suffix):
return string[:-len(suffix)]
return string[:]
|
a3f4ff3c0299f91302b29f36a43c43f94dc53e4d
| 433,016 |
def demandNameItem(listDb,phrase2,mot):
"""
    Concatenate the database names of all items into a string to insert into the database.
    listDb: list with the database names of all items
    phrase2: string accumulating the database names of all items
    mot: database name of an item
    return a string with the database names of all items separated by ','
"""
for i in range(len(listDb)):
mot = str(listDb[i])
phrase2 += mot
if not i == len(listDb)-1:
phrase2 += ','
return phrase2
|
67af8c68f0ba7cd401067e07c5de1cd25de9e66c
| 590 |
def add_data_args(parser):
"""Train/valid/test data arguments."""
group = parser.add_argument_group('data', 'data configurations')
group.add_argument('--train_h5_path', type=str, help='Pathway to the train H5 file.')
group.add_argument('--val_h5_path', type=str, help='Pathway to the val H5 file.')
group.add_argument('--test_h5_path', type=str, help='Pathway to the test H5 file.')
group.add_argument('--policy_type',
type=str,
choices=['TreeGatePolicy', 'TBranT', 'BranT'],
help='Type of policy to use.')
return parser
|
90fb746fad6d80e319fce51c6be221ba8f79db37
| 530,234 |
from typing import List
def compute_ap(pos: List[str], amb: List[str], ranked_list: List[str]):
"""Compute average precision against a retrieved list of images. There are some bits that
could be improved in this, but is a line-to-line port of the original C++ benchmark code.
Args:
pos (List[str]): List of positive samples. This is normally a conjugation of
the good and ok samples in the ground truth data.
amb (List[str]): List of junk samples. This is normally the junk samples in
the ground truth data. Omitting this makes no difference in the AP.
ranked_list (List[str]): List of retrieved images from query to be evaluated.
Returns:
float: Average precision against ground truth - range from 0.0 (worst) to 1.0 (best).
"""
intersect_size, old_recall, ap = 0.0, 0.0, 0.0
old_precision, j = 1.0, 1.0
for e in ranked_list:
if e in amb:
continue
if e in pos:
intersect_size += 1.0
recall = intersect_size / len(pos)
precision = intersect_size / j
ap += (recall - old_recall) * ((old_precision + precision) / 2.0)
old_recall = recall
old_precision = precision
j += 1.0
return ap
|
ea653c614990b7b588e2e3413fc5b3dd278c4092
| 612,851 |
def get_call_name(call):
"""Get name of a `mock.call` function."""
call_str = str(call)
call_str = call_str[call_str.find('call.') + len('call.'):]
call_str = call_str[:call_str.find('(')]
return call_str
|
b08974edfcea74adb65bd0d98fddcb313ddf8da6
| 517,887 |
def update_yaml_with(target, source):
"""Merge the source YAML tree into the target. Useful for merging config files."""
if isinstance(target, dict) and isinstance(source, dict):
for k, v in source.items():
if k not in target:
target[k] = v
else:
target[k] = update_yaml_with(target[k], v)
return source
|
244b81220622a1ebd308519e730111a78f32cce1
| 441,523 |
def __inverse_name(name):
"""get the name of the inversed registration pair"""
name = name + '_inverse'
return name
|
c1a8321380aba1caf65553646d313d5092d0ddc8
| 179,671 |
def create_chunks(ascdata, max_bytes_in_barcode):
"""Chunk ascdata into a list of blocks with size max_bytes_in_barcode or less.
Only specific ASCII characters are allowed in ascdata so we don't worry about Unicode.
Each block begins with ^<sequence number><space> (1-based).
This allows to easily put them back together in the correct order."""
# Slicing ascdata reduces processing time to about 5% compared to handling each char separately
chunks = []
chunk_idx = 0
while chunk_idx < len(ascdata):
chunkdata = "^" + str(len(chunks)+1) + " "
charnum = max_bytes_in_barcode - len(chunkdata)
chunks.append(chunkdata + ascdata[chunk_idx:chunk_idx+charnum])
chunk_idx += charnum
return chunks
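# Usage sketch (illustrative addition, not part of the original snippet):
assert create_chunks("ABCDEFGHIJ", max_bytes_in_barcode=7) == [
    "^1 ABCD", "^2 EFGH", "^3 IJ"]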
|
8505a3ebfdcd286ed7472ab23e57f2dc7ca27491
| 331,938 |
def rightmost(root):
"""
Returns the rightmost leaf in this tree.
:param root: The root node of the tree.
:type root: d20.ast.ChildMixin
:rtype: d20.ast.ChildMixin
"""
right = root
while right.children:
right = right.children[-1]
return right
|
85f5ed1f0cc326fd1211295432b41c63bfac0311
| 124,888 |
import pathlib
def get_expected_file_in_directory(file: pathlib.Path, directory: pathlib.Path) -> pathlib.Path:
"""
Return the expected file in the directory -- the file may exist or not.
"""
expected_file = directory / file.name
return expected_file
|
57f24a0f53376988a84b0a84e3cd24d4dcfe02d5
| 431,436 |
import random
import string
def gen_random_str(min_length, max_length, prefix=None, suffix=None,
has_letter=True, has_digit=False, has_punctuation=False):
"""
    Given a prefix/suffix, a string length range and the character types to include,
    return a randomly generated string with the given prefix/suffix and length.
    :param:
        * min_length: (int) minimum string length
        * max_length: (int) maximum string length
        * prefix: (string) string prefix
        * suffix: (string) string suffix
        * has_letter: (bool) whether the string contains letters, default True
        * has_digit: (bool) whether the string contains digits, default False
        * has_punctuation: (bool) whether the string contains punctuation, default False
    :return:
        * random_str: (string) random string matching the given rules
    Example::
        print('--- gen_random_str demo ---')
        print(gen_random_str(5, 7))
        print(gen_random_str(5, 7, prefix='FISHBASE_'))
        print(gen_random_str(5, 7, prefix='FISHBASE_', suffix='.py'))
        print(gen_random_str(5, 7, has_digit=True, has_punctuation=True))
        print(gen_random_str(5, 7, prefix='FISHBASE_', has_digit=True, has_punctuation=True))
        print('---')
    Output::
        --- gen_random_str demo ---
        q4uo6E8
        FISHBASE_8uCBEUH
        FISHBASE_D4wRX2.py
        FISHBASE_65nqlNs
        FISHBASE_3"uFm$s
        ---
"""
if not all([isinstance(min_length, int), isinstance(max_length, int)]):
raise ValueError('min_length and max_length should be int, but we got {} and {}'.
format(type(min_length), type(max_length)))
if min_length > max_length:
raise ValueError('min_length should less than or equal to max_length')
    # avoid an empty random source
if not any([has_letter, has_digit, has_punctuation]):
raise ValueError('At least one value is True in has_letter, has_digit and has_punctuation')
random_str_len = random.randint(min_length, max_length)
random_source = ''
random_source += string.ascii_letters if has_letter else ''
random_source += string.digits if has_digit else ''
random_source += string.punctuation if has_punctuation else ''
    # avoid "ValueError: Sample larger than population or is negative"
if random_str_len > len(random_source):
random_source *= (random_str_len // len(random_source) + 1)
mid_random_str = ''.join(random.sample(random_source, random_str_len))
prefix = prefix if prefix else ''
suffix = suffix if suffix else ''
random_str = ''.join([prefix, mid_random_str, suffix])
return random_str
|
d7039df8299a858c6fbe62619230740e466772e2
| 46,505 |
import time
def game(engine1, engine2, print_move_times=False):
"""engine1 and engine2 are instances of EngineInterface.
engine1 makes the first move.
Return (result, engine1 time, engine2 time)
result is 0 if the game is a draw, 1 if engine1 win and 2 if engine2 win.
"""
total_time_engine_1 = 0
total_time_engine_2 = 0
engine1.new_game()
engine2.new_game()
while True:
t0 = time.perf_counter()
column_number = engine1.engine_move()
t1 = time.perf_counter()
total_time_engine_1 += t1 - t0
if print_move_times:
print(engine1.name + ": Move time:", t1 - t0, "s")
engine1.make_move(column_number)
engine2.make_move(column_number)
if engine1.four_in_a_row():
return (1, total_time_engine_1, total_time_engine_2)
if engine1.draw():
return (0, total_time_engine_1, total_time_engine_2)
t0 = time.perf_counter()
column_number = engine2.engine_move()
t1 = time.perf_counter()
total_time_engine_2 += t1 - t0
if print_move_times:
print(engine2.name + ": Move time:", t1 - t0, "s")
engine1.make_move(column_number)
engine2.make_move(column_number)
if engine2.four_in_a_row():
return (2, total_time_engine_1, total_time_engine_2)
if engine2.draw():
return (0, total_time_engine_1, total_time_engine_2)
|
04f6d6ff18aed8922fc3884f8e05ef958d1c0e5f
| 256,774 |
def _initialize_tableau(A_ub, b_ub, A_eq, b_eq, tableau, basis):
"""
Initialize the `tableau` and `basis` arrays in place for Phase 1.
Suppose that the original linear program has the following form:
maximize::
c @ x
subject to::
A_ub @ x <= b_ub
A_eq @ x == b_eq
x >= 0
Let s be a vector of slack variables converting the inequality
constraint to an equality constraint so that the problem turns to be
the standard form:
maximize::
c @ x
subject to::
A_ub @ x + s == b_ub
A_eq @ x == b_eq
x, s >= 0
Then, let (z1, z2) be a vector of artificial variables for Phase 1.
We solve the following LP:
maximize::
-(1 @ z1 + 1 @ z2)
subject to::
A_ub @ x + s + z1 == b_ub
A_eq @ x + z2 == b_eq
x, s, z1, z2 >= 0
The tableau needs to be of shape (L+1, n+m+L+1), where L=m+k.
Parameters
----------
A_ub : ndarray(float, ndim=2)
ndarray of shape (m, n).
b_ub : ndarray(float, ndim=1)
ndarray of shape (m,).
A_eq : ndarray(float, ndim=2)
ndarray of shape (k, n).
b_eq : ndarray(float, ndim=1)
ndarray of shape (k,).
tableau : ndarray(float, ndim=2)
Empty ndarray of shape (L+1, n+m+L+1) to store the tableau.
Modified in place.
basis : ndarray(int, ndim=1)
Empty ndarray of shape (L,) to store the basic variables.
Modified in place.
Returns
-------
tableau : ndarray(float, ndim=2)
View to `tableau`.
basis : ndarray(int, ndim=1)
View to `basis`.
"""
m, k = A_ub.shape[0], A_eq.shape[0]
L = m + k
n = tableau.shape[1] - (m+L+1)
for i in range(m):
for j in range(n):
tableau[i, j] = A_ub[i, j]
for i in range(k):
for j in range(n):
tableau[m+i, j] = A_eq[i, j]
tableau[:L, n:-1] = 0
for i in range(m):
tableau[i, -1] = b_ub[i]
if tableau[i, -1] < 0:
for j in range(n):
tableau[i, j] *= -1
tableau[i, n+i] = -1
tableau[i, -1] *= -1
else:
tableau[i, n+i] = 1
tableau[i, n+m+i] = 1
for i in range(k):
tableau[m+i, -1] = b_eq[i]
if tableau[m+i, -1] < 0:
for j in range(n):
tableau[m+i, j] *= -1
tableau[m+i, -1] *= -1
tableau[m+i, n+m+m+i] = 1
tableau[-1, :] = 0
for i in range(L):
for j in range(n+m):
tableau[-1, j] += tableau[i, j]
tableau[-1, -1] += tableau[i, -1]
for i in range(L):
basis[i] = n+m+i
return tableau, basis
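# Usage sketch (illustrative addition, not part of the original snippet):
# tiny problem with m=1 inequality and k=1 equality in n=2 variables, so
# L = m + k = 2 and the tableau must be (L+1) x (n+m+L+1) = 3 x 6.
import numpy as np
A_ub, b_ub = np.array([[1.0, 1.0]]), np.array([4.0])
A_eq, b_eq = np.array([[1.0, -1.0]]), np.array([0.0])
tableau = np.empty((3, 6))
basis = np.empty(2, dtype=np.int_)
_initialize_tableau(A_ub, b_ub, A_eq, b_eq, tableau, basis)
assert list(basis) == [3, 4]  # artificial variables start the Phase 1 basis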
|
53e34d84978bb1604d3b5e8270b4df8f1e16037a
| 113,156 |
def get_distance(distance_df, location_1, location_2):
"""
Return distance value between two locations.
Args:
distance_df (pandas DataFrame) : distance dataframe
location_1 (str) : name of the first location
location_2 (str) : name of the second location
Returns:
float: Distance between two locations.
"""
dist = float(distance_df[(distance_df.location_1 == location_1) & (distance_df.location_2 == location_2)].distance.values[0])
if location_1 == location_2:
dist = 0
return dist
|
35d0f5bbd42de7b3f8bbe0c341c220c2682a333a
| 203,802 |
def get_extra_couchdbs(config, couch_database_url):
"""
Create a mapping from database prefix to database url
:param config: list of database strings or tuples
:param couch_database_url: main database url
"""
extra_dbs = {}
for row in config:
if isinstance(row, tuple):
_, postfix = row
extra_dbs[postfix] = '%s__%s' % (couch_database_url, postfix)
return extra_dbs
|
75357442b19c0297f314ed45682c5f950e8ccedd
| 521,659 |
import types
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create a
# non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
|
37fca64ddaadfc8a6a24dce012af2143038cacd2
| 705,241 |
def is_interior_link(link, grid):
"""Return True if both nodes are core; False otherwise."""
return (
grid.status_at_node[grid.node_at_link_tail[link]] == grid.BC_NODE_IS_CORE
and grid.status_at_node[grid.node_at_link_head[link]] == grid.BC_NODE_IS_CORE
)
|
c4292584b264ac5dd96c4f290c6a728a2ac2cf5f
| 258,880 |
def Sign(number):
"""
Sign(number)
Return the mathematical sign of the given number.
"""
return number and (-1, 1)[number > 0]
|
7a7c4dae06df05715c4d4dd0680b000bccb924d4
| 632,601 |
import inspect
def two_column_header(left: str, right: str) -> str:
"""
Create exam header with two column
:param left: left text
:param right: right text
:return: Latex code
"""
return inspect.cleandoc(rf"""
\begin{{tabular}}{{ *{{2}}{{ p{{ \dimexpr0.5\linewidth-2\tabcolsep\relax }} }} }}
{left} & {right}
\end{{tabular}}
""")
|
4c1315906a9f8b6d79de9871cb20744ec1856cd6
| 483,046 |
import hashlib
def img_name_processor(src):
"""
This function is used to handle the file name of the saved picture.
Hash the URL of the picture as its filename.
:param src: image url
:return: image filename
"""
h5 = hashlib.md5()
h5.update(src.encode('utf-8'))
img = h5.hexdigest() + '.jpg'
return img
|
712d5fea13d6ec92096b0d88b9e06aa4cd18551d
| 516,513 |
def row_to_dict(row):
"""
This takes a row from a resultset and returns a dict with the same structure
:param row:
:return: dict
"""
return {key: value for (key, value) in row.items()}
|
0d1f2d55f50c70906964368706214e2a36c3578d
| 522,761 |
def organism_matches(organism, patterns):
"""Tests organism filter RegEx patterns against a given organism name."""
for pattern in patterns:
if pattern.match(organism):
return True
return False
|
135b6ad0472a2b0904ececc8b8f226c77b2294e6
| 20,897 |
def dict_merge2(*dicts):
""" Return a dict with all values of dicts.
If some key appears twice and contains iterable objects, the values
are merged (instead of overwritten).
"""
res = {}
for d in dicts:
for k in d.keys():
if k in res and isinstance(res[k], (list, tuple)):
res[k] = res[k] + d[k]
elif k in res and isinstance(res[k], dict):
res[k].update(d[k])
else:
res[k] = d[k]
return res
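# Usage sketch (illustrative addition, not part of the original snippet):
merged = dict_merge2({'a': [1], 'b': {'x': 1}}, {'a': [2], 'b': {'y': 2}, 'c': 3})
assert merged == {'a': [1, 2], 'b': {'x': 1, 'y': 2}, 'c': 3}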
|
dff7f5577ee71652cf09d29c945852584092dcb7
| 148,142 |
from pathlib import Path
def file_exists(file_path: str) -> bool:
"""Checks if a file exists at `file_path`
:param file_path: path to file whose existence is being checked.
:type file_path: str
:rtype: bool
"""
return Path(file_path).is_file()
|
11b14ccf73ed753ccf46a7100f98d63052ea5fc5
| 59,143 |
from typing import Any
def get_record_code(record: dict[str, Any]) -> str:
"""Returns the concatenated codes for a taric record."""
return f"{record['record_code']}{record['subrecord_code']}"
|
11e438fbee7a831494ce47fcd59dce8c3840adcb
| 665,252 |
def template_size(template):
"""
Returns template width and height
:type template: list of list of int
:rtype: dict
"""
size = {
"width": max([len(row) for row in template]) if len(template) > 0 else 0,
"height": len(template),
}
return size
|
b993e1a1d8c7386e2429ce569cae5c4e59e19682
| 313,001 |
def blend_components(original_component, tint_component, tint_strength):
"""Blend the two color components with the given tint strength.
original_color and tint_color are integers in [0, 255].
Formula: http://stackoverflow.com/a/29321264
"""
tint_factor = tint_strength / 100.
return round((
(1 - tint_factor) * original_component ** 2 +
tint_factor * tint_component ** 2) ** 0.5)
|
996504628d26cc59f79b3320baa909a31be3a546
| 249,790 |
def reshape(array, step):
""" Reshapes array of size n to matrix with dimensions n/step by step.
Args:
array: List to reshape.
step: Number of elements in row after reshaping.
"""
result = []
for i in range(0, len(array), step):
result.append(array[i:i+step])
return result
|
7fde2888e5cd2dca4ef498f7c943441044bafa93
| 389,543 |
def non_hydrogen_count(mol):
"""Non-hydrogen atom count """
return sum(1 for _, a in mol.atoms_iter() if a.symbol != "H")
|
956ee6b6c1c46bee54a4e42a65550ffba0612091
| 257,639 |