content | sha1 | id
---|---|---|
def nrhs(k: int) -> int:
"""Number of right-hand-sides of a linear system."""
return k
|
6d761d433dc76d926c48c620235207c6c112c336
| 131,800 |
def get_tarball_valid_unpack_directory_name(package_name: str, version_number: str) -> str:
"""
get the name of the folder that should be obtained by unpacking the tarball
:param package_name: name of the package to check
:param version_number: version number
:return: the name of the folder that is obtained by unpacking the tarball
"""
return f"{package_name}_{version_number}"
|
3f0cd6ae0e11f6022340a45e2981fc72247cc04d
| 462,917 |
def presence_of_element_located(locator):
""" An expectation for checking that an element is present on the DOM
of a page. This does not necessarily mean that the element is visible.
locator - used to find the element
returns the WebElement once it is located
"""
def _predicate(driver):
return driver.find_element(*locator)
return _predicate
|
bf69ed7e58e5e6dd0181bba3d965a301798c8f8a
| 366,032 |
import torch
def rotate_vec_by_axisangle(vec, aa_vec):
"""
This function rotates a 3D vector @vec by the direction and angle represented by @aa_vec.
See https://stackoverflow.com/questions/32485772/how-do-axis-angle-rotation-vectors-work-and-how-do-they-compare-to-rotation-matr
for more information.
Args:
vec (tensor): (..., 3) where final dim represents (x, y, z) vectors
aa_vec (tensor): (..., 3) where final dim represents desired (ax, ay, az) axis-angle rotations
Returns:
tensor: (..., 3) where final dim is newly rotated (x, y, z) vectors
"""
# Extract angle and unit vector from axis-angle vectors
angle = torch.norm(aa_vec, dim=-1, keepdim=True)
aa_v = aa_vec / angle
    # Map all NaNs to 0 (they arise where angle == 0, i.e. no rotation)
    aa_v[aa_v != aa_v] = 0.0
    # Rotate the vector using the Rodrigues formula (see link above);
    # dim=-1 keeps the cross product on the (x, y, z) axis for batched inputs
    c, s = torch.cos(angle), torch.sin(angle)
    vec_rot = vec * c + torch.cross(aa_v, vec, dim=-1) * s + \
        aa_v * torch.sum(aa_v * vec, dim=-1, keepdim=True) * (1 - c)
return vec_rot
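# Added sanity check (not part of the original snippet): rotating the x-axis
# unit vector by pi/2 about the z-axis should give the y-axis unit vector.
import math
assert torch.allclose(
    rotate_vec_by_axisangle(torch.tensor([1.0, 0.0, 0.0]),
                            torch.tensor([0.0, 0.0, math.pi / 2])),
    torch.tensor([0.0, 1.0, 0.0]), atol=1e-6)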
|
8ab749d7c26a94dfd9c0da80334c69e0e46702a2
| 81,938 |
def clean_text(x):
"""Helper function to clean a string."""
x = str(x)
x = x.lower()
x = x.strip()
x = " ".join(x.split()) # removes extra whitespace between words
return x
|
74dbb5e07c42c668cdbe380670e87e81f4678407
| 19,439 |
def read(fname):
"""Return contents of fname"""
txt = None
with open(fname) as ftoken:
txt = ftoken.read()
return txt
|
0568810d0c330e5b279b55d2ad7be949a9039088
| 304,672 |
import logging
def make_splits(args, dataset):
"""
Make train-test splits in time.
"""
total_hours = len(dataset['counts'])
if args.truncate:
total_hours = min(total_hours, args.truncate)
# Dataset starts on a Saturday.
assert dataset['start_date'][0].strftime('%A') == 'Saturday'
# Ridership is minimum early Sunday morning.
split_hour_of_week = 29
# We train HMM on at least six years of historical data.
min_hours = 6 * 365 * 24
stride = 24 * 7
result = list(range(min_hours + split_hour_of_week,
total_hours - args.forecast_hours,
stride))
logging.info(f'Created {len(result)} test/train splits')
assert result, 'truncated too short'
return result
|
ee0791b2ea96f6bd60a5b6a23c818073082584d3
| 313,225 |
from pathlib import Path
import click
def validate_json_yaml_filename(ctx, param, value):
"""Validate the name of the file has the proper extension and add the extension to the file object"""
if value is None:
return value
if not hasattr(value, "name") or value.name == "<stdout>":
# stdin/stdout case
value.extension = "yaml"
return value
ALLOWED_EXTENSIONS = ["json", "yaml", "yml"]
extension = Path(value.name).suffix[1:]
if extension not in ALLOWED_EXTENSIONS:
raise click.BadParameter(
f"the extension of the file is '{extension}' while it should be one of {ALLOWED_EXTENSIONS}"
)
value.extension = extension
return value
|
2adf1053cd1c4326f8ba2de137ca4f3c81a1c4a6
| 637,115 |
def mac_aton(s):
    """Convert a Mac address to an integer."""
    try:
        octets = [int(x, 16) for x in s.split(':')]
        if len(octets) != 6:
            raise ValueError
        mac = sum(octets[i] << (5 - i) * 8 for i in range(6))
    except ValueError:
        raise ValueError('illegal Mac: {0}'.format(s))
    return mac
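# Added example: six colon-separated hex octets pack big-endian into one int.
assert mac_aton('aa:bb:cc:dd:ee:ff') == 0xAABBCCDDEEFF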
|
dd29c6e99998bfd0676ae9e7200f02500c2eed0d
| 43,575 |
def len2bytes(payload: bytes) -> bytes:
"""
Generate payload length as 2 bytes, suitable for
constructing RTCM message transport.
:param bytes payload: message payload (i.e. _without_ header, length or CRC)
:return: payload length as 2 bytes padded with leading zeros
:rtype: bytes
"""
return len(payload).to_bytes(2, "big")
|
f7c94acc0de60de1e569b1e2cd4c5d091bd139cc
| 296,599 |
def get_source(driver) -> str:
"""Returns the source code of the webpage"""
return driver.page_source
|
c887ef249aa88dd3216406207f7463718b22c092
| 161,446 |
def get_avg(items):
"""return avg of list items"""
length = len(items)
if length > 0:
return sum(items) / length
return 0
|
3747668ebbb1b84ec2d61314eb411c4b8358dc8b
| 493,652 |
def orientation(p, q, r):
"""
Finds the orientation of an ordered set of vertices(p, q, r).
p: First vertex represented as a tuple.
q: Second vertex represented as a tuple.
r: Third vertex represented as a tuple.
returns:
0 : Collinear points
1 : Clockwise points
2 : Counterclockwise
"""
    val = ((q[1] - p[1]) * (r[0] - q[0])) - ((q[0] - p[0]) * (r[1] - q[1]))
    if val == 0:
        # Collinear
        return 0
    if val > 0:
        # Clockwise
        return 1
    else:
        # Counterclockwise
        return 2
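# Added examples covering the three cases:
assert orientation((0, 0), (1, 1), (2, 2)) == 0  # collinear
assert orientation((0, 0), (1, 1), (2, 0)) == 1  # clockwise
assert orientation((0, 0), (1, 0), (2, 2)) == 2  # counterclockwise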
|
3651c0b6eee3550c427ad03d229d947337e7eed1
| 17,670 |
def map_data(keys: list, values: list, types: list) -> dict:
"""
Returns a dict that maps keys to their values and types
"""
data: dict = {}
for i, value in enumerate(values):
key = keys[i]
key_type = types[i]
data[key] = {
"value": value,
"type": key_type
}
return data
|
32774b33ff8710c8ec185c137f1b6d5dd2e6a575
| 434,551 |
def find_biggest_value_per_day(day_data):
"""
Take pressure data per day and find
biggest value.
    If two readings have equal systolic values,
    compare by diastolic.
"""
values = [(data[2], data[3]) for data in day_data]
systolic, diastolic = max(values)
return systolic, diastolic
|
f46a866809278b95851c8583a1682de58749a64f
| 48,384 |
def taxid(entry):
"""
Gets the NCBI taxon id as an integer.
"""
base, tid = entry["taxonId"].split(":", 1)
assert base == "NCBITaxon"
return int(tid)
|
00fe879444e9587132cfe697a949c77b9002672d
| 86,634 |
def _get_currency_pair(currency, native):
"""
Format a crypto currency with a native one for the Coinbase API.
"""
return '{}-{}'.format(currency, native)
|
92c3f43d1661f912a6bb63d14df1a7095eb784f3
| 24,891 |
def convert_metadata_pre_1_0_to_1_0(metadata):
"""
Convert pre-1.0 to 1.0 metadata format
:arg metadata: The old metadata
:returns: The new metadata
Changes from pre-1.0 to 1.0:
* ``version`` field renamed to ``metadata_version``
* ``supported_by`` field value ``unmaintained`` has been removed (change to
``community`` and let an external list track whether a module is unmaintained)
* ``supported_by`` field value ``committer`` has been renamed to ``curated``
"""
new_metadata = {'metadata_version': '1.0',
'supported_by': metadata['supported_by'],
'status': metadata['status']
}
if new_metadata['supported_by'] == 'unmaintained':
new_metadata['supported_by'] = 'community'
elif new_metadata['supported_by'] == 'committer':
new_metadata['supported_by'] = 'curated'
return new_metadata
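# Added example: 'committer' is renamed to 'curated' and the version field
# becomes 'metadata_version'.
assert convert_metadata_pre_1_0_to_1_0(
    {'version': '0.9', 'supported_by': 'committer', 'status': ['preview']}
) == {'metadata_version': '1.0', 'supported_by': 'curated', 'status': ['preview']}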
|
81c67795aa27ae47dbc07c95d685c187f66e9258
| 306,525 |
def is_extrusion_line(line: str) -> bool:
"""Check if current line is a standard printing segment.
Args:
line (str): Gcode line
Returns:
bool: True if the line is a standard printing segment
"""
return "G1" in line and " X" in line and "Y" in line and "E" in line
|
364dd589673dc6dd500c02167169a5c1d0daf010
| 151,538 |
from datetime import datetime
def convert_date_to_fns_format(date_db: datetime) -> str:
"""Convert datetime from database to FNS format."""
return date_db.strftime('%Y-%m-%dT%H:%M')
|
60445e51032efe4548b6cd59fe53baf6c53c6f1c
| 72,139 |
import hashlib
def url_to_hashkey(url):
"""Generate a 96 bit hash key from a URL string.
Args:
url: A URL string.
Returns:
A 96 bit hash key.
"""
return hashlib.sha512(url.encode()).hexdigest()[0:24]
|
394ffa42cb995c260f7e5c1d0036a52268e641a2
| 518,696 |
def get_instance(module, name, config, **kwargs):
"""
A reflection function to get backbone/classifier/.
Args:
module ([type]): Package Name.
name (str): Top level value in config dict. (backbone, classifier, etc.)
config (dict): The parsed config dict.
Returns:
Corresponding instance.
"""
if config[name]["kwargs"] is not None:
kwargs.update(config[name]["kwargs"])
return getattr(module, config[name]["name"])(**kwargs)
|
244810ae862527ef323781cb4ac7de144b183256
| 526,717 |
def td2fusec(td):
"""Convert datetime.timedelta to float microseconds"""
sec = td.total_seconds() # float
usec = sec * 1000000
return usec
|
4aac8e0df2286623a512bc3ee2b61bc7607471fe
| 326,192 |
def is_hilbert_number(n):
"""
    Check whether n is positive and congruent to 1 modulo 4 (i.e. n % 4 == 1).
"""
return n > 0 and n % 4 == 1
|
b55825b446282343cb87e3d6c83036a6bc9f1a6a
| 687,556 |
def int_equals(a, b):
"""Small helper function, takes two objects and returns True if they are equal when cast to int.
This is mainly intended to facilitate checking two strings that may or may not be ints.
Most importantly, it will return False if either cannot be cast to int."""
try:
return int(a) == int(b)
    except (ValueError, TypeError):
return False
|
7ac9eee9ffc3f3efd2848276d08f36364631d580
| 263,638 |
import quopri
def decode_email(email):
"""
Decode email using quoted printable
:param email: The email to decode
:return: The decoded email content
"""
return quopri.decodestring(email.get_payload())
|
87e0c8f294d15cbd479640a43fa6eaf7f3450be2
| 112,018 |
def _bisect_blocks(web3, timestamp, use_left_bound=True):
"""
Perform a binary search on the blockchain for the block that matches the
given timestamp. The `use_left_bound` parameter determines whether to
return the block to the left or right of the timestamp in the event that no
block matches the timestamp exactly.
"""
left_bound = 1
right_bound = web3.eth.blockNumber
left_block = web3.eth.getBlock(left_bound)
if left_block['timestamp'] >= timestamp:
return 'earliest'
right_block = web3.eth.getBlock(right_bound)
if right_block['timestamp'] <= timestamp:
return 'latest'
while left_bound < right_bound - 1:
middle = (left_bound + right_bound) // 2
middle_block = web3.eth.getBlock(middle)
if middle_block['timestamp'] < timestamp:
left_bound = middle
elif middle_block['timestamp'] > timestamp:
right_bound = middle
else:
return middle
else:
if use_left_bound:
return left_bound
else:
return right_bound
|
9eb011ca488b7262e78efd29fe11f3c0136a5933
| 21,809 |
def get_chunks(t_start, t_stop, n_chunks):
"""Group frame indices into given number of 'chunks'.
Args:
t_start (int): Frame index to start at (inclusive)
t_stop (int): Frame index to stop at (exclusive)
n_chunks (int): Number of chunks
Returns:
List of 2-tuples containing (start, stop) for each chunk.
"""
# Validate input
if t_stop <= t_start:
raise ValueError('Start frame not before stop frame')
if n_chunks <= 0:
raise ValueError('Number of chunks not positive int')
if n_chunks > (t_stop - t_start):
raise ValueError('More chunks than frames')
# Determine size of chunks
sz = (t_stop - t_start) // n_chunks
# First n-1 chunks
chunks = []
for k in range(n_chunks - 1):
chunks.append((t_start + k * sz, t_start + (k + 1) * sz))
# Final chunk
chunks.append((t_start + (n_chunks - 1) * sz, t_stop))
return chunks
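# Added example: the final chunk absorbs any remainder frames.
assert get_chunks(0, 10, 3) == [(0, 3), (3, 6), (6, 10)]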
|
b7477723cd35753699fce913f966618b882cdf21
| 662,686 |
def xor(n1, n2):
"""XORs two numbers"""
return (int(n1) + int(n2)) % 2
|
7ead2d86d1faeaf71a4cb2b8964c7fc5e3b5b7c0
| 435,897 |
def clean_dict(dictionary: dict) -> dict:
"""Recursively removes `None` values from `dictionary`
Args:
dictionary (dict): subject dictionary
Returns:
dict: dictionary without None values
"""
for key, value in list(dictionary.items()):
if isinstance(value, dict):
clean_dict(value)
elif value is None:
dictionary.pop(key)
return dictionary
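# Added example: None values are dropped at every nesting level.
assert clean_dict({"a": None, "b": {"c": None, "d": 1}}) == {"b": {"d": 1}}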
|
3968b6d354116cca299a01bf2c61d7b2d9610da9
| 703,773 |
def width_from_bitdefs(bitdefs):
"""
    Determine how wide a binary value needs to be based on the bitdefs used
to define it.
Args:
bitdefs (list(BitDef)): List of bitdefs to find max width of
Returns:
(int): Maximum width
"""
max_index = max([bitdef.end for bitdef in bitdefs])
width = max_index + 1
return width
|
59503f335d6d427579be730806c738108091e9ed
| 703,034 |
from pathlib import Path
def norm_abs_path(path, ref_path):
"""\
Convert `path` to absolute assuming it's relative to ref_path (file or dir).
"""
path, ref_path = Path(path), Path(ref_path).absolute()
ref_dir = ref_path if ref_path.is_dir() else ref_path.parent
return ref_dir / path
|
91033a7cb3631616799c204259b5830c931035f0
| 683,288 |
def lower_first_letter(sentence):
"""Lowercase the first letter of a sentence."""
return sentence[:1].lower() + sentence[1:] if sentence else ""
|
de0f79548d42983093f65970464ca84eed032300
| 696,468 |
def _FollowedByFakeQuant(tensor):
"""Returns True if the tensor is followed by a FakeQuant."""
fake_quant_ops = set([
'FakeQuantWithMinMaxVars', 'FakeQuantWithMinMaxArgs',
'FakeQuantWithMinMaxVarsPerChannel'
])
pass_through_ops = set(['Reshape', 'Identity'])
consumers = tensor.consumers()
while consumers:
c = consumers.pop()
if c.type in fake_quant_ops:
return True
elif c.type in pass_through_ops:
for output in c.outputs:
consumers.extend(output.consumers())
return False
|
3d3f3a189f04089e89a7d082ac388f6e1aee90ec
| 188,613 |
def makeMapping(classes):
"""
Create a mapping between class name and actual classes.
"""
return {
cls.__name__: cls for cls in classes
}
|
dbe43c6e13aa502b4414934e773adfa355a38d24
| 354,797 |
def _transform(persistence_diagram):
"""Transform persistence diagram for persistence image calculation."""
persistence_diagram[:, 1] -= persistence_diagram[:, 0]
return persistence_diagram
|
23c0e46fe3258345ddcaecb2d74fbb6218d48f33
| 534,280 |
def lucas(n):
"""Function that provides the nth term of lucas series."""
x, y = 2, 1
for i in range(n - 1):
x, y = y, x + y
return x
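# Added example: the series starts 2, 1, 3, 4, 7, 11, ...
assert [lucas(n) for n in range(1, 7)] == [2, 1, 3, 4, 7, 11]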
|
6d40e67ce8dd682bb26f9bd16d2dd34d3d1bf541
| 30,381 |
def cmake_cache_option(name, boolean_value, comment=""):
"""Generate a string for a cmake configuration option"""
value = "ON" if boolean_value else "OFF"
return 'set(%s %s CACHE BOOL "%s")\n\n' % (name, value, comment)
|
b00e202d499b3faa9bc1623994f733456c542b40
| 594,528 |
def chunks(l: list, n: int):
"""
    Split a list into chunks of size n (the last chunk may be shorter)
    :param l: list
    :param n: chunk size
:return: list of lists
"""
return [l[i:i + n] for i in range(0, len(l), n)]
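# Added example: chunk size 2 over five elements leaves a short final chunk.
assert chunks([1, 2, 3, 4, 5], 2) == [[1, 2], [3, 4], [5]]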
|
0c1dd0fbb7461df98eff59b9cf9f74a2efdedbd4
| 530,318 |
from datetime import datetime
import pytz
def now_in_timezone(tz):
"""
tz should be tzdata identifier, e.g. America/New_York
"""
return datetime.now(pytz.timezone(tz))
|
a25c2b8a10e56204e0e2129bf180afc261db0c9c
| 606,669 |
import torch
def create_optimizer(optimizer_name, model, **kwargs):
"""
Create a torch optimizer of the specified type for the given model.
Args:
optimizer_name: The type of optimizer to create (as string), one of: 'SGD', 'NAG', 'Adam', 'RMSprop'.
model: The model to train with the optimizer.
**kwargs: Additional named parameters to the optimizers init function.
Returns:
The optimizer.
Raises:
ValueError: on an invalid loss name (i.e. not in 'SGD', 'NAG', 'Adam', 'RMSprop').
"""
optimizers = {'SGD': (torch.optim.SGD, {'momentum': 0.9, 'nesterov': False}),
'NAG': (torch.optim.SGD, {'momentum': 0.9, 'nesterov': True}),
'Adam': (torch.optim.Adam, {}),
'RMSprop': (torch.optim.RMSprop, {})}
if optimizer_name not in optimizers:
raise ValueError(f'Invalid optimizer {optimizer_name}. Valid optimizers are: {", ".join(optimizers.keys())}.')
optimizer, init_kwargs = optimizers[optimizer_name]
return optimizer(model.parameters(), **init_kwargs, **kwargs)
|
f239b1b878c0158b1cc289a116cd54ab86fcbdd0
| 245,642 |
def convert_to_x_y(texts, targets):
"""
Converts data into form x,y
Args:
texts (list): List of texts
targets (list): List of targets
Returns:
    list: A list of (x, y) tuples
"""
data = [(texts[idx], targets[idx]) for idx in range(len(targets))]
return data
|
c195b84aa7e62717d3fe32b88c193fcd7ce7b889
| 477,818 |
def _set_group(tree, group):
"""Set the group for a tree of dependencies."""
grouped = {
"dependency": tree["dependency"],
"level": tree["level"],
"version": tree["version"],
"group": group,
"children": [],
}
if tree["children"]:
for child in tree["children"]:
grouped["children"].append(_set_group(child, group))
return grouped
|
a0e10f3b26938637d281ca5e82f6da215fb4a7d5
| 565,201 |
import re
def getKeepAlive(response):
"""Extracts and returns the keep alive token from the response.
Extracts and returns the actual keep-alive token from within the response;
this token enables subsequent requests for CSV data.
Args:
response: The Python requests response object, typically corresponding to
a simple GET request on the main refugee explorer page.
Returns:
A keep-alive token which is used to validate subsequent requests.
Raises:
AssertionError: Could not extract a single keep-alive token from the page.
"""
controlId = re.findall('ControlID=([a-fA-F0-9]+)"', response.text)
    assert len(controlId) == 1, 'Expected exactly one keep-alive token'
return controlId[0]
|
0486def47c3b460cc775adba7772ee0ba264295d
| 621,793 |
def column_choices_hidden(fields_selected):
"""Include column choices in request query as hidden fields"""
hidden = ''
for f in fields_selected:
hidden += "<input type='hidden' name='" + f + "' value='t'/>"
return hidden
|
cf80e6cceacc908b14d6b9ff86b9488a518505b8
| 336,951 |
def class_counts(rows, labels):
"""
Counts the number of each type of example in a dataset.
:param rows: array of samples
:param labels: rows data labels.
:return: a dictionary of label -> count.
"""
    counts = {cls: 0 for cls in set(labels)}  # a dictionary of label -> count.
    for idx, _ in enumerate(rows):
        counts[labels[idx]] += 1
    return counts
|
3e5b4c32885ce6ccdad8a842a2267e9d9dca62b2
| 132,866 |
import re
def format_email(address):
"""
Returns a correctly formatted and de-obfuscated email address
"""
    address = re.sub(r"\s+", "", address)
address = re.sub(r"\(.t\)", "@", address)
address = re.sub(r"\(punkt\)", ".", address)
address = re.sub(r"\((strich|minus)\)", "-", address)
return address
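# Added example (the (punkt)/(strich) patterns suggest German obfuscation):
assert format_email("max (at) example (punkt) com") == "max@example.com"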
|
49e1cdb843ce462a3444610a22d0faec9b215f5f
| 347,325 |
def example_choices(field, arg=3):
"""
    Returns a number (default: 3) of example choices for a ChoiceField (useful for CSV import forms).
"""
examples = []
if hasattr(field, 'queryset'):
choices = [(obj.pk, getattr(obj, field.to_field_name)) for obj in field.queryset[:arg + 1]]
else:
choices = field.choices
for id, label in choices:
if len(examples) == arg:
examples.append('etc.')
break
if not id:
continue
examples.append(label)
return ', '.join(examples) or 'None'
|
5206254abc339b8941596ccb29538cd4fcc1e8a1
| 626,824 |
def readText(text_file) -> str:
    """Read a text from a file."""
    with open(text_file, 'r', encoding='utf8') as rtextFile:
        text = rtextFile.read()
    text = text.replace("\n", "").replace("\r", "")
    return text
|
e457638df4f47406abaaaa0682d0c0b8e1375a8d
| 256,048 |
def find_smaller_streets(playable_items):
"""
Gets list of playable moves from find_streets and adds smaller possible streets to that list
For (3,4,5,6,7) this will add (3,4,5,6), (3,4,5), (4,5,6). The rest is already covered
:param playable_items: Possible moves, the user can do
:return: list of possible moves, including shorter streets
"""
# print("Finding smaller streets")
# print(streets)
for street in playable_items:
smaller_street = street[:-1]
if len(smaller_street) >= 3:
if smaller_street not in playable_items:
playable_items.append(smaller_street)
return playable_items
|
9860b87fcab0255fb93e9aad3f7a8b57ba658a8e
| 177,224 |
def num_round(num, decimal=2):
"""Rounds a number to a specified number of decimal places.
Args:
num (float): The number to round.
decimal (int, optional): The number of decimal places to round. Defaults to 2.
Returns:
float: The number with the specified decimal places rounded.
"""
return round(num, decimal)
|
a4ee87fa8724690aea611b2d95f884dd072e6511
| 195,819 |
def gq_get_vertex_user(user):
"""Create the gremlin query that returns a Vertex for a user given
its uuid
"""
query = "g.V().hasLabel('user').has('uuid', '{}')".format(user.uuid)
return query
|
52509c27280a3e0fd4eb57b44fe5fbec8c0c3b20
| 685,483 |
def _loggables(self):
"""dict[str, str]: Return a name, category mapping of loggable quantities."""
return {name: quantity.category.name
for name, quantity in self._export_dict.items()}
|
e21e8295fce65ef4822854c2a2ece54bf6a1283b
| 216,682 |
def split_user(user_at_host):
"""
break apart user@host fields into user and host.
"""
try:
user, host = user_at_host.rsplit('@', 1)
except ValueError:
user, host = None, user_at_host
assert user != '', \
"Bad input to split_user: {user_at_host!r}".format(user_at_host=user_at_host)
return user, host
|
16383930a7940236a3796151ffc37bd6485ab98a
| 155,489 |
def isoforest_label_adjust(pred_func):
"""Adjusts isolation forest predictions to be 1 for outliers, 0 for inliers.
By default the scikit-learn isolation forest returns -1 for outliers and 1
for inliers, so this method is used to wrap fit_predict or predict methods
and return 0 for inliers, 1 for outliers.
:param pred_func: Scikit-learn prediction function that returns a flat
:class:`numpy.ndarray` of labels ``-1`` and ``1``.
:type pred_func: function or method
:rtype: function
"""
def adjust_pred_func(*args, **kwargs):
res = pred_func(*args, **kwargs)
        # Compute the outlier mask first so the two relabelings don't
        # clobber each other (naively setting -1 -> 1 then 1 -> 0 would
        # zero out everything).
        outliers = res == -1
        res[res == 1] = 0
        res[outliers] = 1
return res
return adjust_pred_func
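# Added demo with a stubbed prediction function (assumes numpy is available):
import numpy as np
assert list(isoforest_label_adjust(lambda: np.array([1, -1, 1]))()) == [0, 1, 0]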
|
11ac61f691404525a357a32e725c30dd2675a85a
| 36,415 |
import string
import random
def strings(n, chars=string.ascii_letters):
""" Return random string of N characters, sampled at random from `chars`.
"""
return ''.join([random.choice(chars) for i in range(n)])
|
bc7e2cab22b4d0a98b3e93a7199c1ec2b326ee68
| 23,043 |
def exclusive_in(in_list,master_list):
"""
Checks if every element in in_list is in masterlist
:in_list: (list of any type of object) elements
:master_list: (list of any type of object) elements
:returns: (bool) True if every element in in_list is in master_list and
False otherwise
"""
    return all(elem in master_list for elem in in_list)
|
799ca2314eac76afbadf33b4981d49b9872ec23c
| 502,748 |
import re
def make_container_name(image: str, suffix: str = "") -> str:
"""
Make default container name for the given Docker image.
Args:
image: Docker image tag.
suffix: Suffix to append to the container name.
Returns:
Docker container name.
"""
name = re.sub(r"[^a-zA-Z0-9_.-]+", "-", image)
if suffix:
name += "-" + str(suffix)
return name
|
570e3f89a5c76b9b4383e19d462ec1ee04b5118c
| 616,995 |
def create_new_column_with_difference_between_2(df, column1, column2):
"""
Return df with new delta column: column1 - column2
Parameters
----------
df: Pandas dataframe
column1: Column string
column2: Column string
Returns
-------
Df with added delta column
"""
df['delta_' + column1 + "_" + column2] = df[column1] - df[column2]
return df
|
4e4fe4637b378779534fb88387eb404edf0ac57e
| 378,632 |
def get_natural_num(msg):
"""
    Get a valid natural number from the user.
    :param msg: message asking for a natural number
    :return: a positive integer parsed from the user's input.
"""
valid_enter = False
while not valid_enter:
given_number = input(msg).strip()
if given_number.isdigit():
num = int(given_number)
valid_enter = True
return num
|
77bed94bf6d3e5ceb56d58eaf37e3e687e3c94ba
| 381 |
def _parse_options_list(options):
"""
Args:
options: list of the BeautifulSoup objects
Returns:
list[dict]: list of parsed info about faculties
"""
faculties_list = []
for option in options:
faculties_list.append({
'name': option.text,
'code': int(option['value'])
})
return faculties_list
|
19fbe248422728ace7133d23104025ddf1a90f96
| 599,845 |
def bold(s):
"""Returns the string s, bolded."""
return f'\x02{s}\x02'
|
a471ae9de5e222a07eeb3e2eb8e2a840820472a9
| 610,932 |
import re
def ProcessGitHubLinks(markup, github):
"""Replaces the GitHub local links by the external variants."""
markup = re.sub(
r'#(\d+)',
r'[#\1](https://github.com/%s/issues/\1)' % github,
markup)
markup = re.sub(
r'\[(.+?)\]\((wiki/.+?)\)',
r'[\1](https://github.com/%s/\2)' % github,
markup)
return markup
|
88284abc8fc76f4b24dd1d5040b0156e4eef0cf7
| 669,142 |
def clear_sentences(data):
"""
    Clean sentences: lowercase, strip punctuation, and drop single-character words
"""
sentences = list()
for record in data:
sentence = record['reviewText']
sentence = sentence.lower()
for char in "?.!/;:,":
sentence = sentence.replace(char, '')
sentence = sentence.split(sep=' ')
sentence = [word for word in sentence if len(word) > 1]
sentences.append(sentence)
return sentences
|
91afa65c70be9386a658d0c74f317f647110740b
| 660,997 |
def _hamming_distance(x_param: int) -> int:
"""
Calculate the bit-wise Hamming distance of :code:`x_param` from 0.
The Hamming distance is the number 1s in the integer :code:`x_param`.
:param x_param: A non-negative integer.
:return: The hamming distance of :code:`x_param` from 0.
"""
tot = 0
while x_param:
tot += 1
x_param &= x_param - 1
return tot
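# Added example: 0b1011 has three set bits.
assert _hamming_distance(0b1011) == 3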
|
9508826097c0a08b98d15363505d32ee0b6cc08b
| 283,008 |
def get_shorter_move(move, size):
"""
Given one dimension move (x or y), return the shorter move comparing with opposite move.
The Board is actually round, ship can move to destination by any direction.
Example: Given board size = 5, move = 3, opposite_move = -2, return -2 since abs(-2) < abs(3).
"""
if move == 0:
return 0
elif move > 0:
opposite_move = move - size
else:
opposite_move = move + size
return min([move, opposite_move], key=abs)
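# Added example from the docstring: on a board of size 5, a move of 3 is
# equivalent to -2 in the opposite direction, which is shorter.
assert get_shorter_move(3, 5) == -2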
|
635ee3038a4b78318658288f7876144f82333ebb
| 38,568 |
def changelog2dict(changelog):
"""
Helper turning internal list-o-releases structure into a dict.
See `parse_changelog` docstring for return value details.
"""
return {r["obj"].number: r["entries"] for r in changelog}
|
31bc860dc5e670d096b3895da99a9ab19be46af9
| 590,211 |
def a_return_and_reconnect(ctx):
"""Send new line and reconnect."""
ctx.ctrl.send("\r")
ctx.ctrl.connect(ctx.device)
return True
|
c861146efc2b2bfca01842489c0cb5e22adbfaa0
| 464,889 |
from datetime import datetime
def process_env_date(env_date) -> datetime:
"""Return the specified date. If the input is 'current', return the current date time.
Argument:
env_date - a string obtained from the ENV_DATE environmental variable. It should
contain the reporting dte.
"""
if env_date.lower() == 'current':
dte = datetime.now()
dte = datetime(dte.year, dte.month, dte.day, 0, 0, 0)
else:
dte = datetime.strptime(env_date, "%Y-%m-%d")
return dte
|
e1dc7626e9a0084bceaf1fe0e58fba79ac286d9a
| 173,742 |
def remove_all(fs, path):
"""Remove everything in a directory. Returns True if successful.
:param fs: A filesystem
:param path: Path to a directory
"""
sub_fs = fs.opendir(path)
for sub_path in sub_fs.listdir():
if sub_fs.isdir(sub_path):
sub_fs.removedir(sub_path, force=True)
else:
sub_fs.remove(sub_path)
return fs.isdirempty(path)
|
6b5a5866eca0548ba61ef3787a61042de587b1f9
| 276,113 |
def removesuffix(string: str, suffix: str) -> str:
"""Remove suffix from string, if present."""
return string[: -len(suffix)] if suffix and string.endswith(suffix) else string
|
3c375c633dc7e73fed5898a0886da9f9b545c045
| 407,444 |
def lint_codeblock(styleguide, tmp_path):
"""Fixture which runs the styleguide's `lint` subcommand when executed."""
def run_linter(
codeblock,
*styleguide_args,
# We ignore unused import warnings as most of the codeblocks which produce this warning
# are just demonstrations of bad/good imports. Adding usage of the imports would add
# code to trivial examples, detracting from the interesting lines.
ignore_unused_imports=True,
):
extend_ignore = [
# Undefined name. Defining all the names in each example would detract from the
# interesting lines.
"F821",
# Missing docstring in XYZ.
"D1",
]
if ignore_unused_imports:
extend_ignore.append("F401")
styleguide_args += ("--extend-ignore", ",".join(extend_ignore))
test_file = tmp_path / "test.py"
test_file.write_text(codeblock.contents, encoding="utf-8")
return styleguide("lint", *styleguide_args, test_file)
return run_linter
|
67ad6ed2184c9d30de9d60491463c7ac18fe5690
| 633,983 |
def readMetadata(lines):
"""
Read metadata tags and values from a TNTP file, returning a dictionary whose
keys are the tags (strings between the <> characters) and corresponding values.
The last metadata line (reading <END OF METADATA>) is stored with a value giving
the line number this tag was found in. You can use this to proceed with reading
the rest of the file after the metadata.
"""
metadata = dict()
lineNumber = 0
for line in lines:
lineNumber += 1
        line = line.strip()
        commentPos = line.find("~")
        if commentPos >= 0:  # strip comments
            line = line[:commentPos]
        if len(line) == 0:
            continue
        startTagPos = line.find("<")
        endTagPos = line.find(">")
        if startTagPos < 0 or endTagPos < 0 or startTagPos >= endTagPos:
            print("Error reading this metadata line, ignoring: '%s'" % line)
            continue
        metadataTag = line[startTagPos+1 : endTagPos]
metadataValue = line[endTagPos+1:]
if metadataTag == 'END OF METADATA':
metadata['END OF METADATA'] = lineNumber
return metadata
metadata[metadataTag] = metadataValue.strip()
print("Warning: END OF METADATA not found in file")
return metadata
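# Added example: a minimal TNTP-style metadata block.
_metadata = readMetadata(["<NUMBER OF NODES> 4", "<END OF METADATA>"])
assert _metadata["NUMBER OF NODES"] == "4"
assert _metadata["END OF METADATA"] == 2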
|
e4a93539ccb203b350d17c33fd0daa4cb9afbac9
| 674,327 |
def round_described(described, decimals):
""" For printing purposes the results should be rounded.
This is done with this function and the desired number of
decimals are passed per parameter as an argument.
Parameters
----------
described : DataFrame
The DataFrame you want to print
decimals : dict
Keys are parameters (columns of the described DF) and values
are the number of decimal places you want to see.
Returns
-------
DataFrame
Rounded version of ``described``
"""
for k, v in decimals.items():
described = described.round(
{
(k, "mean"): v,
(k, "std"): min(v + 1, 3),
(k, "std%"): min(v + 1, 3),
(k, "avg_dev"): v,
(k, "min"): v,
(k, "25%"): v,
(k, "median"): v,
(k, "75%"): v,
(k, "max"): v,
}
)
return described.round(
{
("DENSITY", "percentage"): 1,
("DURATION", "in_hours"): 1,
("DURATION", "longest_in_hours"): 1,
("DURATION", "num_splits"): 0,
("num_breakdowns", "per_hour"): 2,
("REFILL", "index"): 0,
("REFILL", "delta_in_hours"): 1,
}
)
|
87db2a4c29514af423686e1f3acf24ab14eb95ef
| 603,999 |
def recur_dictify(frame):
"""
h/t: http://stackoverflow.com/a/19900276/843419
:param frame: a pandas data frame
:return: a nested dictionary with the columns as the keys and the final one
as the value. Best if the keys are arranged and sorted and there are no
duplicates.
"""
if len(frame.columns) == 1:
if frame.values.size == 1:
return frame.values[0][0]
return frame.values.squeeze()
grouped = frame.groupby(frame.columns[0])
    d = {k: recur_dictify(g.iloc[:, 1:]) for k, g in grouped}
return d
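# Added demo (assumes pandas): a two-column frame becomes a flat dict.
import pandas as pd
assert recur_dictify(pd.DataFrame({"k": ["a", "b"], "v": [1, 2]})) == {"a": 1, "b": 2}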
|
ebbfa7ff5ccd578481a8466e8e6dd56b0baa93ba
| 555,473 |
def build_trials_response(trials: list):
"""
Build the response representing a list of trials according to the API specification.
Parameters
----------
trials: list
A list of :class:`orion.core.worker.trial.Trial`.
Returns
-------
A JSON-serializable list of trials as defined in the API specification.
"""
response = []
for trial in trials:
response.append({"id": trial.id})
return response
|
2857a9b410f662d1d79ab25ce5dda007ea1e6c8a
| 467,895 |
import html
def _unescape(text: str) -> str:
"""Unescape Html Script."""
if text and isinstance(text, str):
return html.unescape(text)
return text
|
1d833c11d64052cf2c2b5dec33c35d47d904c73e
| 244,514 |
def random_walk(game):
"""
Solves MasterMind by throwing out random codes.
    On average takes about 5 times n guesses, where n
is the number of possible solutions of the Mastermind Game.
For 6\ :sup:`4`, this means about 6000 guesses.
Returns the solution translated back into the game colors.
"""
trial = []
i = 0
while trial != game.challenge:
trial = game.create_code()
#print(trial)
i += 1
return [game.colordict[trial[_]] for _ in game.slots], i
|
e108c821abf6ba54400c5170d006bd2948d5f7cd
| 590,624 |
def find_publications_with_tmt(data):
"""
This function looks for publications with text mined terms.
It returns a list of tuples with the publication ID as the first value and the publication source as the second value.
"""
page = []
for publication in data['resultList']['result']:
if publication['hasTextMinedTerms'] == 'Y':
publication = (publication['id'], publication['source'])
page.append(publication)
return page
|
2fca61abc755651eda99039187c6c0a16b7ab981
| 604,599 |
def z(j, k):
"""Calculates equivalence scale. z(2,1) is the equivalence scale for a household with two adults and one child
Parameters
----------
j : float
the number of adults in the household.
k : float
the number of children (under 18 years old).
Returns
-------
type
float
"""
    return (j + 0.7 * k) ** 0.75
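# Added example: two adults and one child give (2 + 0.7) ** 0.75 ~= 2.106.
assert abs(z(2, 1) - 2.106) < 1e-3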
|
23aa9b21d04e0f3a9f7e2494aa7c9187d80c3944
| 470,292 |
def run_unhovered(var):
"""
Calls the unhovered method on the variable, if it exists.
"""
if var is None:
return None
if isinstance(var, (list, tuple)):
for i in var:
run_unhovered(i)
return
f = getattr(var, "unhovered", None)
if f is not None:
f()
|
d5bf9a1bc1cf25b5bf6ad606a71540f6e3864070
| 506,091 |
import pathlib
from typing import List
def parse_go_prot_map(go_prot_map_file: pathlib.Path) -> List[str]:
"""
Parses the go_prot_map file
Returns the list of uniprot protein ids associated with the GO term
Parameters
----------
go_prot_map_file : pathlib.Path
File containing uniprot protein ids for the GO term
Each file name must contain the GO term and each line must be a prot id
Returns
-------
List[str]
The list of protein ids associated with the GO term
"""
with open(go_prot_map_file) as fid:
        prot_list = fid.read().splitlines()
return prot_list
|
0818dfc47d15d4247e1c47b44c46ecb7f042a395
| 303,895 |
def get_compiler_version(ctx):
"""Get the compiler version.
Args:
ctx: Rule context.
Returns:
String: Version string.
"""
return ctx.toolchains["@io_tweag_rules_haskell//haskell:toolchain"].version
|
087b0740545e3c6e862122aaa125387ca6c598f1
| 568,200 |
def get_overtime(row):
"""
Whether or not the game was decided in overtime
"""
return ('OT' in row['Home Score']) or ('OT' in row['Away Score'])
|
c85592541c36f7db557d957b7c7babfd7666e01f
| 59,139 |
from typing import Any
def call_if_func(f: Any, **kwargs) -> Any:
"""Calls function if it's a function
Useful to create objects from functions
if it's an object it just returns the object
"""
return f(**kwargs) if callable(f) else f
|
0926c472b8b476c7a60d23e13dc87033fb7c2c17
| 546,734 |
def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
"""Clean up the test name for display.
By default we strip out the tags in the test because they don't help us
in identifying the test that is run to it's result.
Make it possible to strip out the testscenarios information (not to
be confused with tempest scenarios) however that's often needed to
    identify generated negative tests.
"""
if strip_tags:
tags_start = name.find('[')
tags_end = name.find(']')
if tags_start > 0 and tags_end > tags_start:
newname = name[:tags_start]
newname += name[tags_end + 1:]
name = newname
if strip_scenarios:
tags_start = name.find('(')
tags_end = name.find(')')
if tags_start > 0 and tags_end > tags_start:
newname = name[:tags_start]
newname += name[tags_end + 1:]
name = newname
return name
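# Added example: the bracketed id tag is stripped by default.
assert cleanup_test_name('test_foo[id-1234,smoke]') == 'test_foo'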
|
46427eeac8855fe8e77e8d2f77c32ccedc51283c
| 579,188 |
def fix_indentation(text):
"""Replace tabs by spaces"""
return text.replace('\t', ' '*4)
|
45012dd80215a54d7ed62923e1ab89e530fdd803
| 365,725 |
def column_not_auto_integer_pk(column):
"""
A check for whether a column is an auto incrementing integer used for a primary key.
"""
return not (
column.autoincrement == "auto"
and column.primary_key
and column.type.python_type is int
)
|
bc0b8dc7dafdc933fd08ea9a40ffbc0e6ce6f11c
| 141,268 |
def date2str(dt, format_str=None):
"""
Convert list of datetime objects to legible strings
Parameters
----------
dt : datetime.datetime
Single or list of datetime object(s)
format_str : string
Timestamp string formatting, default: '%Y-%m-%d %H:%M:%S.%f'.
See datetime.strftime documentation for timestamp string formatting
Returns
-------
    time : list of str
Converted timestamps
"""
if format_str is None:
format_str = '%Y-%m-%d %H:%M:%S.%f'
if not isinstance(dt, list):
dt = [dt]
return [t.strftime(format_str) for t in dt]
|
aee6ed28d3a23c02ae05885543170de0854a7e4a
| 64,572 |
import json
def load(config_path: str):
"""
Loads in json data and returns to user, assuming it has already been validated.
:param config_path: the string path to load up
:return: json data (usually dict or list)
"""
with open(config_path, "r") as handle:
# print('load "{}" --> key "{}"'.format(config, key))
return json.load(handle)
|
786afa8a3ed1d83ee0ea95790211e9ad67fb0c28
| 557,371 |
import math
def degrees_to_radians(deg):
"""Converts degrees to radians.
Args:
deg: degrees
Returns:
radians
"""
return deg * math.pi / 180
|
df10298108e85dd92d962351ad0d763821626759
| 60,198 |
import random
def class_colors(names):
"""
Create a dict with one random BGR color for each
class name
"""
return {name: (
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255)) for name in names}
|
a91f84aa1e3012fc553744e941abd5e9aeca9074
| 650,121 |
import csv
def read_raw_data(filename):
"""
Reads raw data from filename and returns it as
list of lists.
filename:
File to read from.
returns:
list of lists of data, where each sub list is
is a row, and each sub list item is a row value
"""
with open(filename, 'r') as raw_data_file:
raw_data_reader = csv.reader(raw_data_file)
raw_data = list(raw_data_reader)
return raw_data
|
6a0b335333617156b4bb5c686b7f5d133cb96b67
| 183,199 |
def rare_species(df, frequency_threshold=10, target_column='species_id', filter_out=True):
"""Operation to filter rare species, i.e species with a frequency that is below a threshold."""
target_serie = df[target_column]
# get frequency of specie for every occurrence
counts = target_serie.map(target_serie.value_counts())
mask = counts < frequency_threshold
if filter_out:
mask = ~mask
return df[mask].copy()
|
bc0ab2025dd82cf2d1ca7d5e963caac37b24e4e0
| 292,790 |
def currency(amount):
"""
Returns the dollar amount in US currency format.
"""
if amount >= 0:
return '${:,.2f}'.format(amount)
else:
return '-${:,.2f}'.format(-amount)
|
95fc422eca552a59aa144406d4b7c32cff4a8cff
| 403,475 |
def parse_readelf_line(x):
"""Return the version from a readelf line that looks like:
0x00ec: Rev: 1 Flags: none Index: 8 Cnt: 2 Name: GLIBCXX_3.4.6
"""
return x.split(':')[-1].split('_')[-1].strip()
|
b92b473fd6d0591312c9a163aee94b55706df8c8
| 121,824 |
def format_messages(messages_dict):
""" Formats input messages in Polymer format to native Android format. This means replacing
hyphens with underscores in keys and escaping apostrophes in values. """
formatted_messages = {}
for k,v in messages_dict.items():
formatted_messages[k.replace("-", "_")] = v.replace("'", "\\'")
return formatted_messages
|
00fe6bfb76ce8e146a16a3bc3f108fc2d1277908
| 34,372 |
def get_field(field_name, shapefile, case_sensitive = True):
"""Return the field in shapefile that corresponds to field_name,
None otherwise.
Inputs:
- field_name: string to look for.
- shapefile: where to look for the field.
- case_sensitive: indicates whether the case is relevant when
comparing field names
Output:
- the field name in the shapefile that corresponds to field_name,
None otherwise."""
layer_count = shapefile.GetLayerCount()
for l in range(layer_count):
layer = shapefile.GetLayer(l)
feature_count = layer.GetFeatureCount()
assert feature_count > 0
feature = layer.GetFeature(0)
field_count = feature.GetFieldCount()
for f in range(field_count):
field_defn = feature.GetFieldDefnRef(f)
if case_sensitive:
if field_defn.GetNameRef() == field_name:
return field_defn.GetNameRef()
# if not case sensitive, convert everything to lower case
else:
if field_defn.GetNameRef().lower() == field_name.lower():
return field_defn.GetNameRef()
return None
|
93af96efe4cee128d0b1e47fc0481a4c9f1240fa
| 502,181 |
def _md_fix(text):
"""
sanitize text data that is to be displayed in a markdown code block
"""
return text.replace("```", "``[`][markdown parse fix]")
|
2afcad61f4b29ae14c66e04c39413a9a94ae30f8
| 705,343 |
import re
def deal_with_punctuation(text: str = '',
punctuation_to_collapse_by: str = '',
punctuation_to_explode_by: str = '') -> str:
"""
Removes punctuation from a string
:param text: original text
:param punctuation_to_collapse_by: punctuation marks to strip
:param punctuation_to_explode_by: punctuation marks to replace with spaces
:return: cleaned text
"""
new_text: str = text
# Prioritise exploding first, these are punctuation marks that the user sets
if punctuation_to_explode_by:
pattern_to_explode_by = re.escape(punctuation_to_explode_by)
new_text = re.sub(rf"[{pattern_to_explode_by}]", " ", new_text)
# Then strip the rest
if punctuation_to_collapse_by:
pattern_to_collapse_by = re.escape(punctuation_to_collapse_by)
new_text = re.sub(rf"[{pattern_to_collapse_by}]", "", new_text)
return new_text
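# Added example: hyphens explode to spaces, then periods are stripped.
assert deal_with_punctuation("co-op inc.", punctuation_to_collapse_by=".",
                             punctuation_to_explode_by="-") == "co op inc"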
|
99b9c8c7de680f35aa806f873c179bb11a676237
| 473,860 |