content (string, 39-9.28k chars) | sha1 (string, 40 chars) | id (int64, 8-710k) |
---|---|---|
from typing import List
def is_tree(pattern: List[str], row: int, col: int) -> bool:
"""Checks if the input pattern has a tree at the input coordinates."""
width = len(pattern[0])
    pat_col = col % (width - 1)  # wrap the column index onto the repeating pattern
    return pattern[row][pat_col] == "#"
|
c93771bc51cc4d175979a54033871bc9a98484a9
| 419,330 |
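A quick usage sketch for is_tree (made-up 4-column pattern; note the function wraps columns modulo width - 1, so the last column acts as padding):
pattern = ["..#.",
           "#...",
           ".#.."]
# Walk a slope of 3 columns per row and count the trees hit:
print(sum(is_tree(pattern, row, row * 3) for row in range(len(pattern))))  # -> 1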
import pickle
def load_pickle(pickle_path):
"""Utility function for loading .pkl pickle files.
Arguments
---------
pickle_path : str
Path to pickle file.
Returns
-------
out : object
Python object loaded from pickle.
"""
with open(pickle_path, "rb") as f:
out = pickle.load(f)
return out
|
436496808be87c92fb6e46bcd4e110984ed64c85
| 684,530 |
def moretrees(a_dict):
"""assumes a_dict is a dictionary of key strings and value ints,
representing country names and number of tree per square kilometer
returns a list of strings, representing the name of countries
with more than 20.000 trees per square kilometer"""
green_contries_list = []
for key, value in a_dict.items():
if value >= 20000:
green_contries_list.append(key)
return green_contries_list
|
c0df584364d4b5361aec48fe84b5075f35a43d47
| 423,717 |
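A usage sketch for moretrees with made-up country names and densities:
densities = {"Arboria": 25000, "Dunefield": 12, "Pinelund": 20000}
print(moretrees(densities))  # -> ['Arboria', 'Pinelund'] (values >= 20000 qualify)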
def test_verilog(request):
"""Test Verilog translation rather than python."""
return request.config.option.test_verilog
|
d9723d68cffde9393b5e46c3da044f0ea7a35eda
| 116,495 |
def search_stdout_contains(captured, find_me, contains_me):
"""Search stdout message for find_me, return true if it contains contains_me"""
for msg in captured.out.split("/n"):
if find_me in msg:
print(f"Found '{find_me}' in '{msg}'")
if contains_me in msg:
print(f"Found '{contains_me}' in '{msg}'")
return True
return False
|
06a4592f6fc51683edce74dda608e9050ea3a8be
| 443,329 |
import torch
def active(loss, threshold=1e-5):
"""
    Computes the fraction of samples that contribute to the loss.
    Args:
        loss: per-sample loss tensor
        threshold: losses above this value count as active
    Returns:
        The fraction of active samples.
"""
return (loss > threshold).sum().to(torch.float) / len(loss)
|
d1a8fe96454232cedafa032b131632700815e028
| 235,873 |
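A minimal sanity check for active (made-up per-sample losses; 1e-7 falls below the default threshold):
import torch
per_sample_loss = torch.tensor([0.0, 0.3, 1e-7, 2.5])
print(active(per_sample_loss))  # tensor(0.5000): 2 of 4 samples exceed 1e-5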
def haspropriety(obj, name):
"""Check if propriety `name` was defined in obj."""
attr = getattr(obj, name, None)
return attr and not callable(attr)
|
fc83254933dba169b9725182d15006cc85d1ef45
| 650,823 |
def indent_text(string, indent_level=2):
"""Indent every line of text in a newline-delimited string"""
indented_lines = []
indent_spaces = ' ' * indent_level
for line in string.split('\n'):
indented_lines.append(indent_spaces + line)
return '\n'.join(indented_lines)
|
b3cd63e1578e638624e49e5bbdc2042de83a856c
| 295,568 |
import json
def json_loads(s):
"""Read JSON in a consistent way."""
return json.loads(s)
|
23b350e0cd2b56a3580944bae3e0fa9bdbd9a747
| 170,651 |
def exclusion_information(obs):
"""
Provides a count and percentage of growthcleanr categories by measurement type (param).
Parameters:
obs: a DataFrame, in the format output by setup_individual_obs_df
Returns:
A DataFrame with the counts and percentages
"""
    exc = (obs.groupby(['param', 'clean_cat'])
              .agg({'id': 'count'})
              .reset_index()
              .pivot(index="clean_cat", columns='param', values='id'))
exc['height percent'] = exc['HEIGHTCM'] / exc['HEIGHTCM'].sum() * 100
exc['weight percent'] = exc['WEIGHTKG'] / exc['WEIGHTKG'].sum() * 100
exc = exc.fillna(0)
exc['total'] = exc['HEIGHTCM'] + exc['WEIGHTKG']
exc = exc[['HEIGHTCM', 'height percent', 'WEIGHTKG', 'weight percent', 'total']]
exc = exc.sort_values('total', ascending=False)
return exc.style.format({'HEIGHTCM': "{:.0f}".format, 'height percent': "{:.2f}%",
'WEIGHTKG': "{:.0f}".format, 'weight percent': "{:.2f}%"})
|
8272dc9fafa8f7cbe67c5e5faa0d8e7f08bcb83f
| 392,808 |
from dateutil.parser import parse
def timestamp(date_time):
"""Convert a date string to number of seconds since 1 Jan 1970 00:00 UTC
    date_time: e.g. "2016-01-27 12:24:06.302724692-08"
"""
t0 = parse("1970-01-01 00:00:00+0000")
t = parse(date_time)
return (t-t0).total_seconds()
|
a736d92f09325252639c0505a894550dd55121f9
| 692,829 |
def a2idx(a):
"""
Tries to convert "a" to an index, returns None on failure.
The result of a2idx() (if not None) can be safely used as an index to
arrays/matrices.
"""
if hasattr(a, "__int__"):
return int(a)
if hasattr(a, "__index__"):
return a.__index__()
|
a0d2216ee1599143daa24540d39cf9aba507aabb
| 116,681 |
def parabolic_fit(Y):
"""Fit a parabola to 3 y values at x=[-1,0,1]; return x0, y0."""
c = Y[1]
a = 0.5 * (Y[0] + Y[2]) - Y[1]
b = 0.5 * (Y[2] - Y[0])
x0 = -b / (2 * a)
    y0 = - b**2 / (4 * a) + c
return x0, y0
|
bb1b7734b411a7f92b3b1b8f20a836143af5c4f4
| 303,850 |
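A sanity check for parabolic_fit, sampling y = (x - 0.5)**2 at x = -1, 0, 1; the vertex is (0.5, 0):
x0, y0 = parabolic_fit([2.25, 0.25, 0.25])
print(x0, y0)  # 0.5 0.0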
def rd(f):
"""Round float to nearest integer"""
    return round(f)
|
8057da2609530bda602556a4104623c736c5da34
| 138,004 |
import torch
def KLD_cost(mu_p, sig_p, mu_q, sig_q):
"""KLD between two gaussian distribution
Parameters
----------
mu_p : :obj:`torch.tensor`
sig_p : :obj:`torch.tensor`
mu_q : :obj:`torch.tensor`
sig_q : :obj:`torch.tensor`
Returns
-------
:obj:`torch.tensor`
KL divergence
"""
KLD = 0.5 * (2 * torch.log(sig_p / sig_q) - 1 + (sig_q / sig_p).pow(2)
+ ((mu_p - mu_q) / sig_p).pow(2)).sum()
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
return KLD
|
e191caab3ae0858e2976813ea0257f462d930a7d
| 367,599 |
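A quick sanity check for KLD_cost: the divergence of a Gaussian against itself is zero (made-up tensors):
import torch
mu, sig = torch.zeros(3), torch.ones(3)
print(KLD_cost(mu, sig, mu, sig))  # tensor(0.)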
def get_generated_cols(X_original, X_transformed, to_transform):
"""
Returns a list of the generated/transformed columns.
Arguments:
X_original: df
the original (input) DataFrame.
X_transformed: df
the transformed (current) DataFrame.
to_transform: [str]
a list of columns that were transformed (as in the original DataFrame), commonly self.cols.
Output:
a list of columns that were transformed (as in the current DataFrame).
"""
original_cols = set(X_original.columns)
current_cols = set(X_transformed.columns)
generated_cols = list(current_cols - (original_cols - set(to_transform)))
return generated_cols
|
8332ac6e33e5519bcb83d65b5acf0989d39aff1a
| 104,015 |
def guess_multi_value(value):
"""
Make the best kind of list from `value`. If it's already a list, or tuple,
do nothing. If it's a value with new lines, split. If it's a single value
without new lines, wrap in a list
"""
if isinstance(value, (tuple, list)):
return value
if isinstance(value, str) and '\n' in value:
return [line.strip() for line in value.split('\n')]
return [value]
|
a742a9f37f84bbcc04f550fc3525a8df0fce43b9
| 629,759 |
def filter_images(candidates):
"""Return only images -- i.e. remove audio files etc."""
filtered_candidates = {}
for c in candidates:
if 'imageinfo' not in c or not c['imageinfo']:
print("Missing imageinfo:", c)
continue
if c['imageinfo'][0]['mime'].startswith('image'):
filtered_candidates[c['pageid']] = c
return filtered_candidates
|
8a359ff25dbf376b665eab9642924e874a14beb0
| 479,321 |
def format_like(s: str, escape='\\') -> str:
""" Format and escape a string for a LIKE or ILIKE substring search. """
    escaped = s.replace(escape, escape + escape).replace('%', escape + '%').replace('_', escape + '_')
    return '%{}%'.format(escaped)
|
07ab19ab27e07c4ccf5e4c041e1c3934702e0de7
| 282,949 |
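A usage sketch for format_like; literal wildcards are escaped so they match themselves (pair with SQL's ESCAPE '\' clause):
print(format_like("50%_off"))  # %50\%\_off%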
def _get_cell(row, name, decimal=False):
"""Retrieves a cell from a row by name."""
for cell in row.cells:
if cell.name == name:
if decimal:
return cell.decimal_value[0]
return cell.string_value[0]
|
16e0c5475121a3d01fdef1ca2f16e3a6e30cccba
| 19,033 |
import json
def load_json(path: str):
"""Loads the json file from 'path' into a list of dicts
Args:
path (str): The path to the json file
Raises:
ValueError: If the provided path does not point to a json file
Returns:
        object: The parsed content of the json file
"""
if not ".json" in path:
raise ValueError("'path' is not pointing to a json file")
data = None
with open(path) as f:
data = json.loads(f.read())
return data
|
7144f06c393863c49c7645d06ed2b37be731bd04
| 548,450 |
def entity_similarity(K, R):
"""
The similarity metric for chains is based on how many common mentions two chains share (Luo, 2005):
similarity = 2 * |K intersects R|/ (|K| + |R|)
:param K: an entity (set of mentions) from the key entities
:param R: an entity (set of mentions) from the response entities
:return: similarity = 2 * |K intersects R|/ (|K| + |R|)
"""
return 2.0 * len(K.intersection(R)) / (len(K) + len(R))
|
0a859edf55936f832f69c35c9d84a0bd51912ec0
| 383,738 |
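A usage sketch for entity_similarity with made-up mention sets:
K = {"Obama", "he", "the president"}
R = {"Obama", "he"}
print(entity_similarity(K, R))  # 2 * 2 / (3 + 2) = 0.8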
from typing import Union
from pathlib import Path
import json
def read_configuration(config_info: Union[str, Path, dict]) -> dict:
"""
read_configuration(config_info)
    Reads configuration information and returns it as a dictionary.
Args:
config_info (str, Path, dict): Configuration information for the
devices to connect to. Information can either be passed directly
            (dict) or can be read from a file (str or Path-like).
"""
if not isinstance(config_info, (str, Path, dict)):
        raise ValueError('Unsupported type for argument "config_info":'
                         ' should be a str/Path object to a JSON file or a'
                         ' dictionary')
if isinstance(config_info, dict):
return config_info
# read equipment info from file
with open(config_info, 'rb') as file:
configuration = json.load(file)
return configuration
|
7d6c3e2b8eb068d34e85fbaacad6ed41a8896fc9
| 351,432 |
def is_any_descriptor(desc):
"""Returns true if it is an Any descriptor."""
return desc.full_name == "google.protobuf.Any"
|
cda9d6cac5bcf556396570abd30a5e4bd88f9288
| 315,448 |
def make_unpack_map(node):
""" Make a mapping of unpack values for a template instance.
Parameters
----------
node : TemplateInstanceNode
The compiler node for the template instantiation.
Returns
-------
result : dict
A dict mapping unpack name to compiler node for the template
instantiation.
"""
return dict(zip(node.names, node.iternodes()))
|
7e48608fd2ca73001ca4bf3d9d8398a344b64efa
| 55,248 |
import ipaddress
def get_prefix_with_offset(base, offset):
"""Compute prefix from base prefix plus offset.
>>> get_prefix_with_offset(ipaddress.ip_network("10.0.0.0/16"), 0)
IPv4Network('10.0.0.0/16')
>>> get_prefix_with_offset(ipaddress.ip_network("10.0.0.0/16"), 3)
IPv4Network('10.3.0.0/16')
"""
return ipaddress.ip_network("{}/{}".format(
base[0] +
base.num_addresses * offset,
base.prefixlen))
|
b781ba077068a101311ed5b3ae461bccf1bca35f
| 604,308 |
import typing
def get_path(action: typing.Dict[str, typing.Any], path_delim: str) -> typing.List[str]:
"""
Get path from action
:param action: action object
:param path_delim: delimiter to be used to split path into keys.
(Not used when path is list)
:return: list of keys
"""
path = action["path"]
if isinstance(path, str):
        return path.split(path_delim)
elif isinstance(path, typing.List) and all(isinstance(key, str) for key in path):
return path
else:
raise TypeError(
"Action {}: path should be str or list of strings".format(action)
)
|
b17a5ee8643b4fbeee07e2733a589f394960b71e
| 131,209 |
def all_awards_are_reviewed(request):
""" checks if all tender awards are reviewed
"""
    return all(award.status != "pending" for award in request.validated["tender"].awards)
|
325fb138db00b8696fa424b2a4c95e1378ddd667
| 29,241 |
import requests
def request(apiEndpoint, query):
"""
    Return the information from a Lisk network for the queried information.
    Parameters
    ----------
    apiEndpoint: str
        A URL that links to the Lisk network of choice (mainnet, testnet, ...),
        see https://lisk.com/documentation/lisk-service/index.html#public-lisk-service-apis.
    query: str
        The information wanted from the api, usually composed of a number of queries,
such as transaction and the associated height.
Returns
-------
list:
A list made of the information coming from the chosen network.
Notes
-----
See the Lisk Service documentation for more details at
https://lisk.com/documentation/lisk-service/index.html.
"""
uri = apiEndpoint + query
try:
response = requests.get(uri)
response.raise_for_status()
response = response.json()
if "data" in response:
return response["data"]
else:
print("Empty data entry.")
return None
    except requests.exceptions.HTTPError:
print("Failed request...")
raise
|
f4e2b830eb8a0705fd5302352981548c00c20743
| 388,644 |
def cap_col(col, q):
"""
Takes in a column
and caps it at the
given quantile
Parameters:
-----------
col : series
A pandas series column
to cap
q : float (ranging 0-1)
The quantile to cap at
Returns
-------
col_capped : series
The column with the capped values
"""
cap_val = col.quantile(q)
    return col.clip(upper=cap_val)
|
828da352b9d3c2e57f1452b05f3d4ae5c965b71b
| 470,762 |
def cpp_escape_name(name):
"""Escape package name to be CPP macro safe."""
return name.replace("-", "_")
|
57f8bb59d46c6888f4f6d2bc7f851f18ca666fc3
| 604,076 |
from typing import List
from typing import Any
def group_with(predicate, xs: List[Any]):
"""Takes a list and returns a list of lists where each sublist's elements are
all satisfied pairwise comparison according to the provided function.
Only adjacent elements are passed to the comparison function
Original implementation here: https://github.com/slavaGanzin/ramda.py/blob/master/ramda/group_with.py
Args:
predicate ( f(a,b) => bool): A function that takes two subsequent inputs and returns True or Fale
xs: List to group
"""
out = []
is_str = isinstance(xs, str)
group = [xs[0]]
for x in xs[1:]:
if predicate(group[-1], x):
group += [x]
else:
out.append("".join(group) if is_str else group)
group = [x]
out.append("".join(group) if is_str else group)
return out
|
cf54cb6dbdf7d217ec3c40b6690c7207067beafc
| 580,405 |
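Usage sketches for group_with covering both the string and list paths:
print(group_with(lambda a, b: a == b, "aaabbc"))               # ['aaa', 'bb', 'c']
print(group_with(lambda a, b: b - a == 1, [1, 2, 3, 10, 11]))  # [[1, 2, 3], [10, 11]]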
def _merge_small_dims(shape_to_merge, max_dim):
"""Merge small dimensions.
If there are some small dimensions, we collapse them:
e.g. [1, 2, 512, 1, 2048, 1, 3, 4] --> [1024, 2048, 12] if max_dim = 1024
[1, 2, 768, 1, 2048] --> [2, 768, 2048]
Args:
shape_to_merge: Shape to merge small dimensions.
max_dim: Maximal dimension of output shape used in merging.
Returns:
Merged shape.
"""
resulting_shape = []
product = 1
for d in shape_to_merge:
if product * d <= max_dim:
product *= d
else:
if product > 1:
resulting_shape.append(product)
product = d
if product > 1:
resulting_shape.append(product)
return resulting_shape
|
807cdd4682b1387405f1b0f35cb7b1858a763357
| 644,189 |
def expand_basic(state):
"""
Simple function which returns child states by appending an available move to
current state.
"""
assert(len(state) < 9)
    # Calculate set difference to get remaining moves.
n = tuple(set(range(9)) - set(state))
# Create tuple of available new states and return to caller.
c = tuple(state + (q,) for q in n)
return c
|
0889a21b043f6f675d133fed6e3c825eb69f4a82
| 6,608 |
def moving_average(time_series, window_size=20, fwd_fill_to_end=0):
"""
Computes a Simple Moving Average (SMA) function on a time series
:param time_series: a pandas time series input containing numerical values
:param window_size: a window size used to compute the SMA
:param fwd_fill_to_end: index from which computation must stop and propagate last value
:return: Simple Moving Average time series
"""
    sma = time_series.rolling(window=window_size).mean()
    if fwd_fill_to_end > 0:
        sma[-fwd_fill_to_end:] = sma.iloc[-fwd_fill_to_end]
    # The moving average is NaN for the first window_size - 1 samples,
    # so backfill those values.
    sma.fillna(method='backfill', inplace=True)
return sma
|
d71931867c419f306824e8b240a9b1bb3fff2fdd
| 702,645 |
def human2bytes(s):
"""Convert from a human readable string to a size in bytes.
Examples
--------
>>> human2bytes('1 MB')
1048576
>>> human2bytes('1 GB')
1073741824
"""
symbols = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
ix = -1 if s[-2].isdigit() else -2
letter = s[ix:].strip().upper()
num = s[:ix].strip()
assert letter in symbols
num = float(num)
prefix = {symbols[0]: 1}
    for i, sym in enumerate(symbols[1:]):
        prefix[sym] = 1 << (i + 1) * 10
return int(num * prefix[letter])
|
b5e8343942f45a5b699998bb55db059f4a78980e
| 334,726 |
def clean_attrib(value):
"""Cleans up value string.
Removes any trailing '_0' that randomly show up
Args:
value (str): attrib value to clean
Returns:
str: cleaned attribute value
"""
clean_value = value
if value.endswith("_0"):
clean_value = clean_value.strip('_0')
return clean_value
|
b97bacf819260b9dce08fb0edcf68e0166838be2
| 629,579 |
def get_loc(index, entry, default):
"""Overloaded `get_loc` method from pd.Series.Index.
If entry is in the `index`, returns the integer location.
If not, returns the location of the default value (default must be in `index`)."""
try:
return index.get_loc(entry)
except KeyError:
return index.get_loc(default)
|
26cb98f336f3c78df2208cb46a5d6f26adc86a3b
| 532,149 |
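A usage sketch for get_loc with a small pandas Index (made-up labels):
import pandas as pd
idx = pd.Index(["a", "b", "c"])
print(get_loc(idx, "b", default="a"))  # 1
print(get_loc(idx, "z", default="a"))  # 0 (falls back to the default's location)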
from typing import Dict
from typing import Any
def get_type(property_description: Dict[str, Any]) -> str:
"""
Translate (json)schema property type to something Python understands
:param property_description: from schema
:return: string containing Python type
"""
type_name = property_description['type']
abstract_type = ''
if 'items' in property_description and 'type' in property_description['items']:
abstract_type = property_description['items']['type']
if type_name == 'array':
return f'List[{abstract_type}]'
if type_name == 'object':
return 'Dict[str, Any]'
if type_name == 'string':
return 'str'
if type_name == 'integer':
return 'int'
return 'str'
|
1da49b9007c761ebb0b9cfaadde3c69c741ec1ef
| 240,415 |
import hashlib
def calculate_md5(path, blocksize=65535):
"""
Calculates the MD5 on a file at the given path
"""
hasher = hashlib.md5()
with open(path, 'rb') as fileobj:
buf = fileobj.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = fileobj.read(blocksize)
return hasher.hexdigest()
|
312bd69f43a6834ac821832ef7f5932a24e115e3
| 480,757 |
def legendre_symbol(a, p):
"""
Legendre symbol
Define if a is a quadratic residue modulo odd prime
http://en.wikipedia.org/wiki/Legendre_symbol
"""
    ls = pow(a, (p - 1) // 2, p)
if ls == p - 1:
return -1
return ls
|
4677c54d41172ecc4b6add122bb612448d1f0653
| 577,717 |
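A sanity check for legendre_symbol modulo the odd prime 7: 2 is a quadratic residue (3*3 = 9 = 2 mod 7), 3 is not:
print(legendre_symbol(2, 7))  # 1
print(legendre_symbol(3, 7))  # -1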
def does_intersect_rect(p, particles, padding, rect, is_3d = False):
"""
Returns true if particle p is sufficiently close or outside the rectangle (in 2d) or cuboid (in 3d)
Parameters
----------
p : list
Coordinates of center and radius of particle [x,y,z,r]
particles : list
List of center + radius of multiple particles. E.g. particles[0] is a list containing coordinates of center and radius.
padding: float
        Minimum clearance required between the particle boundary and the box boundary
rect: list
Coordinates of left-bottom and right-top corner points of rectangle (2d) or cuboid (3d). E.g. [x1 y1, z1, x2, y2, z2]
is_3d: bool
True if we are dealing with cuboid
Returns
-------
bool
True if particle intersects or is near enough to the rectangle
"""
    if len(p) < 4: raise Exception('p = {} must have at least 4 elements'.format(p))
if len(particles) == 0: raise Exception('particles = {} can not be empty'.format(particles))
if padding < 0.: raise Exception('padding = {} can not be negative'.format(padding))
if len(rect) < 6: raise Exception('rect = {} must have 6 elements'.format(rect))
pr = [p[0] - p[3], p[1] - p[3], p[2], p[0] + p[3], p[1] + p[3], p[2]]
if is_3d:
pr[2] -= p[3]
pr[5] += p[3]
if pr[0] < rect[0] + padding or pr[1] < rect[1] + padding or pr[3] > rect[3] - padding or pr[4] > rect[4] - padding:
if is_3d:
if pr[2] < rect[2] + padding or pr[5] > rect[5] - padding:
return True
else:
return True
return False
|
a3300c17c6f9bf3d8f47efac0c94a222f6ee34ef
| 16,638 |
def extract_number_from_money(string):
"""
Extract number from string following this pattern:
$130,321 -> 130321
Also will round to 2 decimal places
"""
try:
trimmed = string.replace(",", "").replace("$", "").replace(" ", "").strip()
number = float(trimmed)
return round(number, 2)
except ValueError:
print("could not parse string")
|
90604b35349a131910ffda53f985ec8d9ee2ab04
| 288,395 |
def is_iterable(item,protect=[str]):
"""
Returns a boolean value describing whether an input
*item* can be considered iterable. *protect* is a
list of types which should be explicitly considered
non-iterable. The default is *[str,unicode].
Types *str* or *unicode* of length 1 are explicitly
protected, as are items of type *dict*. This is
largely a helper function for larger functions
*apply_to_array*, *where_true*, and other iterable
functions.
"""
#protect non iterables from iteration
for type_to_protect in protect:
if isinstance(item,type_to_protect): return False
    # specifically protect strings of length one: they will never trip
    # TypeError just by indexing them over and over
    if isinstance(item, str) and len(item) == 1:
        return False
    if isinstance(item, type):
        return False
    if isinstance(item, dict):
        return False
    try:
        iter(item)
        return True
    except TypeError:
        return False
|
037cb19772acae94631bf569e9571f09801987f3
| 430,798 |
def assign(target, *sources):
"""
Description
----------
    Assign all values from the sources into the target dictionary.\n
Mutates target dictionary.
Parameters
----------
target : dict - target dictionary to assign values to\n
    *sources : dict - dictionaries to pull keys and values from
Returns
----------
dict - dictionary containing keys and values from target and *sources
Example
----------
>>> assign({}, {'a': 1, 'b': 2})
-> {'a': 1, 'b': 2}
"""
if not isinstance(target, dict):
raise TypeError("param 'target' must be a dict")
for source in sources:
if not isinstance(source, dict):
raise TypeError("param 'target' must be a dict")
for dct in sources:
for key in dct:
target[key] = dct[key]
return target
|
a375e694ee6edf9cd83304695e5014280ef6fa54
| 69,930 |
def split_data(data, ratio=0.7):
"""Splits data into test set and training set.
:param data: The object containing data.
:type data: :class:`pandas.DataFrame`.
:param ratio: What portion of data to include in the training set
and the test set. :obj:`0.5` means that the data will be
        distributed equally.
:type ratio: float
"""
threshold = int(len(data) * ratio)
return data[:threshold], data[threshold:]
|
1bbff7080e913b8b5dedfd94f6b7e3f589b62dc8
| 357,461 |
import re
def is_valid_password(password):
"""
Return if a password is valid or not.
    A valid password should have a length of at least 8, contain at least one
digit, one uppercase letter, one lowercase letter and one special character.
"""
try:
is_valid = True
is_valid &= len(password) >= 8
is_valid &= re.search(r'\d', password) is not None
is_valid &= re.search(r'[A-Z]', password) is not None
is_valid &= re.search(r'[a-z]', password) is not None
        is_valid &= re.search(r'[\W_]', password) is not None
return is_valid
except TypeError:
raise TypeError('Password should be string, not %s' % type(password))
|
fccb67a407661b487d588e3ddb12b9ac72c6a59e
| 606,437 |
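A usage sketch for is_valid_password (made-up passwords):
print(is_valid_password("Secret#123"))  # True
print(is_valid_password("weakpass"))    # False: no digit, uppercase or special char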
def sequence_accuracy_score(y_true, y_pred):
"""
Return sequence accuracy score. Match is counted only when the two sequences are equal.
:param y_true: true labels
:param y_pred: predicted labels
    :return: sequence accuracy as a fraction of the sample (1 is perfect, 0 is all incorrect)
"""
total = len(y_true)
matches = sum(1 for yseq_true, yseq_pred in zip(y_true, y_pred) if list(yseq_true) == list(yseq_pred))
return matches / total
|
0c6bfb851dd101258f12df8b9efece543b8280d0
| 163,859 |
import itertools
def group_by(iterable, iteratee):
"""
Splits an iterable into sets, grouped by the result of running each value through iteratee.
If iteratee is a string instead of a function, groups by the property named by iteratee on each of the values.
params: iterable, iteratee
iterable -> a list, tuple, iterator, generator
iteratee -> a function or a lambda, taking single value as input and returning a transformed value on which iterable will be grouped
Returns a dictionary
Examples:
>>> _.group_by([1.3, 2.1, 2.4], lambda x:math.floor(x))
>>> {1: [1.3], 2: [2.1, 2.4]}
"""
    if isinstance(iteratee, str):
        attr_name = iteratee
        iteratee = lambda x: getattr(x, attr_name)
    # itertools.groupby only groups adjacent elements, so sort by the key first
    grouped_iterators = itertools.groupby(sorted(iterable, key=iteratee), iteratee)
    return dict((x, list(y)) for x, y in grouped_iterators)
|
79371a848d3ff290765e34df45f56d65aa74170e
| 582,228 |
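The docstring's own example for group_by, runnable once math is imported:
import math
print(group_by([1.3, 2.1, 2.4], lambda x: math.floor(x)))  # {1: [1.3], 2: [2.1, 2.4]}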
from pathlib import Path
def curr_file_path() -> Path:
"""Get cuurent file path."""
return Path(__file__).absolute()
|
2820bf09cf5027b374c8d1622055eb18d596b87d
| 425,106 |
def hour_to_24(hour, meridian):
"""
Converts a 12-hour representation to a 24-hour representation.
e.g.
12, "AM" => 0
6, "AM" => 6
12, "PM" => 12
3, "PM" => 15
11, "PM" => 23
"""
# first, if it's 12 whatever, subtract the 12 because it's actually 0
if hour == 12:
hour -= 12
# if it's PM, add 12
if meridian == "PM":
hour += 12
return hour
|
0e4fa99e86d7f36841bec0254abfa49f6be372fa
| 588,204 |
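A sweep over the docstring's examples for hour_to_24:
for h, m in [(12, "AM"), (6, "AM"), (12, "PM"), (11, "PM")]:
    print(hour_to_24(h, m))  # 0, 6, 12, 23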
def config(context, key):
"""
Get a value from the config by key
"""
site = context['__CACTUS_SITE__']
result = site.config.get(key)
if result:
return result
return ""
|
155aac652a87d958a4dd662ce91faaaa69be8bbe
| 291,888 |
def handle_storage_class(vol):
"""
    vol: dict (sent from the frontend)
    If the frontend sent the special values `{none}` or `{empty}` then the
backend will need to set the corresponding storage_class value that the
python client expects.
"""
if "class" not in vol:
return None
if vol["class"] == "{none}":
return ""
if vol["class"] == "{empty}":
return None
else:
return vol["class"]
|
a2747b717c6b83bb1128f1d5e9d7696dd8deda19
| 697 |
def default_account(web3):
"""Returns the default account which is used to deploy contracts"""
return web3.eth.defaultAccount
|
f3b5e684fd050e04ff46b508ca516b5047914cd4
| 661,632 |
def get_curve_color(buffer):
"""
    Return the preferred plot color for a given buffer
Parameters
----------
buffer: str
Name of buffer
Returns
-------
str
color name
"""
color = {'NNO': 'green',
'QFM': 'darkorange',
'IW': 'black',
'HM': 'red',
'CoCoO': 'blue',
'ReReO': 'magenta',
'Graphite': 'gray',
'QIF': 'mediumaquamarine',
'SiSiO2': 'purple',
'CrCr2O3': 'teal',
'MoMoO2': 'olive',
'CaCaO': 'peru',
'AlAl2O3': 'chartreuse',
'KK2O': 'deeppink',
'MgMgO': 'maroon',
'MnMnO': 'midnightblue',
'NaNa2O': 'dodgerblue',
'TiTiO2': 'orangered'}
return color[buffer]
|
8b27f7c19da0e24139e3de357058cce71772d90c
| 94,113 |
def handleNum(node, line):
""" Converts the Num node to represent the type of the value (Int, Float). """
return node.n.__class__.__name__.capitalize()
|
b0beb5a6d48696c40dcfdd11e305433f49e40965
| 528,276 |
import random
def _rand_range(start: int, end: int, seed: int | None = None):
"""Generates a random number in a range with an optional seed value.
Args:
start: The starting value
        end: The ending value
seed: Optional seed
Returns:
A random integer between start and end.
"""
    if seed is not None:
random.seed(seed)
return random.randrange(start, end)
|
ddec6f754396debc353d118a0f7294439807a53d
| 342,258 |
import heapq
def heap_sort(arr):
""" Sorts list arr using a heap. """
heapq.heapify(arr)
out = []
    while arr:
        out.append(heapq.heappop(arr))
    return out
|
6c549d43269fb05887e1eee827925ec294a7bca5
| 296,895 |
import re
def get_urls(inputfiles):
"""
This function takes as input the list of files containing the hostnames
and normalizes the format of the hostnames in order to be able to perform
valid HTTP/HTTPS requests.
Args:
inputfiles -- list of inputfiles
Returns:
        urls -- list of normalized URLs which can be queried
"""
urls = []
scheme_rgx = re.compile(r'^https?://')
for ifile in inputfiles:
urls.append(ifile.read().splitlines())
    urls = {n for l in urls for n in l}
urls = list(filter(None, urls))
for i in range(len(urls)):
if not scheme_rgx.match(urls[i]):
urls[i] = 'http://' + urls[i]
return urls
|
abf1c0fdb533f763e4cd4b0bcf0a2a74f436a051
| 102,346 |
def parse(f, comment="#"):
"""
Parse a file in ``.fasta`` format.
:param f: Input file object
:type f: _io.TextIOWrapper
:param comment: Character used for comments
:type comment: str
    :return: names, sequences
:rtype: list[str], list[str]
"""
starter = ">"
empty = ""
if "b" in f.mode:
comment = b"#"
starter = b">"
empty = b""
names = []
sequences = []
name = None
sequence = []
for line in f:
if line.startswith(comment):
continue
line = line.strip()
if line.startswith(starter):
if name is not None:
names.append(name)
sequences.append(empty.join(sequence))
name = line[1:]
sequence = []
else:
sequence.append(line.upper())
if name is not None:
names.append(name)
sequences.append(empty.join(sequence))
return names, sequences
|
fa0da8089656799f669ff1e7472f5f2ced8c0f54
| 440,541 |
from pathlib import Path
from typing import Tuple
def check_for_file_overwrite(file: Path, default_yes: bool = True) -> Tuple[bool, bool]:
"""
Check if a file exists and prompt the user for overwrite
"""
exists = False
abort = False
    if file.exists():
abort = True
exists = True
resp: str
if default_yes is True:
resp = input(f"The file {file} already exists. Overwrite? [Y/n)")
if resp in ["", "Y", "y", "yes"]:
abort = False
else:
resp = input(f"The file {file} already exists. Overwrite? [y/N)")
if resp in ["Y", "y", "yes"]:
abort = False
return abort, exists
|
be982a72c2beafa2bed720cd85d73102dee6f230
| 117,103 |
from typing import Dict
import re
def _load_env(file_handler) -> Dict:
"""Deserializes an environment file like .env-devel and
returns a key-value map of the environment
Analogous to json.load
"""
PATTERN_ENVIRON_EQUAL = re.compile(r"^(\w+)=(.*)$")
# Works even for `POSTGRES_EXPORTER_DATA_SOURCE_NAME=postgresql://simcore:simcore@postgres:5432/simcoredb?sslmode=disable`
environ = {}
for line in file_handler:
m = PATTERN_ENVIRON_EQUAL.match(line)
if m:
key, value = m.groups()
environ[key] = str(value)
return environ
|
cd08b7dce9ed6d20d0fbf507f480771c0e37139e
| 161,073 |
def _generate_site_config(dns_name, site_domain, devstack=False):
""" Generate the site configuration for a given site """
if devstack:
lms_url_fmt = "{domain}-{dns_name}.e2e.devstack"
else:
lms_url_fmt = "{domain}-{dns_name}.sandbox.edx.org"
return {
"lms_url": lms_url_fmt.format(domain=site_domain, dns_name=dns_name),
"platform_name": f"{site_domain}-{dns_name}"
}
|
eb3d1b1a69a08c43ee75f85faf6f6d07b3499900
| 497,888 |
def GenerateFirefoxCommandLine(firefox_path, profile_dir, url):
"""Generates the command line for a process to run Firefox
Args:
firefox_path: String containing the path to the firefox exe to use
profile_dir: String containing the directory of the profile to run Firefox in
url: String containing url to start with.
"""
profile_arg = ''
if profile_dir:
profile_dir = profile_dir.replace('\\', '\\\\\\')
profile_arg = '-profile %s' % profile_dir
cmd = '%s %s %s' % (firefox_path,
profile_arg,
url)
return cmd
|
0fe18e2ecd69cfb75b17e4c7e0f0dedd089a3d2d
| 609,164 |
def binary(x: int, pre: str='0b', length: int=8):
"""
Return the binary representation of integer x
Input:
x: an integer of any size
pre: the prefix for the output string, default 0b
        length: minimum length of the output in binary; used when its representation is shorter,
            default is 8, i.e. 2**8=256 values, a byte
    Return:
        The binary representation of integer x with a minimum length of "length",
        left-padded with 0s
"""
return '{0}{{:{1}>{2}}}'.format(pre, 0, length).format(bin(x)[2:])
|
287e5bb87f31b71ad7ccd1cf65fab729794eeef4
| 25,262 |
def is_left(p0, p1, p2):
"""
Input: three points P0, P1, and P2
Return: > 0 for P2 left of the line through P0 to P1
= 0 for P2 on the line
< 0 for P2 right of the line
"""
p0x, p0y = p0
p1x, p1y = p1
p2x, p2y = p2
# logging.log(level=GEOM_ALG_INFO,
# msg=(p1x - p0x) * (p2y - p0y) - (p2x - p0x) * (p1y - p0y))
return (p1x - p0x) * (p2y - p0y) - (p2x - p0x) * (p1y - p0y)
|
ac84166f613735ebca8b0632c2dc0f4bfd4f310b
| 399,105 |
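A quick orientation check for is_left on the line from (0, 0) to (1, 1):
print(is_left((0, 0), (1, 1), (0, 1)))  # 1  (> 0: left of the line)
print(is_left((0, 0), (1, 1), (2, 2)))  # 0  (on the line)
print(is_left((0, 0), (1, 1), (1, 0)))  # -1 (right of the line)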
import re
def format_missing_carats_units(s: str) -> str:
"""Add missing carats to a malformed unit string.
Will format a string 'm-1' to 'm^-1'.
Parameters
----------
s: str
Unit string.
Returns
-------
str
Formatted unit string.
"""
where = [m.start() for m in re.finditer("[-+][0-9]", s)]
for count, i in enumerate(where):
s = s[: i + count] + "^" + s[i + count :]
where2 = [m.start() for m in re.finditer("[a-z ][0-9]", s)]
for count, i in enumerate(where2):
s = s[: i + count + 1] + "^" + s[i + count + 1 :]
return s
|
321a3503588a2ac0da324fab67ab8244680532d6
| 173,220 |
def offset(targets,yof,xof):
"""
    Offset targets (y, x, ...) found in a sub-image by (yof, xof)
"""
ret = []
for t in targets:
ofs = (t[0]+yof,t[1]+xof,t[2],t[3])
ret.append(ofs)
return ret
|
844bf9eaa0d71f5e0de45e9b0f9a07ca26f7938d
| 297,215 |
def get_resize_dimensions(original_size, dimensions):
"""
Gets the ideal resize dimensions given the original size.
:param original_size: Original size.
:param dimensions: Default target dimensions.
:return: Ideal dimensions.
"""
dim_x, dim_y = dimensions
img_x, img_y = original_size
if img_x >= img_y:
return int(dim_x), int(img_y * (dim_x / (img_x * 1.0)))
else:
return int(img_x * (dim_y / (img_y * 1.0))), int(dim_y)
|
fdcebf6e571ea7306c3f951749a9745343e5f73f
| 297,574 |
def strip(pre, s):
"""Strip prefix 'pre' if present.
"""
if s.startswith(pre):
return s[len(pre):]
else:
return s
|
4fd3a227d2404ed1fdcdfcbea628aad1170cc278
| 654,844 |
def is_odd(nr):
"""Confirms that a number is odd
Args:
nr (float): the number
Returns:
bool: True if the number is odd, False otherwise
"""
if nr % 2 == 0:
return False
else:
return True
|
5ca2f0d69f9472a3810c956cfce91dcb6ccb315a
| 580,179 |
def get_images(directory):
"""
Gets all PNG, JPG, GIF, and BMP format files at path
:param Path directory: location to search for image files
"""
files = []
patterns = ('*.png', '*.jpg', '*.gif', '*.bmp')
for pattern in patterns:
files.extend(directory.glob(pattern))
return files
|
e6c599814d5d31de5a64e502d88f40996d49888f
| 45,841 |
def tick2milisecond(tempo: int, ticks_per_beat: int) -> float:
"""calculate how many miliseconds are in one tick"""
return tempo / (1000 * ticks_per_beat)
|
6de882bfebcc17ff4964cbe4ba986dc3eff85663
| 366,842 |
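A usage sketch for tick2milisecond, assuming tempo is in microseconds per beat as in standard MIDI (500000 is the 120 BPM default):
print(tick2milisecond(500000, 480))  # ~1.0417 ms per tick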
def _ci_to_hgvs_coord(s, e):
""" Convert continuous interbase (right-open) coordinates (..,-2,-1,0,1,..) to
discontinuous HGVS coordinates (..,-2,-1,1,2,..)
"""
def _ci_to_hgvs(c):
return c + 1 if c >= 0 else c
return (None if s is None else _ci_to_hgvs(s),
None if e is None else _ci_to_hgvs(e) - 1)
|
a25cee5f587b59fd52779f6eeba363f3ddda77b0
| 686,444 |
def own_commit(repo, commit):
"""Return true iff commit appears to be from the repo owner."""
return not commit.author or commit.author == repo.owner or commit.author.login == 'web-flow'
|
21b33d0c8b62d5e5e546ac1b5067754188747588
| 601,082 |
def getMTevents(MTs):
"""
This function takes in the `BSE_RLOF` output category from COMPAS, and returns the information
on the Mass Transfer (MT) events that happen for each seed. The events do not have to be in order,
either chronologically or by seed, this function will reorder them as required.
OUT:
tuple of (returnedSeeds, returnedEvents, returnedTimes)
returnedSeeds (list): ordered list of the unique seeds in the MTs file
returnedEvents (list): list of sublists, where each sublist contains all the MT events for a given seed.
MT event tuples take the form :
(stellarTypePrimary, stellarTypeSecondary, isRlof1, isRlof2, isCEE)
returnTimes (list): is a list of sublists of times of each of the MT events
"""
mtSeeds = MTs['SEED'][()]
mtTimes = MTs['Time<MT'][()]
mtPrimaryStype = MTs['Stellar_Type(1)<MT'][()]
mtSecondaryStype = MTs['Stellar_Type(2)<MT'][()]
mtIsRlof1 = MTs['RLOF(1)>MT'][()] == 1
mtIsRlof2 = MTs['RLOF(2)>MT'][()] == 1
mtIsCEE = MTs['CEE>MT'][()] == 1
# We want the return arrays sorted by seed, so sort here.
mtSeedsInds = mtSeeds.argsort()
mtSeeds = mtSeeds[mtSeedsInds]
mtTimes = mtTimes[mtSeedsInds]
mtPrimaryStype = mtPrimaryStype[mtSeedsInds]
mtSecondaryStype = mtSecondaryStype[mtSeedsInds]
mtIsRlof1 = mtIsRlof1[mtSeedsInds]
mtIsRlof2 = mtIsRlof2[mtSeedsInds]
mtIsCEE = mtIsCEE[mtSeedsInds]
# Process the MT events
returnedSeeds = [] # array of seeds - will only contain seeds that have MT events
returnedEvents = [] # array of MT events for each seed in returnedSeeds
returnedTimes = [] # array of times for each event in returnedEvents (for each seed in returnedSeeds)
lastSeed = -1 # initialize most recently processed seed
for seedIndex, thisSeed in enumerate(mtSeeds): # iterate over all RLOF file entries
thisTime = mtTimes[seedIndex] # time for this RLOF file entry
thisEvent = (mtPrimaryStype[seedIndex], mtSecondaryStype[seedIndex],
mtIsRlof1[seedIndex], mtIsRlof2[seedIndex], mtIsCEE[seedIndex]) # construct event tuple
# If this is an entirely new seed:
if thisSeed != lastSeed: # same seed as last seed processed?
returnedSeeds.append(thisSeed) # no - new seed, record it
returnedTimes.append([thisTime]) # initialize the list of event times for this seed
returnedEvents.append([thisEvent]) # initialize the list of events for this seed
lastSeed = thisSeed # update the latest seed
# Add event, if it is not a duplicate
try:
eventIndex = returnedEvents[-1].index(thisEvent) # find eventIndex of this particular event tuple in the array of events for this seed
if thisTime > returnedTimes[-1][eventIndex]: # ^ if event is not a duplicate, this will throw a ValueError
returnedTimes[-1][eventIndex] = thisTime # if event is duplicate, update time to the later of the duplicates
except ValueError: # event is not a duplicate:
returnedEvents[-1].append(thisEvent) # record new event tuple for this seed
returnedTimes[-1].append(thisTime) # record new event time for this seed
return returnedSeeds, returnedEvents, returnedTimes # see above for description
|
3e7bf450cab0dfc17fc3cc8c85e4b7c15d67b82e
| 244,613 |
def setrep(s):
"""Returns a string representation of a set of type strings.
s: set of strings
Returns: string
"""
rep = ', '.join(s)
if len(s) == 1:
return rep
else:
return '(' + rep + ')'
|
fd07214daa97c9c01949bbf3f576fd1573c65335
| 389,917 |
def getter(name, key=None):
"""
Creates a read-only property for the attribute name *name*. If a *key*
function is provided, it can be used to post-process the value of the
attribute.
"""
if not key:
key = lambda x: x
def wrapper(self):
return key(getattr(self, name))
wrapper.__name__ = wrapper.__qualname__ = name
return property(wrapper)
|
1e0e7a500a7fbe55c17b8ba76765c9c7611cc1ba
| 446,431 |
def _GetTopN(objects, n):
"""Returns top n objects with maximum count.
Args:
objects: any object that has count property
n: number of top elements to return
Returns:
        top N elements if there are more than N objects, otherwise all
        elements in sorted (descending count) order.
"""
return sorted(objects, key=lambda o: o.count, reverse=True)[:n]
|
d5739323b7c6027fd241451515b6469f40bb157b
| 526,681 |
from typing import List
from typing import Dict
from typing import Any
def prepare_human_readable_dict_for_pdns(results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
Prepare human-readable dictionary for passive DNS command.
:param results: passive DNS details.
:return: human-readable dict
"""
return [{
'First (GMT)': result.get('firstSeen', ''),
'Last (GMT)': result.get('lastSeen', ''),
'Source': ', '.join(result.get('source', [])),
'Value': result.get('value', ''),
'Collected (GMT)': result.get('collected', ''),
'Record Type': result.get('recordType', ''),
'Resolve': result.get('resolve', ''),
'Resolve Type': result.get('resolveType', ''),
'Record Hash': result.get('recordHash', '')
} for result in results]
|
a1d399fefe23cf4f642289e2745292d2cdce406f
| 157,795 |
def merge_options(global_conf, local_conf, key, *, keep_list_order=False):
"""Merge the conf using override: local conf is prioritary over global.
If keep_list_order is True, list are merged global+local. Might have duplicate.
If false, duplication are removed.
"""
global_keyed_conf = global_conf.get(key) # Could be None
local_keyed_conf = local_conf.get(key) # Could be None
if global_keyed_conf is None or local_keyed_conf is None:
return global_keyed_conf or local_keyed_conf
if isinstance(global_keyed_conf, list):
if keep_list_order:
options = list(global_keyed_conf)
options += local_keyed_conf
return options
options = set(global_keyed_conf)
else:
options = dict(global_keyed_conf)
options.update(local_keyed_conf)
return options
|
5bd6f1f1bece1bee8f05ada5c0b5f32a0a22f931
| 568,236 |
from typing import Literal
from typing import List
def get_foot_marker(foot: Literal["left", "right"]) -> List[str]:
"""Get the names of all markers that are attached ot a foot (left or right)"""
sensors = ["{}_fcc", "{}_toe", "{}_fm5", "{}_fm1"]
return [s.format(foot[0]) for s in sensors]
|
518fbb3f68cbf8622b2bf1fa85f9ecae8008c456
| 701,401 |
def dictHasExact(dict, keys):
""" Verify if dict has exactly what expected """
if len(dict) != len(keys):
return False
for i in keys:
        if i not in dict:
return False
return True
|
a42079f72dd1664136eb7512a1b6a37e20f96c09
| 176,531 |
def time_diff(end, start):
"""Take two times formatted HHMM and return end-start, in hours."""
end = 60 * int(end[:2]) + int(end[2:])
start = 60 * int(start[:2]) + int(start[2:])
return float(end-start) / 60
|
c0d624760e128d48c30268ec81ecd0ae572996a5
| 619,264 |
def append(iterable, data):
"""Appends data to the iterable.
:param iterable: collection of data to transform
:type iterable: list
:param data: any type of data to be appended
"""
iterable.append(data)
return iterable
|
f9a1fecb05baa54a2b8b023d554b56f24a9a1f9e
| 265,638 |
def resolve_job_range(ids):
"""Resolve ranges from a list of ids. Given list of id strings
can contain ranges separated with '-'. For example, '1-10' will
result in a range from 1..10.
:param ids: string or list of strings of ids
:type ids: string or list of strings
:returns: resolved list of ids
:rtype: list of integers
    :raises ValueError: if one of the ids could not be converted to a valid,
positive id
"""
if not isinstance(ids, (list, tuple)):
ids = [ids]
r = []
def to_id(i):
try:
v = int(i)
if v < 0:
raise ValueError("Job ids have to be >= 0!")
return v
        except (TypeError, ValueError):
raise ValueError("Unable to convert '%s' to a job id. A valid"
" job id has to be a number" % i)
for i in ids:
s = i.split("-")
if len(s) == 1:
r.append(to_id(i))
elif len(s) == 2:
start = to_id(s[0])
end = to_id(s[1])
start, end = min(start, end), max(start, end)
r.extend(range(start, end + 1))
else:
raise ValueError("Unable to guess a job range from %s" % i)
return r
|
2497e646fcc8fda113f485247115dc94f6adb5de
| 251,847 |
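A usage sketch for resolve_job_range:
print(resolve_job_range(["1-3", "7"]))  # [1, 2, 3, 7]
print(resolve_job_range("5-3"))         # [3, 4, 5] (reversed ranges are normalized)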
def coord_to_tuple(coordinates):
"""Transform coordinates to a 2 tuple."""
    if isinstance(coordinates, str):
        center = tuple(map(float, coordinates.split(',')))
    elif isinstance(coordinates, dict):
        center = tuple(coordinates.values())
    elif isinstance(coordinates, tuple):
        center = coordinates
    elif isinstance(coordinates, list) and (len(coordinates) == 2):
        center = tuple(coordinates)
else:
print('invalid coordinate type')
center = None
return center
|
8889e5e72b1fcaa58baa4eb4ee48e3c5d011368e
| 287,986 |
def find_concat_dim(da, possible_concat_dims):
""" look for available dimensions in dataaray and pick the one
from a list of candidates
PARAMETERS
----------
da : xarray.DataArray
xmitgcm llc data array
possible_concat_dims : list
list of potential dims
RETURNS
-------
out : str
dimension on which to concatenate
"""
out = None
for d in possible_concat_dims:
if d in da.dims:
out = d
return out
|
fa6bb3bf6774cefd1ea05fa75b9d80ef9fd042bb
| 188,616 |
def _get_raster_extent(src):
"""
extract projected extent from a raster dataset
(min_x, max_x, min_y, max_y)
Parameters
----------
src : gdal raster
Returns
-------
(min_x, max_x, min_y, max_y)
"""
ulx, xres, xskew, uly, yskew, yres = src.GetGeoTransform()
lrx = ulx + (src.RasterXSize * xres)
lry = uly + (src.RasterYSize * yres)
return ulx, lrx, lry, uly
|
49ed0b3c583cbfa5b9ecbc96d94aec42aeba3a32
| 2,014 |
def next_multiple(x: int, k: int = 512) -> int:
"""Calculate x's closest higher multiple of base k."""
if x % k:
x = x + (k - x % k)
return x
|
fbf8cf548851d0c57867292f9ddcfc33de9b03c0
| 696,350 |
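A few spot checks for next_multiple:
print(next_multiple(1000))    # 1024 (next multiple of the default k=512)
print(next_multiple(7, k=4))  # 8
print(next_multiple(8, k=4))  # 8 (already a multiple)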
def _filter_single_by_max(move_data, **kwargs):
"""
    Filters a dataframe, keeping only the rows whose feature value is at most the given value.
Parameters
----------
move_data : dataframe
Dataframe to be filtered.
**kwargs : arguments
- arg1 : feature
- arg2 : value
Returns
-------
dataframe
Filtered dataframe.
"""
return move_data[move_data[kwargs['arg1']] <= kwargs['arg2']]
|
8021f4e474c50598e9a3a6b20b7e421d4ac3a2be
| 540,197 |
def int_to_roman(n):
"""
Convert an integer to its standard Roman Numeral representation
"""
V = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]
S = ["M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I"]
out = ""
for val,sym in zip(V,S):
while n >= val:
out += sym
n -= val
return out
|
3127e7d7097758872f559ef918d790f282320669
| 683,396 |
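A couple of spot checks for int_to_roman:
print(int_to_roman(1994))  # MCMXCIV
print(int_to_roman(2024))  # MMXXIV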
def color_diff_par(pair):
"""
Find the difference between two colors. This is part of `closest_color_parallel`
below, but multiprocessing cannot pickle nested functions.
Parameters
----------
pair : (rgb, pixel)
Check how close `rgb` is to `pixel`.
Returns
-------
int
The difference between `rgb` and `pixel`.
"""
rgb, x = pair
r, g, b = rgb
return abs(r - x[0]) ** 2 + abs(g - x[1]) ** 2 + abs(b - x[2]) ** 2
|
cb982ebe270a7e9311ad7d89a335b62abeb93f32
| 504,290 |
def output_test(filename: str, pattern: str) -> bool: # pylint: disable=unused-argument
"""Test the output.
Always passes if ``pattern == "pass"``. Otherwise, fails.
"""
return pattern == "pass"
|
08306ad22970df3aaf475742ecd270a571b52031
| 465,560 |
import re
def splitExpression(expression):
"""
Parse a license expression into its constituent identifiers.
Arguments:
- expression: SPDX license expression
Returns: array of split identifiers
"""
# remove parens and plus sign
e2 = re.sub(r'\(|\)|\+', "", expression, flags=re.IGNORECASE)
# remove word operators, ignoring case, leaving a blank space
e3 = re.sub(r' AND | OR | WITH ', " ", e2, flags=re.IGNORECASE)
# and split on space
e4 = e3.split(" ")
return sorted(e4)
|
322329e7d3248e4589c52584f84a2be435471cd8
| 485,417 |
from typing import List
from typing import Any
def del_double_elements(l_elements: List[Any]) -> List[Any]:
"""get deduplicated list, does NOT keep Order !
>>> del_double_elements([])
[]
>>> sorted(del_double_elements(['c','b','a']))
['a', 'b', 'c']
>>> sorted(del_double_elements(['b','a','c','b','a']))
['a', 'b', 'c']
>>> sorted(del_double_elements(['x','x','x','y','y']))
['x', 'y']
"""
if not l_elements:
return l_elements
return list(set(l_elements))
|
58e099d9648cb80384b1eee549605abcb805b4c2
| 301,045 |
import json
import importlib
def load_mesh(file_name):
"""Load discretize mesh saved to json file.
For a discretize mesh that has been converted to dictionary and
written to a json file, the function **load_mesh** loads the
json file and reconstructs the mesh object.
Parameters
----------
file_name : str
Name of the json file being read in. Contains all information required to
reconstruct the mesh.
Returns
-------
discretize.base.BaseMesh
A discretize mesh defined by the class and parameters stored in the json file
"""
with open(file_name, "r") as outfile:
jsondict = json.load(outfile)
module_name = jsondict.pop(
"__module__", "discretize"
) # default to loading from discretize
class_name = jsondict.pop("__class__")
mod = importlib.import_module(module_name)
cls = getattr(mod, class_name)
if "_n" in jsondict:
jsondict["shape_cells"] = jsondict.pop(
"_n"
) # need to catch this old _n property here
data = cls(**jsondict)
return data
|
fde5d9adb80c8873433b4753e5d961cd61f79582
| 179,542 |
def cut_neighbor_sequences(seq_s, flanking_i):
"""
cut the flanking sequences
:param seq_s: string, seq
:param flanking_i: size of flanking seq
:return: strings, cut (start), cut (the rest), cut (last)
"""
assert type(seq_s) is str
return seq_s[0:flanking_i], seq_s[flanking_i:-flanking_i], seq_s[-flanking_i:]
|
65f9b1fb45c46e0d968533ff0e81099e0526f571
| 26,510 |
def create_clip_in_selected_slot(creator, song, clip_length=None):
"""
    Create a new clip in the selected slot if none exists, using a
given creator object. Fires it if the song is playing and
displays it in the detail view.
"""
selected_slot = song.view.highlighted_clip_slot
if creator and selected_slot and not selected_slot.has_clip:
creator.create(selected_slot, clip_length, legato_launch=True)
song.view.detail_clip = selected_slot.clip
return selected_slot.clip
|
675b793b346aa40744b36ea30f6828be06bd49be
| 101,143 |
def is_python(filename):
# type: (str) -> bool
"""See if this is a Python file.
Do *not* import the source code.
"""
if not filename.endswith(".py"):
return False
return True
|
9ef454875db37e5a3c5cefa8c6f31fcc49beca44
| 517,661 |