content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def shorten(s, length):
    """
    Shorten `s` to at most `length` characters by truncating and appending
    "...". If `s` already fits, return it unchanged.

    >>> shorten("very long string", 9)
    'very l...'
    >>> shorten("small", 10)
    'small'
    """
    # Fix: doctest expected outputs used double quotes, which never match
    # Python's single-quoted repr, so the doctests always failed.
    if len(s) > length:
        # Reserve 3 characters for the ellipsis so the result is `length` long.
        return s[:length - 3] + '...'
    else:
        return s
|
962d606da1c559da716406656fc32e148676c556
| 80,114 |
def multiply_values(dictionary: dict, num: int) -> dict:
    """Multiply every value in `dictionary` by `num`.

    Args:
        dictionary (dict): subject dictionary (may be None)
        num (int): multiplier

    Returns:
        dict: mapping of keys to scaled values; empty dict when input is None
    """
    if dictionary is None:
        return {}
    scaled = {}
    for key, value in dictionary.items():
        scaled[key] = value * num
    return scaled
|
16eb87d60da64d648113858ba5cb4308137e0a14
| 5,488 |
import json
def format_report(jsn):
    """Return a nicely indented string for a JSON report.

    The JSON comes from the browser/user, so invalid input must not raise:
    instead the raw text is returned below an explanatory message.
    """
    text = jsn.decode('utf-8') if isinstance(jsn, bytes) else jsn
    try:
        parsed = json.loads(text)
    except ValueError:
        return "Invalid JSON. Raw dump is below.\n\n" + text
    return json.dumps(parsed, indent=4, sort_keys=True, separators=(',', ': '))
|
811c1d0a490ff48463abd40800330dafeffd9fcd
| 689,969 |
from functools import reduce
def fuzzy_or(values):
    """
    Apply the fuzzy-or operator across a list of values in [0, 1].

    >>> fuzzy_or([0.5])
    0.5
    >>> fuzzy_or([0.5, 0.5])
    0.75
    >>> fuzzy_or([0.5, 0.5, 0.5])
    0.875
    """
    if min(values) < 0 or max(values) > 1:
        raise ValueError("fuzzy_or expects values in [0,1]")

    def pair_or(acc, nxt):
        # fuzzy-or of two values: complement of the product of complements
        return 1 - (1 - acc) * (1 - nxt)

    return reduce(pair_or, values)
|
0015bb1d13502d55c485ce665606682c9254e578
| 56,969 |
async def index():
    """Serve the index page text pointing clients at the API docs."""
    return "see /docs for the api"
|
a50afb3d6d0f64fd38d5f9f227b2356e5fe753b3
| 649,501 |
import json
def is_bootstrap(event):
    """
    Determine whether the SNS message in `event` was for a bootstrap action.

    Bootstrap (success) events are always plain strings, so json.loads
    raising ValueError means bootstrap. A JSON dict carrying an 'Error'
    key also counts as a bootstrap (failure) event.
    """
    try:
        message = json.loads(event['Records'][0]['Sns']['Message'])
    except ValueError:
        # Not JSON at all -> a bootstrap success string.
        return True
    # Fix: the original fell off the end (implicitly returning None) when the
    # payload was valid JSON but not a dict; return False explicitly.
    if isinstance(message, dict) and message.get('Error'):
        return True
    return False
|
40d66c461428c018420f7a2873dce2b08a60bbb3
| 680,459 |
from typing import Union
from pathlib import Path
def is_dir(dir_path: Union[Path, str]) -> bool:
    """Return True when `dir_path` points at an existing directory."""
    path_obj = Path(dir_path)
    return path_obj.is_dir()
|
6fd5edecc66fe92591ac7fb97d149b6f4bd14890
| 50,974 |
def problem25(nr_of_digits):
    """Problem 25 - 1000-digit Fibonacci number.

    Return the 1-based index of the first Fibonacci term whose decimal
    representation has at least `nr_of_digits` digits.
    """
    # Smallest integer with nr_of_digits digits; compare as ints.
    threshold = 10 ** (nr_of_digits - 1)
    n_1 = 1
    n_2 = 1
    seq = 3
    while True:
        n = n_1 + n_2
        # Fix: the original used float division (n / 10 ** (...)), which
        # raises OverflowError once n exceeds float range -- long before
        # reaching 1000 digits. Integer comparison is exact and safe.
        if n >= threshold:
            break
        n_2 = n_1
        n_1 = n
        seq += 1
    return seq
|
ebc5fac503d4c00d52fe555510cb8d07460a8e3a
| 403,868 |
from typing import Dict
from pathlib import Path
import json
def load_json(filename: str) -> Dict:
    """Read a JSON file from the package-level `metadata` folder.

    Args:
        filename: Name of metadata file

    Returns:
        dict: Dictionary of data
    """
    metadata_dir = Path(__file__).resolve().parent.parent / "metadata"
    raw_text = (metadata_dir / filename).read_text()
    metadata: Dict = json.loads(raw_text)
    return metadata
|
37d9f08344cf2a544c12fef58992d781556a9efd
| 707,369 |
from typing import List
from typing import Tuple
def _format_list(input_list: List, evals_per_gen: int) -> Tuple:
"""
_format_list() takes as input data collected from multiple algorithm runs.
Then, an average is computed and horizontal axis scaling is applied.
The return value is a tuple of two list, each corresponds to a plot axis, ready to be plotted.
"""
run_count = len(input_list)
run_length = len(input_list[0])
for i in input_list:
assert len(i) == run_length, "Runs are of different length, cannot take average"
y_axis = []
for i in range(run_length):
y_axis.append(sum([sigmas[i] for sigmas in input_list]) / run_count)
x_axis = [x*evals_per_gen for x in range(run_length)]
return x_axis, y_axis
|
6ef9a11e13ceaeaae4fc9793aada1f43eb558659
| 76,329 |
def is_invalid_schema(schema, test_value):
    """
    Recursively check `schema` against `test_value`, following dict nesting.

    Returns True when any key required by `test_value` is missing from
    `schema` or maps to a non-matching value.

    >>> is_invalid_schema({"valid_key": "some_value"}, {"valid_key": "some_value"})
    False
    >>> is_invalid_schema({"invalid_key": "some_value"}, {"valid_key": "some_value"})
    True
    >>> is_invalid_schema(
    ...     {"nested": {"valid_key": "some_value", "another_key": "some_value"}},
    ...     {"nested": {"valid_key": "some_value"}}
    ... )
    False
    """
    if not isinstance(test_value, dict):
        # Leaf comparison: mismatching values make the schema invalid.
        return schema != test_value
    for key in test_value:
        if key not in schema:
            return True
        if is_invalid_schema(schema[key], test_value[key]):
            return True
    return False
|
894109fc9910fc708d9a8800e1169d6e00876e0d
| 49,040 |
def avg_list(items):
    """Return the truncated-integer average of a list of numbers (0 if empty)."""
    if not items:
        return 0
    return int(sum(items) / len(items))
|
ef962801092b922594dd1f7c6706803ddd50603e
| 109,558 |
def _check_dimensionality(matrix):
"""
Checks whether the input is a 2D array (matrix) and square.
By @t-kimber.
Parameters
----------
matrix : np.array
The matrix for which the condition should be checked.
Returns
-------
bool :
True if the condition is met, False otherwise.
"""
if len(matrix.shape) != 2:
raise ValueError(f"The input is not a matrix, but an array of shape {matrix.shape}.")
elif matrix.shape[0] != matrix.shape[1]:
raise ValueError("The input is not a square matrix. Failing.")
else:
return True
|
bc36fa4f308cfff772bf145cf4d16f8faa4d8c21
| 518,157 |
def add_doy(ts, tdim="time"):
    """Add coordinate 'doy' day of the year for a 366 days year.

    Every timestamp is mapped onto the day-of-year it would have in a leap
    (366-day) year, so dates from 1 March onwards line up across leap and
    non-leap years.

    Parameters
    ----------
    ts: xarray DataArray
        Timeseries array
    tdim: str, optional
        Name of time dimension (default='time')

    Returns
    -------
    ts: xarray DataArray
        Timeseries array with extra 'doy' coordinate (the input object is
        modified in place and also returned)
    """
    # get original dayofyear
    # create filters: from 1st of March onwards and non leap years
    # add extra day if not leap year and march or later
    t = ts[tdim]
    doy_original = t.dt.dayofyear
    march_or_later = t.dt.month >= 3
    not_leap_year = ~t.dt.is_leap_year
    # Boolean mask adds as 0/1, shifting post-February days by one in
    # non-leap years.
    doy = doy_original + (not_leap_year & march_or_later)
    # rechunk and return new doy as coordinate of the "ts" input variable
    # NOTE(review): .chunk() assumes a dask-backed array -- confirm callers
    # always pass chunked data.
    ts.coords['doy'] = doy.chunk({tdim: -1})
    return ts
|
925f20aaff5dc7d7ec59333b939c3d76c53a65c5
| 280,464 |
def extended_gcd(a, b):
    """Return (g, s, t) with g = gcd(a, b) and a * s + b * t == g."""
    old_s, s = 1, 0
    old_r, r = a, b
    while r != 0:
        quotient = old_r // r
        old_r, r = r, old_r - quotient * r
        old_s, s = s, old_s - quotient * s
    # Recover the second Bezout coefficient from the identity; 0 when b == 0.
    t = (old_r - old_s * a) // b if b else 0
    return old_r, old_s, t
|
edb0365de2057ca52ffdd20e8c5859b62519ff63
| 522,796 |
def is_file_key(path):
    """Return True when the given S3 key names a file (no trailing slash)."""
    if path.endswith('/'):
        return False
    return True
|
f5c7e0d96ba6581e6a8ec62d5765716d25424d6e
| 590,387 |
import torch
def betagamma2xyz(beta, gamma, axis=None):
    """
    Convert angle representation (beta, gamma) to cartesian coordinates.

    Args:
        beta (`torch.FloatTensor`): beta rotations about y axis.
        gamma (`torch.FloatTensor`): gamma rotations about z axis.
        axis (str, optional): cartesian axis ("x", "y" or "z"). If None,
            all three coordinates are returned. Defaults to None.

    Returns:
        A single tensor when `axis` is given, otherwise the (x, y, z) tuple.
    """
    sin_beta = torch.sin(beta)
    x = sin_beta * torch.cos(gamma)
    y = sin_beta * torch.sin(gamma)
    z = torch.cos(beta)
    if axis == "x":
        return x
    if axis == "y":
        return y
    if axis == "z":
        return z
    return x, y, z
|
d2b661e30406f7ec375ee07da724be9e5d01affe
| 364,048 |
def red_channel(image):
    """
    Keep only the red channel of `image`: every pixel's green and blue
    values are set to 0. The image is mutated in place and returned.
    """
    for px in image:
        px.green = 0
        px.blue = 0
    return image
|
cabc1d81a5b15a56692d6d7d67e26662aad31afd
| 593,984 |
def _combine_odds(odds):
"""Combine the odds of different outcomes."""
combined_odds = 1 / (1 / odds).sum(axis=1)
return combined_odds
|
7b2b91c2e9cc484bf1d05fabc95b1561c360ff6a
| 616,185 |
def get_old_field_item(field_update):
    """
    Return the old (key, value) pair for a field_update.

    Per the indexing below, field_update[1] holds the (old, new) values;
    only the old one is kept.
    """
    key, values = field_update[0], field_update[1]
    return (key, values[0])
|
7252b5814b77cef6dc71a8ad0cf26c8b629ccc18
| 346,716 |
def _do_initial_output(writer, df, sheet_name, na_rep, index=True):
    """Write `df` to the given Excel writer and return formatting handles.

    Returns (workbook, worksheet, sheet_name, max_row), where max_row counts
    the data rows plus one header row.
    """
    df.to_excel(writer, sheet_name=sheet_name, na_rep=na_rep, index=index)
    workbook = writer.book
    worksheet = writer.sheets[sheet_name]
    max_row = len(df) + 1
    return (workbook, worksheet, sheet_name, max_row)
|
67b6ad712760d1af6b4b66548db0801615a6738b
| 402,875 |
def is_repo_image(image):
    """
    Check whether the given image has a name, i.e. is a repository image.
    This does not imply that it is assigned to an external repository.

    :param image: Image structure from the Docker Remote API.
    :type image: dict
    :return: ``False`` if the only image name and tag is <none>, or if there
        is no tag at all, ``True`` otherwise.
    :rtype: bool
    """
    repo_tags = image['RepoTags']
    if repo_tags:
        return repo_tags[0] != '<none>:<none>'
    return False
|
75aa72fac2d1a99870b72b59b1f9e8861c82fd79
| 667,547 |
import re
def validate_CEP(CEP: str) -> bool:
    """Validate a CEP (Brazilian postal code).

    Args:
        CEP (str): The CEP string to validate.

    Returns:
        bool: True if CEP is exactly 8 digits, False otherwise.
    """
    # Fix: dropped the re.I flag -- case-insensitivity is meaningless for a
    # digits-only pattern. (Docstring typo "Validade" corrected as well.)
    pattern = re.compile(r'^[0-9]{8}$')
    match = pattern.match(str(CEP))
    return bool(match)
|
d212fb1a3853c5ce5e7ced3d3382eea9f21df560
| 230,392 |
def get_faces(card):
    """Return all faces on this card.

    Single-faced cards (top-level "image_uris") yield a one-element list of
    the card itself; multi-faced cards yield their "card_faces" entries.

    Args:
        card: Scryfall card object

    Raises:
        ValueError: when the layout is not recognised.
    """
    if "image_uris" in card:
        return [card]
    if "card_faces" in card and "image_uris" in card["card_faces"][0]:
        return list(card["card_faces"])
    raise ValueError(f"Unknown layout {card['layout']}")
|
50bc7fbf736694d6067e6997479547db79002972
| 235,482 |
import struct
def get_return_value_bytes(return_value):
    """Return (lower byte, higher byte) tuple created of `return_value`.

    The value is packed as a little-endian signed 16-bit integer, then
    re-read as two unsigned bytes.
    """
    packed = struct.pack('<h', return_value)
    return struct.unpack('<BB', packed)
|
bb3f9d03854d33066c0eb16e34861b413039527f
| 246,366 |
def validate_projectfilesystemlocation_type(projectfilesystemlocation_type):
    """
    Validate ProjectFileSystemLocation type property
    Property: ProjectFileSystemLocation.Type
    """
    # Fix: the valid set must be a tuple, not a bare string. With the string
    # "EFS", the `in` operator performed substring matching, so invalid
    # values such as "E", "F" or "FS" were wrongly accepted (and join()
    # iterated over characters).
    VALID_PROJECTFILESYSTEMLOCATION_TYPE = ("EFS",)
    if projectfilesystemlocation_type not in VALID_PROJECTFILESYSTEMLOCATION_TYPE:
        raise ValueError(
            "ProjectFileSystemLocation Type must be one of: %s"
            % ", ".join(VALID_PROJECTFILESYSTEMLOCATION_TYPE)
        )
    return projectfilesystemlocation_type
|
ccd65a5bf347fa3ee3efdeacfd371ab6dc80a6da
| 240,330 |
def get_winch_eff(eff_pump, eff_o, eff_i, eff_batt=0.95):
    """
    Calculate the total winch efficiency for a given pumping cycle.

    Parameters
    ----------
    eff_pump : float
        Pumping efficiency: total energy divided by reel-out energy.
    eff_o : float
        Generator efficiency for reel-out.
    eff_i : float
        Generator efficiency for reel-in.
    eff_batt : float, optional
        Battery efficiency, impacting the used power during reel-in.
        The default is 0.95.

    Returns
    -------
    float
        Total winch efficiency as defined in Fechner, Uwe & Schmehl, Roland
        (2013), "Model-Based Efficiency Analysis of Wind Power Conversion by
        a Pumping Kite Power System", 10.1007/978-3-642-39965-7_14.
    """
    numerator = eff_o * eff_i * eff_batt - 1 + eff_pump
    denominator = eff_i * eff_batt * eff_pump
    return numerator / denominator
|
23253aba59fc82efe4449b6a8644f67f79987fc8
| 161,107 |
def default_get_serializer_class(self):
    """
    Return the class to use for the serializer, defaulting to
    `self.serializer_class`.

    Override this if you need different serializations depending on the
    incoming request (e.g. admins get full serialization, others basic).
    """
    serializer_cls = self.serializer_class
    assert serializer_cls is not None, (
        "'%s' should either include a `serializer_class` attribute, a `http_method_XXXX_serializer_class`, "
        "or override the `get_serializer_class()` method."
        % self.__class__.__name__
    )
    return serializer_cls
|
9759bce5e65b6c6f3764dcaaf3cb4fa653c4ca67
| 119,202 |
def prevent_sentence_boundary_detection(doc):
    """
    Disable the sentence splitting done by Spacy.
    More info: https://github.com/explosion/spaCy/issues/1032

    :param doc: a Spacy doc
    :return: the same doc, with sentence detection disabled
    """
    # Marking every token as "not a sentence start" turns detection off.
    for tok in doc:
        tok.is_sent_start = False
    return doc
|
92fb37cf80233c13392e7fa36c684af5b9e2d0ec
| 39,535 |
def _individual_settings(update_result):
"""Helper that returns the number of settings that were updated."""
return sum(
len(settings_in_a_group)
for settings_in_a_group in update_result.values()
)
|
02f62841dc6bdbe6546f731fb8267d13e0ae9733
| 281,038 |
def sub(num1: int, num2: int) -> int:
    """Return the absolute difference of two numbers.

    Yields num1 - num2 when num1 is greater than num2, otherwise num2 - num1.

    Args:
        num1 (int): first number
        num2 (int): second number

    Returns:
        int: subtraction of the inputted numbers
    """
    if num1 >= num2:
        return num1 - num2
    return num2 - num1
|
5ea5176c17e892ef1119b9b86477634fe938699c
| 538,860 |
def shortenFEN(fen):
    """Reduce FEN to shortest form (ex. '111p11Q' becomes '3p2Q')"""
    # Collapse runs of '1' longest-first -- same order as the original
    # chained replace calls, so behavior is identical.
    for run_length in range(8, 1, -1):
        fen = fen.replace('1' * run_length, str(run_length))
    return fen
|
ab0a79c686294f35875ebc00fdfddfff604e6d3a
| 471,864 |
def bin_entities(uri_set, delimiter="/", splitpos=-1):
    """Group URIs by splitting each at position `splitpos` of `delimiter`.

    The first part becomes a key; the second part is appended to the list
    stored under that key.

    return: dict {key1: [id11, id12, id13, ...], key2: [...]}
    """
    ent_dict = {}
    for res in uri_set:
        # Split once instead of twice (the original re-split for the id part).
        parts = res.split(delimiter)
        entity = delimiter.join(parts[:splitpos])
        id_ = delimiter.join(parts[splitpos:])
        # setdefault replaces the manual key-existence check.
        ent_dict.setdefault(entity, []).append(id_)
    return ent_dict
|
fcbcddbff909d74fe14fe7cb3a21560c8ca9549a
| 160 |
def get_const_func(const):
    """Build and return a constant function of t."""
    # The returned closure ignores its argument by design.
    # pylint: disable=unused-argument
    def env(t):
        return const

    return env
|
96df0ff2f0e2989e66d229268b649575d4d874f9
| 526,272 |
def check_tr_id_full_coverage(tr_id, trid2exc_dic, regid2nc_dic,
                              pseudo_counts=False):
    """
    Check if each exon of a given transcript ID is covered by > 0 reads.
    If so return True, else False.

    >>> trid2exc_dic = {"t1" : 2, "t2" : 2, "t3" : 1}
    >>> regid2nc_dic = {"t1_e1": 0.4, "t1_e2": 1.2, "t2_e1": 0.4, "t2_e2": 0.0, "t3_e1": 1.6}
    >>> check_tr_id_full_coverage("t1", trid2exc_dic, regid2nc_dic)
    True
    >>> check_tr_id_full_coverage("t2", trid2exc_dic, regid2nc_dic)
    False
    >>> check_tr_id_full_coverage("t3", trid2exc_dic, regid2nc_dic)
    True
    """
    # With pseudo counts every exon carries a baseline of 1, so a value of
    # exactly 1 means "no real reads".
    min_ex_cov = 1 if pseudo_counts else 0
    exon_count = trid2exc_dic[tr_id]
    for ex_nr in range(1, exon_count + 1):
        ex_id = "%s_e%i" % (tr_id, ex_nr)
        if regid2nc_dic[ex_id] == min_ex_cov:
            return False
    return True
|
7f8ec307a6faf09b6d22c9d0d1c63b1d0a181c8e
| 63,248 |
import re
def _regex_search(pattern: str, string: str, group: int):
"""Shortcut method to search a string for a given pattern.
:param str pattern:
A regular expression pattern.
:param str string:
A target string to search.
:param int group:
Index of group to return.
:returns:
Substring pattern matches.
"""
regex = re.compile(pattern)
results = regex.search(string)
if not results:
return False
return results.group(group)
|
c703ac3eed3cbb981586b5a950f071c8535f32a5
| 22,583 |
def tcpdump_read(device, capture_file, protocol='', opts=''):
    """Read the tcpdump packets and deletes the capture file after read

    :param device: lan or wan
    :type device: Object
    :param capture_file: Filename in which the packets were captured
    :type capture_file: String
    :param protocol: protocol to filter. Defaults to ''
    :type protocol: String, Optional
    :param opts: can be more than one parameter but it should be joined with "and" eg: ('host '+dest_ip+' and port '+port). Defaults to ''
    :type opts: String, Optional
    :return: Output of tcpdump read command.
    :rtype: string
    """
    # NOTE(review): capture_file/protocol/opts are interpolated into shell
    # commands unescaped -- callers must pass trusted values only.
    if opts:
        # Append the extra filter expression to the protocol filter.
        protocol = protocol + ' and ' + opts
    # Replay the capture through tcpdump and collect everything printed
    # before the next prompt.
    device.sudo_sendline("tcpdump -n -r %s %s" % (capture_file, protocol))
    device.expect(device.prompt)
    output = device.before
    # Clean-up: the capture file is deleted once it has been read.
    device.sudo_sendline("rm %s" % (capture_file))
    device.expect(device.prompt)
    return output
|
2dfe26aa17e4d27ed87781e7dc7bb377abea2907
| 248,742 |
def ngrams(tokens, min_n, max_n):
    """
    Generate ngrams (word sequences of bounded length) from a token sequence.

    tokens is a list of words.
    min_n is the minimum length of an ngram to return.
    max_n is the maximum length of an ngram to return.
    Returns a list of ngrams (words separated by a space).
    """
    n_tokens = len(tokens)
    return [
        " ".join(tokens[start:stop])
        for start in range(n_tokens)
        for stop in range(start + min_n, min(n_tokens, start + max_n) + 1)
    ]
|
b37f25f2896560f178dbd41b65e86f541c6f7681
| 358,182 |
def normalize_timestamp(value, ndigits=1):
    """
    Round a timestamp to `ndigits` digits and render it as a string.

    This helps make the test suite less sensitive to timing issues caused by
    multitasking, processor scheduling, etc.

    NOTE(review): the value is rounded to `ndigits` but always formatted with
    two decimals -- presumably intentional for stable string widths; confirm
    before changing.
    """
    rounded = round(float(value), ndigits=ndigits)
    return '%.2f' % rounded
|
69230afe840d8237be4b400356bb25e50ae7ccdd
| 469,596 |
def rk4(rhs, initial, t_initial, t_final, dt):
    """Classic fourth-order Runge-Kutta integrator.

    Inputs:
    - rhs: a callable that takes arguments (t, y)
    - initial: initial value
    - t_initial: initial time
    - t_final: final time
    - dt: step size

    Returns:
        The solution computed at the final time.
    """
    t, sol = t_initial, initial
    while t < t_final:
        # Clamp the last step so we land exactly on t_final.
        step = min(dt, t_final - t)
        k1 = rhs(t, sol)
        k2 = rhs(t + step / 2, sol + step / 2 * k1)
        k3 = rhs(t + step / 2, sol + step / 2 * k2)
        k4 = rhs(t + step, sol + step * k3)
        sol = sol + step / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
        t += step
    return sol
|
ae010db5a3a81f201340f4c21207538764bc9cf6
| 538,126 |
def CCT_to_xy_illuminant_D(CCT):
    """
    Convert the correlated colour temperature :math:`T_{cp}` of a
    *CIE Illuminant D Series* to its *xy* chromaticity coordinates, using the
    *Judd, Macadam, Wyszecki, Budde, Condit, Henderson and Simonds* method.

    Parameters
    ----------
    CCT : numeric
        Correlated colour temperature :math:`T_{cp}`, in [4000, 25000].

    Returns
    -------
    tuple
        *xy* chromaticity coordinates.

    Raises
    ------
    ValueError
        If the correlated colour temperature is outside [4000, 25000].

    References
    ----------
    .. [12] **Wyszecki & Stiles**, *Color Science - Concepts and Methods
        Data and Formulae - Second Edition*, Wiley Classics Library Edition,
        published 2000, ISBN-10: 0-471-39918-3, page 145.

    Examples
    --------
    >>> CCT_to_xy_illuminant_D(6504.38938305)  # doctest: +ELLIPSIS
    (0.3127077..., 0.3291128...)
    """
    # Two cubic fits in 1/CCT, selected by temperature band.
    if 4000 <= CCT <= 7000:
        c3, c2, c1, c0 = (-4.607 * 10 ** 9, 2.9678 * 10 ** 6,
                          0.09911 * 10 ** 3, 0.244063)
    elif 7000 < CCT <= 25000:
        c3, c2, c1, c0 = (-2.0064 * 10 ** 9, 1.9018 * 10 ** 6,
                          0.24748 * 10 ** 3, 0.23704)
    else:
        raise ValueError(
            'Correlated colour temperature must be in domain [4000, 25000]!')
    x = (c3 / CCT ** 3 +
         c2 / CCT ** 2 +
         c1 / CCT +
         c0)
    y = -3 * x ** 2 + 2.87 * x - 0.275
    return x, y
|
8a674c3f3f8d0ce11c90478c4169c661a22f1d42
| 201,830 |
def bubble_sort(array, ascending=True):
    """Sort array using the bubble sort algorithm.

    Parameters
    ----------
    array : list
        List to be sorted; can contain any Python objects that can be compared.
    ascending : bool, optional
        If True sort from smallest to largest; False sorts from largest to
        smallest.

    Returns
    -------
    list
        Input array sorted (the input itself is not modified).
    """
    # Copy to avoid mutating the caller's list.
    array = array.copy()
    swapped = True
    # Number of completed sweeps; the tail of the array is already sorted.
    sweep_count = 0
    while swapped:
        swapped = False
        for i in range(len(array) - sweep_count - 1):
            if array[i] > array[i + 1]:
                # Idiomatic tuple swap replaces the temp-variable dance.
                array[i], array[i + 1] = array[i + 1], array[i]
                swapped = True
        sweep_count += 1
    if ascending:
        return array
    # Reverse for a descending-order sort.
    return array[::-1]
|
1d02e5cad5e8cacaa22c0f513dabb438ff4905fd
| 525,640 |
def walk(d, path):
    """Walk dict d using path as a sequential list of keys; return the last value."""
    node = d
    for key in path:
        node = node[key]
    return node
|
3d5d8f36ceaa87b1526a16b1f89e671afc012e97
| 429,877 |
from typing import Callable
def bundle_callables(*callables: Callable) -> Callable:
    """
    Bundle many callables into a single function.
    Callables will be invoked in the order given.
    """
    def bundle_callables_inner(*args, **kwargs):
        """Invoke every bundled callable, in order, with the same arguments."""
        for bundled in callables:
            bundled(*args, **kwargs)

    return bundle_callables_inner
|
f691941d904b064718c124d78c0419db8661c4c7
| 99,009 |
def get_image_urls(post):
    """Return a deduplicated list of URLs for all images in a scraped Post object."""
    urls = set()
    for key, val in post.flat_json_dict.items():
        if "display_url" in key:
            urls.add(val)
    return list(urls)
|
9eb09a0988deb33be4a765732a519c6c77be56fe
| 625,057 |
def clean_up_value(value, deci_place=4, sig_fig=2, only_big=False):
    """
    Round `value` to `deci_place` decimals, or fall back to scientific
    notation with `sig_fig` significant figures when the magnitude is too
    large (or, unless `only_big`, too small). None passes through unchanged.
    """
    if value is None:
        return value
    magnitude = abs(value)
    if only_big is True:
        use_fixed = magnitude < 1000
    else:
        use_fixed = 1e-1 < magnitude < 1000
    if use_fixed:
        return "{:.{}f}".format(value, deci_place)
    return "{:.{}e}".format(value, sig_fig)
|
5cbc654ce4c57e4356c24dc8ef11a4f37a19255a
| 183,446 |
import requests
def send_token_request(form_values, add_headers=None):
    """Sends a request for an authorization token to the EVE SSO.

    Args:
        form_values: A dict containing the form encoded values that should be
            sent with the request
        add_headers: A dict containing additional headers to send

    Returns:
        requests.Response: A requests Response object
    """
    headers = {
        "Content-Type": "application/x-www-form-urlencoded",
        "Host": "login.eveonline.com",
    }
    # Fix: the default for add_headers was a mutable {} -- the classic
    # mutable-default-argument pitfall. None is the safe sentinel; both are
    # falsy, so caller behavior is unchanged.
    if add_headers:
        headers.update(add_headers)
    res = requests.post(
        "https://login.eveonline.com/v2/oauth/token",
        data=form_values,
        headers=headers,
    )
    print("Request sent to URL {} with headers {} and form values: "
          "{}\n".format(res.url, headers, form_values))
    # Surface HTTP errors to the caller immediately.
    res.raise_for_status()
    return res
|
f9c00e7f598db20c69fe6bf44f3df7100ef974b8
| 668,355 |
def get_formatted_time(s):
    """
    Convert a duration in seconds (since the game began) to a
    "minutes:seconds" string.

    :param s: float
    :return: string
    """
    minutes, seconds = divmod(int(s), 60)
    # Fix: seconds are zero-padded, so e.g. 61s renders as "1:01"
    # rather than the ambiguous "1:1".
    return '{0}:{1:02d}'.format(minutes, seconds)
|
484eff6773b379373bbb979393e7fa0a86d925d4
| 196,987 |
def _is_vector_similar(v1, v2, error):
"""
Check if two vectors are similar, within an error threshold
"""
within_err_x = abs(v2.x - v1.x) < error
within_err_y = abs(v2.y - v1.y) < error
within_err_z = abs(v2.z - v1.z) < error
return within_err_x and within_err_y and within_err_z
|
84e2f83907d20bec90a77edf325cff0f0720a4fb
| 359,843 |
from textwrap import dedent
def skip_comment(line):
    """Return True if a comment line should be skipped based on its contents."""
    stripped = dedent(line)
    if not stripped:
        return True
    return "to_remove" in stripped or "uncomment" in stripped.lower()
|
beda4e72faee0b062ddf6a12b3d449c884554127
| 63,191 |
def simple_goal_subtract(goal, achieved_goal):
    """
    Subtract the achieved goal from the desired one, giving how far we still
    are from the desired position.
    """
    assert goal.shape == achieved_goal.shape
    remaining = goal - achieved_goal
    return remaining
|
bf35fa91fa848f070c813ba2313891672f005de9
| 652,170 |
def get_best_pixel(dis_lst, pixels):
    """
    Given parallel lists of distances and pixels, return the pixel with the
    smallest distance from the average red, green, and blue values.

    Input:
        dis_lst (List[float]): distance of each pixel from the RGB averages
        pixels (List[Pixel]): list of pixels to be averaged and compared
    Returns:
        best (Pixel): pixel closest to RGB averages
    """
    shortest_dis = dis_lst[0]
    best = pixels[0]
    # Fix: the original never updated shortest_dis, so every distance was
    # compared against dis_lst[0] and the function returned the *last* pixel
    # under that bound instead of the true minimum. `<=` keeps the
    # "last minimal entry wins" tie behavior.
    for i in range(1, len(dis_lst)):
        if dis_lst[i] <= shortest_dis:
            shortest_dis = dis_lst[i]
            best = pixels[i]
    return best
|
02cc1568c712d142049dfa37fa062d659b5b2ad5
| 379,884 |
def check_index_exists(indexname, es):
    """Return True when `indexname` exists in the Elasticsearch cluster."""
    return bool(es.indices.exists(index=indexname))
|
02b926ee3fd5e2348ab51e68ad8949996d7f0a15
| 74,538 |
def str_join(*strings, sep="") -> str:
    """
    Join multiple strings with an optional separator.
    """
    joined = sep.join(strings)
    return joined
|
6a8c7b5806444b2a5b1843a00d69cf685d6a9e64
| 554,962 |
import time
def generate_timestamp_string() -> str:
    """Generate the current Unix timestamp (seconds since the epoch).

    Returns: current timestamp string
    """
    # Fix: strftime('%s') is a non-portable glibc extension (absent on
    # Windows) and, where it exists, it reinterprets the gmtime struct as
    # *local* time, skewing the value by the UTC offset. time.time() is the
    # portable, correct source of the epoch timestamp.
    return str(int(time.time()))
|
a4f5d7f3c45b8bda0b9f270dd808f4f9fc47bdac
| 177,386 |
def get_message(count, name, deleted=True):
    """Generate a message about records inserted into or deleted from a table.

    Args:
        count (int): The number of records
        name (str): The table name
        deleted (bool): Whether records were deleted rather than inserted
            (default: True)

    Examples:
        >>> print(get_message(5, 'table'))
        Deleted 5 records from table `table`.
        >>> print(get_message(5, 'table', False))
        Inserted 5 records into table `table`.

    Returns
        str: The message
    """
    if deleted:
        verb, prep = 'Deleted', 'from'
    else:
        verb, prep = 'Inserted', 'into'
    return '%s %s records %s table `%s`.' % (verb, count, prep, name)
|
5f0fdaad023f6a6b0c55b33e3014e854e3aadd84
| 96,196 |
def get_controller(self, name):
    """
    Retrieve the controller instance (inside the current system) with the
    provided name, using the current naming conventions.

    :type name: String
    :param name: The name of the controller to retrieve, in underscore
        notation.
    :rtype: Controller
    :return: The controller looked up as `<name>_controller` on the
        currently associated system instance.
    """
    controller_attr = "%s_controller" % name
    return getattr(self.system, controller_attr)
|
4f1f7fcae76e30aad23b0dfb10ebf6bd4aabb076
| 94,971 |
def pyyaml_path_representer(dumper, instance):
    """Helper method to dump :class:`~pathlib.Path` in PyYAML."""
    return dumper.represent_scalar('Path', str(instance))
|
3755b6a581a498658ea3df2ad6420b3e506fd521
| 623,601 |
def get_demisto_severity(severity):
    """
    Maps LogPoint risk_level into Demisto Severity.

    :param severity: LogPoint risk_level
    :return: Demisto Severity level (0 to 4); 0 for unknown levels
    """
    # Dict dispatch replaces the if/elif ladder; .get() keeps the
    # "unknown maps to 0" fallback.
    return {'low': 1, 'medium': 2, 'high': 3, 'critical': 4}.get(severity.lower(), 0)
|
c9cf2bf529984cd27a62cc3055912b80ac066535
| 66,915 |
def quote(s):
    """Returns quoted PO term string, with special PO characters escaped"""
    assert r"\n" not in s, "Translation terms may not include escaped newlines ('\\n'), please use only literal newlines! (in '%s')" % s
    # Escape order matters: backslashes first, then quotes, then newlines.
    escaped = s.replace('\\', '\\\\')
    escaped = escaped.replace('"', '\\"')
    escaped = escaped.replace('\n', '\\n"\n"')
    return '"%s"' % escaped
|
251c096ecd8ef6e92d1d098369020edbe957c7aa
| 327,832 |
import re
def ipv4_range_type(string):
    """ Validates an IPv4 address or address range. """
    ip_format = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
    single = re.match("^{}$".format(ip_format), string)
    ranged = re.match("^{ip_format}-{ip_format}$".format(ip_format=ip_format), string)
    if not (single or ranged):
        raise ValueError
    return string
|
5a4ad5c2dfb06509e99d818c0faab776c338c2e2
| 346,887 |
import sqlite3
def get_lineageos_stats(conn, limit):
    """Retrieve lineageos stats rows (as sqlite3.Row) up to `limit` entries."""
    conn.row_factory = sqlite3.Row
    cur = conn.cursor()
    # Fix: pass `limit` as a bound parameter. The original interpolated it
    # into the SQL with an f-string, which allowed SQL injection.
    cur.execute('select rank, code, count from stats limit ?;', (limit,))
    data = cur.fetchall()
    cur.close()
    # Restore the connection's default row factory.
    conn.row_factory = None
    return data
|
a2b713c445afad610822a53913da23d083d51211
| 292,064 |
def get_fileobject(title, objtype):
    """
    Return the id of the object whose file matches `title`, or False when no
    such object exists in this environment.

    Arguments:
        title -- The filename to be queried.
        objtype -- Image, Document from Wagtail.

    Returns:
        False if the object does not exist in this environment,
        the object's integer ID if it does exist.
    """
    try:
        localobj = objtype.objects.get(file=title)
    except objtype.DoesNotExist:
        return False
    if localobj:
        return localobj.id
    return False
|
d8aaf7e9a35d4b13bd5397d70abc568d00deec5f
| 604,273 |
def range_overlap(range1, range2):
    """
    Determine whether range1 lies within range2 (or is exactly the same).

    :param range range1: a range
    :param range range2: another range
    :rtype: bool
    :return: True when range1 is a subset of range2, False otherwise
    """
    starts_inside = range1.start >= range2.start
    stops_inside = range1.stop <= range2.stop
    return starts_inside and stops_inside
|
3df4edf59ea473ad7b832256443a1e4e8c7e0ce9
| 2,024 |
def designer_pdf_viewer(h, word):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/designer-pdf-viewer/problem

    Given the heights of the 26 letters and a word, compute the area of the
    rectangular selection highlight in mm^2, assuming every letter is 1mm
    wide: the tallest letter in the word times the word length.

    Args:
        h (list): list of the heights of the 26 letters
        word (string): the string word we want the highlighted area for

    Returns:
        int: the area of the highlighted word
    """
    # default=0 keeps the original "empty word -> area 0" behavior.
    tallest = max((h[ord(letter) - 97] for letter in word), default=0)
    return tallest * len(word)
|
d229a602acedf2ff99bc11471259db17648d9f3b
| 476,683 |
def to_n_class(digit_lst, data, labels):
    """Build a subset of the MNIST dataset containing only particular digits.

    Parameters
    ----------
    digit_lst : list
        for example, [0,1,2] or [1, 5, 8]
    data : numpy.array, shape (n_samples, n_features)
    labels : numpy.array or list of str

    Returns
    -------
    numpy.array, list of int
    """
    if not set(digit_lst) <= set(range(10)):
        raise ValueError
    indices = []
    new_labels = []
    for i in range(len(data)):
        for digit in digit_lst:
            # labels hold string digits, so compare against str(digit).
            if labels[i] == str(digit):
                indices.append(i)
                new_labels.append(digit)
    return data[indices], new_labels
|
79652687ec0670ec00d67681711903ae01f4cc87
| 709,682 |
from typing import Any
def is_property(obj: Any) -> bool:
    """Return True if `obj` is exactly the built-in property type."""
    # Deliberately `type(...) is`, not isinstance: property subclasses
    # do not count, matching the original behavior.
    return type(obj) is property
|
ec7286d890f5a9828b9dbfc44165d978c3bf0ecd
| 414,275 |
def cf_cloud(beam, diffuse, cf):
    """
    Correct beam and diffuse irradiance for cloud attenuation at a single
    time, using input clear-sky global and diffuse radiation calculations
    supplied by locally modified toporad or locally modified stoporad

    Args:
        beam: clear-sky global irradiance
        diffuse: clear-sky diffuse irradiance
        cf: cloud attenuation factor - actual irradiance / clear-sky irradiance

    Returns:
        c_grad: cloud corrected global irradiance
        c_drad: cloud corrected diffuse irradiance

    20150610 Scott Havens - adapted from cloudcalc.c
    """
    # NOTE(review): the boolean-mask assignments below imply beam, diffuse
    # and cf are numpy arrays of matching shape -- confirm with callers.
    # define some constants (regime thresholds on the cloud factor)
    CRAT1 = 0.15
    CRAT2 = 0.99
    CCOEF = 1.38
    # middle regime: cloud attenuation, beam ratio is reduced quadratically
    # with distance of cf above CRAT1
    bf_c = CCOEF * (cf - CRAT1)**2
    c_grad = beam * cf
    c_brad = c_grad * bf_c
    c_drad = c_grad - c_brad
    # extensive cloud attenuation (cf <= CRAT1): no beam at all,
    # everything is diffuse
    ind = cf <= CRAT1
    c_brad[ind] = 0
    c_drad[ind] = c_grad[ind]
    # minimal cloud attenuation (cf > CRAT2): no beam ratio reduction;
    # diffuse is scaled directly and beam is the remainder
    # (c_brad is recomputed here but only c_grad/c_drad are returned)
    ind = cf > CRAT2
    c_drad[ind] = diffuse[ind] * cf[ind]
    c_brad[ind] = c_grad[ind] - c_drad[ind]
    return c_grad, c_drad
|
c6f7b6d1534f92beb6cef3b159f21ebba8b69f05
| 241,768 |
from typing import List
import re
def _remove_WhiteSpace(lines: List[str]) -> List[str]:
"""Change white spaces, including eols, to spaces."""
out = []
for line in lines:
out.append(re.subn(r'[\n\r\f\t]', ' ', line)[0])
return out
|
094bbdae1f4161062e3da7286586ba757b73d3e2
| 163,092 |
def _extract_name(line: str) -> str:
"""Accepts a UniProt DE line (string) as input. Returns the name with
evidence tags removed.
"""
tokens = line[19:-2].split(" {")
name = tokens[0]
return name
|
16675f43e27328db3656b51a79681754dad606bf
| 610,469 |
def rreplace(s, old, new, occurrence):
    """
    Replace the last *occurrence* instances of *old* in *s* with *new*,
    working from the end of the string.
    """
    pieces = s.rsplit(old, occurrence)
    return new.join(pieces)
|
fc1cfde420b60c9f4769c31d302272a02f708a0b
| 630,727 |
from typing import List
def mean(xs: List[float]) -> float:
    """Arithmetic mean of *xs* (raises ZeroDivisionError for an empty list)."""
    total = sum(xs)
    return total / len(xs)
|
74aa8641bc7dd7a9e0c7112a2668c11534bfdb4a
| 185,928 |
def create_response_error_message(response):
    """Works out what to say about a response that isn't JSON.

    Builds a message naming the actual content type, and appends the body
    text when it decodes as UTF-8 and is short enough to show.

    :param response: The HTTP response object (needs ``.headers`` and ``.content``).
    :rtype: ``str``"""
    content_type = response.headers["Content-type"]
    try:
        content = response.content.decode()
    except UnicodeDecodeError:
        # Body isn't valid UTF-8 text; report only the content type.
        # (The original bare `except:` silently swallowed *every* exception.)
        content = None
    message = f"Server did not return JSON, it returned {content_type}"
    if content and len(content) < 256:
        message += ":\n" + content
    return message
|
f793a17344aeaa12afeb574611bd1ce9ba4caea2
| 369,242 |
import re
def get_device_id_from_body(body):
    """Extracts the device ID from the given body.

    Arguments:
        body {dict} -- The body as extracted by `extract_body()`

    Returns:
        str -- The device ID if given. If the key is missing, the value is
        None, or the id is malformed, None is returned.
    """
    device_id = body.get('device_id')
    # fullmatch instead of match("^...$"): `$` would also accept a trailing
    # newline, letting "abc-def-1234\n" pass validation.
    if device_id is not None and re.fullmatch(
            r"[a-zA-Z0-9]{3,4}-[a-zA-Z0-9]{3,4}-[a-zA-Z0-9]{4}", device_id):
        return device_id
    return None
|
7b39b8c9df93b2c870e95451357c0e43c1c3f870
| 171,220 |
import socket
def listen(host, port):
    """Create a TCP/IP socket, bind it to (host, port) and listen for
    incoming connections.

    Args:
        host: interface address to bind to.
        port: TCP port to listen on (0 lets the OS pick a free port).

    Returns:
        The listening ``socket.socket``; the caller is responsible for
        closing it.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts: without SO_REUSEADDR a recently closed server
    # socket lingering in TIME_WAIT makes bind() fail with EADDRINUSE.
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    print('[SERVER LOG] starting up on {} port {}'.format(host, port))
    server_address = (host, port)
    sock.bind(server_address)
    sock.listen(1)
    return sock
|
f2b1afc86418d9f0953cf1121fbbc8325a711598
| 557,579 |
def get_synset_by_wnid(wnid, graph):
    """Return the first node of *graph* whose ``wn_id`` equals *wnid*,
    or ``None`` when no node matches."""
    matches = (node for node in graph if node.wn_id == wnid)
    return next(matches, None)
|
0492fa2d5704b1cf03c1fa95b22dabdc8d76400d
| 107,670 |
import re
def add_rolename(filename, rolename):
    """Insert ``-rolename`` before the extension of *filename*.

    A file with an extension, e.g., README.md is converted to README-rolename.md
    A file with no extension, e.g., LICENSE is converted to LICENSE-rolename

    The extension is taken at the last dot; a dot at position 0 only
    (hidden files such as ``.bashrc``) does not count as an extension.

    Fixes over the previous regex version: a rolename containing ``\\1`` no
    longer corrupts the result (the regex replacement concatenated rolename
    into the template, so backreference syntax was interpreted), and names
    containing characters outside ``[\\w.]`` (hyphens, spaces) are handled
    at the true last dot instead of inside the first word-run.
    """
    if filename.find(".", 1) > 0:
        stem, _, ext = filename.rpartition(".")
        return "{}-{}.{}".format(stem, rolename, ext)
    return filename + "-" + rolename
|
8c0def9c1a91dd099d7f167b21dd8fb8191fd510
| 302,851 |
def arithmetic_mean(X):
    r"""Computes the arithmetic mean of the sequence `X`.

    Let:
      * `n = len(X)`.
      * `u` denote the arithmetic mean of `X`.

    .. math::
        u = \frac{\sum_{i = 0}^{n - 1} X_i}{n}
    """
    # Raw docstring (r""") is required: in the original, "\f" in "\frac"
    # was interpreted as a form-feed character, corrupting the rendered math.
    return sum(X) / len(X)
|
cf6f2300442afe961e96a5f10943393cf071eb5b
| 23,566 |
def brightness_from_percentage(percent):
    """Convert a brightness percentage (0-100) to the absolute 0..255 scale."""
    scaled = percent * 255.0
    return scaled / 100.0
|
0ad7a68dc6fd5b18c198289d8066a6ad1eb88534
| 378,384 |
import math
def f(x):
    """
    Evaluate (x^3 - 3x^2 - x + 9) * e^x at the given point.

    Inputs:
        1.x : Input value
    Output: Function value
    """
    polynomial = x*x*x - 3*x*x - x + 9
    return polynomial * (math.exp(x))
|
2148244b7a728005e67ccffd4beb1d6081d3d274
| 520,865 |
def is_fq_local_branch(ref):
    """Tell whether a Git reference names a fully qualified local branch.

    Usage example:
    >>> is_fq_local_branch("refs/heads/master")
    True
    >>> is_fq_local_branch("refs/remotes/origin/master")
    False
    >>> is_fq_local_branch("refs/notes/commits")
    False
    """
    prefix = 'refs/heads/'
    return ref[:len(prefix)] == prefix
|
455e8bc4897155292bb36f359db46952d8d74f95
| 629,442 |
def split(string, delim=' '):
    """Break *string* into a list of pieces separated by *delim*."""
    pieces = string.split(delim)
    return pieces
|
3105517176529abe9aecb1b4f04160d7b1861430
| 444,682 |
import math
def var_y(obliquity_correction):
    """Returns Var Y for the given obliquity correction, in degrees.

    Var Y = tan^2(obliquity_correction / 2), as used in the NOAA
    equation-of-time calculation.
    """
    # Compute the tangent once instead of twice (same value squared).
    half_tan = math.tan(math.radians(obliquity_correction / 2))
    return half_tan * half_tan
|
47c14488da71edcb130e8a64434b6269d06be993
| 50,518 |
def format_y_ticks_as_dollars(plot):
    """Formats y ticks as dollar values with commas and no decimals.

    Args:
        plot: matplotlib.AxesSubplot object.

    Returns:
        The same plot object, with its y tick labels replaced.
    """
    labels = ['${:,.0f}'.format(tick) for tick in plot.get_yticks()]
    plot.set_yticklabels(labels)
    return plot
|
cef37ba91dc44cd4fb9af18b346e846ac9cde02c
| 353,821 |
import re
async def filter_blobs(blobs, target_path):
    """
    Filters out blobs that do not come from target_path.

    Parameters
    ----------
    blobs: A list of candidate blob dicts (with a "name" key) returned from Azure
    target_path: Actual prefix of the blob folder
    """
    # re.escape the prefix: it is a literal path, and characters such as
    # '.', '+' or '(' in it would otherwise be interpreted as regex syntax
    # (e.g. '.' previously matched any character).
    pattern = r"\b" + re.escape(target_path) + r"(?=/)" + r"\b"
    return [blob for blob in blobs if re.search(pattern, blob["name"])]
|
742a600ab72c9cda4d375a3e3c5bae050f14e01b
| 548,789 |
def get_attr_groups(attr_name_file):
    """
    Read attribute names one per line from *attr_name_file* and, based on
    the common 10-character prefix of each line's second whitespace field,
    separate them into attribute groups.

    Returns the list of starting line indices of those groups.
    """
    with open(attr_name_file, 'r') as handle:
        prefixes = [line.split()[1][:10] for line in handle]
    current = prefixes[0]
    group_starts = [0]
    for idx, prefix in enumerate(prefixes[1:], start=1):
        if prefix != current:
            group_starts.append(idx)
            current = prefix
    return group_starts
|
db2338320fa09c7e6b7e864ea42f8244d76337b9
| 687,109 |
import re
def extract_queries_from_explanations(explanation):
    """
    Checks for the existence of a quoted phrase within an explanation.
    Three quote styles are accepted -- double, single, then backtick --
    tried in that order; the first style with any match wins.

    Arguments:
        explanation (str) : explanation text for a labeling decision
    Returns:
        arr : an array of quoted phrases (quotes stripped) or an empty array
    """
    # One loop replaces three copy-pasted findall/strip blocks.
    for pattern in ('"[^"]+"', "'[^']+'", "`[^`]+`"):
        quoted = re.findall(pattern, explanation)
        if quoted:
            # Strip the surrounding quote characters.
            return [phrase[1:-1] for phrase in quoted]
    return []
|
16f2ac94fa5515a17a9e8bf1c09f07f18150e636
| 347,521 |
import random
def random_hex(digits=12):
    """Generate a string of random uppercase hexadecimal digits.

    NOTE: this uses the non-cryptographic ``random`` module; for anything
    security-sensitive use ``secrets.token_hex`` instead.

    Arguments:
        digits: the number of hexadecimal digits to create
    """
    # The original wrapped each single character in its own ''.join call,
    # which is a no-op; one join over a generator suffices.
    return ''.join(random.choice("0123456789ABCDEF") for _ in range(digits))
|
b571013b8e17a08dc35b14bece6bde24f9829813
| 690,746 |
import re
def to_lowercase_alphanum(text):
    """Lowercase *text* and drop every character outside a-z and 0-9."""
    lowered = text.lower()
    keep = "abcdefghijklmnopqrstuvwxyz0123456789"
    return ''.join(ch for ch in lowered if ch in keep)
|
e508e708b964775660a9d188f2ff91e02ea42eaa
| 630,952 |
def parse_df_labels(df):
    """
    Return a dictionary of response name and values from df.

    Expects a single-column DataFrame: the column name is the response
    name and each cell holds a string-encoded Python literal.

    Returns {"resp": <column name>, "vals": <list of evaluated cell values>}.
    """
    # Exactly one column is required (its name is the response label).
    assert(len(df.columns) == 1)
    resp = df.columns[0]
    # NOTE(review): eval() executes arbitrary code from cell contents --
    # unsafe if df comes from untrusted input; ast.literal_eval would be
    # the safe alternative if the cells are plain literals. Confirm callers.
    ls = [eval(val) for val in df[resp]]
    ret_dict = {"resp": resp, "vals": ls}
    return ret_dict
|
3ad9ba22a5b6bb9fbdd5dbb676683ab79a3322e4
| 347,893 |
def get_query_string(request):
    """
    Return the query string if it exists in the request.

    Args:
        request: a mapping (e.g. a WSGI environ dict) that may contain a
            'QUERY_STRING' entry.

    Returns:
        The query string, or '' when the key is absent.
    """
    # Single .get() lookup instead of the membership-test-then-index pair.
    return request.get('QUERY_STRING', '')
|
4af48662cd214f0cfcf9d8739d7626cb17337467
| 607,484 |
def allele_indices_with_num_alts(variant, num_alts, ploidy=2):
    """Returns a list of allele indices configurations with `num_alts` alternates.

    Args:
        variant: third_party.nucleus.protos.Variant. Supplies the candidate
            alternate alleles (via ``variant.alternate_bases``).
        num_alts: int in [0, `ploidy`]. Number of non-reference alleles the
            returned configurations must contain.
        ploidy: int. Only diploid (2) is supported.

    Returns:
        A list of length-`ploidy` tuples, each holding the allele indices of
        a genotype with exactly `num_alts` non-reference alleles.

    Raises:
        ValueError: The domain of `num_alts` is invalid.
        NotImplementedError: `ploidy` is not diploid.
    """
    if ploidy != 2:
        raise NotImplementedError(
            'allele_indices_with_num_alts only supports diploid.')
    if not 0 <= num_alts <= ploidy:
        raise ValueError(
            'Invalid number of alternate alleles requested: {} for ploidy {}'.
            format(num_alts, ploidy))
    num_alt_alleles = len(variant.alternate_bases)
    if num_alts == 0:
        # Homozygous reference is the only zero-alt configuration.
        return [(0, 0)]
    if num_alts == 1:
        # Reference paired with each candidate alternate.
        return [(0, alt) for alt in range(1, num_alt_alleles + 1)]
    # Two alternates: every unordered pair (with repetition) of alt indices.
    return [(first, second)
            for first in range(1, num_alt_alleles + 1)
            for second in range(first, num_alt_alleles + 1)]
|
69edbafd8109ec38c87419b0bc31975c2ad01e4e
| 243,438 |
import yaml
def jinja_filter_toyaml_dict(value) -> str:
    """Jinja filter rendering *value* as block-style (not inline) YAML.

    Args:
        value (dict): value to convert

    Returns:
        Str formatted as Yaml
    """
    rendered = yaml.dump(value, default_flow_style=False)
    return rendered
|
eb3506fd00a9a36390037436a38de36a9ccbe343
| 652,649 |
def commaSplitNum(num: int) -> str:
    """Render *num* with a comma every third digit; floats are accepted too.

    For example: 3 -> "3", 30000 -> "30,000", and 561928301 -> "561,928,301"
    https://stackoverflow.com/a/10742904

    :param int num: number to format, probably just containing digits
    :return: num, but split with commas at every third digit
    :rtype: str
    """
    return format(num, ",")
|
476233cf858e97d43e9874219c86de05840f4920
| 209,256 |
def filter_stories(stories, triggerlist):
    """
    Takes in a list of NewsStory instances.

    Returns: a list of only the stories for which at least one trigger in
    triggerlist fires.
    """
    fired = []
    for story in stories:
        for trigger in triggerlist:
            if trigger.evaluate(story):
                fired.append(story)
                break
    return fired
|
64a16db7f3ef89b74327ce702cb7070741da8cf5
| 527,924 |
import fsspec
def href_exists(href: str) -> bool:
    """Return True if the asset at *href* exists.

    Uses fsspec and its `exists` method:
    https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.exists.
    """
    fs, _, paths = fsspec.get_fs_token_paths(href)
    if not paths:
        return False
    return bool(fs.exists(paths[0]))
|
a6a4dc2c3e283a270cf662308038519e1d643373
| 253,685 |
import torch
def apply_examplewise(fn, x: torch.Tensor, sequence_lengths, time_axis=1):
    """
    Applies a function to each element of x (along batch (0) dimension) and
    respects the sequence lengths along time axis. Assumes that fn does not
    change the dimensions of its input (e.g., norm).

    Args:
        fn: callable taking and returning a tensor of the same shape.
        x: batched input, batch on axis 0.
        sequence_lengths: per-example valid length along ``time_axis``,
            or None to apply ``fn`` to the whole batch at once.
        time_axis: axis holding the (padded) time dimension; may be negative.
    """
    if sequence_lengths is None:
        return fn(x)
    else:
        # Check inputs
        assert time_axis != 0, 'The first axis must be the batch axis!'
        assert len(sequence_lengths) == x.shape[0], (
            'Number of sequence lengths and batch size must match!'
        )
        # Normalize a negative time_axis to a positive index.
        time_axis = time_axis % x.dim()
        # Full slices for the axes between batch and time, so that
        # (b, *selector, slice(l)) indexes example b up to its valid length.
        selector = [slice(None)] * (time_axis - 1)
        # Padded positions stay zero in the output.
        out = torch.zeros_like(x)
        for b, l in enumerate(sequence_lengths):
            s = (b, *selector, slice(l))
            # Keep the batch dimension while processing: fn sees a
            # batch-of-one ([None, ...]) and we take element [0] back out.
            out[s] = fn(x[s][None, ...])[0]
        return out
|
ea5a100c1af502785f4ade090aa8480655a064be
| 169,578 |
import re
def looks_like_adduct(adduct):
    """Return True if the input string has the expected format of a mass-spec
    adduct annotation; non-strings return False.

    '*' characters and surrounding whitespace are stripped before matching.
    """
    if not isinstance(adduct, str):
        return False
    adduct = adduct.strip().replace("*", "")
    # Format 1, e.g. "[2M-H]" or "[2M+Na]+"
    # NOTE(review): the character classes below look suspect -- tokens such
    # as "(Na)" inside [...] are treated as the individual characters
    # '(', 'N', 'a', ')', and "+-0-9" parses as ranges rather than the
    # literal set {'+', '-'} plus digits. The patterns therefore match more
    # loosely than they read; confirm intent before tightening them.
    regexp1 = r"^\[(([0-9]M)|(M[0-9])|(M)|(MBr)|(MCl))[+-0-9][A-Z0-9\+\-\(\)|(Na)|(Ca)|(Mg)|(Cl)|(Li)|(Br)|(Ser)]{1,}[\]0-9+-]{1,4}"
    # Format 2, e.g. "M+Na+K" or "M+H-H20"
    regexp2 = r"^(([0-9]M)|(M[0-9])|(M)|(MBr)|(MCl))[+-0-9][A-Z0-9\+\-\(\)|(Na)|(Ca)|(Mg)|(Cl)|(Li)|(Br)|(Ser)]{1,}"
    return re.search(regexp1, adduct) is not None or re.search(regexp2, adduct) is not None
8f5870b5b52b07eda3e18c1b3989bc31bc8639fa
| 285,202 |
def replace_dict_keys_split(dicts, replace_list_dict):
    """ Replace keys in `dicts` according to :attr:`replace_list_dict`.

    The result is split into :attr:`replaced_dict` (entries whose key was
    renamed) and :attr:`remain_dict` (entries left untouched).

    Parameters
    ----------
    dicts : dict
        Dictionary.
    replace_list_dict : dict
        Mapping from old key to new key.

    Returns
    -------
    replaced_dict : dict
        Entries of `dicts` whose keys appear in `replace_list_dict`,
        stored under the new key names.
    remain_dict : dict
        All other entries, unchanged.

    Examples
    --------
    >>> replace_list_dict = {'a': 'loc'}
    >>> x_dict = {'a': 0, 'b': 1}
    >>> print(replace_dict_keys_split(x_dict, replace_list_dict))
    ({'loc': 0}, {'b': 1})
    """
    # Direct `in dict` membership is O(1); the original built
    # list(replace_list_dict.keys()) for every element (O(n*m)).
    replaced_dict = {replace_list_dict[key]: value
                     for key, value in dicts.items()
                     if key in replace_list_dict}
    remain_dict = {key: value for key, value in dicts.items()
                   if key not in replace_list_dict}
    return replaced_dict, remain_dict
|
d64ae69364bfa608c97449a22962ec8cda74546a
| 308,846 |
import random
def Partition(t, n):
    """Splits a sequence into two random partitions.

    Side effect: shuffles t in place.

    Args:
        t: sequence of values
        n: size of the first partition

    Returns:
        two lists of values
    """
    random.shuffle(t)
    first, second = t[:n], t[n:]
    return first, second
|
45d601e7d3abde35e7f61b370ac1de9bfb68f1c5
| 233,452 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.