content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64)
---|---|---|
def split_comment(line):
    """Split a line into (line, comment), where a comment is
    started by the ! character"""
    res = line.strip().split('!')
    # str.split always returns at least one element, so no empty-list case
    if len(res) == 1: return res[0], ""
    # Re-join with '!' so exclamation marks inside the comment are preserved
    return res[0], '!'.join(res[1:]) | 8cce13cd909ad6d2501324ed5f0f4a6166f3ab02 | 435,297 |
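A quick usage sketch (assumes `split_comment` above is in scope); note that only whitespace at the ends of the whole line is stripped:
code, comment = split_comment("  y = 2 ! inline comment")
print(repr(code), repr(comment))  # 'y = 2 ' ' inline comment'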
def generate_huffman_codes(node, prefix):
"""
Generate Huffman codes for each character by traversing the tree
and assigning '0' to a move towards left child node and '1' to right child node.
Parameters:
node (class: Node): root node of Huffman Tree
prefix (str): starting prefix
Returns:
huffman_codes (dictionary): map of characters to generated Huffman codes
"""
huffman_codes = {}
def generate_codes(node, prefix=""):
if node is None:
return
if node.right_child is None and node.left_child is None:
huffman_codes[node.char] = prefix
generate_codes(node.left_child, prefix + '0')
generate_codes(node.right_child, prefix + '1')
    generate_codes(node, prefix)
return huffman_codes | eeb8de5e1d191a8edd6c3af729516092b5dd662b | 92,903 |
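A minimal usage sketch; the original Node class is not shown in the snippet, so the class below is an assumed stand-in with the attributes the traversal expects:
class Node:
    def __init__(self, char=None, left_child=None, right_child=None):
        self.char = char
        self.left_child = left_child
        self.right_child = right_child

# Tree for a source with frequencies like {'a': 2, 'b': 1, 'c': 1}
root = Node(left_child=Node(char='a'),
            right_child=Node(left_child=Node(char='b'),
                             right_child=Node(char='c')))
print(generate_huffman_codes(root, ""))  # {'a': '0', 'b': '10', 'c': '11'}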
import torch
def reparameterize(mu, logvar):
"""Sample from N(mu, var)
Parameters
----------
mu : :obj:`torch.Tensor`
vector of mean parameters
logvar : :obj:`torch.Tensor`
vector of log variances; only mean field approximation is currently implemented
Returns
-------
:obj:`torch.Tensor`
sampled vector of shape (n_frames, n_latents)
"""
    std = torch.exp(0.5 * logvar)  # logvar is log(sigma^2), so std = exp(logvar / 2)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu) | 4f3f4e7490b9b9f5b3f90251f52d1c8ebf316a14 | 576,732 |
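A short usage sketch of the reparameterization trick above:
mu = torch.zeros(4, 8)      # batch of 4 frames, 8 latents
logvar = torch.zeros(4, 8)  # log(1) -> unit variance
sample = reparameterize(mu, logvar)
print(sample.shape)  # torch.Size([4, 8])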
def crop_center(shape_in, shape_out):
"""Crop input images to desired output shape.
Args:
shape_in (tuple): Input shape.
shape_out (tuple): Output shape.
Returns:
tuple: indices to slice image array.
"""
slices = []
for length, delta in zip(shape_in, shape_out):
assert isinstance(length, int), "shape_in must be a tuple of ints."
assert isinstance(delta, int), "shape_out must be a tuple of ints."
assert length >= delta, "Cropped shape cannot be larger than input shape."
assert (length - delta) % 2 == 0, \
"Cropped shape cannot be centered given input and output shapes."
start = length // 2 - delta // 2
slices.append((start, start + delta))
return tuple(slices) | 6b7bc4a6cb16754560046cae35bddbcfdc8ccc74 | 433,062 |
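Usage sketch, applying the returned indices with numpy (numpy is an assumption here; any indexable array works):
import numpy as np

img = np.arange(36).reshape(6, 6)
slices = crop_center(img.shape, (4, 4))               # ((1, 5), (1, 5))
cropped = img[tuple(slice(a, b) for a, b in slices)]
print(cropped.shape)  # (4, 4)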
import base64
def img_stream_to_b64str(stream, urlsafe=False):
"""
Convert a byte stream of image file to Base64 encoded string.
:param stream: The byte stream of the file.
:param urlsafe: Trigger using URL-Safe format.
:return: Encoded Base64 string.
"""
if urlsafe:
stream_base64 = base64.urlsafe_b64encode(stream)
else:
stream_base64 = base64.standard_b64encode(stream)
stream_base64_string = stream_base64.decode('utf-8')
return stream_base64_string | cc132c0e24f7153beb45ddbc8d66b82dedcc18c0 | 624,775 |
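Usage sketch with an in-memory byte string standing in for a real image stream:
fake_png_bytes = b"\x89PNG\r\n\x1a\n"  # PNG file signature
print(img_stream_to_b64str(fake_png_bytes))  # iVBORw0KGgo=
# urlsafe=True only differs when the output contains '+' or '/'
print(img_stream_to_b64str(fake_png_bytes, urlsafe=True))  # iVBORw0KGgo=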
def extract_single(value):
"""Return a single value if the given value is of len 1."""
try:
if len(value) == 1:
return value[0]
    except Exception:  # objects without len() or indexing fall through unchanged
        pass
return value | 74ff8e35292be420d1f11a6eea38f4a618edb5b9 | 366,960 |
def mean(l: list) -> float:
""" Returns the mean of a list, rounded to two decimal places. """
return round(sum(l)/len(l), 2) | b892833526d584c235d145bd6ae8f1631ef9acde | 136,767 |
def format_date_for_api_request(date_str):
"""
    Formats a date string (YYYY-MM-DD) into 'MM%2FDD%2FYYYY' format.
>>> format_date_for_api_request('2017-10-17')
'10%2F17%2F2017'
"""
year, month, day = date_str.split('-')
return '{}%2F{}%2F{}'.format(month, day, year) | 6b99405f184a36eed0872a3254298bcb85388f5d | 388,232 |
import unicodedata
def deaccent(text):
"""
Remove accentuation from the given string.
"""
norm = unicodedata.normalize("NFD", text)
result = "".join(ch for ch in norm if unicodedata.category(ch) != 'Mn')
return unicodedata.normalize("NFC", result) | d1f3f4261a8cec3246a66f17434d120f4096e51f | 539,662 |
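Usage sketch:
print(deaccent("Šéf chômage café"))  # Sef chomage cafe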
def generate_wiki_redirect_text(redirect_name: str) -> str:
"""Generate wikitext for redirect."""
return f'#REDIRECT [[{redirect_name}]]' | f6e55fa20004d836ea601a1d3966d070273df237 | 11,938 |
import torch
def cos_sim(x, y, epsilon=0.01):
"""
Calculates the cosine similarity between the last dimension of two tensors.
"""
numerator = torch.matmul(x, y.transpose(-1,-2))
xnorm = torch.norm(x, dim=-1).unsqueeze(-1)
ynorm = torch.norm(y, dim=-1).unsqueeze(-1)
denominator = torch.matmul(xnorm, ynorm.transpose(-1,-2)) + epsilon
dists = torch.div(numerator, denominator)
return dists | 84fa92595110680350e0fe5eb7c3ba230e7a0ec1 | 50,294 |
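Usage sketch with small 2-D tensors (assumes `torch` as imported above):
x = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
y = torch.tensor([[1.0, 0.0]])
print(cos_sim(x, y))  # ~[[0.990], [0.0]]; slightly below 1 because of epsilon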
def get_queue(conn, queue_name):
"""
Create a queue with the given name, or get an existing queue with that
name from the AWS connection.
"""
return conn.get_queue(queue_name) | 3984ef179cfc74336067e217795308ed4768e736 | 686,756 |
def sentinel_id(vocabulary, return_value=None):
"""Token ID to use as a sentinel.
By default, we use the last token in the vocabulary.
Args:
vocabulary: a t5.data.vocabularies.Vocabulary
return_value: an optional integer
Returns:
an integer
"""
if return_value is not None:
return return_value
return vocabulary.vocab_size - 1 | 08ad1116b7f41ba7070359675a0133f14b9917bd | 709,168 |
from math import cos, sin, pi
def laglongToCoord(theta: float, phi: float):
"""Convert lagtitude and longitude to xyz coordinate."""
theta, phi = theta/180*pi, phi/180*pi
return sin(theta)*cos(phi), sin(phi), cos(theta)*cos(phi) | 13faf15366badec7718c6fc04b2fdf04dc597927 | 121,689 |
def ip_to_int(ip_str):
"""
Convert an IPv4 string to its integer representation
:param ip_str: IPv4 address string
:return: 32-bit integer corresponding to ip_str
"""
return sum(int(ip) << i for ip, i in zip(ip_str.split('.'), [24, 16, 8, 0])) | 2f537705b8ffb998a022c29ca18aab5aff628fc7 | 454,296 |
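Usage sketch:
print(ip_to_int("192.168.0.1"))  # 3232235521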
import json
def loadpd(infile):
"""Load procpar dictionary from json file"""
with open(infile, 'r') as openfile:
pd = json.load(openfile)
return pd | bf076f60e49e4fe95b9520b0617365d008bf9e23 | 374,382 |
def thcf_partial2(x):
"""Partial derivative of the Three-Hump Camel Function with respect to x2.
"""
partial = x[0] + 2 * x[1]
return partial # Gradient 2 | a340646a00514d28ce7cf6545697f129a11ae847 | 396,468 |
def conlleval_output(results):
"""Create conlleval formated output.
:param results: `dict` The metrics. results should have the following keys.
tokens: `int` The total number of tokens processed.
acc: `float` The token level accuracy.
gold_total: `int` The total number of gold entities.
pred_total: `int` The total number of predicted entities.
        overlap: `int` The number of exact match entities.
precision: `float` The precision of all entities.
recall: `float` The recall of all entities.
f1: `float` The f1 score of all entities.
types: `List[dict]` A list of metrics for each entity type. Keys should include:
ent: `str` The name of the entity.
precision: `float` The precision of this entity type.
            recall: `float` The recall of this entity type.
f1: `float` The f1 score of this entity type.
count: `int` The number of predicted entities of this type.
:returns: `str` The formatted string ready for printing.
Note:
        Both the metrics in the results dict and acc are expected to already be
        multiplied by 100. The result won't look correct and a lot of the
        metrics will be cut off if they are not.
        Metrics per type are output in the order they appear in the list.
        conlleval.pl outputs the types in sorted order. To match this, the list
        in `results['types']` should be sorted.
"""
s = "processed {tokens} tokens with {gold_total} phrases; found: {pred_total} phrases; correct: {overlap}.\n" \
"accuracy: {acc:>{length}.2f}%; precision: {precision:>6.2f}%; recall: {recall:>6.2f}%; FB1: {f1:>6.2f}\n"
t = []
longest_ent = max(len(max(results['types'], key=lambda x: len(x['ent']))['ent']), 17)
for type_metric in results['types']:
t.append("{ent:>{longest_ent}}: precision: {precision:>6.2f}%; recall: {recall:>6.2f}%; FB1: {f1:>6.2f} {count}".format(longest_ent=longest_ent, **type_metric))
s = s + "\n".join(t)
s = s.format(length=longest_ent - 11, **results)
return s | a1e49c843ea63441257311bb01a1e7a5e333e7d5 | 412,237 |
def is_mca(config):
"""Returns whether or not the configured account is an MCA."""
return config.get('isMCA', False) | b2bdc2e8dcda528b35151c10e350c809789c65ec | 365,028 |
def ternary_point_kwargs(
alpha=1.0,
zorder=4,
s: float = 25,
marker="X",
):
"""
Plot point to a ternary figure.
"""
return dict(
alpha=alpha,
zorder=zorder,
s=s,
marker=marker,
) | 89b930fb68710417fb3b41d1e97fa51b6f9f808d | 230,108 |
def augment_features(data, feature_augmentation):
"""
Augment features for a given data matrix.
:param data: Data matrix.
:param feature_augmentation: Function applied to augment the features.
:return: Augmented data matrix.
"""
if data is not None and feature_augmentation is not None:
if isinstance(feature_augmentation, list):
for augmentation_function in feature_augmentation:
data = augmentation_function(data)
else:
data = feature_augmentation(data)
return data | 687a7ff2a4b61131f5d95e1f7d6eb77d75bd6f06 | 707,291 |
def spot_check_by_query(myinca, query, n_examples=5):
"""Spot-check a sample of docs.
Args:
myinca (object): INCA instance
query (dict): elasticsearch query
n_examples (int): number of examples to return
Returns:
list of documents (dict)
"""
generator = myinca.database.document_generator(query=query)
docs = []
for n, doc in enumerate(generator):
if n >= n_examples:
break
docs.append(doc)
return docs | 769b802dae6f76359498df57231ecae8d0139ee5 | 462,306 |
def join_keywords(l):
""" join a list of keywords with '+' for BingSearch """
return u'+'.join(l) | 02e49c6a0be33e4e3c1135999087635685f1d93b | 212,589 |
def remove_whitespace(word):
"""Removes whitespace from word"""
return word.strip() | 6d70751cee467ad0455be7604521700803173c37 | 637,192 |
def mocked_requests_get(*args, **_):
"""Mock function for requests.get()."""
# pylint: disable=R0903
class MockResponse:
"""Mock response of a requests.get() call."""
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
"""Return the JSON data."""
return self.json_data
if args[0] == 'http://mock.vagrancy.net/inventory':
return MockResponse({"boxes": []}, 200)
return MockResponse(None, 404) | 380780cb4144a9616c81fd72cf7c9fa8f4b33515 | 314,291 |
def stellar_radius(M, logg):
"""Calculate stellar radius given mass and logg"""
if not isinstance(M, (int, float)):
raise TypeError('Mass must be int or float. {} type given'.format(type(M)))
if not isinstance(logg, (int, float)):
raise TypeError('logg must be int or float. {} type given'.format(type(logg)))
if M < 0:
raise ValueError('Only positive stellar masses allowed.')
M = float(M)
return M/(10**(logg-4.44)) | 2afbd991c7461d7861370f18d90df840569da857 | 3,166 |
def remove_zero_amount_coproducts(db):
"""Remove coproducts with zero production amounts from ``exchanges``"""
for ds in db:
ds[u"exchanges"] = [
exc
for exc in ds["exchanges"]
if (exc["type"] != "production" or exc["amount"])
]
return db | 8bf10592b723b9c0f4b0a82eaafd4cf7b8a792d0 | 323,607 |
from typing import Any
from typing import List
def is_typing_list(data_type: Any) -> bool:
"""
Return `True` if data_type is `typing.List` or a subscription of it.
Examples
--------
```python3
assert is_typing_list(typing.List)
assert is_typing_list(typing.List[int])
assert not is_typing_list(int)
```
"""
return data_type is List or (
hasattr(data_type, '__origin__') and
hasattr(data_type, '__args__') and
data_type.__origin__ in # type: ignore
# This has changed in newer Python implementations to `List`,
# `list` is checked for compatibility.
(list, List)
) | 5da5e598c570e21947f2765095d01f8d7c7e33de | 587,690 |
import re
def roman_numeral(s):
""" Returns a roman numeral converted to an integer if valid, or the source string """
def roman_to_int(s):
d = {'m': 1000, 'd': 500, 'c': 100, 'l': 50, 'x': 10, 'v': 5, 'i': 1}
n = [d[i] for i in s if i in d]
return str(sum([i if i>=n[min(j+1, len(n)-1)] else -i for j,i in enumerate(n)]))
if not re.match(r'^(?=[mdclxvi])m*(c[md]|d?c{0,3})(x[cl]|l?x{0,3})(i[xv]|v?i{0,3})$', s):
return s
return roman_to_int(s) | 0a94e509a30b234b74ff6be72ec31e156e014ed4 | 523,743 |
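Usage sketch (the pattern above only accepts lowercase numerals):
print(roman_numeral("mcmxciv"))  # 1994
print(roman_numeral("hello"))    # hello (returned unchanged)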
def variant_key(variant, sort_alleles=True):
"""Gets a human-readable string key that is almost unique for Variant.
Gets a string key that contains key information about the variant, formatted
as:
reference_name:start+1:reference_bases->alternative_bases
where alternative bases is joined with a '/' for each entry in
alternative_bases. The start+1 is so we display the position, which starts at
1, and not the offset, which starts at 0.
For example, a Variant(reference_name='20', start=10, reference_bases='AC',
alternative_bases=['A', 'ACC']) would have a key of:
20:11:AC->A/ACC
The key is 'almost unique' in that the reference_name + start + alleles should
generally occur once within a single VCF file, given the way the VCF
specification works.
Args:
variant: nucleus.genomics.v1.Variant to make into a key.
sort_alleles: bool. If True, the alternative_bases of variant will be sorted
according to their lexicographic order. If False, the alternative_bases
will be displayed in their order in the Variant.
Returns:
A str.
"""
alts = variant.alternate_bases
if sort_alleles:
alts = sorted(alts)
return '{}:{}:{}->{}'.format(variant.reference_name, variant.start + 1,
variant.reference_bases, '/'.join(alts)) | 756cb639bf749ed0d8682cf2df6014b2f36dc9ad | 381,338 |
import re
def create_mad_lib_story(mad_lib_template, responses):
"""
Summary of create_mad_lib_story function: The user input responses will be used to populate the template in the proper position.
Parameters:
mad_lib_template (string): Madlib template from input file
responses (array): the responses entered by user
Returns:
    madlib_result (string): a string containing the Madlib template with user input responses placed in the proper position of the template.
"""
madlib_result = mad_lib_template
prompts = re.findall(r'{[^}]+}', mad_lib_template)
# The regex sub function is invoked to replace the matching prompt in the
    # Madlib string with the user input. It replaces just the first occurrence
# of the matching prompt. The madlib result string is recreated each time.
for i in range(len(responses)):
madlib_result = re.sub(prompts[i], responses[i], madlib_result, 1)
return madlib_result | 33ccea094569825ddffb7d33f7b5d9dfe2b5bf83 | 108,084 |
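Usage sketch with a hypothetical template:
template = "The {adjective} {noun} jumped."
print(create_mad_lib_story(template, ["lazy", "dog"]))  # The lazy dog jumped.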
def _DuplicateName(values):
"""Returns the 'mojom_name' of the first entry in |values| whose 'mojom_name'
has already been encountered. If there are no duplicates, returns None."""
names = set()
for value in values:
if value.mojom_name in names:
return value.mojom_name
names.add(value.mojom_name)
return None | c20216c26136132326d9b2eff594dee025a89896 | 126,012 |
def minimize_reference(reference):
"""
Takes a reference spikein dataset object and strips it down to a subset of its fields.
"""
reference_fields = ['accession', 'dbxrefs', 'description']
minimized_reference = {field:reference[field] for field in reference_fields if field in reference}
return minimized_reference | 035b3b2e3241501f0623870264cdef79f0cabece | 568,738 |
import torch
def _reduction(loss: torch.Tensor, reduction: str) -> torch.Tensor:
"""
Reduce loss
Parameters
----------
loss : torch.Tensor, [batch_size, num_classes]
Batch losses.
reduction : str
Method for reducing the loss. Options include 'elementwise_mean',
'none', and 'sum'.
Returns
-------
loss : torch.Tensor
Reduced loss.
"""
if reduction == 'elementwise_mean':
return loss.mean()
elif reduction == 'none':
return loss
elif reduction == 'sum':
return loss.sum()
else:
raise ValueError(f'{reduction} is not a valid reduction') | 58138edb5e0ba083d4a6f34b616176f4a71155b5 | 143,057 |
def _maximal_independent_set(G):
"""Returns a maximal independent set of nodes in ``G`` by repeatedly
choosing an independent node of minimum degree (with respect to the
subgraph of unchosen nodes).
"""
result = set()
remaining = set(G)
while remaining:
G = G.subgraph(remaining)
v = min(remaining, key=G.degree)
result.add(v)
remaining -= set(G[v]) | {v}
return result | f25c4c95c5b258651e1f1412542a9b60b5e616bf | 323,109 |
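Usage sketch; the `G.subgraph`/`G.degree`/`G[v]` calls above match the networkx Graph API, so a networkx graph is assumed here:
import networkx as nx

G = nx.path_graph(5)  # 0-1-2-3-4
print(_maximal_independent_set(G))  # e.g. {0, 2, 4}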
def create_layers_range(config):
"""
Calculate the router id range for each layer of NoC.
Parameters
----------
config : [type]
Configuration
Returns
-------
[type]
list of router id range
"""
layers_range = []
router_counter = 0
for x, y in zip(config.x, config.y):
layers_range.append(range(router_counter, router_counter+x*y))
router_counter += x*y
return layers_range | cb690d158ba3c0c5250abdbf0118bb4382a5795d | 414,598 |
def strip_ns(ref):
"""
strip the namespace prefix from ``ref``
:param str ref: one word, colon delimited string, such as *nx:groupGroup*
:returns str: the part to the right of the last colon
"""
return ref.split(":")[-1] | 2e2b32fe1cb471a9e0f308bca1ae0add51a9fa3e | 411,210 |
def check_compatibility(content):
"""
Check the compatibility of a log. If an older version of refl1d was used,
we will need to parse the logs differently.
Starting with version 0.8.6, a JSON representation of models is available
and the refl1d version is part of the log.
"""
return content.find('REFL1D_VERSION') >= 0 | dbafd07be91529b928fd2b547acfc4562bfff5d0 | 277,666 |
def rectify(invec):
"""Rectify input series so that negative values are zero"""
invec[invec <= 0] = 0
return invec | e4ff98a09348a5efef82fb1cd8c14246ea883f80 | 54,230 |
def get_variable_indices(atom):
"""
Gets the indexes of the variables in the atom.
:param atom: the atom
:type atom: Atom
:return: the indices of the variables
:rtype: list[int]
"""
indexes = []
for i in range(atom.arity()):
if not atom.terms[i].is_constant():
indexes.append(i)
return indexes | 7daf0faab7c8c4470313d7b609b7589b87153bfa | 115,919 |
def remove_whitespace(tableString):
"""
brief: Remove whitespace in a given string
Args:
@param tableString : the string that has to be scanned
Raises:
throws an exception (ValueError) when the value is not type of string
Return: Return the string without space
"""
    if not isinstance(tableString, str):
        raise ValueError('The given value is not of type string')
tableString = tableString.replace(" ", "")
return tableString | 28c050b5e1da87c90d8c620696203bf884899c88 | 279,691 |
def getBandNumber(band):
"""
Returns band number from the band string in the spreadsheet.
This assumes the band format is in ALMA_RB_NN.
    If there is an error, then 0 is returned.
    """
    try:
bn = int(band.split('_')[-1])
except:
bn = 0
return bn | 47247c1e860306dc38487c3fc7ce3355ee98ebde | 490,129 |
def verify_header(filename):
"""
verify the signature file header (pastadb)
"""
    with open(filename, "rb") as f:
        # the file is opened in binary mode, so compare against bytes;
        # in Python 3 a bytes value never equals a str, so the original
        # comparison always failed
        if f.read(7) == b"\x70\x61\x73\x74\x61\x64\x62":  # b"pastadb"
            return True
    return False | 5958179b0656ac7b4e8681e20d2746176592da3a | 25,196 |
def get_ngram_plot_data(df, type, sentiment):
"""Format the data to the ngram plot
Parameters
----------
df: Pandas DataFrame
Pandas dataframe with the ngrams sentiment data
type: String
Type of the ngram to filter
sentiment: String
POSITIVE or NEGATIVE
Returns
-------
Pandas Dataframe
The dataframe filtered
"""
return df.query("type == @type and sentiment == @sentiment").sort_values('score', ascending=False) | 088685bd5691816cfcb07a99447bdc93a12b3c7b | 146,738 |
def correct_allele_by_strand(strand, allele):
"""
correct_allele_by_strand
========================
This method is used to get a corrected allele based on the strand a variant is on.
If the variant is on the positive strand the allele will stay the same. If the variant
is on the negative strand the variant will be corrected. (A -> T, T => A, C -> G, G -> C)
Parameters:
----------
1) strand: (str) The strand the variant is on
2) allele: (str) The allele to correct
Returns:
++++++++
1) (str) The corrected allele
"""
corrected_allele = allele
if strand == "-":
if allele.upper() == "A":
corrected_allele = "T"
elif allele.upper() == "T":
corrected_allele = "A"
elif allele.upper() == "G":
corrected_allele = "C"
elif allele.upper() == "C":
corrected_allele = "G"
return corrected_allele | 84049cb502c4a853676fce0666a7c8d52a36f0bb | 284,725 |
def convert2Frequency(featureDict):
"""
Converts the count values of the feature dictionary to frequencies.
Arguments
---------
featureDict : dict
dictionary of document with the respective values
Returns
-------
    featureDict : dict
        containing the counts converted to relative frequencies
"""
if sum(featureDict.values()) != 0:
invTotalNumber = 1.0 / sum(featureDict.values())
else:
invTotalNumber = 1.0
featureDict.update((k, invTotalNumber*v) for k,v in featureDict.items())
return featureDict | 35a980e82dd3c84764f3bc1fbfe49fa0e066e43e | 455,766 |
def exactly_one_topping(ketchup, mustard, onion):
"""Return whether the customer wants exactly one of the three available toppings
on their hot dog.
"""
    return int(ketchup) + int(mustard) + int(onion) == 1 | 214c95d35c116993dc78740d5d16b874122960ed | 1,221 |
def get_numeric_trace_attribute_value(trace, trace_attribute):
"""
Get the value of a numeric trace attribute from a given trace
Parameters
------------
    trace
        Trace of the log
    trace_attribute
        Name of the numeric trace attribute to retrieve
Returns
------------
value
Value of the numeric trace attribute for the given trace
"""
if trace_attribute in trace.attributes:
return trace.attributes[trace_attribute]
raise Exception("at least a trace without trace attribute: " + trace_attribute) | fd757861972dce8d9624efa13773bd4624cf9ace | 22,058 |
import random
def save(service_request):
"""Save service request"""
# Implementation specific. Just return a random SR id for now
return {'service_request_id':random.randint(1,10000)} | f13aa0aa38ac463bea1cd877c99232f9c78c12e0 | 565,442 |
def round_down_half_hour(time):
"""
    Rounds a datetime object DOWN to the nearest half hour. So if it's 12:15 pm, return a datetime at 12:00 pm.
    :param time: A datetime object
    """
    # the original chained comparisons skipped minute == 59 entirely
    if time.minute < 30:
        return time.replace(minute=0)
    return time.replace(minute=30) | 67bc8475cedd560a60f7cc1459c73cf0bcc31675 | 76,993 |
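Usage sketch:
from datetime import datetime

print(round_down_half_hour(datetime(2020, 1, 1, 12, 15)))  # 2020-01-01 12:00:00
print(round_down_half_hour(datetime(2020, 1, 1, 12, 45)))  # 2020-01-01 12:30:00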
def plotly_figure(figure, id: str):
"""
:param figure: plotly graph object or px figure
    :param id: unique id string of format 'id_xxx' with x representing a number
:return: html style string containing a plotly figure
"""
    json_figure = figure.to_json()
    # Named placeholders avoid clashing with other braces in the markup, and
    # the element id must be quoted both in the HTML and in the JS call.
    html = """
        <div id="{id}"></div>
        <script>
            var plotly_data = {json};
            Plotly.react("{id}", plotly_data.data, plotly_data.layout);
        </script>
        """
    return html.format(id=id, json=json_figure) | 949415c70d467c48ee3aa1f028c9e3539099febf | 708,047 |
def CommandLine(command, args):
"""Convert an executable path and a sequence of arguments into a command
line that can be passed to CreateProcess"""
cmd = "\"" + command.replace("\"", "\"\"") + "\""
for arg in args:
cmd = cmd + " \"" + arg.replace("\"", "\"\"") + "\""
return cmd | dfb7de2d1a72a007c9d120a27de5078d407f947d | 44,096 |
def SlopePoint(pt1, pt2):
    """The slope between two points (x1,y1),(x2,y2):
    (y2-y1)/(x2-x1)
    Raises ZeroDivisionError for a vertical line (x1 == x2).
    """
    return (pt2[1]-pt1[1])/(pt2[0]-pt1[0]) | 24d73293186130c2c787a530aef0c80327e721c2 | 130,064 |
def join_nums_and_pairs(verses, joiner=', '):
"""
Given an array of ints and int pairs, return a single string
of individual verse numbers and verse ranges: [3,5,(7,10)] --> "3, 5, 7-10".
"""
return joiner.join([str(x) if isinstance(x, int) else '%d-%d' % x for x in verses]) | 09328d796cdbf0dca6330534ad13dc1c8c60030a | 532,039 |
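Usage sketch:
print(join_nums_and_pairs([3, 5, (7, 10)]))  # 3, 5, 7-10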
def percent(num):
"""Format number as per cent with 2 decimal places."""
return "{:.2f}%".format(num * 100) | 8c6eef8a619b403a623bc8181dacd1aea3cc7bf9 | 503,179 |
def lanthoc(b3, b5, b7):
"""
Leaf Anthocyanid Content (Wulf and Stuhler, 2015).
.. math:: LAnthoC = b7/(b3 - b5)
:param b3: Green.
:type b3: numpy.ndarray or float
:param b5: Red-edge 1.
:type b5: numpy.ndarray or float
:param b7: Red-edge 3.
:type b7: numpy.ndarray or float
:returns LAnthoC: Index value
.. Tip::
Wulf, H.; Stuhler, S. 2015. Sentinel-2: Land Cover, \
Preliminary User Feedback on Sentinel-2A Data. \
in: Proceedings of the Sentinel-2A Expert Users \
Technical Meeting, Frascati, Italy, 29-30 September 2015.
"""
LAnthoC = b7/(b3 - b5)
return LAnthoC | 03ce088c6bcb1178649c7525a51fbc2bf88205c4 | 421,976 |
def is_four_memebered_ring_torsion(torsion):
"""Check that three atoms in the given torsion form a four-membered ring."""
# Push a copy of the first and second atom in the end to make the code simpler.
torsion = list(torsion) + [torsion[0], torsion[1]]
is_four_membered_ring = True
for i in range(4):
# The atom is bonded to the next one.
is_four_membered_ring &= torsion[i].is_bonded_to(torsion[i+1])
# The atom is not bonded to the atom on its diagonal.
is_four_membered_ring &= not torsion[i].is_bonded_to(torsion[i+2])
return is_four_membered_ring | fd79e3e551e21b9d07222a9a36589c33f11b1353 | 559,232 |
def is_string(a):
"""
Returns if given :math:`a` variable is a *string* like variable.
Parameters
----------
a : object
Data to test.
Returns
-------
bool
Is :math:`a` variable a *string* like variable.
Examples
--------
>>> is_string("I'm a string!")
True
>>> is_string(["I'm a string!"])
False
"""
    return isinstance(a, str) | 299f515e517f6be166db912602cdef908fb589d9 | 339,619 |
def _is_displayed(row, current_quarter, current_offset):
"""
Return true if message is for current quarter and current date
falls within the display range for a given message.
Message will display for 7 days.
"""
display_quarter = row[2]
display_offset = int(row[3])
return current_quarter == display_quarter and \
(display_offset + 7) > current_offset >= display_offset | 9b5f7a067e06f4104a44d89e03c9afe3169fc99e | 616,508 |
def has_error(json):
"""
Returns True if the query has an error; False otherwise.
Given a JSON response to a currency query, this returns the
opposite of the value following the keyword "valid". For example,
if the JSON is
'{ "src":"", "dst":"", "valid":false, "err":"Currency amount is invalid." }'
then the query is not valid, so this function returns True (It
does NOT return the message 'Source currency code is invalid').
Parameter json: a json string to parse
Precondition: json is the response to a currency query
"""
    return 'false' in json | 16e783c59d722d043375878bf5775c668808b57c | 112,307 |
def persistent_real_state(real_state, spike):
"""Handles refractory state changes due to spike in last time bin.
Parameters
----------
real_state : torch tensor
real state in last time bin.
spike : torch tensor
spike state in last time bin.
Returns
-------
torch tensor
persistent real state to store for next time.
"""
spike = (spike > 0).to(real_state.dtype)
return real_state * (1 - spike) | f2b581a964b2d2e95245140a00fb3414fdc10c9b | 567,722 |
import re
def get_url_dir(url):
"""Return the url directory - url minus the text after last '/'."""
return re.sub("[^/]*$", "", url) | 91fe5c4a78c433a6c0138a616a88538c71f6a894 | 479,157 |
from pathlib import Path
def some_dir(tmp_path) -> Path:
"""Folder with some data, representing a given state"""
base_dir = tmp_path / "original"
base_dir.mkdir()
(base_dir / "empty").mkdir()
(base_dir / "d1").mkdir()
(base_dir / "d1" / "f1").write_text("o" * 100)
(base_dir / "d1" / "f2").write_text("o" * 100)
(base_dir / "d1" / "d1_1" / "d1_2").mkdir(parents=True, exist_ok=True)
(base_dir / "d1" / "d1_1" / "f3").touch()
(base_dir / "d1" / "d1_1" / "d1_2" / "f4").touch()
(base_dir / "d1" / "d1_1" / "d1_1_1").mkdir(parents=True, exist_ok=True)
(base_dir / "d1" / "d1_1" / "d1_1_1" / "f6").touch()
return base_dir | 1fa99d40b102107d39df18f5cbd1337a3163d645 | 674,029 |
def _get_resource_type(api_version: str, kind: str) -> str:
"""
>>> _get_resource_type("v1", "Pod")
"pod"
>>> _get_resource_type("batch/v1", "Job")
"job.v1.batch"
"""
if '/' in api_version:
api_group, version = api_version.split('/')
return f'{kind}.{version}.{api_group}'.lower()
else:
return kind.lower() | 33ba0fddc87a160ef0d3337b4e548fef23ebd074 | 367,870 |
def _escapefun(matchobj):
"""Callback to interpret an escape sequence"""
s = matchobj.group()
c = s[1]
i = "bfnrt".find(c)
if i >= 0:
c = "\b\f\n\r\t"[i]
elif c == 'x':
c = chr(int(s[2:], 16))
elif c in '01234567':
c = chr(int(s[1:], 8))
return c | b2a6760913d0479dec7422267d77f14c143b02e0 | 125,549 |
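A usage sketch; the escape-sequence regex this callback was paired with is not part of the snippet, so the pattern below is an assumption:
import re

# assumed pattern: backslash followed by a hex escape, an octal escape, or one char
ESCAPE_RE = re.compile(r"\\(x[0-9a-fA-F]{1,2}|[0-7]{1,3}|.)")
print(ESCAPE_RE.sub(_escapefun, r"tab:\t hex:\x41 octal:\101"))
# "tab:\t hex:A octal:A" (with a real tab character)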
from ntpath import split, basename
def path_leave(path, **kwargs):
"""
Extract a file name from a whole
path.
Parameters
----------
path : str
Path including the file name.
Returns
-------
file_name : str
Extracted file name.
Notes
-----
ntpath.split:
Return tuple (head, tail) where tail is everything after the final slash.
Either part may be empty.
ntpath.basename:
Return final component of a pathname
"""
head, tail = split(path)
# if tail is empty, return last component of head:
return tail or basename(head) | 32834253eb574e60cbd068eb21f37b05929cda86 | 532,375 |
def required_columns(row_to_add, wanted_keys):
"""
:param row_to_add: Contains the rows from the input file.
:param wanted_keys: List of required column header names to be displayed at the end.
:return: Dict of keys,values formatted data.
"""
required_keys = dict((k, row_to_add[k]) for k in wanted_keys if k in row_to_add)
return required_keys | 8643a592662939cf8b00f009c4dc3f87d1df4e6c | 29,080 |
import torch
def fast_compute_class_metric(test_preds, test_sols, class_metrics = ('accuracy', 'recall', 'precision')):
"""
    Computes manually (i.e. without sklearn functions) accuracy, recall and precision.
Args:
test_preds: numpy array/ torch tensor of size N with discrete output vals
test_sols: numpy array/torch tensor of size N with binary labels
class_metrics: tuple with a subset of values from ('accuracy', 'recall', 'precision') indicating which
metrics to report
Returns:
dictionary with values of accuracy, recall and precision
"""
with torch.no_grad():
TP = ((test_sols == 1) & (test_preds == 1)).sum().float()
FP = ((test_sols == 0) & (test_preds == 1)).sum().float()
TN = ((test_sols == 0) & (test_preds == 0)).sum().float()
FN = ((test_sols == 1) & (test_preds == 0)).sum().float()
accuracy = (TP + TN) / (TP + FP + TN + FN)
recall = TP / (TP + FN) if TP + FN > 0 else torch.tensor(0)
precision = TP / (TP + FP) if TP + FP > 0 else torch.tensor(0)
class_metrics_dict = {'accuracy': accuracy.item(), 'recall': recall.item(), 'precision': precision.item()}
class_metrics_dict = {met_name: class_metrics_dict[met_name] for met_name in class_metrics}
return class_metrics_dict | 997b1d279201b71c7b2e3b609ab60a76ba74ddce | 596,953 |
def filter_tokens(tokens):
"""Filters out any whitespace and comment tokens."""
return [token for token in tokens if token.token_type not in ['WS', 'COMMENT']] | cfbbe2e88d0a21a2f0c0f5ae81977f1ac14ef5a8 | 296,391 |
def check_bidi_comp(cond0, cond1, arg0, arg1):
"""
Check whether conditions are True for two arguments regardless of order.
Parameters
----------
cond0, cond1 : callable
Function of one variable that evaluate to a bool.
arg0, arg1 : object
Arguments to pass to `cond0` and `cond1`.
Returns
-------
result : bool
True if `cond0` and `cond1` are respectively True for
`arg0` and `arg1` or `arg1` and `arg0`.
"""
assert callable(cond0) and callable(cond1)
return (cond0(arg0) and cond1(arg1)) or \
(cond1(arg0) and cond0(arg1)) | 27086054fe32a14456ee0178068a6cf4919d7258 | 610,742 |
def first_or_none(iterable):
"""Returns the first item in the given iterable, or None if the iterable is empty"""
return next((x for x in iterable), None) | ebf543d880f5b89e44c26ade240a6faea9287563 | 555,720 |
def roll_da_to_0360(xr_da, lon_name="longitude"):
"""Roll DataArray's data with longitudes from (-180, 180) to (0, 360)."""
# Roll longitudes and corresponding data
out = xr_da.roll(**{lon_name: xr_da[lon_name].shape[0] // 2}, roll_coords=True)
# Reset western (negative) longitudes to values within (180, 360) range
out[lon_name] = out[lon_name] - (out[lon_name] // 360) * 360
return out | 1ea16ca1167a941f563115d0627dee99240ae246 | 335,162 |
def read_data() -> str:
"""Read the data from the `input.txt` file."""
with open('input.txt') as input_file:
return input_file.read() | 763847f1b781a44ec7528f7082e19fbb9b1a11bc | 427,039 |
def is_device_timer_running(botengine, reference):
"""
Determine if the timer with the given reference is running
:param botengine: BotEngine environment
:param reference: Unique reference name for the timer
:return: True if the timer is running
"""
return botengine.is_timer_running(reference) | a1884045b9d936e9f1339566b8becd5fc6d45fda | 254,399 |
import socket
import pickle
def send_request(port, message):
"""Send a request to an IPCServer.
Args:
port (int): port to connect to
message (any type)
Returns:
response (any type)
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("localhost", port))
pickle.dump(message, s.makefile("wb"))
return pickle.load(s.makefile("rb")) | 87085ee36674fbbeec1ff8f50a577861831a774f | 206,142 |
def escape_perl_string(v):
"""Escape characters with special meaning in perl"""
return str(v).replace("$", "\\$").replace("\"", "\\\"").replace("@", "\\@") if v else '' | 605aaba55decada7f6c52b1511a84ccc388fd907 | 531,145 |
def reshape_as_fmap(tensor, shape):
"""Reshape from (b, h*w, c) to (b, c, h, w)."""
b, (h, w) = tensor.shape[0], shape
return tensor.reshape(b, h, w, -1).permute(0, 3, 1, 2) | 472efe353b590a9951f10db2b0c46b4ac76c34c6 | 383,607 |
import re
def get_book_ids(book_urls):
"""
Return a list of unique book ids from a list of book urls
"""
# Check if books are duplicated
unique_list = {}
num_processed = 0
for url in book_urls:
# print('url=', url)
all_nums = re.findall('[0-9]+', url)
# if all_nums[-1] == all_nums[-2] or all_nums[-2] == all_nums[-3]:
unique_list[all_nums[-2]] = unique_list.get(all_nums[-2], 0) + 1
num_processed += 1
# Get the list of unique book ids
book_ids = list(unique_list.keys())
print('Number of links before removing duplicates:', len(book_urls))
print('Number of links processed:', num_processed)
print('Number of unique books retained:', len(unique_list.keys()))
return book_ids | c9ce5437f13fbcf0e2ee324da02bab016ffe32de | 581,752 |
def spell_stats(user):
"""
    Gets the player/boss' defense, name and strength.
Useful as it's stored differently between bosses and players in a battle
Returns
-------
Any
        float - The defense of the account.
        str - The name of the account.
        float - The strength of the account.
"""
# Get user stats for player/boss (as the data is stored differently)
try:
# Player
u_def = user['account']['stats']['defense']
u_name = user['user'].name
u_str = user['account']['stats']['strength']
except KeyError:
# Boss
u_def = user['stats']['defense']
u_name = user['name']
u_str = user['stats']['strength']
return u_def, u_name, u_str | f4fa6425dbf18cf8a7e7a50c44cb3a8092f085ac | 688,079 |
def early_stop(val_acc_history, k=10, required_progress=1e-4):
"""
Stop the training if there is no non-trivial progress in k steps
@param val_acc_history: a list contains all the historical validation acc
@param required_progress: the next acc should be higher than the previous by
at least required_progress amount to be non-trivial
@param t: number of training steps
@return: a boolean indicates if the model should earily stop
"""
non_trivial = 1
if len(val_acc_history)>=k+1:
non_trivial = 0
for i, acc in enumerate(val_acc_history):
if i != len(val_acc_history) - 1:
if val_acc_history[i+1]-acc < -required_progress:
non_trivial = 1
break
return not non_trivial | bc79d31851b73527c1bc622d257e0b6db25f03da | 437,843 |
def read_True_steps_suffix_map(txtfile_name_with_path):
"""
This function reads the text file that contains all the steps in the configuration file, the
corresponding suffix, and whether they were completed or not.
Args:
txtfile_name_with_path: string, full name and path of the text file
Returns:
steps_list: list, steps set to True in configuration file
suffix_list: list, suffix for the output file corresponding to each step had it completed
completion_list: list, strings of True or False depending on whether the step completed or not
"""
steps_list, suffix_list, completion_list = [], [], []
with open(txtfile_name_with_path, "r") as tf:
for line in tf.readlines():
if "#" not in line:
info = line.split()
steps_list.append(info[0])
suffix_list.append(info[1])
completion_list.append(info[2])
return steps_list, suffix_list, completion_list | 363676d44f1cd410394a68e267d92a34f2cc6f27 | 626,585 |
def depth_bonacci_rule(depth):
"""rule for generating tribonacci or other arbitrary depth words
Args:
depth (int): number of consecutive previous words to concatenate
Returns:
        lambda w: w[-1] + w[-2] + ... + w[-depth]
For example, if depth is 3, you get the tribonacci words.
(there is already a tribonacci and quadbonacci words method)
"""
return lambda w: "".join(w[-1:-depth-1:-1]) | db0d9f51475c9e4dd60acee36ffa651e577c4be2 | 691,003 |
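Usage sketch, generating tribonacci words (depth 3):
rule = depth_bonacci_rule(3)
words = ["1", "2", "3"]
for _ in range(2):
    words.append(rule(words))
print(words)  # ['1', '2', '3', '321', '32132']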
def reverse_class_dependencies(dependencies):
""" Reverses class dependency dictionary
Consumes dictionary in format
{
'test_class_name_1': {
'Id': '01pU00000026druIAA',
'References': ['class_name_1', 'class_name_2']
}
}
produces dictionary in format
{
'class_name_1': ['test_class_name_1'],
'class_name_2': ['test_class_name_1']
} """
reverse_deps = dict()
for class_name, data in dependencies.items():
for dep_class_name in data['References']:
if dep_class_name in reverse_deps:
if class_name in reverse_deps[dep_class_name]:
continue
cur_class_deps = reverse_deps[dep_class_name]
else:
cur_class_deps = list()
cur_class_deps.append(class_name)
cur_class_deps.sort()
reverse_deps[dep_class_name] = cur_class_deps
return reverse_deps | 15002b38847721ffe4862f3a5d69a73dd2f999d8 | 131,782 |
def _get_aea_logger_name_prefix(module_name: str, agent_name: str) -> str:
"""
Get the logger name prefix.
It consists of a dotted save_path with:
- the name of the package, 'aea';
- the agent name;
- the rest of the dotted save_path.
>>> _get_aea_logger_name_prefix("aea.save_path.to.package", "myagent")
'aea.myagent.save_path.to.package'
:param module_name: the module name.
:param agent_name: the agent name.
:return: the logger name prefix.
"""
module_name_parts = module_name.split(".")
root = module_name_parts[0]
postfix = module_name_parts[1:]
return ".".join([root, agent_name, *postfix]) | 753b8f6342039260eedde66420f8cacaaf99027d | 436,644 |
def is_isbn13(s):
"""
Check whether the given string is a valid, normalized ISBN-13 number.
This only passes if the given value is a string that has exactly 13
decimal digits, and the last decimal digit is a proper ISBN-13 check
digit.
Parameters:
s : str | mixed - the value to check
Return:
True if a valid, normalized ISBN-13 number, False otherwise
"""
# Fail if not a string
if not isinstance(s, str):
return False
# Fail if not exactly 13 characters
if len(s) != 13:
return False
# Check that each character is a decimal digit and compute the
# weighted sum as we go along
wsum = 0
for x in range(0, 13):
# Get current character code
c = ord(s[x])
# Fail if not a decimal digit
if (c < ord('0')) or (c > ord('9')):
return False
# Get proper weight for this digit
w = 1
if (x & 0x1) == 1:
w = 3
# Update weighted sum
wsum = wsum + (w * (c - ord('0')))
# If weighted sum mod 10 is zero, then check passes; otherwise, check
# fails
if (wsum % 10) == 0:
return True
else:
return False | cd32042073865e09a7139960f1e6aded2ac9f38d | 494,447 |
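Usage sketch:
print(is_isbn13("9780306406157"))  # True
print(is_isbn13("9780306406158"))  # False (bad check digit)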
import operator
def extend_overlapping_spans(spans):
"""
Method to take a list of spans and extend overlapping spans to the longest span
Args:
spans (List[tuple]): A list of span tuples (start, end)
Returns:
List[tuple]: A list of span tuples (start, end) with overlapping spans extended to the longest span
"""
    spans = sorted(spans, key=operator.itemgetter(1))
    i = len(spans) - 1
    while i >= 0:
        start, end = spans[i]
        delete = False
        for j in range(i + 1, len(spans)):
            rstart, rend = spans[j]
            if start < rstart < end:
                # extend the overlapping span to the left, then drop this one
                # (tuples are immutable, so replace rather than assign in place)
                spans[j] = (start, rend)
                del spans[i]
                delete = True
                break
            elif start >= rstart:
                del spans[i]
                delete = True
                break
        if not delete:  # `if ~delete` was a bug: ~False == -1, which is truthy
            i -= 1
    return spans | f5d61c7db6cc3506f58338848ca1661ef7979e30 | 326,101 |
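Usage sketch:
print(extend_overlapping_spans([(0, 5), (3, 8), (10, 12)]))  # [(0, 8), (10, 12)]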
import string
def make_Template(name, **kwargs):
"""make_Template(name, **kwargs) -> subclass of string.Template
- name is the name of the subclass
- kwargs is passed as the dict element to type
"""
return type(name, (string.Template,), kwargs) | 4632bc7bcdd27a3641551ef1e98bab52cfe499f7 | 426,998 |
def get_deltaV_losses(aim_altitude: float) -> float:
"""Estimates the losses due to various sources, including atmospheric drag,
using a very simplified equation.
The result is the amount of Delta V (ie: the velocity) lost during ascent of the rocket.
Args:
aim_altitude (float): The altitude of the perigee/injection in km
Returns:
float: the velocity lost during ascent in m/s
"""
return (2.452e-3) * aim_altitude ** 2 + 1.051 * aim_altitude + 1387.50 | 10c1991981890e76ba7a3feb8a7f7fc1e08f44be | 173,195 |
def top_markets_by_property(property_data_frame, number_of_markets, dimension):
""" A function which returns the top number_of_markets per dataframe
I: dataframe, the number of markets to return
O: list of the top markets sorted by number of observations"""
markets = property_data_frame.groupby([dimension]).count().reset_index()
markets = markets.copy()
    markets = markets.sort_values(markets.columns[2], ascending=False)
top_markets = markets.head(number_of_markets)
return top_markets[dimension].tolist() | 6799d9ce3cb343a0827aa23733ca44345420aeb0 | 104,262 |
import pathlib
def get_paths_to_patient_files(path_to_imgs, append_mask=True):
"""
Get paths to all data samples, i.e., CT & PET images (and a mask) for each patient.
Parameters
----------
path_to_imgs : str
        A path to a directory with patients' data. Each folder in the directory must correspond to a single patient.
append_mask : bool
Used to append a path to a ground truth mask.
Returns
-------
list of tuple
A list wherein each element is a tuple with two (three) `pathlib.Path` objects for a single patient.
The first one is the path to the CT image, the second one - to the PET image. If `append_mask` is True,
the path to the ground truth mask is added.
"""
path_to_imgs = pathlib.Path(path_to_imgs)
#patients = [p for p in os.listdir(path_to_imgs) if os.path.isdir(path_to_imgs / p)]
patients = [f.name.split("_")[0] for f in path_to_imgs.rglob("*_ct*")]
print(str(patients))
paths = []
for p in patients:
path_to_ct = path_to_imgs / (p + '_ct.nii.gz')
path_to_pt = path_to_imgs / (p + '_pt.nii.gz')
if append_mask:
path_to_mask = path_to_imgs / (p + '_gtvt.nii.gz')
paths.append((path_to_ct, path_to_pt, path_to_mask))
else:
paths.append((path_to_ct, path_to_pt))
return paths | 61480fee3e300d2ca97e819fae875cf4c7a637e1 | 16,103 |
def get_wikipedia_multi_pattern(lang, date):
"""Return a regex pattern matching for wiki .bz2 files to be extracted."""
return r'({}wiki-{}-pages-articles[0-9]+.xml.*bz2$)'.format(lang, date) | 3f4d03a82140e6b897dbee8195a3f18b90e98e85 | 148,020 |
def flatten_nested_dict_list(d, parent_key='', sep='_', item_key=''):
"""
Flatten a nested dict or list to a list.
For example, given a dict
{
a: 1
b: {
c: 2
}
c: 3
}
the function would return [(a, 1), (b_c, 2), (c, 3)]
Args:
d (dict, list): a nested dict or list to be flattened
parent_key (str): recursion helper
sep (str): separator for nesting keys
item_key (str): recursion helper
Returns:
list: a list of (key, value) tuples
"""
items = []
if isinstance(d, (tuple, list)):
new_key = parent_key + sep + item_key if len(parent_key) > 0 else item_key
for i, v in enumerate(d):
items.extend(flatten_nested_dict_list(v, new_key, sep=sep, item_key=str(i)))
return items
elif isinstance(d, dict):
new_key = parent_key + sep + item_key if len(parent_key) > 0 else item_key
for k, v in d.items():
assert isinstance(k, str)
items.extend(flatten_nested_dict_list(v, new_key, sep=sep, item_key=k))
return items
else:
new_key = parent_key + sep + item_key if len(parent_key) > 0 else item_key
return [(new_key, d)] | def7f9e410aad1c0927e5b6907b18a3c64749a34 | 112,019 |
def reduce_text(text, allowed_text_df, allowed_text_column):
"""
A function to reduce an input document to include only words contained in the allowed_text_df (with values in the allowed_text_columns)
:param text: A string of words in the text, separated by commas. This should be handled automatically by the input from the make_text_encoded_features function
:param allowed_text_df: The optional DataFrame containing the curated vocabulary of terms
:param allowed_text_column: The column in keyword_df which contains the words
:return: A list with the reduced text
"""
text_list_ = text.split(',')
text_reduced_list_ = [x for x in text_list_ if x in allowed_text_df[allowed_text_column].values]
return text_reduced_list_ | 265719f7f9336df9acb65b89da5d637df9ab676d | 269,086 |
def _check_count(counter):
"""
Get the most common dimension, and check if at least two thirds of the
values agree.
"""
dim, count = counter.most_common(1)[0]
if count >= (2 / 3) * sum(counter.values()):
return True, dim
else:
return False, dim | 4163cfcb65a0b3011504039faf44c88589acade6 | 53,135 |
import re
def find_emails(text):
"""
It will parse the given string and return a list of emails if found
Example:
>>find_emails('hello\n find me here\[email protected]')
['[email protected]']
:param text: string
:return: list
"""
return re.findall(r"([a-zA-Z0-9+._-]+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9_-]+)", text) | e46ec7746947a4463864d477ce30238f80303076 | 292,920 |
def intcode_three(parameter_list, code_list):
""" Accepts input and places it in parameter_list[0] place in code_list. Returns True. """
number_in = int(input('Please enter a number: ') or "5")
code_list[parameter_list[0]] = number_in
return True | 690b377da19aea89026237fc2677ea6796967487 | 607,516 |
import torch
def _pdist(a, b):
"""Compute pair-wise squared distance between points in `a` and `b`.
Parameters
----------
a : array_like
An NxM matrix of N samples of dimensionality M.
b : array_like
An LxM matrix of L samples of dimensionality M.
Returns
-------
ndarray
        Returns a matrix of size len(a), len(b) such that element (i, j)
contains the squared distance between `a[i]` and `b[j]`.
"""
if len(a) == 0 or len(b) == 0:
return torch.zeros(len(a), len(b), dtype=a.dtype, device=a.device)
a = a[:, None, :]
b = b[None, :, :]
return torch.sum(torch.pow(a - b, 2), dim=-1) | 47025e93442418293cc51c7e6d335fa4861d7711 | 87,674 |
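Usage sketch:
a = torch.tensor([[0.0, 0.0], [1.0, 1.0]])
b = torch.tensor([[1.0, 0.0]])
print(_pdist(a, b))  # tensor([[1.], [1.]])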
def _match_parameters(parameters_1, parameters_2):
"""Decide whether parameters_1 are a subset of parameters_2."""
if set(parameters_1.keys()) <= set(parameters_2.keys()):
return all(v == parameters_2[k] for k, v in parameters_1.items())
return False | 4b88320c9799f286e6c4f1f8c443d41cf12c23bc | 394,202 |
import textwrap
def dedent(text):
"""Remove any common leading whitespace from every line in a given text."""
return textwrap.dedent(text) | 514f9f41feac1c19ff92d6c9258bf54d7d3d7bd8 | 705,056 |
import itertools
def trim_spec_composer(bins_seq, lefts=None, rights=None, axes=None):
"""
Helps to compose trim_spec parameter in
:py:func:`triniti_ne.rebin.trim_nd` with
reasonable defaults for lefts, rights and axes iterators.
Parameters
----------
bins_seq: sequence of ndarrays
Iterates over the list of bins associated with a grid to be trimmed.
lefts: sequence of floats
Iterates over the list of left edges for trimming.
rights: sequence of floats
Iterates over the list of right edges for trimming.
axes: sequence of ints, optional
Iterates over the list of corresponding axes.
If not provided (default), then iterates over sequence 0..len(bins).
Returns
-------
trim_spec: sequence of tuples
Iterator over the sequence of tuples (bins, lefts, rights, axis)
"""
if not lefts:
lefts = itertools.repeat(None)
if not rights:
rights = itertools.repeat(None)
if not axes:
axes = itertools.count()
return zip(bins_seq, lefts, rights, axes) | 36d786955e1123624d3d5664b34d1da4d4d985ad | 531,032 |