content (string, lengths 39–9.28k) | sha1 (string, length 40) | id (int64, 8–710k) |
---|---|---|
def reverse(seq):
"""
Recursively reverses a string
@param {string} sequence to reverse
@return {string}
"""
if seq == '':
return ''
last_index = len(seq) - 1
return seq[last_index] + reverse(seq[:last_index])
|
5395f8be64f48145e45612fb5b9635d6639dafed
| 592,357 |
import array
def round_float32(x_double):
"""
    Uses the array module to return a (truncated) 32-bit float
representation of a python floating point number (double)
"""
return array.array('f', [x_double])[0]
|
b614f83123d90bafc8d72c8fc723aae24331b3b2
| 590,777 |
def gen_wordlist(wordfile):
"""Generates a list of words given a line-delimited wordfile."""
# read text in
with open(wordfile,"r") as fil:
text=fil.read()
# Split on lines but not spaces because of multiword phrases
return text.split("\n")
|
2f9c88e0db2b7b54639abc54c58bf69d04677e8e
| 183,808 |
def __sbiw_rd_i(op):
"""
Decode and return the 'Rd' and 'immediate' arguments of a SBIW opcode
"""
AVR_SBIW_RD_MASK = 0x0030
AVR_SBIW_CONST_MASK = 0x00CF
imm_part = op & AVR_SBIW_CONST_MASK
imm = ((imm_part >> 2) & 0x30) | (imm_part & 0xF)
# rd_part is 2 bits and indicates r24, 26, 28, or 30
rd_part = (op & AVR_SBIW_RD_MASK) >> 4
rd = (2 * rd_part) + 24
return (rd, imm)
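# Hypothetical usage sketch (an addition, not part of the original snippet):
# decoding the opcode for `sbiw r30, 16`, which encodes as 0x9770 under the
# AVR pattern 1001 0111 KKdd KKKK.
# >>> __sbiw_rd_i(0x9770)
# (30, 16)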
|
1e34e08c60b87bf27956e796390334efcbb8402f
| 194,440 |
def has_anonymous_link(node, auth):
"""check if the node is anonymous to the user
:param Node node: Node which the user wants to visit
    :param Auth auth: auth context; its private_link (if any) determines anonymity
:return bool anonymous: Whether the node is anonymous to the user or not
"""
if auth.private_link:
return auth.private_link.anonymous
return False
|
c5941bce3f0110dfcd5e9bbb19bae0682c5e731f
| 707,328 |
import re
def collapse_linebreaks(string, threshold=1):
""" Returns a string with consecutive linebreaks collapsed to at most the given threshold.
Whitespace on empty lines and at the end of each line is removed.
"""
n = "\n" * threshold
p = [s.rstrip() for s in string.replace("\r", "").split("\n")]
string = "\n".join(p)
string = re.sub(n+r"+", n, string)
return string
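# Illustrative doctest-style examples (added, not from the source): a
# threshold of 2 keeps at most one empty line between paragraphs.
# >>> collapse_linebreaks("a\n\n\n\nb", threshold=2)
# 'a\n\nb'
# >>> collapse_linebreaks("a   \n\n\nb")
# 'a\nb'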
|
5bf230b210cbccadeb7dc21f762d3499234b700e
| 516,021 |
import pathlib
def tmp_fixed_file(tmp_dir):
"""Return a path for a tmp yaml file."""
    return pathlib.Path(tmp_dir) / 'fixed_file'
|
861cd579ee3c3254b8eccd2ac5b10a6a8435e09a
| 547,567 |
def generate_new_row(forecast_date, target, target_end_date,
location, type, quantile, value):
"""
Return a new row to be added to the pandas dataframe.
"""
new_row = {}
new_row["forecast_date"] = forecast_date
new_row["target"] = target
new_row["target_end_date"] = target_end_date
new_row["location"] = location
new_row["type"] = type
new_row["quantile"] = quantile
new_row["value"] = value
return new_row
|
212438098b7ad56f2046fdc3c5f6a73cab96f420
| 599,042 |
import six
import uuid
def six_uuid5(namespace, name):
"""A uuid.uuid5 variant that takes utf-8 'name' both in Python 2 and 3.
:param namespace: A UUID object used as a namespace in the generation of a
v5 UUID.
:param name: Any string (either bytecode or unicode) used as a name in the
generation of a v5 UUID.
:returns: A v5 UUID object.
"""
# NOTE(bence romsics):
# uuid.uuid5() behaves seemingly consistently but still incompatibly
    # different in cPython 2 and 3. Both expect the 'name' parameter to have
# the type of the default string literal in each language version.
# That is:
# The cPython 2 variant expects a byte string.
# The cPython 3 variant expects a unicode string.
# Which types are called respectively 'str' and 'str' for the sake of
# confusion. But the sha1() hash inside uuid5() always needs a byte string,
# so we have to treat the two versions asymmetrically. See also:
#
# cPython 2.7:
# https://github.com/python/cpython/blob
# /ea9a0994cd0f4bd37799b045c34097eb21662b3d/Lib/uuid.py#L603
# cPython 3.6:
# https://github.com/python/cpython/blob
# /e9e2fd75ccbc6e9a5221cf3525e39e9d042d843f/Lib/uuid.py#L628
if six.PY2:
name = name.encode('utf-8')
return uuid.uuid5(namespace=namespace, name=name)
|
c99e4b6a609a9594728e2ecc155b453dc1739f6c
| 514,502 |
import fnmatch
def csv_matches(list_of_files):
"""Return matches for csv files"""
matches = fnmatch.filter(list_of_files, "*.csv")
return matches
|
ee81a533731b78f77c4b4472c2e19c643d04525f
| 56,370 |
def _identify_loss_method(loss_method):
"""
Identifies the symmetry for the given loss method.
Parameters
----------
loss_method : str
The loss method to use. Should have the symmetry identifier as
the prefix.
Returns
-------
symmetric : bool
True if `loss_method` had 's_' or 'symmetric_' as the prefix, else False.
str
The input `loss_method` value without the first section that indicated
the symmetry.
Raises
------
ValueError
Raised if the loss method does not have the correct form.
"""
prefix, *split_method = loss_method.lower().split('_')
if prefix not in ('a', 's', 'asymmetric', 'symmetric') or not split_method:
        raise ValueError('must specify loss function symmetry with an "a_"/"asymmetric_" or "s_"/"symmetric_" prefix')
if prefix in ('a', 'asymmetric'):
symmetric = False
else:
symmetric = True
return symmetric, '_'.join(split_method)
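# Hedged usage sketch (added for illustration; the method names below are
# hypothetical):
# >>> _identify_loss_method('s_huber')
# (True, 'huber')
# >>> _identify_loss_method('asymmetric_least_squares')
# (False, 'least_squares')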
|
21b07e898f0d45381f2834aac632cb3350d38694
| 404,409 |
def convert_age(age: str):
"""
Convert age string.
All age ranges in format AXX-AXX with X = numeric.
Exception: A80+
"""
age_range = age.split("-")
if len(age_range) > 1:
return {
"start": float(age_range[0][1:3]),
"end": float(age_range[1][1:3]),
}
else:
return {
"start": 80.0,
"end": 120.0,
}
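# Illustrative examples (added; not in the original snippet):
# >>> convert_age("A35-A59")
# {'start': 35.0, 'end': 59.0}
# >>> convert_age("A80+")  # the open-ended exception maps to 80-120
# {'start': 80.0, 'end': 120.0}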
|
7b22029ce52be3f29fb45482ba87f99287c44429
| 79,724 |
def create_queries(group, joiner, template, maxlen):
"""Create queries combining `maxlen` entities of a `group` to search for.
Parameters
----------
group : list
Entities (authors, documents, etc.) to search for on Scopus.
joiner : str
On which the group elements should be joined to fill the query.
template : string.Template()
A string template with one parameter named `fill` which will be used
as search query.
maxlen : int
The maximum length a query can be. If equal 1, one element at the time
is used per query.
Returns
-------
queries : list of tuples
A list of tuples where the first element of each tuple is a query
and the second is the list of elements searched by the query.
"""
group = sorted([str(g) for g in group]) # make robust to passing int
queries = []
start = 0
for i, g in enumerate(group):
sub_group = group[start:i+2]
query = template.substitute(fill=joiner.join(sub_group))
if maxlen == 1 or len(query) > maxlen or i+1 == len(group):
sub_group = group[start:i+1]
query = template.substitute(fill=joiner.join(sub_group))
queries.append((query, sub_group))
start = i + 1
return queries
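# A minimal usage sketch (added; the Scopus-style query template is a made-up
# example):
# >>> from string import Template
# >>> create_queries(["1", "2"], " OR ", Template("AU-ID($fill)"), maxlen=1)
# [('AU-ID(1)', ['1']), ('AU-ID(2)', ['2'])]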
|
87bd96b4816a3950d905607e47dbd5ab85e31d55
| 362,202 |
def micro_f1_similarity(
y_true: str,
y_pred: str
) -> float:
"""
Compute micro f1 similarity for 1 row
Parameters
----------
y_true : str
True string of a space separated birds names
y_pred : str
Predicted string of a space separated birds names
Returns
-------
float
Micro F1 similarity
Examples
--------
>>> from evaluations.kaggle_2020 import micro_f1_similarity
>>> y_true = 'amecro amerob'
>>> y_pred = 'amecro bird666'
>>> micro_f1_similarity(y_true, y_pred)
0.5
"""
true_labels = y_true.split()
pred_labels = y_pred.split()
true_pos, false_pos, false_neg = 0, 0, 0
for true_elem in true_labels:
if true_elem in pred_labels:
true_pos += 1
else:
false_neg += 1
for pred_el in pred_labels:
if pred_el not in true_labels:
false_pos += 1
f1_similarity = 2 * true_pos / (2 * true_pos + false_neg + false_pos)
return f1_similarity
|
fbe6b07e329ce5727e8488f8627dc7904694f719
| 592,573 |
def crop_by_bbox(pil_image, bbox):
"""
Crops the specified PIL image with the given bounding box
:param pil_image: PIL image to crop
:param bbox: bounding box object to crop by
:return: PIL image cropped.
"""
box = bbox.get_box()
box[2] += box[0]
box[3] += box[1]
crop_result = pil_image.crop((box[0], box[1], box[2], box[3]))
return crop_result
|
cd829f9b3cb134e73dc28ecfccdedabc2db529e0
| 345,415 |
def GetMaxOrder(Scat):
"""Function compute max Order (n) for :math:`S_1 \, \\& \, S_2` computing
MaxOrder = :math:`\\big( 2+x + 4 x^{1/3} \\big)`;
x begin the size parameter of the scatterer.
Parameters
----------
Scat : :class:`BaseScatterer`
Description of parameter `Scat`.
Returns
-------
:class:`int`
The maximum order n to reach.
"""
return int(2 + Scat.SizeParam + 4*Scat.SizeParam**(1/3))
|
0b89eaf60244f93f68d5b3e9172c506e9c550646
| 426,204 |
from typing import Callable
def action(func: Callable):
"""
Decorator to mark functions as playbook actions
"""
setattr(func, "_action_name", func.__name__)
return func
|
f3b9ef46da4a1a801a0cfe0ae0c04bf398aa6120
| 207,990 |
def mult(terms):
"""
Takes a 2-element tuple TERMS and multiplies the two terms
"""
a,b = terms[0], terms[1]
product = 0
for i in range(a):
product += b
return product
|
58184fa0143e61d058bdd71d5c6651588770fc43
| 391,423 |
import re
def search_matches(match_list, search_list, ignore_list=None):
"""
Return list of football matches that match search
"""
if ignore_list is None:
ignore_list = []
search = re.compile('|'.join(search_list))
my_matches = [m for m in match_list if search.search(m['fixture'])]
if ignore_list:
ignore = re.compile('|'.join(ignore_list))
my_matches = [m for m in my_matches if not ignore.search(m["fixture"])]
return my_matches
|
51ae6dcd18b1f89825b6193f114a09bc1aab3682
| 102,797 |
def DoesTestHaveLabels(cls, labels):
"""Returns true if any tests in cls have any of the labels."""
labels = set(labels)
for name in dir(cls):
if name.startswith("test"):
item = getattr(cls, name, None)
if labels.intersection(getattr(item, "labels", set(["small"]))):
return True
return False
|
61c5e816a2fe6bde0798d51f0b832673655d8130
| 134,862 |
from typing import Counter
def count_words_fast(text):
"""count the number of times each word occurs in text (str).
Return dictionary where keys are unique words and values are
word counts. skip punctuations"""
    text = text.lower()  # lowercase so the same word is counted whether or not it is capitalised
    skips = [".", ",", ";", ":", "'", '"']  # strip punctuation so it is not counted as part of the preceding word
for ch in skips:
text = text.replace(ch,"")
word_counts = Counter(text.split(" "))
return word_counts
|
113eba5988651177b353e7afedbe8e63e2fdb452
| 529,514 |
def chunker(iterable, inc):
"""
Convert an iterable into a list of inc sized lists.
:param iterable: Iterable to chunk.
:type iterable: list/tuple/string
:param inc: Increment; how big each chunk is.
:type inc: int
"""
chunks = [iterable[x:x+inc] for x in range(0, len(iterable), inc)]
return chunks
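# Quick doctest-style examples (an added illustration):
# >>> chunker("abcdefg", 3)
# ['abc', 'def', 'g']
# >>> chunker([1, 2, 3, 4], 2)
# [[1, 2], [3, 4]]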
|
a6a78efd0f9a92d24f2395adbec0cf00aea30686
| 329,464 |
def never(_):
"""Predicate function that returns ``False`` always.
:param _:
Argument
:returns:
``False``.
"""
return False
|
38a949525bb45b556eb5131bd9ff3d11b83a616e
| 214,375 |
def first_not_none(itr):
"""
    returns the first non-None result from an iterable; similar to any(), but returns the value rather than True/False
"""
for x in itr:
        if x is not None:
return x
return None
|
de04721394fd45f8d3886b6adb17a9db0893825b
| 641,641 |
def has_duplicates(t):
"""Returns True if any element appears more than once in a sequence.
t: list returns: bool
"""
s = t[:] # make a copy of t to avoid modifying the parameter
s.sort()
for i in range(len(s)-1):
if s[i] == s[i+1]: # check for adjacent elements that are equal
return True
return False
|
2200d4c5c5b22937f60e560937794d7419082f40
| 653,394 |
def nfa_determinization(nfa: dict) -> dict:
""" Returns a DFA that reads the same language of the input NFA.
Let A be an NFA, then there exists a DFA :math:`A_d` such
that :math:`L(A_d) = L(A)`. Intuitively, :math:`A_d`
collapses all possible runs of A on a given input word into
one run over a larger state set.
:math:`A_d` is defined as:
:math:`A_d = (Σ, 2^S , s_0 , ρ_d , F_d )`
where:
• :math:`2^S` , i.e., the state set of :math:`A_d` , consists
of all sets of states S in A;
• :math:`s_0 = S^0` , i.e., the single initial state of
:math:`A_d` is the set :math:`S_0` of initial states of A;
• :math:`F_d = \{Q | Q ∩ F ≠ ∅\}`, i.e., the collection of
sets of states that intersect F nontrivially;
• :math:`ρ_d(Q, a) = \{s' | (s,a, s' ) ∈ ρ\ for\ some\ s ∈ Q\}`.
:param dict nfa: input NFA.
:return: *(dict)* representing a DFA
"""
    def state_name(s):
        # a sorted list gives a deterministic name (set repr order is not guaranteed)
        return str(sorted(s))
dfa = {
'alphabet': nfa['alphabet'].copy(),
'initial_state': None,
'states': set(),
'accepting_states': set(),
'transitions': dict()
}
if len(nfa['initial_states']) > 0:
dfa['initial_state'] = state_name(nfa['initial_states'])
dfa['states'].add(state_name(nfa['initial_states']))
sets_states = list()
sets_queue = list()
sets_queue.append(nfa['initial_states'])
sets_states.append(nfa['initial_states'])
if len(sets_states[0].intersection(nfa['accepting_states'])) > 0:
dfa['accepting_states'].add(state_name(sets_states[0]))
while sets_queue:
current_set = sets_queue.pop(0)
for a in dfa['alphabet']:
next_set = set()
for state in current_set:
if (state, a) in nfa['transitions']:
for next_state in nfa['transitions'][state, a]:
next_set.add(next_state)
if len(next_set) == 0:
continue
if next_set not in sets_states:
sets_states.append(next_set)
sets_queue.append(next_set)
dfa['states'].add(state_name(next_set))
if next_set.intersection(nfa['accepting_states']):
dfa['accepting_states'].add(state_name(next_set))
dfa['transitions'][state_name(current_set), a] = state_name(next_set)
return dfa
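# A hedged usage sketch (added; the NFA below is a made-up example following
# the dict convention the function expects):
# nfa = {
#     'alphabet': {'a', 'b'},
#     'states': {'q0', 'q1'},
#     'initial_states': {'q0'},
#     'accepting_states': {'q1'},
#     'transitions': {('q0', 'a'): {'q0', 'q1'}, ('q1', 'b'): {'q1'}},
# }
# dfa = nfa_determinization(nfa)  # subset construction over {'q0'}, {'q0','q1'}, ...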
|
9ee37893974a7c5e06ad8486359c4460bbda763d
| 507,623 |
import torch
def smooth_l1_loss(input,
target,
beta: float,
reduction: str = "none",
size_average=False):
"""
Smooth L1 loss defined in the Fast R-CNN paper as:
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
Smooth L1 loss is related to Huber loss, which is defined as:
| 0.5 * x ** 2 if abs(x) < beta
huber(x) = |
| beta * (abs(x) - 0.5 * beta) otherwise
Smooth L1 loss is equal to huber(x) / beta. This leads to the following
differences:
- As beta -> 0, Smooth L1 loss converges to L1 loss, while Huber loss
converges to a constant 0 loss.
- As beta -> +inf, Smooth L1 converges to a constant 0 loss, while Huber loss
converges to L2 loss.
- For Smooth L1 loss, as beta varies, the L1 segment of the loss has a constant
slope of 1. For Huber loss, the slope of the L1 segment is beta.
Smooth L1 loss can be seen as exactly L1 loss, but with the abs(x) < beta
portion replaced with a quadratic function such that at abs(x) = beta, its
slope is 1. The quadratic segment smooths the L1 loss near x = 0.
Args:
input (Tensor): input tensor of any shape
target (Tensor): target value tensor with the same shape as input
beta (float): L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
reduction: 'none' | 'mean' | 'sum'
'none': No reduction will be applied to the output.
'mean': The output will be averaged.
'sum': The output will be summed.
Returns:
The loss with the reduction option applied.
Note:
PyTorch's builtin "Smooth L1 loss" implementation does not actually
implement Smooth L1 loss, nor does it implement Huber loss. It implements
the special case of both in which they are equal (beta=1).
See: https://pytorch.org/docs/stable/nn.html#torch.nn.SmoothL1Loss.
"""
if beta < 1e-5:
# if beta == 0, then torch.where will result in nan gradients when
# the chain rule is applied due to pytorch implementation details
# (the False branch "0.5 * n ** 2 / 0" has an incoming gradient of
# zeros, rather than "no gradient"). To avoid this issue, we define
# small values of beta to be exactly l1 loss.
loss = torch.abs(input - target)
else:
n = torch.abs(input - target)
cond = n < beta
loss = torch.where(cond, 0.5 * n**2 / beta, n - 0.5 * beta)
if reduction == "mean" or size_average:
loss = loss.mean()
elif reduction == "sum":
loss = loss.sum()
return loss
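# Worked example (added for illustration): with beta=1.0 the two branches give
# 0.5 * 0.5**2 / 1.0 = 0.125 for |x| < beta, and 2.0 - 0.5 = 1.5 otherwise.
# >>> inp = torch.tensor([0.0, 2.0]); tgt = torch.tensor([0.5, 0.0])
# >>> smooth_l1_loss(inp, tgt, beta=1.0)
# tensor([0.1250, 1.5000])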
|
a794842340f58b366959f2ac7806afb6579c9abe
| 529,827 |
from functools import reduce
def sum_groups_by_key(unsummed, key_name):
"""Computes the sum of the values for a given key across multiple dicts.
Steps through a dictionary of lists. Each list is a list of dictionaries.
For each dictionary in a list this function computes the sum of the value
of a given key. A dictionary with the lists replaced by the result of each
sum calculation is returned.
"""
def sumfn(count, item):
return count + item.get(key_name, 0)
    summed = {k: reduce(sumfn, g, 0) for k, g in unsummed.items()}
return summed
|
0a14a64f5d85cae819796d56b7a5896694269f50
| 206,868 |
def note_to_frequency(note):
""" Translates a MIDI sequential note value to its corresponding frequency
in Hertz (Hz). Note values are of integer type in the range of 0 to
127 (inclusive). Frequency values are floating point. If the input
note value is less than 0 or greater than 127, the input is
invalid and the value of None is returned. Ref: MIDI Tuning Standard
formula.
"""
if 0 <= note <= 127:
return pow(2, (note - 69) / 12) * 440
else: return None
|
fb77367a86341b728fdfa9cf9e98ec8ea699390d
| 153,514 |
import json
import logging
def parse_json_to_dict(text_data):
"""parse json-containing text, return a python dictionary"""
    try:
        results = json.loads(text_data)
        return results
    except (ValueError, TypeError):
        logging.exception('unexpected error parsing json result')
        return {}
|
5528ac68bec9f8d3cd8338bffbe7b3a65d719417
| 110,210 |
def initialize_log(file_path, mode='a'):
"""
Initialize a file in which logs will be written during the execution of
training, validation...
Args:
file_path: string with the path to the file
        mode: default 'a' (if the file exists, logs will be appended after
            the last line); 'w' is also possible if you want to create a
            blank file.
Returns:
_io.TextIOWrapper of the file where logs will be written
Raises:
        IOError if the path doesn't exist.
"""
try:
log_file = open(file_path, mode)
return log_file
except IOError:
        print('Path does not exist:', file_path)
raise
|
0c32270ab5c0ba8ca5633b486f27d109bcb72cf0
| 275,211 |
def coef_updation(c1min,c1max,c2min,c2max,iteration,n_iterations):
"""
    Coefficient update function
Parameters:
c1min: float
Minimum value of the acceleration coefficient c1
c1max: float
Maximum value of the acceleration coefficient c1
c2min: float
Minimum value of the acceleration coefficient c2
c2max: float
        Maximum value of the acceleration coefficient c2
iteration: int
The number of the iteration
n_iterations: int
The number of total iterations
Returns: float, float
The new acceleration coefficients c1 and c2
"""
c1=c1max-((c1max-c1min)/n_iterations)*iteration # decreasing
c2=c2min+((c2max-c2min)/n_iterations)*iteration # increasing
return c1, c2
|
952d767f645d3bc757f034a715da2081beb8086b
| 142,119 |
def check_value_above_filter(value, threshold):
"""
Returns a boolean to indicate value at or above threshold.
:param value: integer from a column "*read count".
:param threshold: threshold for the filtering of these read counts.
:return: boolean whether integer is equal or greater than threshold.
"""
return int(value) >= threshold
|
d67802f591d3925f233249b388719cca230e5cc3
| 619,552 |
def valid_time(time):
""" 針對時間格式進行偵錯
minute、second 介於 0 ~ 60;hour is positive
return: boolean
"""
if time.second < 0 or time.minute < 0 or time.hour < 0:
return False
if time.second >= 60 or time.minute >= 60:
return False
return True
|
633febba2a8b05643ff7f622d8372d2cf7ab4f13
| 176,667 |
def find_first_between(string, left, right):
""" Find first string in `string` that is between two strings `left` and `right`. """
start = string.find(left)
if start != -1:
end = string.find(right, start + len(left))
if end != -1:
return string[start + len(left):end]
|
199c2c1ce91633e1881c84d359124b767dcbabfa
| 340,329 |
import dill
def apply_packed_function_for_map(arg):
"""
Unpack dumped function as target function
and call it with arguments.
:return:
result of target function
"""
(dumped_function, item, args, kwargs) = arg
target_function = dill.loads(dumped_function)
res = target_function(item, *args, **kwargs)
return res
|
ad48f30ff2769903144e37039f03c2f68598475b
| 183,935 |
def register_new_peak(background_index_left, background_index_right, keys):
"""Registers a new peak by creating a peak from the background values given
Args:
background_index_left (int): index in compressed depths of background left to a peak
background_index_right (int): index in compressed depths of background right to a peak
keys (List): The keys for the compressed depths dictionary
Returns:
        tuple: (start, end) of the registered peak
"""
if background_index_right - background_index_left >= 5:
peak = keys[background_index_left + 1][0], keys[background_index_right - 1][1]
else:
peak = keys[background_index_left][0], keys[background_index_right][1]
return peak
|
af943a71bb530be3c087912961d331875771b355
| 266,932 |
def screen_collection(src, cols, possibilities):
"""Return entries in a list of dicts where a set of fields match one of a set of possible values for those fields."""
rows = []
for row in src:
check = tuple([row[col] for col in cols])
if check in possibilities:
rows.append(row)
return rows
|
ff3b8deada521639bf4978ea15da29327283f602
| 412,053 |
def in_comment(regex):
"""Builds a regex matching "regex" in a comment"""
return '^[ \t]*//[ \t]*' + regex + '[ \t]*$'
|
c071bcca0cdc2bdc0982ae7bd27d427a180d07d2
| 365,830 |
import re
def is_ipv4(address):
""" Checks if given address is valid IPv4 address
:param str address: IP address to check
:return bool: True if address is valid IPv4 address, False otherwise
"""
regexp = "^(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$"
if re.match(regexp, address):
return True
return False
|
65a1fde7d94daaec39dc76658d8fa0504acf6b3b
| 485,413 |
def dict_to_aws_tags(d):
"""
Transforms a python dict {'a': 'b', 'c': 'd'} to the
aws tags format [{'Key': 'a', 'Value': 'b'}, {'Key': 'c', 'Value': 'd'}]
Only needed for boto3 api calls
:param d: dict: the dict to transform
:return: list: the tags in aws format
>>> from pprint import pprint
>>> pprint(sorted(dict_to_aws_tags({'a': 'b', 'c': 'd'}), key=lambda d: d['Key']))
[{'Key': 'a', 'Value': 'b'}, {'Key': 'c', 'Value': 'd'}]
"""
return [{'Key': k, 'Value': v} for k, v in d.items()]
|
12680d9ccd1cfc04a6ac94910256d52018daa22d
| 656,177 |
def make_xz_ground_plane(vertices):
"""
Given a vertex mesh, translates the mesh such that the lowest coordinate of the mesh
lies on the x-z plane.
:param vertices: (N, 6890, 3)
    :return: (N, 6890, 3) vertices, translated so the mesh rests on the x-z plane
"""
lowest_y = vertices[:, :, 1].min(axis=-1, keepdims=True)
vertices[:, :, 1] = vertices[:, :, 1] - lowest_y
return vertices
|
e05622420f431882a11cf60f4db28c094cbc0dca
| 658,561 |
def AT(y,x):
"""
Returns control codes to set the coordinates of the text cursor.
Use this in a ``PRINT`` or ``SET`` command. Example:
``PRINT("normal",AT(5,15),"row 5 column 15",AT(14,4),"row 14 column 4")``
Args:
- y - integer - the y coordinate to move to (0-23)
- x - integer - the x coordinate to move to (0-31)
"""
return "".join((chr(22),chr(int(y)),chr(int(x))))
|
2c42890dd20e1a436d978b7cc08dff9205a902b4
| 240,714 |
def sorting_rivers(station):
"""sorting function for stations_by_rivers"""
river = station[1]
return river
|
f790ebcaeeff93a0859d36e4f57a78ea52d3f2b5
| 619,558 |
def ver_list_float(param_name, param_value):
"""
This function verifies if all elements in a given list are float.
Parameters
----------
param_name : str
the parameter being evaluated
param_value : list
the list to evaluate for float values
Returns
-------
eval : boolean
True if all elements are float, False otherwise
"""
if not issubclass(type(param_value), list):
print (param_name + ' is not a list')
return False
for e in param_value:
if type(e) != float:
print (param_name + ' should be list of float values')
return False
return True
|
5d2fa5d993c8293ea3464b055cc488be48b602c7
| 352,123 |
import string
def is_valid_letter(letter):
"""
Returns True if the input letter contains only one character
in ascii_letters. Both upper and lower case is OK. Otherwise
returns False.
:param letter: string
:return: True or False
"""
if len(letter) != 1:
return False
if letter not in string.ascii_letters:
return False
return True
|
45e038df4a18f970793fa76363b0a10813c28f5b
| 378,135 |
def _parse_suspected_cls(predator_result):
"""Parse raw suspected_cls into dict."""
if not predator_result:
return None
# The raw result contains some additional information that we don't need here.
# Everything we're concerned with is a part of the "result" object included
# with the response.
predator_result = predator_result['result']
return {
'found': predator_result.get('found'),
'suspected_project': predator_result.get('suspected_project'),
'suspected_components': predator_result.get('suspected_components'),
'changelists': predator_result.get('suspected_cls'),
'feedback_url': predator_result.get('feedback_url'),
'error_message': predator_result.get('error_message'),
}
|
0f4d72542fc138512d4bbfe3c997f8e9b940a96c
| 629,806 |
def string_to_list(s):
"""
e.g. '124' --> [1, 2, 4]
"""
    return [int(ch) for ch in s]
|
c4e8ea51f8c7bedf8c5d69e22f02d53bc9cab7f2
| 68,606 |
def format_example_name(example):
"""Formats an example command to a function or file name
"""
return '_'.join(example).replace('-', '_').replace(
'.', '_').replace('/examples/', '')[1:]
|
223301709fb52aac98e2622c7df760fd3d7056e1
| 35,747 |
def line_with_jacoco_test_footer(line, report_type):
"""Check if the given string represents JaCoCo unit test footer."""
return report_type == "jacoco" and line == "Code coverage report END"
|
63bc1e47c61e4898c017242865fc2b8039b2bf7c
| 453,308 |
def lcm(num1, num2):
"""Returns the lowest common multiple of two given integers."""
temp_num = num1
while (temp_num % num2) != 0:
temp_num += num1
return temp_num
|
bdb1c66c2e155fe930ffb84d2657e1536d2387dc
| 101,134 |
def assure_list(s):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
"""
if isinstance(s, list):
return s
elif s is None:
return []
else:
return [s]
|
f15cf892c1e278f5a99db57b91594cfc209c5fe4
| 249,214 |
def slice_size(slice_objects):
"""
Returns the total number of elements in the combined slices
Also works if given a single slice
"""
num_elements = 0
try:
for sl in slice_objects:
num_elements += (sl.stop - (sl.start + 1)) // sl.step + 1
except TypeError:
num_elements += (slice_objects.stop - (slice_objects.start + 1)) \
// slice_objects.step + 1
return num_elements
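# Added doctest-style examples; note each slice needs explicit start/stop/step:
# >>> slice_size([slice(0, 10, 2), slice(5, 8, 1)])  # 5 + 3 elements
# 8
# >>> slice_size(slice(0, 10, 2))  # a single slice also works
# 5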
|
ef3ae504537390cbe2a1458298813f51a4ddb89f
| 56,040 |
import yaml
def load_yaml(stream):
"""
Load YAML data.
"""
loader = yaml.Loader(stream)
try:
return loader.get_single_data()
finally:
loader.dispose()
|
8f46e91d0e4c69866950854a31b11ec2a0e610e3
| 257,534 |
def IsGlobalUrlMapRef(url_map_ref):
"""Returns True if the URL Map reference is global."""
return url_map_ref.Collection() == 'compute.urlMaps'
|
82280837da4cd9a16baf27fe08b88f6464535494
| 403,364 |
def whether_in_file(email, rows):
"""
Check if email is in CSV file.
:param email: str
:param rows: list of csv file rows [row0, row1, ...]
    :return: is_in_file: boolean
"""
is_in_file = False
for row in rows:
if email.lower() == row[0].lower():
is_in_file = True
break
return is_in_file
|
f14d593d5b16e6c3f5718821be5c6425b768f1b9
| 334,267 |
def best_med_worst(f1s, c_test_genomes, tla_to_tnum):
"""
Get the best, median, and worst reconstructions from the test set, as measured by F1 score
Arguments:
f1s (list) -- test F1 scores
c_test_genomes (list) -- tlas of genomes in the test set
tla_to_tnum (dict) -- maps tla to tnum for each genome
Returns:
best (list) -- for the best reconstruction: index, tla, F1 score, tnum
median (list) -- for the median reconstruction: index, tla, F1 score, tnum
worst (list) -- for the worst reconstruction: index, tla, F1 score, tnum
"""
idx_best = f1s.index(max(f1s))
tla_best = c_test_genomes[idx_best]
best = [idx_best, tla_best, f1s[idx_best], tla_to_tnum[tla_best]]
# Get index of median F1 score
f1s_sorted = sorted(f1s, reverse=True)
idx_median = f1s.index(f1s_sorted[int(len(f1s_sorted)/2)])
tla_median = c_test_genomes[idx_median]
median = [idx_median, tla_median, f1s[idx_median], tla_to_tnum[tla_median]]
idx_worst = f1s.index(min(f1s))
tla_worst = c_test_genomes[idx_worst]
worst = [idx_worst, tla_worst, f1s[idx_worst], tla_to_tnum[tla_worst]]
return best, median, worst
|
717ffade5c7e52ddf279279d931ac18ea84c074c
| 288,639 |
def basic_block_size(bb):
""" calculate size of basic block """
return bb.end_ea - bb.start_ea
|
57986803a9de2555fd419311200ba342a31bc07b
| 325,983 |
def validate_vlan_id(vlan_id):
"""
Validate VLAN ID provided is an acceptable value
:param vlan_id: int
:return: list
"""
errors = []
if vlan_id < 1 or vlan_id > 4094:
errors.append("Invalid ID: must be a valid vlan id between 1"
" and 4094")
return errors
|
638270360b718587371a23952d321f88b9de3f1c
| 244,431 |
def find_field_with_suffix(val, suffix):
"""
Return ``val[field]``, where ``field`` is the only one whose name ends
with ``suffix``. If there is no such field, or more than one, raise KeyError.
"""
names = []
for field in val.type.fields():
if field.name.endswith(suffix):
names.append(field.name)
#
if len(names) == 1:
return val[names[0]]
elif len(names) == 0:
raise KeyError("cannot find field *%s" % suffix)
else:
raise KeyError("too many matching fields: %s" % ', '.join(names))
|
8ec2fe6d48bd4e8ea3377f6c126292229518b17a
| 127,523 |
def _downsample_simple(arr, n):
"""
    Keep every n-th element of a 1D array.
    :param arr: 1D array.
    :param n: integer step; one element is kept every n.
:return: Downsampled 1D array.
"""
return arr[::n]
|
bd531cf5a5771966dfb3edba5136dd69ea2e2849
| 647,779 |
from typing import Callable
import inspect
import functools
def wrap_in_coroutine(func: Callable) -> Callable:
"""Decorator to wrap a function into a coroutine function.
If `func` is already a coroutine function it is returned as-is.
Args:
func: A callable object (function or coroutine function)
Returns:
A coroutine function which executes `func`.
"""
if inspect.iscoroutinefunction(func):
return func
@functools.wraps(func)
async def _wrapper(*args, **kwargs):
return func(*args, **kwargs)
return _wrapper
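# A minimal usage sketch (added; not from the source):
# import asyncio
# async def main():
#     double = wrap_in_coroutine(lambda x: x * 2)  # plain function -> coroutine fn
#     return await double(21)
# asyncio.run(main())  # -> 42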
|
3134241771749d63ce5213180a34d7c26f8f0c76
| 40,677 |
import re
def regex_prettifier(scraped_data, regex):
"""Prettify the scraped data using a regular expression
Positional Arguments:
scraped_data (list): data scraped from a website
regex (str): a regular expression
Return:
list: the regex modified data
"""
data_list = []
for data in scraped_data:
data_list.append(re.sub(regex, '', data))
return data_list
|
5eb42d0df2a0f93dbc14ec5fb5dd68bc6fe127ca
| 34,814 |
def bending_stresses(mx, my, mxy, z, h):
"""
Computes the bending stresses at given shell thickness and fiber height.
"""
def stress(m):
return m * z / ((h ** 3) / 12)
return stress(-mx), stress(-my), stress(mxy)
|
0a089542ccfee722dfa5746f357ef529b0124fa7
| 397,813 |
def summarize_missing_df(dataframe):
"""
Populate the fraction missing counter from a given DataFrame
"""
    # missing values are assumed to be encoded as 0.0
    results = {
        'frac_missing': float((dataframe == 0.0).sum().sum())
        / float(dataframe.shape[0] * dataframe.shape[1])
    }
return results
|
d69fe13bfcb0f176d0ae1024d0f043816f732a8d
| 549,180 |
import codecs
def load_utf8(filename):
"""Load UTF8 file."""
with codecs.open(filename, 'r', encoding='utf-8') as f:
return f.read()
|
9ce0f51879b2bc4b83690fb06a0353c3c73235cb
| 637,694 |
import struct
def get_bool(s: bytes) -> bool:
"""
Get bool value from bytes
:param s: bytes array contains bool value
:return: bool value from bytes array
"""
if str(s) == "b\'F\'":
return False
elif str(s) == "b\'T\'":
return True
else:
return struct.unpack('?', s)[0]
|
8da9c3c76a578ca347f8835404ef415005a39ed4
| 609,406 |
def label_from_id(id_string):
"""
    Returns a label string constructed from the supplied id string.
    Underscore characters in the id are replaced by spaces.
    The first character is capitalized.
>>> label_from_id("entity_id") == "Entity id"
True
"""
temp = id_string.replace('_', ' ').strip()
label = temp[0].upper() + temp[1:]
return label
|
9b52abdce169d26412a69585281ebadcff7cb0c2
| 40,036 |
import json
import base64
def decode_environ_backup(encoded_environ_backup: str) -> dict:
"""
Decode environ backup from safe string.
"""
return json.loads(base64.b64decode(encoded_environ_backup).decode("utf-8"))
|
1aa9f90bf9aa7f64c761a174e95394779534ad59
| 605,242 |
def quote_item(item, pre='', post=''):
"""Format an string item with quotes.
"""
post = post or pre
return f'{pre}{item}{post}'
|
5b3877a5ba1cd991406a6900d2dfd6ad7d3d42d1
| 177,779 |
def soup_extract_enzymelinks(tabletag):
"""Extract all URLs for enzyme families from first table."""
return {link.string: link['href']
for link in tabletag.find_all("a", href=True)}
|
7baabd98042ab59feb5d8527c18fe9fa4b6a50af
| 707,306 |
def get_counts(filename):
"""
reads the .counts_edit file and extracts the counts
:param filename: counts file (original or simulated)
:return: list of counts
"""
with open(filename, "r") as counts_file:
counts = []
for line in counts_file:
line = line.strip()
if line.startswith('>'): # taxa name
continue
else:
if line=="x" or line=="X": # discard counts with x
continue
counts.append(int(line))
return counts
|
2bb9914fbca82550e121f0dcb5c55f6817da6c91
| 687,739 |
def default(v, d):
"""Returns d when v is empty (string, list, etc) or false"""
if not v:
v = d
return v
|
17d0ee06fa28c088512aa6f5ecb7181949bbc315
| 541,872 |
def generate_parameter_map(parameters):
"""
Generates mapping of parameters for fasm features for architecture
definition.
"""
xml = []
xml.append("<meta name=\"fasm_params\">")
for feature, parameter in sorted(parameters):
xml.append(" {} = {}".format(feature, parameter))
xml.append("</meta>")
return "\n".join(xml)
|
de54f03de88b858cf4face404e47a1c5ced6d954
| 329,307 |
from typing import Iterable
from typing import List
import re
def split(delimiters: Iterable[str], s: str, maxsplit: int = 0) -> List[str]:
"""Split the string over an iterable of delimiters.
Based on https://stackoverflow.com/a/13184791
"""
pattern = "|".join(map(re.escape, delimiters))
return re.split(pattern, s, maxsplit)
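# Added illustration: the delimiters are escaped, so regex metacharacters are
# safe to pass.
# >>> split([",", "||"], "a,b||c")
# ['a', 'b', 'c']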
|
42ab4216b6c24e28fb98970b85e7492612ac1a21
| 29,297 |
from typing import List
def getVariantRedirects(rTitle: str) -> List[str]:
"""Get list of variant abbreviations similar to rTitle.
Similar means obtained by replacing an ISO-4 abbreviation with a
popular non-ISO-4 alternative hardcoded here, or by removing dots.
In particular both rTitle and rTitle.replace('.', '') will be returned.
"""
variantTitles = [rTitle]
replacements = [('Adm.', 'Admin.'),
('Animal', 'Anim.'),
('Am.', 'Amer.'),
('Atmospheric', 'Atmos.'),
('Br.', 'Brit.'),
('Calif.', 'Cal.'),
('Commun.', 'Comm.'),
('Contributions', 'Contrib.'),
('Entomol.', 'Ent.'),
('Investig.', 'Invest.'),
('Lond.', 'London'),
('Philos.', 'Phil.'),
('Political', 'Polit.'),
('Radiat.', 'Rad.'),
('Royal', 'Roy.'),
('Royal', 'R.'),
('Special', 'Spec.')]
for replIso, replVariant in replacements:
        newVariantTitles = list(variantTitles)  # copy; avoid appending to the list being iterated
for vTitle in variantTitles:
if replIso in vTitle:
newVariantTitles.append(vTitle.replace(replIso, replVariant))
variantTitles = newVariantTitles
dotless = [v.replace('.', '') for v in variantTitles]
variantTitles.extend(dotless)
return variantTitles
|
a2801b20dff3d8ad5175c5fd2fa577d3307e8268
| 304,750 |
def hop_to_str(offset):
"""
Formats a two-character string that uniquely identifies the hop offset.
>>> hop_to_str([-3, 0])
'L3'
>>> hop_to_str([1, 0])
'R1'
>>> hop_to_str([0, -2])
'T2'
>>> hop_to_str([0, +7])
'B7'
"""
# Zero offsets are not allowed
assert offset[0] != 0 or offset[1] != 0, offset
# Diagonal offsets are not allowed
if offset[0] != 0:
assert offset[1] == 0, offset
if offset[1] != 0:
assert offset[0] == 0, offset
# Horizontal
if offset[1] == 0:
if offset[0] > 0:
return "R{}".format(+offset[0])
if offset[0] < 0:
return "L{}".format(-offset[0])
# Vertical
if offset[0] == 0:
if offset[1] > 0:
return "B{}".format(+offset[1])
if offset[1] < 0:
return "T{}".format(-offset[1])
# Should not happen
assert False, offset
|
e5b59a37ef76b8a6fcbe1f5afc5ce57b861ca4b9
| 481,829 |
def fib(x):
"""
Returns the xth term of the Fibonacci sequence
"""
    a, b = 1, 1
    for i in range(1, x):
        a, b = b, a + b
    return a
|
76cb08e89f11152c1aa2240b14c216042850b0ab
| 44,432 |
def binary_search(arr, key):
"""
Searches for the key in a list and returns the position of the key in the array.
If the key can not be found in the list, raise a ValueError
    Keeps shifting the midpoint for as long as the middle value is not equal to the search key.
    If the search key is less than the middle value, the high point moves to middle - 1;
    if it is greater, the low point moves to middle + 1.
:param arr: The array, which will be a list of numbers
:param key: search key, what we will search for
:return: Position of the key in the array
:raises: ValueError if the key is not in the array
"""
low = 0
high = len(arr) - 1
while low <= high:
middle = (low + high) // 2
if arr[middle] > key:
high = middle - 1
elif arr[middle] < key:
low = middle + 1
else:
return middle
raise ValueError("Value not found")
|
8b7fbd9ded4741c32c766c57d3698d87ea739e18
| 55,725 |
import math
def offset(point, angle, dist):
"""Find the point `dist` from `point`, in the direction `angle`."""
x, y = point
angle = math.radians(angle)
x += dist * math.cos(angle)
y += dist * math.sin(angle)
return x, y
|
5a593a13cd979bcfd08299c9245e3526f5e2586c
| 195,185 |
def xml_find_single_tag (root, tag_name, required=True):
"""
Fetch an XML tag from XML.
:param root: XML to utilize
:param tag_name: Name of tag to fetch
    :param required: A boolean indicating whether the tag is required for a valid manifest XML
:return Tag if found
"""
tag = root.findall (tag_name)
if not tag:
if required:
print ("Missing {0} tag in manifest".format (tag_name))
return None
elif len (tag) > 1:
print ("Too many {0} tags in manifest".format (tag_name))
return None
return tag[0]
|
89234b451a34239373227201688b2dd4317d4d35
| 307,073 |
import torch
def H(x, eps=1e-6):
""" Compute the element-wise entropy of x
Arguments:
x {torch.Tensor} -- array of probabilities in (0,1)
Keyword Arguments:
eps {float} -- prevent failure on x == 0
Returns:
torch.Tensor -- H(x)
"""
return -(x+eps)*torch.log(x+eps)
|
25c0e1684130fbffcd64c42b142063a0f501a303
| 475,226 |
def api_version() -> str:
"""Which Adwords API version should be called"""
return 'v201809'
|
e304cff2fe010095f77719731bf9bedd310f0feb
| 220,814 |
import re
def workdir_from_dockerfile(dockerfile):
"""Parse WORKDIR from the Dockerfile."""
WORKDIR_REGEX = re.compile(r'\s*WORKDIR\s*([^\s]+)')
with open(dockerfile) as f:
lines = f.readlines()
for line in lines:
match = re.match(WORKDIR_REGEX, line)
if match:
            # We need to escape '$' since they're used for substitutions in
            # Container Builder builds.
return match.group(1).replace('$', '$$')
return None
|
33a927626a023ba988534afe5b3f8885e18db471
| 47,126 |
def get_change_extent(str1, str2):
"""Determines the extent of differences between two strings.
Returns a pair containing the offset at which the changes start,
and the negative offset at which the changes end.
If the two strings have neither a common prefix nor a common
suffix, ``(0, 0)`` is returned.
"""
start = 0
limit = min(len(str1), len(str2))
while start < limit and str1[start] == str2[start]:
start += 1
end = -1
limit = limit - start
while -end <= limit and str1[end] == str2[end]:
end -= 1
return (start, end + 1)
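# Worked example (added): only the middle character differs, so the changes
# start at offset 3 and end 3 characters before the end of each string.
# >>> get_change_extent("abcXdef", "abcYdef")
# (3, -3)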
|
275ae78cddc8bfe2948dfdc48e66b85b79cd27b8
| 496,993 |
import operator
import functools
def all_answering_yes(survey: list[str]) -> int:
"""
Counts the number of A-Z questions where every person
in the survey group answered yes.
"""
set_intersect_op = operator.and_
central_response = functools.reduce(set_intersect_op, (set(response) for response in survey))
return len(central_response)
|
4b870dd1ca730f354727e8a9ce0d2d12839139f1
| 509,368 |
def get_hit_fields(es_resp_obj_dict):
"""Returns a list of they fields in an elasticsearch search response
hit object."""
if es_resp_obj_dict['hits']['hits']:
fields = [key for key in es_resp_obj_dict['hits']['hits'][0]['_source'].keys()]
else:
return []
return fields
|
be090011f8ad96ff0bb4c745c9fd50fb762d87ee
| 253,992 |
def ask(question, answers, default_answer, type_validator=str):
"""Ask question to user via console.
:param question: [str] The question to ask.
:param answers: [list of str, str] The possible answers to choose from. If
'' (empty string), then no answer will be suggested.
:param default_answer: [str or numeric] The default answer for when the user
replies by pressing Enter. If '' (empty string), nothing will happen
when user hits Enter.
:param type_validator: Validate answer. Useful for when the user shall input
an int, for instance, and replies with a float. Examples are str, float,
int (no quotes, this is the class itself).
:return user_answer: What the user has typed, validated according to
type_validator.
Note that this function does not raise Exceptions when user input cannot be
validated or answer is not included among default ones. It keeps trying until
user inputs a valid answer.
"""
# Get number of answers
if not isinstance(answers, list):
answers = [answers]
if answers[0] == '':
n_answers = 0
else:
n_answers = len(answers)
# Append answer hints to question
if n_answers > 0 or str(default_answer) != '':
question_to_show = question + ' ('
if str(default_answer) != '':
question_to_show += '[%s]' % str(default_answer)
if n_answers > 0:
question_to_show += '/'
if n_answers > 0:
question_to_show += '%s' % '/'.join([i for i in answers if str(i) != str(default_answer)])
question_to_show += ')'
else:
question_to_show = question
# Ask user for an answer
while True:
user_answer = input(question_to_show)
# Set default option when user presses Enter
if user_answer == '':
if str(default_answer) == '':
print('Please try again')
else:
user_answer = default_answer
# Validate user answer
try:
user_answer = type_validator(user_answer)
except ValueError:
print('Answer type not allowed. Reply something that can be converted to \'%s\'' % repr(type_validator))
continue
# Stop if got an answer that is allowed, or if there are no good answers
if user_answer in answers or n_answers == 0:
break
else:
print('Please try again')
return user_answer
|
1e3c687eab1179591136b2390817ee45557a7e21
| 242,981 |
def checkH1(strings_h1, text_h1):
"""Checks if the string in h1 tags is present.
:param strings_h1: A list with all the strings contained in the H1 tags.
:param text_h1: the string to look for inside the tags.
:return: True if the text is present in the tags, False otherwise
:rtype: Boolean
"""
for i in strings_h1:
if i.string == text_h1:
return True
return False
|
cd13dbed935633ae11ed5e16fa2a5c5c469cdb2f
| 265,996 |
def _find_label_rows(sheet):
"""Search excel file column A for cells containing 'Label'.
Return a list of zero-indexed rows.
"""
label_rows = []
for i in range(sheet.nrows):
if "Label" in sheet.cell_value(i, 0):
label_rows.append(i)
return label_rows
|
5515874500c5ef514df02019e745d609b0474b2f
| 38,151 |
def padded_print(field_name, value):
"""Pad a string with leading spaces to be the same length as field_name."""
offset = len(field_name) - len(str(value))
if offset > 0:
return (' ' * offset) + str(value)
return str(value)
|
05cbe76850b5f41c55a4180477783b5f32f635f3
| 136,861 |
import base64
def Base64WSEncode(s):
"""
    Return Base64 web-safe encoding of s, suppressing padding characters (=).
    Uses the URL-safe alphabet: - replaces +, _ replaces /. Expects s to be
    of bytes type.
@param s: bytes to encode as Base64
@type s: bytes
@return: Base64 representation of s.
@rtype: bytes
"""
return base64.urlsafe_b64encode(s).replace(b"=", b"")
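# Added usage sketch: two input bytes encode to three significant characters
# plus one '=' pad, which is stripped.
# >>> Base64WSEncode(b"\xfb\xef")
# b'--8'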
|
5b07d94f4e5cdc35eb4094fe85f9567ef9f93455
| 444,066 |
def _getShapeSize(df, shape_name_str):
"""
Get the arbitrary shape size with its shape name.
Args:
df: tuple-(df_CompartmentData, df_NodeData, df_ReactionData, df_ArbitraryTextData, df_ArbitraryShapeData).
shape_name_str: str-the shape name.
Returns:
shape_size_list: list of shape_size.
shape_size: list-1*2 matrix-size of the shape [width, height].
"""
idx_list = df[4].index[df[4]["shape_name"] == shape_name_str].tolist()
    shape_size_list = []
    for idx in idx_list:
        shape_size_list.append(df[4].iloc[idx]["size"])
return shape_size_list
|
76bb20e423b26472c841bfd345fc8f42ce0888bd
| 624,034 |
import time
def date_str(t):
"""Convert seconds since the Epoch to formatted local date string."""
t = time.localtime(t)
return time.strftime('%Y-%m-%d',t)
|
c54ee8b480f843a9508025beed591905929a1f3a
| 358,693 |
def echo(message='hello'):
"""
Very simple endpoint that just echos your message back to you
:param message: str of the message to echo
:return: str of the message echoed
"""
return 'ECHO: %s' % message
|
7c023b7a33043459460c179e0f7f2abae36a5a99
| 355,675 |
def process_content(df, config, dataframes):
"""
Applies a function to each value of the column
"process_content": [
{
"function": lambda x: str(x),
"columns": ["column1", "column2"]
}
]
"""
for function_dict in config.process_content:
for column in function_dict['columns']:
df[column] = df[column].map(function_dict['function'])
return df
|
41040b0eafe0f97526b645fdecf363d653c1cf85
| 87,077 |
import codecs
def read_lines(infile, ignore_comments=True, strip=True, skip_empty=True):
"""Read lines from infile and return as a list, optionally stripping comments,
whitespace, and/or skipping blank lines."""
lines = []
with codecs.open(infile, "r", "utf-8") as f:
for line in f:
if ignore_comments:
ix = line.find("#")
if ix >= 0:
line = line[:ix]
if strip:
line = line.strip()
if not line and skip_empty:
continue
lines.append(line)
return lines
|
70aac2290bac6f99818fd7e199f1cd442ec56617
| 300,175 |
import logging
def _WarnAboutDuplicates(offsets):
"""Warns about duplicate offsets.
Args:
offsets: list of offsets to check for duplicates
Returns:
True if there are no duplicates, False otherwise.
"""
seen_offsets = set()
ok = True
for offset in offsets:
if offset not in seen_offsets:
seen_offsets.add(offset)
else:
ok = False
logging.warning('Duplicate offset: ' + hex(offset))
return ok
|
50f9e6891e79afc61f53ae5adb8d6832e8ff2856
| 70,427 |
def get_string_between(start, stop, s):
"""Search string for a substring between two delimeters. False if not found."""
i1 = s.find(start)
if i1 == -1:
return False
s = s[i1 + len(start):]
i2 = s.find(stop)
if i2 == -1:
return False
s = s[:i2]
return s
|
2a9927d2a7a5a1efc4774051d1f86247b6195f07
| 279,720 |
def field(name, transform=None):
""" Convenience function to explicitly return a "field" specification for
a Bokeh :class:`~bokeh.core.properties.DataSpec` property.
Args:
name (str) : name of a data source field to reference for a
``DataSpec`` property.
transform (Transform, optional) : a transform to apply (default: None)
Returns:
dict : ``{ "field": name }``
.. note::
This function is included for completeness. String values for
property specifications are by default interpreted as field names.
"""
if transform:
return dict(field=name, transform=transform)
return dict(field=name)
|
bc391f991fcebad956464c1f0cc741505d3d4028
| 637,027 |