content
stringlengths 42
6.51k
|
---|
def B(i, j, k):
    """
    Tensor B used in constructing ROMs.
    Parameters
    ----------
    i : int
    j : int
    k : int
        Indices in the tensor.
    Returns
    -------
    int
        Tensor output: -1, 1, or 0 (0 with a warning for unexpected indices).
    """
    if i == j + k:
        return -1
    if j == i + k or k == i + j:
        return 1
    # none of the index relations held: warn and fall back to zero
    print("Possible Error: Indices ({},{},{})".format(i, j, k))
    return 0
|
def filter_list_to_filter_policies(event_key: str, filter_list: list) -> dict:
    """
    Helper function to convert defined filter policies to
    AWS API acceptable format.

    Each filter must look like "source.detail"; a trailing "*" becomes
    a {"prefix": "source."} matcher. Returns {} when nothing converts.
    """
    converted = []
    for raw_filter in filter_list:
        parts = raw_filter.split(".")
        assert len(parts) == 2
        if parts[1] == "*":
            # wildcard detail: match on the "source." prefix instead
            converted.append({"prefix": f"{parts[0]}."})
        else:
            converted.append(raw_filter)
    return {event_key: converted} if converted else {}
|
def fail(error_msg):
    """
    Create failure response
    :param error_msg: human-readable description of the failure
    :return: dict with 'status' set to 'failure' and the message
    """
    response = {'status': 'failure'}
    response['error_msg'] = error_msg
    return response
|
def ssh_cmd_task_exec(detail, command_on_docker, wait_press_key=None) -> str:
    """SSH command to execute a command inside the first docker container of a task.

    :param detail: task detail dict with 'ec2InstanceId' and 'containers'
    :param command_on_docker: command to run inside the container
    :param wait_press_key: when truthy, append a pause prompt to the command
    """
    instance = detail['ec2InstanceId']
    container = detail['containers'][0]['runtimeId']
    cmd = f"TERM=xterm ssh -t {instance} docker exec -ti {container} {command_on_docker}"
    if wait_press_key:
        cmd += "; echo 'Press a key'; read q"
    return cmd
|
def oneHotEncode_4_evtypes_tau_decay_length(x, r_vals):
    """
    One-hot encodes the input for the event types
    cascade, track, double-bang, starting track.
    """
    cascade = [1., 0., 0., 0.]
    track = [0., 1., 0., 0.]
    doublebang = [0., 0., 1., 0.]
    s_track = [0., 0., 0., 1.]
    decay_length_cut = 5.  # aus config auslesen, no hardcode
    event_type = int(x)
    if event_type in (5, 6):
        # tau events: class depends on the reconstructed decay length (r_vals[8])
        return doublebang if r_vals[8] >= decay_length_cut else cascade
    lookup = {0: cascade, 1: cascade, 2: track, 3: s_track, 4: track,
              5: doublebang, 6: doublebang, 7: cascade, 8: track, 9: cascade}
    return lookup[event_type]
|
def tsp_not_return(num_vertex, distances, from_start=None):
    """
    Solve an open (no-return) travelling salesman problem with Held-Karp
    bitmask dynamic programming.

    :param num_vertex: number of vertices; must be < 20 to bound the 2**n table
    :param distances: square matrix of pairwise distances
    :param from_start: distance from a virtual start vertex (PAST3M);
        defaults to all zeros
    :return: minimal cost of visiting every vertex exactly once without
        returning to the start
    """
    assert num_vertex < 20
    if from_start is None:  # identity check; original used `== None`
        from_start = [0] * num_vertex
    INF = 9223372036854775807
    SUBSETS = 2 ** num_vertex
    # memo[subset][v]: cheapest way to visit `subset` ending at v
    memo = [[INF] * num_vertex for _ in range(SUBSETS)]
    for subset in range(1, SUBSETS):
        for v in range(num_vertex):  # vertex visited last
            mask = 1 << v
            if subset == mask:
                # previous vertex is the virtual start
                memo[subset][v] = min(
                    memo[subset][v],
                    from_start[v])
            elif subset & mask:  # subset includes v
                for u in range(num_vertex):
                    memo[subset][v] = min(
                        memo[subset][v],
                        memo[subset ^ mask][u] + distances[u][v])
    return min(memo[-1])
|
def split_digits(n1, n2):
    """Takes two numbers and returns them as lists of integer digits"""
    def to_digits(number):
        return [int(char) for char in str(number)]
    return to_digits(n1), to_digits(n2)
|
def parse_connection_string(connect_str):
    """Parse a connection string such as those provided by the Azure portal.
    Connection string should be formatted like:
    Key=Value;Key=Value;Key=Value
    The connection string will be parsed into a dictionary.
    Empty segments (e.g. from a trailing ';', common in portal-copied
    strings) are skipped instead of raising ValueError.
    :param connect_str: The connection string.
    :type connect_str: str
    :returns: dict[str, str]
    """
    connect_info = {}
    for field in connect_str.split(';'):
        if not field:
            # tolerate trailing/duplicated separators
            continue
        # split on the first '=' only: values may themselves contain '='
        key, value = field.split('=', 1)
        connect_info[key] = value
    return connect_info
|
def get_lemma_mapping(word_pairs):
    """Get a dictionary that stores all orthographic
    forms for a lemma.
    Args:
        word_pairs (Array) - a list of all word_pairs; each pair has
            'source' and 'target' entries with 'lemma' and 'word' keys
    Returns:
        Dictionary - {'lemma_word': {lemma: [words]},
                      'word_lemma': {word: [lemmas]}}, deduplicated
    """
    # Raw (possibly redundant) mappings of lemma -> words and word -> lemmas.
    lemma_word_dic = {}
    word_lemma_dic = {}
    for pair in word_pairs:
        # Source and target are handled identically; setdefault replaces the
        # original's four duplicated get/append/else blocks.
        for side in (pair['source'], pair['target']):
            lemma_word_dic.setdefault(side['lemma'], []).append(side['word'])
            word_lemma_dic.setdefault(side['word'], []).append(side['lemma'])
    # Deduplicate via sets, as the raw lists may contain repeats.
    return {
        'lemma_word': {lemma: list(set(words))
                       for lemma, words in lemma_word_dic.items()},
        'word_lemma': {word: list(set(lemmas))
                       for word, lemmas in word_lemma_dic.items()},
    }
|
def return_position_str(position_tuple):
    """Conversion of position-tuple to str; if entries are None they become "-1"."""
    label, second, third = position_tuple[0], position_tuple[1], position_tuple[2]
    if second is None:
        return "{};{};{}".format(label, -1, -1)
    return "{};{};{:.3f}".format(label, second, third)
|
def reformat_asterisks(text):
    """
    Fixes Pandas docstring warning about using * and ** without ending \* and \*\*.
    The fix distinguishes single * and ** by adding \\ to them. No changes for *italic* and **bold** usages.

    Scans each line left to right, escaping unpaired single and double
    asterisks while leaving properly paired *italic* / **bold** markup intact.
    :param text: Original text with warnings
    :return: Modified text that fixes warnings (always ends with a newline)
    """
    lines = text.split('\n')
    new_text = ''
    for line in lines:
        idx = 0 # Starting parsing position within the ``line``
        while idx < len(line): # Parsing until end of string reached
            idx1 = line.find('*', idx)
            if idx1 >= idx:
                # There is at least one asterisk in the line
                idx2 = line.find('*', idx1+1)
                if idx2 == -1:
                    # Only one single asterisk in the line - Reformat to `\*`
                    line = line.replace('*', '\\*')
                    idx = len(line) # Parsed the line. Go to another line
                elif idx2 == idx1+1:
                    # First double asterisk met in the line
                    idx2 = line.find('**', idx1+2)
                    if idx2 == -1:
                        # Only one double asterisk in the line Reformat to `\*\*`. But there could be more asterisks
                        line = line.replace('**', '\\*\\*')
                        # skip past the 4 characters of the replacement `\*\*`
                        idx = idx1+4
                    else:
                        # At least two double asterisks in the line
                        idx = idx2+2 # Deal with remaining asterisks on the next ``while`` loop iteration
                else:
                    # There is another asterisk apart from the first asterisk
                    if idx2+1 < len(line):
                        # There is at least one more symbol in the line after second asterisk
                        if line[idx2+1] == '*':
                            # Situation when double asterisk is met after the first single asterisk - Reformat to `\*`
                            line = line.replace('*', '\\*', 1) # Replace the first single asterisk
                            idx = idx2 # Handle double asterisk on the next ``while`` iteration
                        else:
                            # Two asterisks met in the line to italize characters between them
                            idx = idx2+1
                    else:
                        # Second asterisk was the last symbol in the line
                        idx = len(line)
            else:
                # No asterisks in the line
                idx = len(line)
        new_text += line + '\n'
    return new_text
|
def getChunkIndex(chunk_id):
    """ given a chunk_id (e.g.: c-12345678-1234-1234-1234-1234567890ab_6_4)
    return the coordinates of the chunk. In this case (6,4)
    """
    # everything after the first underscore holds the coordinates
    first_underscore = chunk_id.find('_')
    if first_underscore == -1:
        raise ValueError("Invalid chunk_id: {}".format(chunk_id))
    suffix = chunk_id[first_underscore + 1:]
    return [int(part) for part in suffix.split('_')]
|
def MakeCallString(params):
    """Given a list of (name, type, vectorSize) parameters, make a C-style
    formal parameter string.
    Ex return: 'index, x, y, z'.
    """
    # str.join handles the separators, replacing the manual counter loop
    # (which also shadowed the builtin `type` in its unpacking).
    return ', '.join(name for name, _ptype, _vec_size in params)
|
def revcomp(seq):
    """ Reverse complement sequence
    Args:
        seq: string from alphabet {A,T,C,G,N}
    Returns:
        reverse complement of seq
    """
    complement = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
    # every IUPAC ambiguity code (and N) collapses to 'N'
    for ambiguity_code in 'NRYKMSWBDHV':
        complement[ambiguity_code] = 'N'
    bases = []
    for base in reversed(seq):
        bases.append(complement[base])
    return ''.join(bases)
|
def exclusion_filter(exclude: str):
    """Converts a filter string "*.abc;*.def" to a function that can be passed to pack().
    If 'exclude' is None or an empty string, returns None (which means "no filtering").
    """
    if not exclude:
        return None
    import re
    import fnmatch
    # Build one alternation regex over bytes:
    # "*.txt;*.png;*.rst" --> rb".*\.txt$|.*\.png$|.*\.rst$"
    globs = [g for g in exclude.split(';') if g]
    pattern = b'|'.join(fnmatch.translate(g).encode('utf-8') for g in globs)
    matcher = re.compile(pattern, re.IGNORECASE)

    def filename_filter(fname: bytes):
        # keep only filenames that match none of the exclusion globs
        return not matcher.match(fname)
    return filename_filter
|
def get_workflow_wall_time(workflow_states_list):
    """
    Utility method for returning the workflow wall time given all the workflow states.
    Returns None unless start and end events pair up exactly.
    @workflow_states_list list of all workflow states.
    """
    start_count = end_count = 0
    start_sum = end_sum = 0
    for workflow_state in workflow_states_list:
        if workflow_state.state == 'WORKFLOW_STARTED':
            start_count += 1
            start_sum += workflow_state.timestamp
        else:
            # every non-STARTED event is treated as an end event
            end_count += 1
            end_sum += workflow_state.timestamp
    if start_count > 0 and start_count == end_count:
        return end_sum - start_sum
    return None
|
def lowercase(text: str) -> str:
    """Return *text* converted to lower case."""
    lowered = text.lower()
    return lowered
|
def load_str(filename: str, method="read") -> str:
    """Loads a file to string; `method` names the file-object method used."""
    with open(filename, "r") as handle:
        reader = getattr(handle, method)
        return reader()
|
def protoc_command(args):
    """Composes the initial protoc command-line including its include path."""
    cmd = [args.protoc]
    if args.include is not None:
        for path in args.include:
            cmd.append('-I%s' % path)
    return cmd
|
def enclose_string(text):
    """enclose text with either double-quote or triple double-quote
    Parameters
    ----------
    text (str): a text
    Returns
    -------
    str: a new string with enclosed double-quote or triple double-quote
    """
    text = str(text)
    escaped = text.replace('"', r'\"')
    # multi-line content gets triple quotes, single-line gets plain quotes
    if len(text.splitlines()) > 1:
        return '"""{}"""'.format(escaped)
    return '"{}"'.format(escaped)
|
def make_optic_canula_cylinder(
    atlas,
    root,
    target_region=None,
    pos=None,
    offsets=(0, 0, -500),
    hemisphere="right",
    color="powderblue",
    radius=350,
    alpha=0.5,
    **kwargs,
):
    """
    Creates a cylindrical vedo actor to scene to render optic cannulas. By default
    this is a semi-transparent blue cylinder centered on the center of mass of
    a specified target region and oriented vertically.
    :param atlas: atlas object providing `get_region_CenterOfMass`
    :param root: root brain actor; only its `bounds()` are used here
    :param target_region: str, acronym of target region to extract coordinates
        of implanted fiber. By defualt the fiber will be centered on the center
        of mass of the target region but the offset arguments can be used to
        fine tune the position. Alternative pass a 'pos' argument with AP-DV-ML coords.
    :param pos: list or tuple or np.array with X,Y,Z coordinates. Must have length = 3.
    :param offsets: per-axis adjustments added to `pos` in place
    :param x_offset, y_offset, z_offset: int, used to fine tune the coordinates of
        the implanted cannula.
    :param **kwargs: used to specify which hemisphere the cannula is and parameters
        of the rendered cylinder: color, alpha, rotation axis...
    :returns: dict of cylinder construction parameters (pos pair, color,
        radius, alpha), or None when no position could be determined
    """
    # Get coordinates of brain-side face of optic cannula
    if target_region is not None:
        # NOTE(review): when target_region is given it overrides any `pos` argument
        pos = atlas.get_region_CenterOfMass(
            target_region, unilateral=True, hemisphere=hemisphere
        )
    elif pos is None:
        print(
            "No 'pos' or 'target_region' arguments were \
                passed to 'add_optic_cannula', nothing to render"
        )
        return
    # Offset position
    # NOTE(review): mutates `pos` in place — assumes a mutable, indexable
    # sequence (list or np.array, not a tuple); TODO confirm callers
    for i, offset in enumerate(offsets):
        pos[i] += offset
    # Get coordinates of upper face
    # NOTE(review): assumes `pos` supports .copy() and bounds[2] is the
    # relevant vertical extent of the root mesh — confirm against vedo docs
    bounds = root.bounds()
    top = pos.copy()
    top[1] = bounds[2] - 500
    # Create actor
    # NOTE(review): despite the docstring, this returns the cylinder's
    # construction kwargs as a dict, not a vedo actor — verify downstream use
    return dict(pos=[top, pos], c=color, r=radius, alpha=alpha, **kwargs)
|
def parse_ellipsis(index):
    """
    Helper to split a slice into parts left and right of Ellipses.
    :param index: A tuple, or other object (None, int, slice, Funsor).
    :returns: a pair of tuples ``left, right``.
    :rtype: tuple
    """
    if not isinstance(index, tuple):
        index = (index,)
    # collect everything up to (but excluding) the first Ellipsis
    left = []
    consumed = 0
    for part in index:
        consumed += 1
        if part is Ellipsis:
            break
        left.append(part)
    # collect everything after the last Ellipsis in the remainder
    right = []
    for part in reversed(index[consumed:]):
        if part is Ellipsis:
            break
        right.append(part)
    return tuple(left), tuple(right[::-1])
|
def consecutive_seconds(rel_seconds, window_size_sec, stride_sec=1):
    """
    Return a list of all the possible [window_start, window_end] pairs
    containing consecutive seconds of length window_size_sec inside.
    Args:
        rel_seconds: a list of qualified seconds
        window_size_sec: int
        stride_sec: int
    Returns:
        win_start_end: a list of all the possible [window_start, window_end] pairs.
    Test:
        >>> rel_seconds = [2,3,4,5,6,7,9,10,11,12,16,17,18]; window_size_sec = 3; stride_sec = 1
        >>> print(consecutive_seconds(rel_seconds, window_size_sec))
        >>> [[2, 4], [3, 5], [4, 6], [5, 7], [9, 11], [10, 12], [16, 18]]
    """
    windows = []
    last_start = len(rel_seconds) - window_size_sec
    for start in range(0, last_start + 1, stride_sec):
        end = start + window_size_sec - 1
        # a fully consecutive run spans exactly window_size_sec - 1 seconds
        if rel_seconds[end] - rel_seconds[start] == window_size_sec - 1:
            windows.append([rel_seconds[start], rel_seconds[end]])
    return windows
|
def solution(number):
    """Return the sum of all multiples of 3 or 5 below the input number."""
    return sum(i for i in range(number) if i % 3 == 0 or i % 5 == 0)
|
def _mel2hz(mel):
"""Convert a value in Mels to Hertz
:param mel: a value in Mels. This can also be a numpy array, conversion proceeds element-wise.
:returns: a value in Hertz. If an array was passed in, an identical sized array is returned.
"""
return 700 * (10 ** (mel / 2595.0) - 1)
|
def _is_sub_partition(sets, other_sets):
"""
Checks if all the sets in one list are subsets of one set in another list.
Used by get_ranked_trajectory_partitions
Parameters
----------
sets : list of python sets
partition to check
other_sets : list of python sets
partition to check against
Returns
-------
bool
whether all sets are included in other_sets
"""
for set_ in sets:
if not any(map(set_.issubset, other_sets)):
#here if at least one set is not fully included in another set in the other partition
return False
return True
|
def enc(text):
    """Yes a bodge function. Get over it.

    Best-effort conversion to ``str``: UTF-8 bytes are decoded; anything
    that cannot be converted is returned unchanged. The original caught
    ``UnicodeEncodeError``, which ``bytes.decode`` never raises (it raises
    ``UnicodeDecodeError``), and calling ``.decode`` on a ``str`` raised
    ``AttributeError`` on Python 3 — so only the first branch ever worked.
    """
    if isinstance(text, bytes):
        try:
            return text.decode("utf-8")
        except UnicodeDecodeError:
            # undecodable bytes: hand back the original object
            return text
    return text
|
def getDuration(results):
    """Returns the first timestamp of the simulation after the prepull and the
    duration of the simulation after the prepull
    """
    # only damage events with a timestamp count
    timestamps = [event['timestamp'] for event in results
                  if 'damage' in event and 'timestamp' in event]
    first = min(timestamps)
    return (first, max(timestamps) - first)
|
def fix_dec(x):
    """
    TLE format assumes a leading decimal point, just putting it back in
    """
    # split off an optional leading minus, then prepend "0."
    sign, digits = ("-", x[1:]) if x[0] == "-" else ("", x)
    return float(sign + "0." + digits)
|
def freeRotor(pivots, top, symmetry):
    """Read a free rotor directive, and return the attributes in a list"""
    attributes = [pivots]
    attributes.append(top)
    attributes.append(symmetry)
    return attributes
|
def py_type_name(type_name):
    """Get the Python type name for a given model type.
    >>> py_type_name('list')
    'list'
    >>> py_type_name('structure')
    'dict'
    :rtype: string
    """
    # unknown model types pass through unchanged
    mapping = {
        'blob': 'bytes',
        'character': 'string',
        'double': 'float',
        'long': 'integer',
        'map': 'dict',
        'structure': 'dict',
        'timestamp': 'datetime',
    }
    return mapping.get(type_name, type_name)
|
def prefix_max(array: list, i: int) -> int:
    """Return index of maximum item in array[:i+1]"""
    best = 0
    for idx in range(1, i + 1):
        # ties favour the later index, matching the recursive formulation
        if array[idx] >= array[best]:
            best = idx
    return best
|
def join_genres(genres):
    """Trim and join genres to 255 characters."""
    joined = ';'.join(genres)
    return joined[:255]
|
def strip_lines(text):
    """
    Given text, try remove unnecesary spaces and
    put text in one unique line.
    """
    one_line = text
    # normalise all newline conventions to single spaces
    for newline in ("\r\n", "\r", "\n"):
        one_line = one_line.replace(newline, " ")
    return one_line.strip()
|
def find_secondary_lithology(tokens_and_primary, lithologies_adjective_dict, lithologies_dict):
    """Find a secondary lithology in a tokenised sentence.
    Args:
        tokens_and_primary (tuple ([str],str): tokens and the primary lithology
        lithologies_adjective_dict (dict): dictionary, where keys are exact, "clear" markers for secondary lithologies (e.g. 'clayey'). Keys are the lithology classes.
        lithologies_dict (dict): dictionary, where keys are exact markers as match for lithologies. Keys are the lithology classes.
    Returns:
        str: secondary lithology if dectected. empty string for none.
    """
    tokens, prim_litho = tokens_and_primary
    if prim_litho == '':
        # cannot have a secondary lithology if no primary
        return ''

    def first_other_match(marker_dict):
        # first token whose class differs from the primary lithology
        for token in tokens:
            if token in marker_dict:
                candidate = marker_dict[token]
                if candidate != prim_litho:
                    return candidate
        return ''

    # adjectives are more likely to semantically mean a secondary lithology
    secondary = first_other_match(lithologies_adjective_dict)
    if secondary:
        return secondary
    # fallback: a looser set of terms
    return first_other_match(lithologies_dict)
|
def isStablePile(a):
    """Returns True if pile ``a`` is stable (no cell exceeds 3), False otherwise.

    :param a: 2D iterable of cell heights
    """
    # A cell holding more than 3 grains would topple, making the pile
    # unstable. `all` short-circuits on the first such cell instead of
    # counting every offending cell like the original loop did.
    return all(cell <= 3 for row in a for cell in row)
|
def add(mean1, var1, mean2, var2):
    """ add the Gaussians (mean1, var1) with (mean2, var2) and return the
    results as a tuple (mean,var).
    var1 and var2 are variances - sigma squared in the usual parlance.
    """
    combined_mean = mean1 + mean2
    combined_var = var1 + var2
    return (combined_mean, combined_var)
|
def _split(container, count):
"""
Simple function splitting a container into equal length chunks.
Order is not preserved but this is potentially an advantage depending on
the use case.
"""
return [container[_i::count] for _i in range(count)]
|
def gulpease_index(words, letters, points):
    """
    As written in https://it.wikipedia.org/wiki/Indice_Gulpease
    """
    raw = 89 + (300 * points - 10 * letters) / words
    # the index is capped at 100
    return min(raw, 100)
|
def _get_file_contents(file_path: str) -> str:
"""Get contents of a text file."""
with open(file_path, 'r') as fdes:
return fdes.read()
|
def _get_tensor_name(node_name, output_slot):
"""Get tensor name given node name and output slot index.
Parameters
----------
node_name : str
Name of the node that outputs the tensor, as a string.
output_slot : int
Output slot index of the tensor, as an integer.
Returns
-------
tensor_name : str
Name of the tensor, as a string.
"""
return "%s:%d" % (node_name, output_slot)
|
def FindDuplicates(seq):
    """Identifies duplicates in a list.
    Does not preserve element order.
    @type seq: sequence
    @param seq: Sequence with source elements
    @rtype: list
    @return: List of duplicate elements from seq
    """
    seen = set()
    duplicated = set()
    for element in seq:
        # first sighting goes to `seen`, any repeat to `duplicated`
        target = duplicated if element in seen else seen
        target.add(element)
    return list(duplicated)
|
def clamp(value, minimum, maximum):
    """
    Clamp a number between a minimum and a maximum.
    :param value: value to be clamped
    :param minimum: minimum value
    :param maximum: maximum value
    :return: the clamped value
    """
    upper_bounded = min(value, maximum)
    return max(minimum, upper_bounded)
|
def calculateNetIncome(gross, state):
    """
    Calculate the net income after federal and state tax
    :param gross: Gross Income
    :param state: State Name
    :return: Net Income, or None for an unknown state
    """
    state_tax = {'LA': 10, 'SA': 0, 'NY': 9}
    # guard clause: unknown states yield no result
    if state not in state_tax:
        print("State not in the list")
        return None
    # federal tax is a flat 10% of gross
    net = gross - (gross * .10)
    # state tax is applied on the gross amount as well
    net = net - (gross * state_tax[state] / 100)
    print("Your net income after all the heavy taxes is: " + str(net))
    return net
|
def read_file(filename: str, offset: int, size: int) -> str:
    """Read `size` characters starting at `offset` from a text file."""
    with open(filename, 'r') as handle:
        handle.seek(offset)
        chunk = handle.read(size)
    return chunk
|
def temperature_closest_to_zero(temperatures):
    """Receives an array of temperatures and returns the closest one to zero.
    On ties in magnitude, the positive temperature wins."""
    best = temperatures[0]
    for candidate in temperatures[1:]:
        closer = abs(candidate) < abs(best)
        tie_prefers_positive = abs(candidate) == abs(best) and candidate > 0
        if closer or tie_prefers_positive:
            best = candidate
    return best
|
def merge_dicts(dict1, dict2):
    """ _merge_dicts
    Merges two dictionaries into one; on key clashes dict2's value wins.
    INPUTS
        @dict1 [dict]: First dictionary to merge.
        @dict2 [dict]: Second dictionary to merge.
    RETURNS
        @merged [dict]: Merged dictionary
    """
    merged = dict(dict1)
    merged.update(dict2)
    return merged
|
def tap_type_to_target_type(mongo_type):
    """Data type mapping from MongoDB to Postgres; unknown types default
    to CHARACTER VARYING."""
    mapping = {
        'string': 'CHARACTER VARYING',
        'object': 'JSONB',
        'array': 'JSONB',
        'date': 'TIMESTAMP WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'timestamp': 'TIMESTAMP WITHOUT TIME ZONE',
    }
    return mapping.get(mongo_type, 'CHARACTER VARYING')
|
def matrix_multiply(a, b):
    """
    Multiply a matrix of any dimension by another matrix of any dimension.
    :param a: Matrix as list of list
    :param b: Matrix as list of list
    :return: product matrix as list of list
    """
    # transpose b once so each inner product pairs a row of a with a column of b
    b_columns = list(zip(*b))
    product = []
    for a_row in a:
        product.append([sum(x * y for x, y in zip(a_row, column))
                        for column in b_columns])
    return product
|
def learner_name(learner):
    """Return the value of `learner.name` if it exists, or the learner's type
    name otherwise"""
    try:
        return learner.name
    except AttributeError:
        return type(learner).__name__
|
def parade_bias(p):
    """ Model bias is negative of mean residual """
    means = []
    for moment in p['moments']:
        means.append(moment.get('mean'))
    return means
|
def is_dict(inst):
    """Returns whether or not the specified instance is a dict.

    The original probed for an ``iteritems`` attribute, which only exists
    on Python 2 dicts — on Python 3 it always returned False.
    """
    return isinstance(inst, dict)
|
def _safe_snr_calculation(s, n):
"""
Helper used in this module for all snr calculations. snr is
always defined as a ratio of signal amplitude divided by noise amplitude.
An issue is that with simulation data it is very common to have a noise
window that is pure zeros. If computed naively snr would then be
normally be returned as NaN. NaNs can cause a lot of mysterious errors
so we handle that differently here. When noise amplitude is 0 we
then test the signal amplitude. If it is nonzero we return large
number defined inside this function as 999999.9 (just under 1 million).
If both amplitudes are zero we return -1.0 which can be properly treated
as an error or data with low snr.
"""
if n == 0.0:
if s > 0.0:
return 999999.9
else:
return -1.0
else:
return s / n
|
def _roman(n):
"""Converts integer n to Roman numeral representation as a string"""
if not (1 <= n <= 5000):
raise ValueError(f"Can't represent {n} in Roman numerals")
roman_numerals = (
(1000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
)
roman = ""
for value, numeral in roman_numerals:
while n >= value:
roman += numeral
n -= value
return roman
|
def _af_rmul(a, b):
"""
Return the product b*a; input and output are array forms. The ith value
is a[b[i]].
Examples
========
>>> from sympy.combinatorics.permutations import _af_rmul, Permutation
>>> Permutation.print_cyclic = False
>>> a, b = [1, 0, 2], [0, 2, 1]
>>> _af_rmul(a, b)
[1, 2, 0]
>>> [a[b[i]] for i in range(3)]
[1, 2, 0]
This handles the operands in reverse order compared to the ``*`` operator:
>>> a = Permutation(a); b = Permutation(b)
>>> list(a*b)
[2, 0, 1]
>>> [b(a(i)) for i in range(3)]
[2, 0, 1]
See Also
========
rmul, _af_rmuln
"""
return [a[i] for i in b]
|
def _parse_range_header(range_header):
"""Parse HTTP Range header.
Args:
range_header: A str representing the value of a range header as retrived
from Range or X-AppEngine-BlobRange.
Returns:
Tuple (start, end):
start: Start index of blob to retrieve. May be negative index.
end: None or end index. End index is exclusive.
(None, None) if there is a parse error.
"""
if not range_header:
return None, None
try:
# ValueError if <1 split.
range_type, ranges = range_header.split('=', 1)
if range_type != 'bytes':
return None, None
ranges = ranges.lstrip()
if ',' in ranges:
return None, None
end = None
if ranges.startswith('-'):
start = int(ranges)
if start == 0:
return None, None
else:
split_range = ranges.split('-', 1)
start = int(split_range[0])
if len(split_range) == 2 and split_range[1].strip():
end = int(split_range[1]) + 1
if start > end:
return None, None
return start, end
except ValueError:
return None, None
|
def serialize_length(l):
    """ Returns minimal length prefix (note length prefix is not unique),
    in new style."""
    l = int(l)
    if l < 0:
        raise ValueError(l)
    if l < 192:
        # one-octet form
        return bytes((l,))
    if l < 8384:
        # two-octet form: value offset by 192, high octet biased by 192
        reduced = l - 192
        return bytes(((reduced >> 8) + 192, reduced & 0xff))
    if l <= 4294967295:
        # five-octet form: 0xff marker then 32-bit big-endian length
        return b'\xff' + l.to_bytes(4, 'big')
    raise OverflowError(l)
|
def display_benign_list(value):
    """
    Function that either displays the list of benign domains or hides them
    depending on the position of the toggle switch.
    Args:
        value: Contains the value of the toggle switch.
    Returns:
        A dictionary that communicates with the Dash interface whether to
        display the list of benign domains or hide them.
    """
    # only an explicit False hides the list; anything else shows it
    hidden = value is False
    return {'display': 'none' if hidden else 'unset'}
|
def interpolate_force_line(form, x, tol=1E-6):
    """Interpolates a new point in a form polyline
    Used by the `add_force_line` function"""
    result = [form[0]]
    half_tol = 0.5 * tol
    for left, right in zip(form[:-1], form[1:]):
        if x - left[0] > half_tol and right[0] - x > half_tol:
            # linear interpolation between the segment endpoints
            slope = (right[1] - left[1]) / (right[0] - left[0])
            interpolated = [x, left[1] + (x - left[0]) * slope]
            # the vertex is doubled; both entries alias the same list,
            # matching the original `2 * [[x, y]]` behaviour
            result += [interpolated, interpolated]
        result.append(right)
    return result
|
def replace_all(string: str, old: str, new: str) -> str:
    """Iteratively replace all instances of old with new.

    Unlike a single ``str.replace`` pass (which the original did despite
    its docstring), the substitution repeats until no occurrence of
    ``old`` remains — e.g. collapsing "aaa" with old="aa", new="a"
    yields "a".

    :param string: String to be acted upon
    :param old: Substring to be replaced
    :param new: String replacement
    :returns: A copy of string with new replacing old
    """
    if not old or old in new:
        # iterating would never terminate (empty pattern, or the
        # replacement re-introduces the pattern); do a single pass,
        # which matches the original behaviour for these inputs
        return string.replace(old, new)
    while old in string:
        string = string.replace(old, new)
    return string
|
def filter_blocks(blocks):
    """Filter out spurious diffs caused by XML deserialization/serialization.

    Each block is a sequence of diff lines; a block is dropped when any of
    its removal lines ('-' prefixed) starts one of the XML elements whose
    serialization is known to churn, or when any line contains the
    HTML-escaped apostrophe entity.
    """
    new_blocks = []
    for block in blocks:
        # diffs touching the RDF metadata wrapper
        if any(l.startswith('-<rdf:RDF') for l in block):
            continue
        # diffs touching MathML markup
        if any(l.startswith('-<math') for l in block):
            continue
        # diffs touching the SBML root element
        if any(l.startswith('-<sbml') for l in block):
            continue
        # diffs touching XHTML body notes
        if any(l.startswith('-<body') for l in block):
            continue
        # lines containing the escaped-apostrophe entity anywhere in the block
        if any(''' in l for l in block):
            continue
        new_blocks.append(block)
    return new_blocks
|
def _average(input_list: list) -> float:
"""
Find the average of a list.
Given a list of numbers, calculate the average of all values in the list.
If the list is empty, default to 0.0.
Parameters
----------
input_list : list
A ``list`` of ``floats`` to find an average of.
Returns
-------
float
Returns a ``float`` of the average value of the list.
"""
try:
return float(sum(input_list) / len(input_list))
except ZeroDivisionError:
return 0.0
|
def updateBounds(bounds, p, min=min, max=max):
    """Add a point to a bounding rectangle.
    Args:
        bounds: A bounding rectangle expressed as a tuple
            ``(xMin, yMin, xMax, yMax)``.
        p: A 2D tuple representing a point.
        min,max: functions to compute the minimum and maximum.
    Returns:
        The updated bounding rectangle ``(xMin, yMin, xMax, yMax)``.
    """
    (x, y) = p
    xMin, yMin, xMax, yMax = bounds
    xMin = min(xMin, x)
    yMin = min(yMin, y)
    xMax = max(xMax, x)
    yMax = max(yMax, y)
    return xMin, yMin, xMax, yMax
|
def _SumRows(*rows):
"""
BRIEF Total each of the columns for all the rows
"""
total = [0.0]*len(rows[0])
for row in rows:
for i, col in enumerate(row):
total[i] += col
return total
|
def format_subject(subject):
    # type: (str) -> str
    """
    Escape CR and LF characters.
    """
    escaped = subject.replace('\n', '\\n')
    return escaped.replace('\r', '\\r')
|
def change_key_names(rows, changes):
    """
    Renames the keys specified in rows (mutating them in place).
    Where changes = {new:original, new:original}
    """
    for row in rows:
        for new_key, old_key in changes.items():
            # pop removes the old key and supplies its value for the new one
            row[new_key] = row.pop(old_key)
    return rows
|
def md_heading(raw_text, level):
    """Returns markdown-formatted heading; level is clamped to [0, 6]."""
    adjusted_level = min(max(level, 0), 6)
    hashes = '#' * adjusted_level
    separator = ' ' if adjusted_level > 0 else ''
    return '%s%s%s' % (hashes, separator, raw_text)
|
def calc_add_bits(len, val):
    """ Calculate the value from the "additional" bits in the huffman data. """
    # if the top bit of the len-bit field is clear, the value is
    # negative-coded and must be rebased
    if not (val & (1 << len - 1)):
        val -= (1 << len) - 1
    return val
|
def vocab(table, text):
    """
    Return a new table containing only the vocabulary in the source text.
    Create a new translation table containing only the rules that are
    relevant for the given text. This is created by checking all source
    terms against a copy of the text.
    """
    relevant_rules = []
    remaining = str(text)
    for rule in table:
        source_term = rule[0]
        if source_term in remaining:
            # blank out the matched term so overlapping later rules don't re-match
            remaining = remaining.replace(source_term, '\x1f')
            relevant_rules.append(rule)
    return relevant_rules
|
def possible_moves(state: list) -> list:
    """
    Returns list of all possible moves
    For the given board position 'state' returns
    list of all possible moves for the next turn
    Parameters
    ----------
    state : list
        The board position to be evaluated
    """
    # a move is any empty (0) cell, reported as [row, column]
    return [[x, y]
            for x, row in enumerate(state)
            for y, cell in enumerate(row)
            if cell == 0]
|
def form_definition_entries_special(pos, provenance, pw_id, pw_entry, kb_id, kb_entry):
    """
    Create one def-match entry per definition in the PW entry.
    :param pos: part-of-speech label
    :param provenance: provenance tag for the match
    :param pw_id: PW identifier
    :param pw_entry: dict with a 'definition' list
    :param kb_id: KB identifier
    :param kb_entry: sequence whose second element is the KB class
    :return: list of entry dicts
    """
    kb_cls = kb_entry[1]
    return [
        {
            'label': pos,
            'provenance': provenance,
            'pw_id': pw_id,
            'pw_cls': pw_def,
            'kb_id': kb_id,
            'kb_cls': kb_cls,
        }
        for pw_def in pw_entry['definition']
    ]
|
def odd_numbers_list(start, stop):
    """
    Generate all the odd numbers in a given range.
    :param start: The start of the range.
    :param stop: The end of the range (inclusive).
    :return: A list of odd numbers.
    """
    odds = []
    for candidate in range(start, stop + 1):
        if candidate % 2 == 1:
            odds.append(candidate)
    return odds
|
def _total_probe_count_without_interp(params, probe_counts):
"""Calculate a total probe count without interpolation.
This assumes that params are keys in the datasets of probe_counts.
The result of ic._make_total_probe_count_across_datasets_fn should give
the same count as this function (if params are keys in the datasets
of probe_counts). But this uses probe_counts directly and can be
used as a sanity check -- i.e., it does not do any interpolation.
Args:
params: parameter values to use when determining probe counts;
params[i] is the (i % N)'th parameter of the (i/N)'th dataset,
where N is the number of datasets
probe_counts: dict giving number of probes for each dataset and
choice of parameters
Returns:
total number of probes across all datasets, according to the
given values of params
"""
num_datasets = len(probe_counts)
# The total number of parameters must be a multiple of the number
# of datasets
assert len(params) % num_datasets == 0
num_params = int(len(params) / num_datasets)
s = 0
for i, dataset in enumerate(sorted(probe_counts.keys())):
p = tuple(params[num_params * i + j] for j in range(num_params))
s += probe_counts[dataset][p]
return s
|
def multirate_padding(used_bytes, align_bytes):
    """
    The Keccak padding function.
    """
    padlen = align_bytes - used_bytes
    if padlen == 0:
        # an exactly-full block still needs a whole block of padding
        padlen = align_bytes
    # note: padding done in 'internal bit ordering', wherein LSB is leftmost
    if padlen == 1:
        # one byte carries both the first (0x01) and final (0x80) padding bits
        return [0x81]
    return [0x01] + [0x00] * (padlen - 2) + [0x80]
|
def make_key_mapping(key):
    """ Make a mapping for this key.

    Returns ``(mapping, inverse_mapping)`` where ``mapping[i]`` is the
    position of ``key[i]`` in the sorted key and ``inverse_mapping``
    undoes it. Duplicate characters get distinct positions in stable
    (left-to-right) order; the original ``sorted_key.index`` lookup gave
    every duplicate the same position, producing a non-bijective mapping
    and a wrong inverse. For keys without repeated characters the result
    is identical to the original.
    """
    length = len(key)
    # Indices of `key` ordered by character. sorted() is stable, so
    # duplicates keep their original relative order.
    order = sorted(range(length), key=lambda i: key[i])
    mapping = [0] * length
    for position, original_index in enumerate(order):
        mapping[original_index] = position
    # Invert the permutation.
    inverse_mapping = [0] * length
    for i in range(length):
        inverse_mapping[mapping[i]] = i
    return mapping, inverse_mapping
|
def WithChanges(resource, changes):
    """Apply ConfigChangers to resource.
    It's undefined whether the input resource is modified.
    Args:
        resource: KubernetesObject, probably a Service.
        changes: List of ConfigChangers.
    Returns:
        Changed resource.
    """
    adjusted = resource
    for changer in changes:
        # each changer returns the (possibly new) resource
        adjusted = changer.Adjust(adjusted)
    return adjusted
|
def query_release(release):
    """
    Build formatted query string for ICESat-2 release
    Arguments
    ---------
    release: ICESat-2 data release to query
    Returns
    -------
    query_params: formatted string for CMR queries
    """
    if release is None:
        return ''
    #-- maximum length of version in CMR queries
    max_pad = 3
    if len(str(release)) > max_pad:
        raise RuntimeError('Release string too long: "{0}"'.format(release))
    #-- Strip off any leading zeros
    stripped = str(release).lstrip('0')
    #-- emit one version parameter for every zero-padded width
    pieces = []
    pad = max_pad
    while len(stripped) <= pad:
        pieces.append(f'&version={stripped.zfill(pad)}')
        pad -= 1
    return ''.join(pieces)
|
def typeName(obj):
    """
    Gets the object name of the passed instance as a string
    Args:
        obj (object): Instance of object to get the type name of
    Returns:
        str: name of the passed objects type
    """
    return type(obj).__name__
|
def wrap_in_tag(tag: str, content: str, attributes: str = ""):
    """
    Wraps some content in HTML tags with specific attributes.
    :param tag: The tag that wraps the content
    :param content: The content being wrapped
    :param attributes: Optional attributes that can be assigned to the opening tag
    :return: The wrapped content with the correct attributes.
    """
    # Only insert the separating space when attributes are present; the
    # original always emitted it, producing malformed-looking '<p >x</p>'.
    opening = f"{tag} {attributes}" if attributes else tag
    return f"<{opening}>{content}</{tag}>"
|
def join_version(version_tuple):
    """Return a string representation of version from given VERSION tuple"""
    major, minor, patch = version_tuple[:3]
    version = f"{major}.{minor}.{patch}"
    # anything other than a 'final' release gets the stage appended
    if version_tuple[3] != "final":
        version = f"{version}-{version_tuple[3]}"
    return version
|
def UInt4ToASCII(ints):
    """
    Turn a List of unsigned 4-bit ints into ASCII. Pure magic.

    Consecutive pairs form one byte: (high nibble, low nibble).
    Raises IndexError on an odd-length input, like the original.
    """
    out = []
    for i in range(0, len(ints), 2):
        high, low = ints[i], ints[i + 1]
        out.append(chr((high << 4) + low))
    return ''.join(out)
|
def new(name, num_servings):
    """ Create and return a new recipe.

    A recipe is a plain dict with empty instruction/ingredient lists.
    """
    recipe = dict(name=name, num_servings=num_servings)
    recipe['instructions'] = []
    recipe['ingredients'] = []
    return recipe
|
def make_remote(ref, remote):
    """Return a Git reference based on a local name and a remote name.
    Usage example:
    >>> make_remote("mywork", "origin")
    'refs/remotes/origin/mywork'
    >>> make_remote("mywork", "github")
    'refs/remotes/github/mywork'
    """
    return "/".join(("refs/remotes", remote, ref))
|
def create_neighbours(region):
    """Build a map from each country to the set of adjacent countries.

    ``region`` is a 2-D grid of country ids; two countries are neighbours
    when their cells touch horizontally or vertically. Example:
    #
    # (0, 0, 0, 3),
    # (0, 1, 1, 1),
    # (0, 0, 2, 0)
    #
    corresponds to:
    # {0: {1, 2, 3}, 1: {0, 2, 3}, 2: {0, 1}, 3: {0, 1}}"""
    neighbours = dict()
    height = len(region)
    for r, row in enumerate(region):
        width = len(row)
        for c, country in enumerate(row):
            neighbours.setdefault(country, set())
            # examine the four orthogonally adjacent cells
            for dr, dc in ((0, -1), (0, 1), (-1, 0), (1, 0)):
                nr, nc = r + dr, c + dc
                if 0 <= nr < height and 0 <= nc < width:
                    other = region[nr][nc]
                    if other != country:
                        neighbours[country].add(other)
    return neighbours
|
def hmsm_to_days(hour=0, min=0, sec=0, micro=0):
    """
    Convert hours, minutes, seconds, and microseconds to fractional days.

    Parameters
    ----------
    hour : int, optional
        Hour number. Defaults to 0.
    min : int, optional
        Minute number. Defaults to 0.
    sec : int, optional
        Second number. Defaults to 0.
    micro : int, optional
        Microsecond number. Defaults to 0.

    Returns
    -------
    days : float
        Fractional days.

    Examples
    --------
    >>> hmsm_to_days(hour=6)
    0.25
    """
    # fold units together from the smallest upward, dividing at each step
    return (hour + (min + (sec + micro / 1.e6) / 60.) / 60.) / 24.
|
def numero_caracteres_especiales(url):
    """Count the special (non-alphanumeric) characters in the url.

    url: the web page address.
    Returns -1 if the count fails for any reason (best-effort by design).
    """
    try:
        # letters and decimal digits; everything else counts as "special"
        letras = sum(1 for ch in url if ch.isalpha())
        digitos = sum(1 for ch in url if ch.isdigit())
        return len(str(url)) - (letras + digitos)
    except Exception:
        return -1
|
def ra_as_hours(ra_degrees):
    """ Input: float of Right Ascension in degrees.
        Returns: string of RA as sexagesimal hours (HH:MM:SS.sss),
            to the nearest 0.001 RA seconds; None if out of [0, 360].
    """
    if ra_degrees < 0 or ra_degrees > 360:
        return None
    # work in integer milliseconds of RA time (15 degrees per hour)
    total_ms = round((ra_degrees * 3600 * 1000) / 15)
    hours, rest = divmod(total_ms, 3600 * 1000)
    minutes, rest = divmod(rest, 60 * 1000)
    seconds = round(rest / 1000, 3)
    text = f"{int(hours):02d}:{int(minutes):02d}:{seconds:06.3f}"
    # 360 degrees rounds up to 24h; wrap it back to zero
    if text.startswith("24:"):
        text = f"{0:02d}:{0:02d}:{0:06.3f}"
    return text
|
def get_dtype_str(dtype, byteorder="little"):
    """Parses dtype and byte order to return a type string code.

    Parameters
    ----------
    dtype : `numpy.dtype` or `str`
        Either a dtype (e.g., ``numpy.uint32``) or a string with the type code
        (``'>u4'``). If a string type code and the first character indicates
        the byte order (``'>'`` for big, ``'<'`` for little endian),
        ``byteorder`` will be ignored.
    byteorder : str
        Either ``'big'`` for big endian representation or ``'little'`` for
        little endian. ``'>'`` and ``'<'`` are also accepted, respectively.

    Returns
    -------
    type_code : `str`
        The type code for the input dtype and byte order.

    Examples
    --------
    ::

        >>> get_dtype_str(numpy.uint32, byteorder='big')
        '>u4'
        >>> get_dtype_str('u2', byteorder='>')
        '>u2'
        >>> get_dtype_str('<u2', byteorder='big')
        '<u2'

    """
    order_codes = {"big": ">", "little": "<", ">": ">", "<": "<"}
    if byteorder not in order_codes:
        raise ValueError(f"invalid byteorder {byteorder}")
    order = order_codes[byteorder]
    if isinstance(dtype, str):
        head = dtype[0]
        if head in (">", "<"):
            # explicit byte order in the type code wins over *byteorder*
            return dtype
        if head == "=":
            raise ValueError("invalid byte order =. Please, use a specific endianess.")
        return order + dtype
    # numpy scalar type: instantiate to read its canonical type string
    return order + dtype().dtype.str[1:]
|
def split_into_n(s, n):
    """Split into lists - each of size 'n' (last chunk may be shorter)."""
    chunks = []
    for start in range(0, len(s), n):
        chunks.append(s[start:start + n])
    return chunks
|
def _reverse_repeat_list(t, n):
"""Reverse the order of `t` and repeat each element for `n` times.
This can be used to translate padding arg used by Conv and Pooling modules
to the ones used by `F.pad`.
"""
return list(x for x in reversed(t) for _ in range(n))
|
def execute_chronos_api_call_for_job(api_call, job):
    """Attempt a call to the Chronos api, catching any exception.

    We *have* to catch Exception, because the client catches
    the more specific exception thrown by the http clients
    and rethrows an Exception -_-.

    The chronos api returns a 204 No Content when the delete is
    successful, and chronos-python only returns the body of the
    response from all http calls. So, if this is successful,
    then None will be returned.
    https://github.com/asher/chronos-python/pull/7

    We catch it here, so that the other deletes are completed.
    """
    try:
        return api_call(job)
    except Exception as caught:
        # deliberately returned (not re-raised) so sibling calls proceed
        return caught
|
def binary_search_recursive(list, target):
    """
    Return True if target is present in the sorted list, else None.

    Fixed: the original sliced the list on every recursive call, which
    copies O(n) elements per level -- making the claimed O(log n) really
    O(n) time and memory. Recursing on index bounds restores true
    O(log n). (The old docstring also promised an index position, but
    the code has always returned True/None; that contract is kept.)

    complexity: O(log n)
    """
    def _search(lo, hi):
        # search list[lo:hi); lo inclusive, hi exclusive
        if lo >= hi:
            return None
        mid = (lo + hi) // 2
        if list[mid] == target:
            return True
        if list[mid] < target:
            return _search(mid + 1, hi)
        return _search(lo, mid)
    return _search(0, len(list))
|
def create_non_array_attr_operation(var_attribute_name, var_attribute_value):
    """Return request string for a single operation to create a non-array attribute.

    NOTE(review): name and value are interpolated unescaped, so a quote in
    either would break the JSON -- confirm inputs are sanitized upstream.
    """
    return (f'{{"op":"AddUpdateAttribute","attributeName":"{var_attribute_name}"'
            f', "addUpdateAttribute":"{var_attribute_value}"}},')
|
def convertBytes(nbytes):
    """Convert a size in bytes to a string (decimal GB/MB/KB/B, 1 decimal)."""
    for threshold, unit in ((1e9, 'GB'), (1e6, 'MB'), (1e3, 'KB')):
        if nbytes >= threshold:
            return '{:.1f} {}'.format(nbytes / threshold, unit)
    return '{:.1f} B'.format(nbytes)
|
def parse_prefix(prefix, default_length=24):
    """
    Splits the given IP prefix into a network address and a prefix length.
    If the prefix does not have a length (i.e., it is a simple IP address),
    it is presumed to have the given default length.

    :type prefix: string
    :param prefix: An IP mask.
    :type default_length: long
    :param default_length: The default ip prefix length.
    :rtype: string, int
    :return: A tuple containing the IP address and prefix length.
    """
    if '/' not in prefix:
        return prefix, int(default_length)
    network, length = prefix.split('/')
    return network, int(length)
|
def generate_readme(data):
    """
    Takes in a dictionary containing all the relevant information on the
    project and produces a string that can be passed to mne-bids
    * might actually change this to produce a .md file to have nicer formatting
    """
    def _joined_date(value):
        # None -> 'Unknown'; otherwise the date parts joined with '/'
        return 'Unknown' if value is None else '/'.join(value)

    parts = []
    parts.append(f"Project Title:\t\t{data.get('ProjectTitle', 'None')}\n")
    parts.append(f"Project ID:\t\t{data.get('ProjectID', 'None')}\n\n")
    parts.append("Expected experimentation period:\n")
    parts.append(f"Start date:\t\t{_joined_date(data.get('StartDate', None))}\n")
    parts.append(f"End date:\t\t{_joined_date(data.get('EndDate', None))}\n\n")
    parts.append("Project Description:\n")
    parts.append(data.get("Description", "None") + "\n\n")
    groups = data.get("Groups", [])
    if groups:
        parts.append('Participant groups:\n')
        for grp in groups:
            parts.append(' - ' + grp[0] + '\n')
        parts.append('\n')
    triggers = data.get('DefaultTriggers', [])
    if triggers:
        parts.append('Trigger channels:\n')
        for trig in triggers:
            parts.append(f"{trig[0]}:\t{trig[1]}\n")
    return "".join(parts)
|
def _get_module_ver_hash(prov):
"""Get module commit hash, falling back to semantic version, and finally 'UNKNOWN'"""
ver = None
subacts = prov[0].get('subactions')
if subacts:
ver = subacts[0].get('commit')
if not ver:
ver = prov[0].get('service_ver', 'UNKNOWN')
return ver
|
def find_magic_number_distinct(numbers):
    """Find a "magic" index i with numbers[i] == i by binary search.

    :param numbers: List of sorted distinct integers
    :return: Index of magic number or -1
    """
    lo, hi = 0, len(numbers) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if numbers[mid] == mid:
            return mid
        if numbers[mid] > mid:
            # values grow at least as fast as indices: magic index is left
            hi = mid - 1
        else:
            lo = mid + 1
    return -1
|
def unique_paths(rows: int, columns: int) -> int:
    """Return the number of unique monotone paths in a non-blocking grid.

    Replaces the original exponential double recursion with the exact
    closed form C(rows+columns-2, rows-1), computed with an integer
    multiplicative binomial (every intermediate product is exact).

    >>> unique_paths(9, 4)
    165
    >>> unique_paths(2, 3)
    3
    >>> unique_paths(9, 6)
    1287
    >>> unique_paths(3, 9)
    45
    >>> unique_paths(8, 14)
    77520
    """
    # normalize once (the original re-cast on every recursive call)
    rows, columns = int(rows), int(columns)
    if rows == 1 or columns == 1:
        return 1
    # C(n, k) with the smaller k for fewer iterations
    n = rows + columns - 2
    k = min(rows, columns) - 1
    result = 1
    for i in range(1, k + 1):
        # running product stays integral: it is C(n-k+i, i) at each step
        result = result * (n - k + i) // i
    return result
|
def Sample_n_Hold_Plot(_holds, _period, _end):
    """Build piecewise-constant (sample-and-hold) plot data from held values.

    Each value in _holds is held for one _period; the final value is held
    from its sample time until _end.
    Return: piecewise x-axis-data, piecewise y-axis-data
    """
    xs, ys = [], []
    for idx, level in enumerate(_holds[:-1]):
        xs.extend((idx * _period, (idx + 1) * _period))
        ys.extend((level, level))
    # last hold runs from its sample instant out to _end
    xs.extend(((len(_holds) - 1) * _period, _end))
    ys.extend((_holds[-1], _holds[-1]))
    return xs, ys
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.