content
stringlengths 42
6.51k
|
---|
def get_by_ids(use_ids, list_ids_tfrecords):
    """Collect and sort the tfrecord paths for the requested ids.

    :param use_ids: iterable of ids (keys/indices into ``list_ids_tfrecords``)
    :param list_ids_tfrecords: mapping from id to an array-like (anything
        exposing ``.tolist()``, e.g. a numpy array) of tfrecord paths
    :return: sorted list of all tfrecord paths for the given ids
    """
    ret_data = []
    for ids in use_ids:
        # extend() avoids the quadratic cost of repeated list concatenation
        ret_data.extend(list_ids_tfrecords[ids].tolist())
    return sorted(ret_data)
|
def load_w(dtype):
    """Name of the helper function used to load data of ``dtype``.

    Intended usage::

        code = '%s(ival)' % (load_w(input_type),)

    Only 'float16' needs a conversion helper; any other dtype is loaded
    as-is (empty string).
    """
    return 'ga_half2float' if dtype == 'float16' else ''
|
def truncate_lines(str, num_lines, eol="\n"):
    """Keep only the first ``num_lines`` lines of a string.

    :param str: string to truncate (parameter name kept for callers,
        although it shadows the builtin)
    :type str: `str`
    :param num_lines: number of leading lines to keep
    :type num_lines: `int`
    :param eol: line separator used both for splitting and re-joining
    :type eol: `str`
    :return: the truncated, re-joined string
    :rtype: `str`
    """
    lines = str.split(eol)
    return eol.join(lines[:num_lines])
|
def is_provider_redis(provider):
    """
    Check if a provider configuration is a redis configuration.

    The previous ``is not None`` guard was redundant: comparing the
    (possibly missing) "type" entry to "redis" already yields False
    when the key is absent.
    """
    return provider.get("type") == "redis"
|
def __precision(prediction, expectation):
    """Precision of a cut-at-k prediction list.

    prediction: list cut at-k pmid, each element a (pmid, score) tuple or pmid
    expectation: list of valid pmid
    return: fraction of predicted items found in ``expectation``
            (0 for an empty prediction list, by convention)
    """
    if not prediction:
        return 0  # assume 0 for empty input
    hits = sum(1 for pmid in prediction if pmid in expectation)
    return hits / len(prediction)
|
def reverseDictWithDuplicates(D):
    """Invert D, collecting keys that share a value into lists (testing helper)."""
    inverted = {}
    for key, value in D.items():
        bucket = inverted.setdefault(value, [])
        bucket.append(key)
    return inverted
|
def int_to_str_add_zeros(number):
    """Format an int as a zero-padded 6-digit string.

    Invalid inputs (non-int, negative, or more than 6 digits) only
    trigger a warning print, mirroring the original behavior; the value
    is still formatted and returned.

    :param number: file number to format
    :return: ``str(number)`` left-padded with zeros to width 6
    """
    if type(number) != int:
        print("File number should be an integer.\n")
    if number < 0:
        print("File number can't be negative.\n")
    elif number > 999999:
        print("File number can't be longer than 6 digits.\n")
    # str.zfill replaces the manual zero-prepending loop
    return str(number).zfill(6)
|
def fix_whitespace_for_phantom(text: str):
    """Transform output for proper HTML display (e.g. pandas DataFrames).

    Spaces become non-breaking-space entities and newlines become
    ``<br>`` tags so the rendered layout is preserved.
    """
    escaped = text.replace(' ', r'&nbsp;')
    return '<br>'.join(escaped.splitlines())
|
def sanitize(data, some_property):
    """Return data[some_property] stripped of whitespace, or None when missing/falsy."""
    raw = data.get(some_property, '')
    if not raw:
        return None
    return raw.strip()
|
def _detect(value, assumption):
    """Check whether ``value`` matches the assumed format.

    Only the 'array' assumption is supported: the string must be
    bracket-delimited. Any other assumption yields False.
    """
    if assumption != 'array':
        return False
    return value.startswith('[') and value.endswith(']')
|
def getNoMappedReadsStrCheck(inFile, outFiles):
    """Build bash code that propagates the 'no_mapped_reads' marker.

    Files from map-reduce steps may carry no data, indicated by the
    string 'no_mapped_reads'. Wrappers must catch this to skip their
    algorithm and exit gracefully. Given input and output file variable
    names, produce bash that checks the input for the marker and, if
    present, writes it to every output file (propagating the check),
    cleans up and exits; otherwise execution continues.

    ``outFiles`` may hold several output file variable names.
    """
    marker_writes = ''.join(
        '\techo no_mapped_reads > $' + out + '\n' for out in outFiles)
    return (
        '# Check for empty files/no data here\n'
        'if [ "$(head -1 $' + inFile + ')" == "no_mapped_reads" ]; then\n'
        '\techo "$' + inFile + ' contains no mapped reads... exiting" >> $logFile 2>&1\n'
        + marker_writes +
        '\trm -rf $tmpDir\n'
        '\texit 0\n'
        'else\n'
        '\techo "$' + inFile + ' contains mapped reads... continuing" >> $logFile 2>&1\n'
        'fi\n\n'
    )
|
def flatten(nested_list):
    """Recursively flatten an arbitrarily nested list.

    Nesting may occur when anchors are used inside a YAML list.
    """
    flat = []
    for item in nested_list:
        if isinstance(item, list):
            flat += flatten(item)
        else:
            flat += [item]
    return flat
|
def _slice_at_axis(sl, axis):
    """Build an index tuple that applies ``sl`` at ``axis`` only.

    Parameters
    ----------
    sl : slice
        The slice for the given dimension.
    axis : int
        The axis to which `sl` is applied; all other dimensions are
        left "unsliced" (full slices before, Ellipsis after).

    Returns
    -------
    tuple
        Index tuple usable on an array of any rank >= axis + 1.

    Examples
    --------
    >>> _slice_at_axis(slice(None, 3, -1), 1)
    (slice(None, None, None), slice(None, 3, -1), Ellipsis)
    """
    parts = [slice(None)] * axis
    parts.append(sl)
    parts.append(Ellipsis)
    return tuple(parts)
|
def ports_from_output_port_acts(output_port_acts):
    """Collect the unique port numbers from OFPActionOutput actions.

    Args:
        output_port_acts: iterable of OFPActionOutput-like objects
            exposing a ``port`` attribute.
    Returns:
        set of port number ints.
    """
    ports = set()
    for act in output_port_acts:
        ports.add(act.port)
    return ports
|
def is_dicom_file(filename):
    """Return True if ``filename`` is in the DICOM format.

    DICOM files carry the magic bytes 'DICM' at bytes 129 to 132; see
    http://stackoverflow.com/questions/4792727/validating-dicom-file
    for more details. Any I/O or access error yields False.
    """
    try:
        with open(filename, 'rb') as f:
            f.read(128)  # skip the 128-byte preamble
            # return the comparison directly instead of an if/else on it
            return f.read(4) == b'DICM'
    except Exception:
        return False
|
def check_occuring_variables(formula, variables_to_consider, allowed_variables):
    """Verify that the considered variables of ``formula`` stay within a set.

    Checks if the intersection of the variables in <formula> with the
    variables in <variables_to_consider> is contained in
    <allowed_variables>.

    Parameters
    ----------
    formula : list of list of integers
        The formula to consider (clauses of signed literals).
    variables_to_consider : list of integers
        Those variables in <formula> that shall be considered.
    allowed_variables : list of integers
        Must be contained in <variables_to_consider>; the subset that
        may occur in <formula>.

    Returns
    -------
    (bool, list)
        (True, []) when every considered variable occurring in the
        formula is allowed; otherwise (False, offending_variables) for
        the first violating clause.
    """
    allowed = set(allowed_variables)
    considered = set(variables_to_consider)
    for clause in formula:
        occurring = {abs(lit) for lit in clause if abs(lit) in considered}
        offenders = [var for var in occurring if var not in allowed]
        if offenders:
            return False, offenders
    return True, []
|
def pfreduce(func, iterable, initial=None):
    """A pointfree reduce / left fold function.

    Applies a function of two arguments cumulatively to the items
    supplied by the given iterable, so as to reduce the iterable to a
    single value. If an initial value is supplied, it is placed before
    the items from the iterable in the calculation, and serves as the
    default when the iterable is empty.

    :param func: A function of two arguments
    :param iterable: An iterable yielding input for the function
    :param initial: An optional initial input for the function
    :rtype: Single value

    Example::

        >>> from operator import add
        >>> pfreduce(add, [3, 4, 5, 6], initial=0)
        18
    """
    iterator = iter(iterable)
    try:
        first_item = next(iterator)
        # Bug fix: test `is not None` instead of truthiness; previously a
        # falsy initial value (0, '', []) was silently ignored, so
        # func(initial, first) was never applied.
        if initial is not None:
            value = func(initial, first_item)
        else:
            value = first_item
    except StopIteration:
        return initial
    for item in iterator:
        value = func(value, item)
    return value
|
def neg_relu(x_in, inf_point):
    """Negative ReLU: distance below ``inf_point``, zero above it."""
    return inf_point - x_in if x_in <= inf_point else 0
|
def harass_metric(damage_taken_p1: float, damage_taken_p2: float) -> float:
    """Fraction of the total lane damage that player 1 dealt.

    Formula: 1 - (damage_taken_p1 / (damage_taken_p1 + damage_taken_p2))

    :param damage_taken_p1: Damage taken by player one
    :param damage_taken_p2: Damage taken by player two
    :return: A float of the percent of damage player 1 did to player 2
    """
    total = damage_taken_p1 + damage_taken_p2
    return 1 - damage_taken_p1 / total
|
def is_overlapping(bbox1, bbox2):
    """Whether two boxes intersect with positive area.

    :param bbox1: (min1, max1, min2, max2) over two independent axes
    :param bbox2: same layout as ``bbox1``
    :return: True iff the intersection has positive extent on both axes
    """
    lo_a = max(bbox1[0], bbox2[0])
    hi_a = min(bbox1[1], bbox2[1])
    lo_b = max(bbox1[2], bbox2[2])
    hi_b = min(bbox1[3], bbox2[3])
    return lo_a < hi_a and lo_b < hi_b
|
def iob_iobes(tags):
    """Convert a tag sequence from IOB to IOBES encoding."""
    def _next_is_inside(idx):
        # True when a following tag exists and is an I- tag
        return idx + 1 < len(tags) and tags[idx + 1].split('-')[0] == 'I'

    converted = []
    for idx, tag in enumerate(tags):
        prefix = tag.split('-')[0]
        if tag == 'O':
            converted.append(tag)
        elif prefix == 'B':
            # lone B- becomes S- (single-token entity)
            converted.append(tag if _next_is_inside(idx) else tag.replace('B-', 'S-'))
        elif prefix == 'I':
            # final I- of a span becomes E- (end of entity)
            converted.append(tag if _next_is_inside(idx) else tag.replace('I-', 'E-'))
        else:
            raise Exception('Invalid IOB format!')
    return converted
|
def _match_event(key, value, event):
    """True when ``event[key]`` exists and equals ``value`` case-insensitively."""
    if key not in event:
        return False
    return str(event[key]).upper() == str(value).upper()
|
def find_largest_diff(list_of_nums):
    """Find the largest absolute difference between *consecutive*
    numbers in a list.

    Returns 0 for lists with fewer than two elements (same as before).
    """
    # max() over pairwise differences replaces the manual index loop;
    # default=0 preserves the old behavior for short/empty inputs.
    return max(
        (abs(a - b) for a, b in zip(list_of_nums, list_of_nums[1:])),
        default=0,
    )
|
def _resolve_name(name, package, level):
    """Resolve a relative module name to an absolute one.

    ``level - 1`` package components are stripped off the right of
    ``package``; ``name`` (if non-empty) is then appended.
    """
    parts = package.rsplit(".", level - 1)
    if len(parts) < level:
        raise ValueError("attempted relative import beyond top-level package")
    base = parts[0]
    if not name:
        return base
    return "{}.{}".format(base, name)
|
def is_array(obj):
    """
    Return True if object is list or tuple type.
    """
    # single isinstance with a type tuple instead of two chained calls
    return isinstance(obj, (list, tuple))
|
def getRankAttribute(attribute, reverse=False):
    """Rank each node/edge by its attribute value.

    Takes an input attribute (node or edge) and returns an attribute in
    which each position holds the rank of its value among all values:
    the lowest value gets rank 0, the second-lowest rank 1, and so on.

    Keyword arguments:
    attribute -- the input node/edge attribute
    reverse -- reverses the ranking, if set to True

    Example: [0.1, 0.05, 0.9, 0.2] -> [1, 0, 3, 2] (ascending)
    """
    order = sorted(enumerate(attribute), key=lambda pair: pair[1], reverse=reverse)
    ranks = [0] * len(attribute)
    for rank, (position, _value) in enumerate(order):
        ranks[position] = rank
    return ranks
|
def lat_to_km(latitude) -> float:
    """Express the given latitude in kilometers to the north.

    Args:
        latitude (float): Latitude in degrees.
    Returns:
        float: Latitude expressed in kilometers to the north.
    """
    # one degree of latitude spans ~110.574 km
    return 110.574 * latitude
|
def get_inband_info(cfg_facts):
    """
    Returns the inband port and IP addresses present in the configdb.json.

    Args:
        cfg_facts: The "ansible_facts" output from the duthost "config_facts" module.

    Returns:
        A dictionary with the inband port and IP addresses
        (keys: 'port', 'ipv4_addr'/'ipv4_mask', 'ipv6_addr'/'ipv6_mask').
    """
    intf = cfg_facts['VOQ_INBAND_INTERFACE']
    ret = {}
    for a_intf in intf:
        for addrs in intf[a_intf]:
            ret['port'] = a_intf
            intf_ip = addrs.split('/')
            # a ':' marks an IPv6 address; a plain else replaces the old
            # redundant "':' not in" re-test
            if ':' in intf_ip[0]:
                ret['ipv6_addr'] = intf_ip[0]
                ret['ipv6_mask'] = intf_ip[1]
            else:
                ret['ipv4_addr'] = intf_ip[0]
                ret['ipv4_mask'] = intf_ip[1]
    return ret
|
def SelectDBSeveralFittest(n, db_list):
    """
    Function: SelectDBSeveralFittest
    =================================
    Select the n fittest individuals.

    @param n: the number of fittest individuals
    @param db_list: the ordered list of fitnesses with associated unique ids
        obtained from the database (best first)
    @return: the first n entries of db_list
    """
    fittest = db_list[0:n]
    return fittest
|
def goal_key(tup):
    """Sort key for goals: position first, numeric id second.

    String ids like '12_x' are converted to their leading int.
    """
    goal_id, goal_pos = tup
    numeric_id = int(goal_id.split("_")[0]) if isinstance(goal_id, str) else goal_id
    return goal_pos, numeric_id
|
def nameCheck(teamName):
    """
    Checks names to ensure consistent naming.

    Known aliases are mapped through a lookup table; names containing a
    bare 'St' are then normalized to 'Saint'/'St.'/'State' depending on
    position. Note the fix-ups rely on str.replace replacing the FIRST
    occurrence, so statement order matters.

    :param teamName: str, team name to be checked
    :return: str, corrected team name
    """
    # Direct alias table; unknown names fall through unchanged.
    switch = {
        'Central Florida': 'UCF',
        'Gardner Webb': 'Gardner-Webb',
        'Wisconsin Milwaukee': 'Milwaukee',
        'Connecticut': 'UConn',
        'Illinois Chicago': 'UIC',
        'Texas San Antonio': 'UTSA',
        'Louisiana Lafayette': 'Louisiana',
        'Southeastern Louisiana': 'SE Louisiana',
        'Texas A&M Corpus Christi': 'Texas A&M-CC',
        'Miami Ohio': 'Miami (OH)',
        'Central Connecticut St': 'Central Connecticut',
        'Mount St Marys': 'Mount St. Mary\'s',
        'Cal St Fullerton': 'CSU Fullerton',
        'Texas Arlington': 'UT Arlington',
        'Cal St Northridge': 'CSU Northridge',
        'Morgan St': 'Morgan St',
        'Stephen F Austin': 'Stephen F. Austin',
        'Santa Barbara': 'UC Santa Barbara',
        'Arkansas Pine Bluff': 'Arkansas-Pine Bluff',
        'Long Island Brooklyn': 'Long Island University',
        'St Johns': 'St. John\'s',
        'Loyola Maryland': 'Loyola (MD)',
        'Southern Mississippi': 'Southern Miss',
        'Detroit': 'Detroit Mercy',
        'American': 'American',
        'Massachusetts': 'UMass',
        'Cal Irvine': 'UC Irvine',
        'Hawaii': 'Hawai\'i',
        'Cal St Bakersfield': 'CSU Bakersfield',
        'Wisconsin Green Bay': 'Green Bay',
        'Middle Tennessee St': 'Middle Tennessee',
        'Arkansas Little Rock': 'Little Rock',
        'College of Charleston': 'Charleston'
    }
    team = switch.get(teamName, teamName)
    # Leading 'St' means 'Saint <name>' (e.g. St Peters -> Saint Peter's)
    # for the four listed schools; otherwise it is abbreviated 'St.'.
    if team.split(' ')[0] == 'St':
        if team.split(' ')[1] == 'Peters' or team.split(' ')[1] == 'Josephs' \
                or team.split(' ')[1] == 'Louis' or team.split(' ')[1] == 'Marys':
            # chained replaces fix both the 'St' and the possessive form;
            # each replace only touches the first occurrence
            team = team.replace('St', 'Saint').replace('Peters', 'Peter\'s').replace('Josephs', 'Joseph\'s').replace(
                'Marys', 'Mary\'s')
        else:
            team = team.replace('St', 'St.')
    # Trailing 'St' means 'State'. Morgan State is excluded for the
    # womens team (kept as 'Morgan St' via the alias table above).
    # NOTE(review): the replace targets the FIRST 'St' in the original
    # teamName — presumably no team has 'St' both leading and trailing;
    # confirm against the data set.
    elif team.split(' ')[-1] == 'St' and team != 'Morgan St':
        team = teamName.replace('St', 'State')
    return team
|
def map2matrix(matrix_size, index):
    """Map a flat time-series index to its (row, col) matrix position.

    :param matrix_size: (n_rows, n_cols) shape of the matrix
    :param index: flat index into the row-major matrix
    :return: (row, col) index in the matrix
    """
    # divmod performs the floor-division and modulo in one step
    return divmod(index, matrix_size[1])
|
def url_map(x):
    """
    Standardizes a URL by ensuring it ends with a '/' and does not
    contain hash fragments.
    """
    if '#' in x:
        x = x[:x.index('#')]
    # endswith() also covers the empty string (e.g. input was only a
    # fragment), where the old x[-1] check raised an IndexError
    return x if x.endswith('/') else x + '/'
|
def convert_fish_cmd_to_zsh_cmd(cmd: str) -> str:
    """Translate fish's '; and '/'; or ' chaining into zsh's '&&'/'||'.

    :param cmd: Fish cmd
    :return: Zsh cmd
    """
    translated = cmd.replace('; and ', '&&')
    translated = translated.replace('; or ', '||')
    return translated
|
def _get_dicom_comment(dicom_file):
    """Return the contents of the comment field of a DICOM file.

    Args:
        dicom_file <dicom.dataset>: Opened DICOM file
    Returns:
        image_comment <str>: DICOM comment, or None when the field is absent
    """
    # getattr with a default collapses the hasattr/else branching
    return getattr(dicom_file, 'ImageComments', None)
|
def adstockGeometric(x, theta):
    """Geometric adstock transform.

    Each element carries over ``theta`` times the previous decayed
    value: out[i] = x[i] + theta * out[i-1].

    :param x: non-empty sequence of raw values
    :param theta: decay/carryover rate
    :return: list of decayed values, same length as ``x``
    """
    decayed = [x[0]]
    for value in x[1:]:
        decayed.append(value + theta * decayed[-1])
    return decayed
|
def ismagic(l):
    """Test whether a flat 9-element combination forms a 3x3 magic square."""
    lines = (
        (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
        (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
        (0, 4, 8), (2, 4, 6),              # diagonals
    )
    sums = [l[a] + l[b] + l[c] for a, b, c in lines]
    return all(s == sums[0] for s in sums)
|
def mac_addr(address):
    """Convert a MAC address to a readable/printable string.

    Args:
        address (bytes): a MAC address in raw byte form
            (e.g. b'\\x01\\x02\\x03\\x04\\x05\\x06'). Iterating bytes
            yields ints, so the old ord(chr(x)) round-trip was a no-op
            (and the code never worked for str input anyway).
    Returns:
        str: Printable/readable MAC address
    """
    return ':'.join('%02x' % b for b in address)
|
def _convert_to_int_list(check_codes):
    """Normalize check codes to a list of ints.

    Args:
        check_codes: comma-separated string, or list of ints/strings.
    Returns:
        list: the check codes as a list of integers
    Raises:
        ValueError: if conversion of an element fails
        RuntimeError: if the input is neither a list nor a string
    """
    if isinstance(check_codes, str):
        # "1,2,3" -> [1, 2, 3]
        return [int(code) for code in check_codes.split(",")]
    if isinstance(check_codes, list):
        if all(isinstance(code, int) for code in check_codes):
            return check_codes  # already in the right shape
        return [int(code) for code in check_codes]
    raise RuntimeError(
        "Could not convert values: {} of type {}".format(check_codes, type(check_codes)))
|
def map_phred33_ascii_to_qualityscore(phred33_char: str) -> float:
    """Map an ASCII phred33 quality character to its quality score.

    >>> map_phred33_ascii_to_qualityscore("#")
    2
    >>> map_phred33_ascii_to_qualityscore("J")
    41
    """
    offset = 33  # phred+33 encoding offset
    return ord(phred33_char) - offset
|
def mixup(x1, x2, y1, y2, mix_ratio):
    """MIXUP: convex combination of two datapoints and their labels."""
    inv = 1 - mix_ratio
    mixed_x = mix_ratio * x1 + inv * x2
    mixed_y = mix_ratio * y1 + inv * y2
    return (mixed_x, mixed_y)
|
def filter_loan_to_value(loan_to_value_ratio, bank_list):
    """Filters the bank list by the maximum loan to value ratio.

    Args:
        loan_to_value_ratio (float): The applicant's loan to value ratio.
        bank_list (list of lists): The available bank loans; column 2
            holds each bank's maximum allowed ratio.
    Returns:
        A list of qualifying bank loans.
    """
    # comprehension replaces the manual append loop
    return [bank for bank in bank_list if loan_to_value_ratio <= float(bank[2])]
|
def add(x, y=None):
    """Add two integers.

    Demonstration function with a numpy-style docstring; such
    docstrings can be rendered with Sphinx and may use ReST directives
    (see the `numpy docstring documentation
    <https://numpydoc.readthedocs.io/en/latest/format.html>`_ and the
    `Sphinx documentation <https://www.sphinx-doc.org/en/master/>`_).

    Parameters
    ----------
    x : int
        The first integer to add.
    y : int, optional
        The second integer to add. If ``None``, ``x`` is added to itself.

    Returns
    -------
    int
        The sum of ``x`` and ``y``.
    """
    second = x if y is None else y
    return int(x) + int(second)
|
def update_parameters(parameters, grads, learning_rate):
    """Perform one gradient-descent step over all layers.

    Arguments:
    parameters -- dict with entries "W1", "b1", ..., "WL", "bL"
    grads -- dict with matching gradients "dW1", "db1", ...,
             output of L_model_backward
    learning_rate -- step size

    Returns:
    parameters -- the same dict with every W/b updated in place:
        parameters["W" + str(l)] -= learning_rate * grads["dW" + str(l)]
        parameters["b" + str(l)] -= learning_rate * grads["db" + str(l)]
    """
    num_layers = len(parameters) // 2  # two entries (W, b) per layer
    for layer in range(1, num_layers + 1):
        parameters['W%d' % layer] -= learning_rate * grads['dW%d' % layer]
        parameters['b%d' % layer] -= learning_rate * grads['db%d' % layer]
    return parameters
|
def _repeat_f(repetitions, f, args):
    """Auxiliary function for function par_search().

    Calls ``f(*args)`` up to ``repetitions`` times, stopping early at
    the first result different from None. Returns ``(n, result)`` where
    ``n`` is the number of calls made; ``(repetitions, None)`` when
    every call returned None.
    """
    attempt = 0
    while attempt < repetitions:
        attempt += 1
        outcome = f(*args)
        if outcome is not None:
            return attempt, outcome
    return repetitions, None
|
def _wrap_string_values(_where_value):
    """Prepare a value for use in a SQL WHERE clause.

    Integer-like values are returned as ints; anything else is wrapped
    in single-quotes.

    .. versionchanged:: 3.4.0
       Renamed the function to adhere to PEP8 guidelines.

    :param _where_value: The value to be evaluated and potentially wrapped in single-quotes
    :returns: The value in int or string format
    """
    try:
        return int(_where_value)
    except (TypeError, ValueError):
        return f"'{_where_value}'"
|
def filter_data_points(x, y, xmin, xmax, ymin, ymax):
    """
    Remove points outside the axes limits (None coordinates are dropped too).
    """
    kept = [
        (xi, yi)
        for xi, yi in zip(x, y)
        if xi is not None and yi is not None
        and xmin <= xi <= xmax and ymin <= yi <= ymax
    ]
    xnew = [pt[0] for pt in kept]
    ynew = [pt[1] for pt in kept]
    return xnew, ynew
|
def determine_edge_label_by_layertype(layer, layertype):
    """Edge label for the given layer type (currently always the literal '""')."""
    return '""'
|
def double_quote(text):
    """Safely wrap ``text`` in double quotes, e.g. for PostgreSQL identifiers.

    Identical to pglib's `PQescapeInternal` with `as_ident = True`; see
    https://github.com/postgres/postgres/blob/master/src/interfaces/libpq/fe-exec.c#L3443
    That also matches the PostgreSQL function `quote_ident`, except the
    result is always quoted. Assumes Python works with the same string
    encoding as PostgreSQL (e.g. both UTF8).
    """
    escaped = str(text).replace('"', '""')  # double every embedded quote
    return '"' + escaped + '"'
|
def memcached_connection(run_services, memcached_socket):
    """Connection string to the local memcached instance (None when services are off)."""
    if not run_services:
        return None
    return 'unix:{0}'.format(memcached_socket)
|
def hexi(WIDTH, INT):
    """
    Format INT as uppercase hex, zero-padded to WIDTH digits.
    """
    assert WIDTH > 0
    return format(INT, '0%dX' % WIDTH)
|
def _concatenate(*args):
    """
    Reduces a list of a mixture of elements and lists down to a single list.

    Parameters
    ----------
    args : tuple of (object or list of object)

    Returns
    -------
    list
    """
    result = []
    for arg in args:
        if isinstance(arg, list):
            result.extend(arg)
        else:
            result.append(arg)
    return result
|
def leastload(loads):
    """Index of the lowest load.

    If the lowest load occurs more than once, the first occurrence is
    used — with LRU-ordered loads this picks the LRU of the least
    loaded.
    """
    return min(range(len(loads)), key=loads.__getitem__)
|
def show_exercises(log, date):
    """List every exercise name logged for the given workout date."""
    return [
        exercise
        for entry in log
        if entry["date"] == date
        for exercise in entry["exercises"]
    ]
|
def hailstone(n):
    """Print out the hailstone sequence starting at n, and return the
    number of elements in the sequence.

    >>> a = hailstone(10)
    10
    5
    16
    8
    4
    2
    1
    >>> a
    7
    """
    # recursive solution (as the assignment asks), restructured with a
    # single computed "next" value
    print(n)
    if n == 1:
        return 1
    nxt = n // 2 if n % 2 == 0 else 3 * n + 1
    return 1 + hailstone(nxt)
|
def equation_id(equation):
    """This function determines which kind of equation has been entered.

    It analyses the equation with each possible operator to find a match,
    isolates the two numbers, evaluates the matching lambda and returns a
    formatted solution string to the main method. Floor division ('//') is
    handled first as a special quotient/remainder case.

    NOTE(review): operators are tried in the dict's insertion order, so
    '**' is matched before '*' and '+' before '-'; an equation containing
    several operators resolves to the first one found.
    """
    # Strips spaces and commas from entry
    equation = equation.replace(' ','')
    equation = equation.replace(',','')
    # Error message if equation input is invalid
    error_message = 'The solution cannot be calculated due to an invalid input.'
    # This dictionary includes the symbols, and the tuple holds lambdas
    # (index-aligned with the symbols dict).
    symbols = {0:'+', 1:'-', 2:'**', 3:'/', 4:'*'}
    functions = (
        (lambda num1,num2: num1 + num2),
        (lambda num1,num2: num1 - num2),
        (lambda num1,num2: num1 ** num2),
        (lambda num1,num2: num1 / num2),
        (lambda num1,num2: num1 * num2)
    )
    # Checks if question is remainder (search starts at index 1 so a
    # leading negative sign is never mistaken for an operator)
    if equation[1:].find('//') != -1:
        # isolates numbers and converts to float
        try:
            num1, num2 = equation.split('//')
            num1 = float(num1)
            num2 = float(num2)
        except ValueError:
            return error_message
        # Calculate quotient and remainder
        quotient = num1 // num2
        remainder = num1 % num2
        #Format the solution
        return 'The quotient is {} and the remainder is {:.8f}.'.format(int(quotient),remainder)
    # Iterates through possible equations except remainder
    for s in symbols:
        if equation[1:].find(symbols[s]) != -1: # An operator has been located (avoids first character due to negatives)
            # Isolates the numbers and converts to float; split() on an
            # operator that occurs more than once yields > 2 parts and
            # falls into the ValueError branch below
            try:
                num1, num2 = equation.split(symbols[s])
                num1 = float(num1)
                num2 = float(num2)
            except ValueError:
                return error_message
            # Calculate solution and format
            return 'The solution is {:.8f}.'.format(functions[s](num1,num2))
    return error_message
|
def split_tag(image_name):
    """Split a docker image reference into (repository, tag-or-None)."""
    repo, sep, tag = image_name.partition(":")
    # no separator means no tag was supplied
    return repo, (tag if sep else None)
|
def single_or(l):
    """Return True iff only one item is different than ``None``, False otherwise.

    Note that this is not a XOR function, according to the truth table
    of the XOR boolean function with n > 2 inputs. Hence the name
    ``single_or``.
    """
    # Bug fix: the old any()-based trick tested truthiness, so falsy
    # non-None items (0, '', []) were wrongly counted as absent,
    # contradicting the documented "different than None" contract.
    return sum(1 for item in l if item is not None) == 1
|
def move_items_back(garbages):
    """
    Moves the items/garbage backwards according to the speed the
    background is moving.

    Args:
        garbages(list): A list containing the garbage rects
    Returns:
        garbages(list): The same list, each rect's centerx decreased by 2
    """
    for rect in garbages:
        rect.centerx = rect.centerx - 2  # track the background scroll speed
    return garbages
|
def normalize(item):
    """Normalize an item by lowercasing it."""
    return item.lower()
|
def unflatten_mapping(mapping):
    """Expand a dict with dot-concatenated keys into nested dicts.

    Examples
    --------
    >>> x = {'a1.b1.c1': 1,
    ...      'a1.b2': 2,
    ...      'a2.b1': 3}
    >>> unflatten_mapping(x)  # doctest: +SKIP
    {'a1': {'b1': {'c1': 1},
            'b2': 2},
     'a2': {'b1': 3}}
    """
    nested = {}
    for dotted_key, value in mapping.items():
        *parents, leaf = dotted_key.split('.')
        node = nested
        for part in parents:
            node = node.setdefault(part, {})
        node[leaf] = value
    return nested
|
def is_between(value, min_value, max_value):
    """ (number, number, number) -> bool
    Precondition: min_value <= max_value

    Return True if and only if value is between min_value and max_value,
    or equal to one or both of them.

    >>> is_between(1.0, 0.0, 2)
    True
    >>> is_between(0, 1, 2)
    False
    """
    return not (value < min_value or value > max_value)
|
def generate_module_dictionary(category_to_process, data):
    """Extract module names from selected bean entries of a JMX-style JSON.

    For each index in ``category_to_process``, the bean's 'name' field is
    parsed: the part after 'type=' (when present) or after 'name='
    becomes the module name.

    :param category_to_process: indices of the beans (in data['beans']) to process
    :param data: parsed JSON with a top-level 'beans' list
    :return: dict mapping bean index -> extracted module name
    """
    module_name = dict()
    for item in category_to_process:
        for key in data['beans'][item]:
            if key == 'name':
                try:
                    value = str(data['beans'][item][key])  # hoist the repeated str() conversion
                    if 'type' in value:
                        module_name[item] = value.split('=')[1]
                    elif 'name' in value:
                        module_name[item] = value.split('name=')[1]
                # narrowed from a bare except, which would also have
                # swallowed KeyboardInterrupt/SystemExit
                except Exception:
                    print("Some Error Occurred in module_gen - But will continue for other modules")
                    continue
    return module_name
|
def squeeze_axes(shape, axes, skip='XY'):
    """Return shape and axes with single-dimensional entries removed.

    Unused (size-1) dimensions are dropped unless their axis letter is
    listed in 'skip'.

    >>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC')
    ((5, 2, 1), 'TYX')
    """
    if len(shape) != len(axes):
        raise ValueError("dimensions of axes and shape do not match")
    kept = [(size, ax) for size, ax in zip(shape, axes)
            if size > 1 or ax in skip]
    new_shape, new_axes = zip(*kept)
    return tuple(new_shape), ''.join(new_axes)
|
def _get_ngrams(n, text):
    """Calculates n-grams.

    Args:
        n: which n-grams to calculate
        text: An array of tokens
    Returns:
        A set of n-grams (as tuples)
    """
    last_start = len(text) - n
    return {tuple(text[start:start + n]) for start in range(last_start + 1)}
|
def time_to_string(secondsInput):
    """Convert a duration in seconds to a human-readable string.

    Once a larger period is emitted, every smaller period is emitted
    too, always in plural form (e.g. "1 day 0 hours 3 minutes 5 seconds").

    :param secondsInput: duration in seconds
    :return: description string (empty for 0 seconds)
    """
    string = ""
    prior = False  # once a larger period is printed, all smaller ones follow
    # Bug fix: days must use integer division. The old float division
    # meant e.g. 1.2 days matched neither `>= 2` nor `== 1`, silently
    # dropping the day from the output.
    days = secondsInput // (24 * 3600)
    hours = (secondsInput % (24 * 3600)) // 3600
    minutes = secondsInput % 3600 // 60
    seconds = secondsInput % 60
    if days >= 2:
        string += "{:0.0f} days ".format(days)
        prior = True
    elif days == 1:
        string += "{:0.0f} day ".format(days)
        prior = True
    if hours >= 2 or prior:
        string += "{:0.0f} hours ".format(hours)
        prior = True
    elif hours == 1:
        # (the old `or prior` here was unreachable: a true `prior`
        # always takes the branch above)
        string += "{:0.0f} hour ".format(hours)
        prior = True
    if minutes >= 2 or prior:
        string += "{:0.0f} minutes ".format(minutes)
        prior = True
    elif minutes == 1:
        string += "{:0.0f} minute ".format(minutes)
        prior = True
    if seconds >= 2 or prior:
        string += "{:0.0f} seconds".format(seconds)
    elif seconds == 1:
        string += "{:0.0f} second".format(seconds)
    return string
|
def uniq(string):
    """Remove duplicate words from a string (only the later duplicates).

    The sequence of the words will not be changed.

    >>> uniq('This is a test. This is a second test. And this is a third test.')
    'This is a test. second And this third'
    """
    # dict.fromkeys deduplicates while preserving first-occurrence order
    return ' '.join(dict.fromkeys(string.split()))
|
def quotify(text: str, limit: int = 1024):
    """
    Format text as a Discord block quote, handling newlines and
    truncating to ``limit`` characters with a trailing ellipsis.
    """
    quoted = "> " + text.strip().replace("\n", "\n> ")
    if len(quoted) > limit:
        quoted = quoted[: limit - 3] + "..."
    return quoted
|
def _build_var_list_str(var_names):
    """
    Comma-separated list of parameterized placeholders for SQL statements.

    Args:
        var_names ([str]): The list of var names, likely the keys in the
            dict returned by `_prep_sanitized_vars()`.

    Returns:
        (str): The names rendered as `%(<name>)s` placeholders joined by
        ', '; an empty string if no var names.
    """
    placeholders = ['%({})s'.format(name) for name in var_names]
    return ', '.join(placeholders)
|
def build_url_kwargs(url, **kwargs):
    """
    Append kwargs to ``url`` as a query string.

    Values are used verbatim (no URL-encoding), matching the previous
    behavior.

    :param url: base url
    :param kwargs: query parameters as key=value
    :return: url unchanged, or url?k1=v1&k2=v2... when kwargs are given
    """
    if not kwargs:
        return url
    # join() replaces the append-then-strip-trailing-'&' loop
    query = '&'.join('{}={}'.format(key, val) for key, val in kwargs.items())
    return url + '?' + query
|
def load_data(data_dir, files):
    """
    Load aligned reference and hypothesis sentences from transcript files.

    Each line of ``<data_dir><file>.txt`` is tab-separated; column 4
    holds the hypothesis (text before the first '|') and the last column
    the reference. Entries with empty hypotheses are dropped from both
    lists, keeping them aligned.

    :param data_dir: the root directory of the transcript files
    :param files: a list of file names (without the '.txt' suffix)
    :return: references_list, hypothesis_list
    """
    refs, hyps = [], []
    for name in files:
        with open(data_dir + name + '.txt', 'r', encoding='utf8') as handle:
            for line in handle:
                fields = line.strip().split('\t')
                hyps.append(fields[4].split('|')[0])
                refs.append(fields[-1])
    # Remove all empty hypotheses (and their paired references).
    refs = [r for r, h in zip(refs, hyps) if h]
    hyps = [h for h in hyps if h]
    return refs, hyps
|
def groupby_tx(exons, sambamba=False):
    """Group (unordered) exons per transcript.

    Each exon is indexed under every transcript it belongs to, so the
    same exon dict can appear in several groups.

    Args:
        exons: iterable of exon dicts. With ``sambamba=True`` the last
            three 'extraFields' entries hold parallel comma-separated
            lists of transcript ids, gene ids and symbols; otherwise
            'elements' already holds (tx_id, gene_id, symbol) triples.
        sambamba (bool): select the sambamba input convention.

    Returns:
        dict: transcript_id -> list of exon dicts.

    Note: mutates each exon in place — 'elements' is overwritten with a
    dict mapping tx_id -> {'symbol': ..., 'gene_id': ...}.
    """
    transcripts = {}
    for exon in exons:
        if sambamba:
            # sambamba output: zip the three parallel comma-separated columns
            ids = zip(exon['extraFields'][-3].split(','),
                      exon['extraFields'][-2].split(','),
                      exon['extraFields'][-1].split(','))
        else:
            ids = exon['elements']
        elements = {}
        for tx_id, gene_id, symbol in ids:
            elements[tx_id] = dict(symbol=symbol, gene_id=gene_id)
        # replace the raw id triples with the per-transcript lookup dict
        exon['elements'] = elements
        for transcript_id in exon['elements']:
            if transcript_id not in transcripts:
                transcripts[transcript_id] = []
            transcripts[transcript_id].append(exon)
    return transcripts
|
def add_toffoli_to_line(local_qasm_line, qubit_1, qubit_2, target_qubit):
    """
    Add a single Toffoli gate application to the given line of qasm.

    Parallel gates use the cQASM `{ gate_1 | gate_2 | gate_3 }` syntax;
    the new gate is merged into an existing bracket group, wrapped
    together with a lone existing gate, or placed alone on an empty line.

    Args:
        local_qasm_line: The line of qasm
        qubit_1: The first control qubit
        qubit_2: The second control qubit
        target_qubit: The target qubit

    Returns: The same line of qasm with the Toffoli gate added in parallel
    """
    single_application = "Toffoli q[{}],q[{}],q[{}]".format(qubit_1, qubit_2, target_qubit)
    # if the line is not empty, we need to consider what's already present
    if local_qasm_line != "":
        # a bracket indicates this line is parallelized with the { gate_1 | gate_2 | gate_3 } syntax
        if "{" in local_qasm_line:
            # remove } from the line and add it back at the end
            # (rstrip also drops any trailing '|', spaces and newline so
            # the new " | gate" separator is appended cleanly)
            local_qasm_line = local_qasm_line.rstrip("}| \n") + \
                              " | " + \
                              single_application + \
                              "}\n"
        # no bracket means we have to add the parallelization syntax ourselves
        else:
            local_qasm_line = "{" + local_qasm_line.rstrip("\n") + \
                              " | " + \
                              single_application + "}\n"
    # else, if the line IS empty, we can just put this gate in directly
    else:
        local_qasm_line = single_application + "\n"
    return local_qasm_line
|
def is_html_like(text):
    """
    Heuristic HTML check: a string whose stripped form starts with '<'.

    :param text: candidate value (any type)
    :return: bool
    """
    if not isinstance(text, str):
        return False
    return text.strip().startswith("<")
|
def validate(raw):
    """Check and sanitize the ticker data provided by the user.

    Users provide tickers to the application by writing them into a file
    that is loaded through the console interface with the <load filename>
    command.  We expect the file to be filled with comma separated ticker
    strings.

    Parameters:
    - `raw` : :class:`str` content of the user provided file.

    The function strips the raw data of spaces and newlines, splits it on
    commas, and discards empty items (which also handles trailing commas
    or accidental double commas between tickers).

    Returns a :class:`list` of sanitized, upper-cased tickers.
    """
    tickers = []
    raw = raw.replace(' ', '')   # remove spaces
    raw = raw.replace('\n', '')  # remove newlines
    for item in raw.split(','):  # comma split
        # BUG FIX: the original used `item is not ''`, an identity test that
        # relies on CPython string interning and raises a SyntaxWarning on
        # Python 3.8+.  Truthiness is the correct value comparison here.
        if item:
            tickers.append(item.upper())
    return tickers
|
def scan_for_agents(do_registration=True):
    """Identify and import ocs Agent plugin scripts.

    Scans every module reachable on the current module search path
    (sys.path) and picks out those whose name begins with 'ocs_plugin\\_'.

    Args:
        do_registration (bool): If True, the matching modules are imported,
            which likely causes them to call register_agent_class on
            each agent they represent.

    Returns:
        The list of discovered module names.
    """
    import pkgutil
    import importlib
    discovered = [modinfo.name for modinfo in pkgutil.iter_modules()
                  if modinfo.name.startswith('ocs_plugin_')]
    if do_registration:
        for module_name in discovered:
            importlib.import_module(module_name)
    return discovered
|
def strictly_decreasing(values):
    """Return True when every element is strictly smaller than the one before it."""
    # No adjacent pair may be flat or increasing.
    return not any(a <= b for a, b in zip(values, values[1:]))
|
def construct_geometry_filter(filter_type, coords):
    """
    Build a geojson GeometryFilter dict.

    :param filter_type: determine whether the Point or Polygon filter is desired
    :param coords: either a list of two coordinates, or a list of lists of two coordinates
    :return: a geojson geometry filter
    """
    # Renamed the local from `filter` — it shadowed the builtin of the
    # same name, which is a lint trap for anyone extending this function.
    geometry_filter = {
        "type": "GeometryFilter",
        "field_name": "geometry",
        "config": {
            "type": filter_type,
            "coordinates": coords
        }
    }
    return geometry_filter
|
def swallow(err_type, func, *args, **kwargs):
    """
    Call ``func(*args, **kwargs)``, silently swallowing ``err_type``.

    Returns the call's result, or None when the exception was raised.

    swallow(KeyError, lambda: dictionary[x])
    vs
    try:
        dictionary[x]
    except KeyError:
        pass
    """
    try:
        result = func(*args, **kwargs)
    except err_type:
        return None
    return result
|
def valid_bidi(value):
    """
    Reject strings with nonsensical Unicode text-direction properties.

    Relying on random Unicode characters means some combinations make no
    sense from a text-direction point of view; the stdlib idna codec's
    validation is used here to weed those out.
    """
    try:
        value.encode("idna")
        return True
    except UnicodeError:
        return False
|
def _percent_identity(a, b):
"""Calculate % identity, ignoring gaps in the host sequence
"""
match = 0
mismatch = 0
for char_a, char_b in zip(list(a), list(b)):
if char_a == '-':
continue
if char_a == char_b:
match += 1
else:
mismatch += 1
if match + mismatch == 0:
return 0
return 100 * float(match) / (match + mismatch)
|
def epsilon(n):
    """
    Compute the Jacobi symbol (5/n).
    """
    residue = n % 5
    if residue in (1, 4):
        return 1
    if residue in (2, 3):
        return -1
    return 0  # n divisible by 5
|
def initialize_counts(nS, nA):
    """Initialize a transition-counts array filled with zeros.

    Parameters
    ----------
    nS: int
        Number of states
    nA: int
        Number of actions

    Returns
    -------
    counts: nested list of shape [nS x nA x nS]
        counts[state][action][next_state] is the number of times that doing
        "action" at state "state" transitioned to "next_state".
    """
    # [0] * nS is safe for the innermost level because ints are immutable;
    # the outer levels must be fresh lists per row.
    return [[[0] * nS for _ in range(nA)] for _ in range(nS)]
|
def is_tachycardic(heart_rate, patient_age):
    """Check whether a heart rate is tachycardic given the patient's age.

    Args:
        heart_rate (int): heart rate of specified patient
        patient_age (int): age of specified patient

    Returns:
        str: "tachycardic" or "not tachycardic"
    """
    # Age-band thresholds: (low age, high age, threshold), inclusive bounds.
    age_bands = (
        (1, 2, 151),
        (3, 4, 137),
        (5, 7, 133),
        (8, 11, 130),
        (12, 15, 119),
    )
    threshold = 100  # default for ages outside all bands (incl. adults)
    for low, high, band_threshold in age_bands:
        if low <= patient_age <= high:
            threshold = band_threshold
            break
    return "tachycardic" if heart_rate > threshold else "not tachycardic"
|
def km_to_mile(km):
    """Convert kilometers to miles.

    Args:
        km: distance in kilometers; anything ``float()`` accepts
            (int, float, numeric string).

    Returns:
        float distance in miles, or None when ``km`` cannot be converted.
    """
    try:
        return float(km) / 1.609344
    except (ValueError, TypeError):
        # float("abc") raises ValueError; float(None) raises TypeError.
        # The original only caught ValueError, so None input escaped the
        # best-effort contract and crashed the caller.
        return None
|
def compute_x_mod_power_of_2(x, k):
    """Return x % (2**k) in O(1) via a bitmask.

    Example: 77 % 8 == 5.  In binary, 77 is 100 1101 and 8 is 2**3, so
    shifting 77 right by 3 bits performs the division; the 3 bits shifted
    off are exactly the remainder, which the mask below extracts.
    """
    low_bits_mask = (1 << k) - 1
    return x & low_bits_mask
|
def write(triple, writer):
    """Write one chunk of a split file using ``writer`` and report back.

    Parameters
    ----------
    triple : tuple of int, str, str
        The first element is the index in the set of chunks of a file, the
        second element is the path to write to, the third element is the data
        to write.
    writer : callable
        An ``open``-like factory returning a context-managed file object.

    Returns
    -------
    i, filename : int, str
        File's index and filename, echoed back so callers can track which
        chunk finished after splitting files.

    Notes
    -----
    This could be adapted to write to an already open handle, which would
    allow, e.g., multipart gzip uploads.  Currently a new file is opened
    for every call.
    """
    index, filename, payload = triple
    with writer(filename, mode='wb') as handle:
        handle.write(payload)
    return index, filename
|
def get_final_aggregation(thing_list, operation):
    """Generate the HTTP response content for an aggregation query.

    Args:
        thing_list(list): the list of thing descriptions
        operation(str): one of the five aggregation operations
            (COUNT, MIN, MAX, AVG, SUM)

    Returns:
        dict: formatted result containing the aggregation data; the result
        is "unknown" for an empty list under any operation but COUNT.
    """
    # Only COUNT is well-defined on an empty list.
    if not thing_list and operation != "COUNT":
        return {"operation": operation, "result": "unknown"}
    result = {"operation": operation}
    if operation == "COUNT":
        result["result"] = len(thing_list)
    elif operation == "MIN":
        result["result"] = min(td["_query_data"] for td in thing_list)
    elif operation == "MAX":
        result["result"] = max(td["_query_data"] for td in thing_list)
    elif operation == "AVG":
        result["result"] = sum(td["_query_data"] for td in thing_list) / len(thing_list)
    elif operation == "SUM":
        result["result"] = sum(td["_query_data"] for td in thing_list)
    # Unrecognized operations fall through with no "result" key, matching
    # the original contract.
    return result
|
def find_closest_point(p, qs):
    """
    Return the index of the element of 'qs' closest to 'p' (first on ties),
    or None when 'qs' is empty.
    """
    best_index = None
    smallest = None
    for index, q in enumerate(qs):
        distance = abs(q - p)
        # Strict '<' keeps the earliest index on equal distances.
        if smallest is None or distance < smallest:
            smallest, best_index = distance, index
    return best_index
|
def build_deed_url(license_code, version, jurisdiction_code, language_code):
    """
    Return a URL to view the deed specified by the inputs.  Jurisdiction
    and language are optional.  language_code is a CC language code.

    Examples of the shapes produced (see test_templatetags.py):
    https://creativecommons.org/licenses/by-sa/4.0/
    https://creativecommons.org/licenses/by-sa/4.0/deed.es
    https://creativecommons.org/licenses/by/3.0/es/
    https://creativecommons.org/licenses/by/3.0/es/deed.fr
    """
    url = f"/licenses/{license_code}/{version}/"
    if jurisdiction_code:
        url += f"{jurisdiction_code}/"
    # English (or unspecified) is the default deed and omits the suffix.
    if language_code and language_code != "en":
        url += f"deed.{language_code}"
    return url
|
def y_of(square: str) -> int:
    """Return the zero-based y coordinate of an algebraic square (e.g. 'f4' -> 3)."""
    rank_digit = square[1]
    return int(rank_digit) - 1
|
def validate_projectfilesystemlocation_type(projectfilesystemlocation_type):
    """
    Validate ProjectFileSystemLocation type property
    Property: ProjectFileSystemLocation.Type

    Raises:
        ValueError: if the value is not one of the allowed type names.
    """
    # BUG FIX: this was the bare string "EFS", so `not in` performed a
    # substring test and wrongly accepted values like "E", "F", "FS" and "".
    # A tuple makes membership an exact-match test.
    VALID_PROJECTFILESYSTEMLOCATION_TYPE = ("EFS",)
    if projectfilesystemlocation_type not in VALID_PROJECTFILESYSTEMLOCATION_TYPE:
        raise ValueError(
            "ProjectFileSystemLocation Type must be one of: %s"
            % ", ".join(VALID_PROJECTFILESYSTEMLOCATION_TYPE)
        )
    return projectfilesystemlocation_type
|
def filter_case_insensitive(names, complete_list):
    """Split process names into a `known` and `unknown` list.

    Membership in ``complete_list`` is tested case-insensitively; the
    returned names keep their original casing and order.  A single string
    is treated as a one-element list.
    """
    if isinstance(names, str):
        names = [names]
    known = {entry.lower() for entry in complete_list}
    contained = [name for name in names if name.lower() in known]
    missing = [name for name in names if name.lower() not in known]
    return contained, missing
|
def combine_rel_error(poly_rel_err):
    """Return a simplified expression for a given relative error bound.

    The input should be of the type [(Monomial, n)] where n's are integers.
    Each monomial's coefficient is scaled by its n (on a copy, leaving the
    input untouched), and the largest n is tracked.

    The result is ([Monomial], int).
    """
    scaled_monomials = []
    max_n = 0
    for monomial, n in poly_rel_err:
        max_n = max(max_n, n)
        scaled = monomial.copy()
        scaled.c *= n
        scaled_monomials.append(scaled)
    return (scaled_monomials, max_n)
|
def split_levels(fields):
    """
    Convert dot-notation such as ['a', 'a.b', 'a.d', 'c'] into
    current-level fields ['a', 'c'] and next-level fields
    {'a': ['b', 'd']}.
    """
    first_level_fields = []
    next_level_fields = {}
    if not fields:
        return first_level_fields, next_level_fields
    for field in fields:
        if '.' not in field:
            first_level_fields.append(field)
            continue
        head, tail = field.split('.', 1)
        first_level_fields.append(head)
        next_level_fields.setdefault(head, []).append(tail)
    # Deduplicate; note the resulting order is unspecified (set round-trip).
    first_level_fields = list(set(first_level_fields))
    return first_level_fields, next_level_fields
|
def good_suffix_mismatch(i, big_l_prime, small_l_prime):
    """
    Compute the good-suffix shift for a mismatch at offset i, given the
    Boyer-Moore tables L' and l'.

    :param i: the position of mismatch
    :param big_l_prime: L'
    :param small_l_prime: l'
    :return: the amount of shift
    """
    length = len(big_l_prime)
    assert i < length
    # Mismatch at the last position: nothing matched, nowhere to shift.
    if i == length - 1:
        return 0
    matched_from = i + 1  # leftmost matching position of P
    shift_anchor = big_l_prime[matched_from]
    if shift_anchor > 0:
        # Some substring of P matches the suffix; align it under the match.
        return length - shift_anchor
    # No such substring: fall back to the longest suffix of the match that
    # is also a prefix of P (may shift P all the way past).
    return length - small_l_prime[matched_from]
|
def remove_name_counter(name_in):
    """
    Strip the counter TensorFlow appends to layer names, e.g.
    <name>/kernel:0 -> <name>_0/kernel:0 needs the trailing _0 removed.

    The situation gets complicated because SNN toolbox assigns layer names
    that contain the layer shape, e.g. 00Conv2D_3x32x32, and parameter
    names may contain underscores of their own, e.g.
    00DepthwiseConv2D_3X32x32_0/depthwise_kernel:0.
    """
    # Names without both separators carry no counter to strip.
    if '_' not in name_in or '/' not in name_in:
        return name_in
    layer_part, _sep, param_part = str(name_in).partition('/')
    assert '/' not in param_part, "Variable name must not contain '/'."
    # Only the part before '/' matters; keep the first underscore segment
    # pair (assigned by SNN toolbox) and drop any trailing counter.
    pieces = layer_part.split('_')
    return pieces[0] + '_' + pieces[1] + '/' + param_part
|
def _strip_major_version(version: str) -> str:
"""
>>> _strip_major_version('1.2.3')
'2.3'
>>> _strip_major_version('01.02.03')
'02.03'
>>> _strip_major_version('30.40')
'40'
>>> _strip_major_version('40')
''
"""
return ".".join(version.split(".")[1:])
|
def format_name(f_name, l_name):
    """Format a first and last name into a title-cased result string.

    Returns an error message when either name is the empty string.
    """
    # Bail out early when no valid inputs are given.
    if f_name == "" or l_name == "":
        return "You didn't provide valid inputs."
    return f"Result: {f_name.title()} {l_name.title()}"
|
def quicksort(arr):
    """Return a sorted copy of *arr* via recursive quicksort (first-element pivot).

    BUG FIX: the original doctest referenced an undefined name ``arr`` and
    therefore failed under doctest; the example below is self-contained.

    >>> quicksort([7, 1, 0, 2, 6, -23, 10, 1, 23, 53, 235, 256, 53])
    [-23, 0, 1, 1, 2, 6, 7, 10, 23, 53, 53, 235, 256]
    >>> from string import ascii_letters
    >>> quicksort(list(reversed(ascii_letters))) == sorted(ascii_letters)
    True
    """
    if len(arr) <= 1:
        return arr
    # Partition the tail around the first element; '<=' keeps duplicates
    # of the pivot on the left so they are not lost.
    pivot, rest = arr[0], arr[1:]
    left = [x for x in rest if x <= pivot]
    right = [x for x in rest if x > pivot]
    return quicksort(left) + [pivot] + quicksort(right)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.