content
stringlengths 42
6.51k
|
---|
def get_sum(lst):
    """Return the sum of the values in *lst*.

    :param lst: iterable of addable values
    :return: the total (0 for an empty list)
    """
    # Built-in sum starts at 0 and folds left-to-right, exactly like the
    # manual accumulator loop it replaces.
    return sum(lst)
|
def policyNoAction(decstate):
    """Shaping policy that never acts: always returns an empty set."""
    return set()
|
def map_serial_number(facilities) -> str:
    """Return the serial number of the first facility as a string.

    NOTE: a missing "serialNumber" key yields the literal string "None",
    and an empty facilities list raises IndexError (unchanged behavior).
    """
    first_facility = facilities.get("body", {}).get("facilitiesList", [])[0]
    serial = first_facility.get("serialNumber", None)
    return str(serial)
|
def calculate_prelog_term(bandwidth, number):
    """Compute the prelog term: allocated bandwidth divided by user count.

    Args:
        bandwidth: (int) bandwidth allocated to the basestation (exclusive or pooled)
        number: (numpy array) per-basestation count of associated users
    Returns:
        prelog_term: (float) ratio of bandwidth to number (elementwise for arrays)
    """
    return bandwidth / number
|
def clean_symbols(in_text):
    """Convert superscript/degree symbols in *in_text* to ASCII text.

    Replaces \u00b2 -> '2', \u00b3 -> '3', \u00b0 -> 'Degrees'.  Unlike the
    previous version (an if/elif chain that converted only the FIRST kind
    of symbol found), all symbol kinds are converted, so mixed inputs such
    as "x\u00b2 5\u00b0" come out fully cleaned.  Inputs containing no
    symbols are returned unchanged.
    """
    replacements = ((u'\u00B2', '2'), (u'\u00B3', '3'), (u'\u00B0', 'Degrees'))
    for symbol, ascii_text in replacements:
        # str() guard mirrors the original membership test for non-str input.
        if symbol in str(in_text):
            in_text = in_text.replace(symbol, ascii_text)
    return in_text
|
def _ProcessOwnerLastVisitSD(fmt):
"""Convert a 'ownerlastvisit' sort directive into SQL."""
left_joins = [
(fmt('User AS {alias} ON (Issue.owner_id = {alias}.user_id OR '
'Issue.derived_owner_id = {alias}.user_id)'), [])]
order_by = [
(fmt('ISNULL({alias}.last_visit_timestamp) {sort_dir}'), []),
(fmt('{alias}.last_visit_timestamp {sort_dir}'), [])]
return left_joins, order_by
|
def char_to_ix(chars):
    """Build a mapping from each character to its index.

    Arguments:
        chars -- list of characters (the character set)
    Returns:
        dict mapping character -> position in the list
    """
    mapping = {}
    for index, ch in enumerate(chars):
        mapping[ch] = index
    return mapping
|
def is_prime(n):
    """Check whether n is a prime number.

    Returns: True if n is prime, False otherwise.
    """
    if n < 2:
        return False
    if n == 2:
        # the only even prime
        return True
    if n % 2 == 0:
        return False
    # Trial division by odd candidates up to sqrt(n); n is prime iff no
    # candidate divides it evenly.
    limit = int(n ** 0.5) + 1
    return all(n % divisor for divisor in range(3, limit, 2))
|
def search_to_url(search: str) -> str:
    """Transform user search terms into a ready-to-use Indeed UK jobs URL."""
    # Lowercase and percent-encode spaces only, matching the site's format.
    query = search.lower().replace(" ", "%20")
    return f"https://uk.indeed.com/jobs?q={query}&l=United%20Kingdom"
|
def midi_to_chroma(pitch):
    """Map a MIDI pitch (e.g. 60 == C) to its chroma class.

    A == 0, A# == 1, ..., G# == 11.
    """
    pitch_class = pitch % 12      # 0 corresponds to C in MIDI numbering
    return (pitch_class + 3) % 12  # rotate so that A maps to 0
|
def left_index(i):
    """Return the index of the left child of the heap element at position *i*."""
    return 2 * i + 1
|
def merge_dict(d1, d2, merge=lambda x, y: y):
    """
    Merges two dictionaries, non-destructively, combining
    values on duplicate keys as defined by the optional merge
    function. The default behavior replaces the values in d1
    with corresponding values in d2. (There is no other generally
    applicable merge strategy, but often you'll have homogeneous
    types in your dicts, so specifying a merge technique can be
    valuable.)
    Examples:
    >>> d1 = {'a': 1, 'c': 3, 'b': 2}
    >>> d2 = merge_dict(d1, d1)
    >>> len(d2)
    3
    >>> [d2[k] for k in ['a', 'b', 'c']]
    [1, 2, 3]
    >>> d3 = merge_dict(d1, d1, lambda x,y: x+y)
    >>> len(d3)
    3
    >>> [d3[k] for k in ['a', 'b', 'c']]
    [2, 4, 6]
    """
    # Recursion base case: once we reach a non-dict value, combine the two
    # sides with the user-supplied merge function.
    if not isinstance(d1, dict):
        return merge(d1, d2)
    # Shallow copy keeps the operation non-destructive for d1.
    result = dict(d1)
    if d2 is None:
        return result
    for k, v in list(d2.items()):
        if k in result:
            # Duplicate key: recurse so nested dicts merge key-by-key;
            # scalar values hit the base case above.
            result[k] = merge_dict(result[k], v, merge=merge)
        else:
            result[k] = v
    return result
|
def from_key_to_line_number(key):
    """Extract the line number embedded in *key*.

    The key has the form "<id>.<line>[.<rest>]", e.g. "Q005624.12.x".
    Sometimes the line segment carries a redundant trailing "l"
    ("Q005624.1l" for example), which is ignored.

    :param key: The key to parse
    :return: the line number, or -1 if it cannot be parsed
    """
    parts = key.split(".", 2)
    if len(parts) < 2:
        # No line-number segment at all; previously this raised IndexError.
        return -1
    n = parts[1]
    # endswith also copes with an empty segment (old n[-1] raised IndexError).
    if n.endswith("l"):
        n = n[:-1]
    if not n.isdigit():
        return -1
    return int(n)
|
def parse_file_path(input_file_path):
    """Split a file path into its components.

    :param input_file_path: path string to decompose
    :return: (dirname, basename, root, ext) tuple
    """
    import os
    directory, filename = os.path.split(input_file_path)
    stem, extension = os.path.splitext(filename)
    return directory, filename, stem, extension
|
def calculate_handlen(hand):
    """
    Returns the length (number of letters) in the current hand.

    hand: dictionary (string -> int) mapping a letter to how many times
        it appears in the hand
    returns: integer total count of letters
    """
    # The hand length is simply the sum of all per-letter counts; sum()
    # replaces the hand-rolled accumulator loop.
    return sum(hand.values())
|
def remove_custom_words(text, custom_wordlist):
    """Return *text* with every word in *custom_wordlist* removed (matched case-insensitively)."""
    kept = []
    for word in text.split():
        if word.lower() not in custom_wordlist:
            kept.append(word)
    return " ".join(kept)
|
def get_insert_query(table_name: str) -> str:
    """Build a SQL query to insert a RDF triple into a MVCC-PostgreSQL table.
    Argument: Name of the SQL table in which the triple will be inserted.
    Returns: A prepared SQL query that can be executed with a tuple (subject, predicate, object).
    """
    # NOTE(review): table_name is interpolated directly into the SQL text.
    # Identifiers cannot be bound as query parameters, so callers must ensure
    # table_name comes from a trusted source (never raw user input).
    # insert_t is stamped with the transaction time and delete_t with
    # 'infinity', marking the triple as currently live under MVCC.
    return f"INSERT INTO {table_name} (subject, predicate, object, insert_t, delete_t) VALUES (%s, %s, %s, transaction_timestamp(), 'infinity'::timestamp) ON CONFLICT DO NOTHING"
|
def percentify(value: float) -> str:
    """Format a ratio as a percentage string with two decimals, e.g. 0.5 -> '50.00 %'."""
    return f'{value * 100.0:.2f} %'
|
def union(arr1, arr2):
    """Return the number of distinct elements in the union of arr1 and arr2.

    Time Complexity : O(n)
    Space Complexity : O(n)
    """
    distinct = set(arr1)
    distinct.update(arr2)
    return len(distinct)
|
def escape_url(raw):
    """
    Percent-encode *raw* to prevent code-injection craziness. (Hopefully.)
    """
    from urllib.parse import quote
    # Keep '/', '#' and ':' verbatim so scheme, path and fragment survive.
    escaped = quote(raw, safe='/#:')
    return escaped
|
def findUTMZone(lon, lat):
    """Find the UTM zone for lon and lat values.

    lon -180 -- -174 -> 1 ... 174 -- 180 -> 60
    lat <= 0 -> hemisphere 'S', > 0 -> hemisphere 'N'

    Parameters
    ----------
    lon : float
    lat : float

    Returns
    -------
    str :
        zone + hemisphere
    """
    zone = int((int(lon) + 180) / 6) + 1
    hemisphere = 'N' if lat > 0 else 'S'
    return f"{zone}{hemisphere}"
|
def dist2(x1, y1, x2, y2):
    """Return the squared Euclidean distance between (x1, y1) and (x2, y2)."""
    dx = x2 - x1
    dy = y2 - y1
    return dx ** 2 + dy ** 2
|
def _combined_model_name(org, repo=None):
"""Return the name of the combined model for a repo or organization.
If repo is specified looks for a repo specific model. If repo is
none we return an org wide model.
Args:
org: Name of the org.
repo: (Optional) The name of the repo
"""
if repo:
return f"{org}/{repo}_combined"
return f"{org}_combined"
|
def editFile(linesIn):
    """
    This function carries out the automatic file editing, adjusting lines in
    Tapenade generated code as needed.

    Parameters
    ----------
    linesIn : list of string
        Lines of original source code to be parsed

    Returns
    -------
    linesOut : list of string
        Lines of updated code returned
    """
    # No per-line transformations are applied at the moment; the original
    # index-driven while loop simply appended every line, which is a copy.
    linesOut = [line for line in linesIn]
    return linesOut
|
def add_link_(s, addee):
    """Wrap *addee* in an HTML anchor pointing at *s*.

    Returns '<a href="s" target="_blank">addee</a>' when *s* looks like a
    URL (starts with 'http').  For non-URL values *addee* is returned
    unwrapped; previously this path fell off the end of the `if` and
    raised UnboundLocalError.
    """
    if s.startswith('http'):
        return '<a href="{0}" target="_blank">'.format(s) + addee + '</a>'
    # Not a linkable target: return the label unmodified instead of crashing.
    return addee
|
def compare_version(version_req, current_version):
    """Report whether the host is already at the required version.

    Args:
        version_req: version we want to be at (dotted string, e.g. "2.1.1"),
            or None meaning "no requirement"
        current_version: version the host is currently at, or None if unknown
    Returns:
        True if current_version >= version_req, False otherwise.
    Raises:
        ValueError: if a version component is not an integer.
    """
    # `is None` instead of `== None` (identity is the Python idiom).
    if current_version is None:
        return False
    if version_req is None:
        return True
    if current_version == version_req:
        return True
    current_parts = current_version.split(".")
    required_parts = version_req.split(".")
    # Compare component-wise; zip stops at the shorter version, which copes
    # with comparing e.g. "2.1.1" against "2.2".
    for cur, req in zip(current_parts, required_parts):
        if int(cur) < int(req):
            return False
        if int(cur) > int(req):
            return True
    # All shared components are equal (e.g. "2.1.1" vs "2.1").  The side
    # with extra components is the newer one; equal lengths means an exact
    # match, so current >= required.
    return len(current_parts) >= len(required_parts)
|
def _filter_out_underscore(object_name):
"""Since class names don't allow underscore in names, we're parsing them
out"""
return object_name if "_" not in object_name \
else object_name.replace("_", "")
|
def arctan(talj, kvot, product):
    """Compute arctangent using a series approximation"""
    # Integer-only evaluation of an arctan-style series: the numerator
    # `talj` is scaled by `product`, then repeatedly floor-divided by
    # `kvot`, and each intermediate is divided by the odd factor
    # 1, 3, 5, ... before being accumulated.
    # NOTE(review): there is no visible alternating sign here; presumably
    # the caller folds signs/terms externally (this resembles fixed-point
    # Machin-formula pi computations) -- confirm against the original
    # algorithm before changing anything.
    summation = 0
    talj *= product
    qfactor = 1
    # Loop terminates when repeated integer division drives talj to 0.
    while talj:
        talj //= kvot
        summation += (talj // qfactor)
        qfactor += 2
    return summation
|
def list_to_dict(list_to_convert: list, key_name: str) -> dict:
    """
    Convert a list of dictionaries to a dictionary keyed on a common property (i.e. name).

    Careful, as data will be lost for duplicate entries; this assumes the list is a "set".
    (The previous annotation declared ``list_to_convert`` as ``dict``; it is a list.)

    :param list_to_convert: A list of dictionaries
    :param key_name: A value from each dict to use as the key.
    :return: A dictionary.
    """
    converted_dict = dict()
    if list_to_convert:
        for item in list_to_convert:
            converted_dict[item[key_name]] = item
    return converted_dict
|
def add_prefix(dct, prefix, sep="/"):
    """
    Return a copy of `dct` with ``prefix + sep`` prepended to every key.

    :param dct: Input dict
    :type dct: dict
    :param prefix: Prefix
    :type prefix: str
    :param sep: Separator between prefix and key
    :type sep: str
    :return: Dict with prefix prepended to all keys
    :rtype: dict
    """
    prefixed = {}
    for key, value in dct.items():
        prefixed[prefix + sep + key] = value
    return prefixed
|
def hamming_str(str1, str2):
    """Count the # of positions at which equal-length strings str1 and str2 differ."""
    return sum(1 for ch1, ch2 in zip(str1, str2) if ch1 != ch2)
|
def my_isnumeric(value: str):
    """
    Return True if *value* can be converted to a float, False otherwise.
    """
    try:
        float(value)
        return True
    except ValueError:
        return False
|
def find_first_duplicate(arr):
    """
    Return the element whose second occurrence appears earliest in *arr*.
    If no element repeats, return -1.
    """
    # Fast path: all elements distinct.
    if len(set(arr)) == len(arr):
        return -1
    # Walk the list; the first value already seen is, by construction, the
    # one whose second occurrence has the smallest index.
    seen = set()
    for value in arr:
        if value in seen:
            return value
        seen.add(value)
|
def convert_ann_to_seq_label(ann):
    """Expand a structure annotation with integer time positions (indices)
    into a label sequence.

    Args:
        ann: Annotation (list [[s, t, 'label'], ...], with s, t being integers)
    Returns:
        list repeating each segment's label (t - s) times
    """
    return [seg[2] for seg in ann for _ in range(seg[1] - seg[0])]
|
def tuple_set(value):
    """
    Serialize odml-style tuple values to a string like "(a;b;c)".

    :param value: iterable of strings (odml tuple values).
    :return: string, or None for empty input.
    """
    if not value:
        return None
    joined = ";".join(value)
    return "(%s)" % joined
|
def get_mask_ids(tokens, max_seq_length):
    """Build an attention mask: 1 for real tokens, 0 for padding positions.

    Raises IndexError when tokens exceed max_seq_length.
    """
    n = len(tokens)
    if n > max_seq_length:
        raise IndexError("Token length more than max seq length!")
    mask = [1] * n
    mask.extend([0] * (max_seq_length - n))
    return mask
|
def _specs(iport, entity, features):
"""Args:
iport {} -- [description]
entity {[type]} -- [description]
features {[type]} -- [description]
Returns:
[type] -- [description]
"""
return {"import": iport, "features": features, "entity": entity}
|
def is_control_char(c):
    """Return True if 'c' is a control character.

    A character counts as control when its code point is below 32
    (excluding tab, CR, LF) or above 255 (outside extended ASCII).
    An ASCII-compatible character set is assumed; accepts either a
    one-character string or an integer code point.
    """
    try:
        code = ord(c)
    except TypeError:
        # Already an integer code point (kept for Python 2.7 compatibility).
        code = c
    # Tab, carriage return and newline are permitted whitespace.
    if code in (ord("\t"), ord("\r"), ord("\n")):
        return False
    return code < 32 or code > 255
|
def default_colors_to_resets(s):
    """Hack to make sphinxcontrib.ansi recognized sequences"""
    # Map the ANSI "default foreground" (39) and "default background" (49)
    # codes to a full reset (0), which sphinxcontrib.ansi understands.
    out = s.replace(b"[39m", b"[0m")
    return out.replace(b"[49m", b"[0m")
|
def is_empty(in_str):
    """Return str(in_str), or the default value "na" when it stringifies to empty."""
    text = str(in_str)
    return text if text else "na"
|
def parse_int(m, default=0):
    """Coerce *m* to an int.

    :param m: an int (returned as-is) or a str to convert
    :param default: value returned when a str cannot be parsed
    :return: the integer value of m, or *default* for unparseable strings
    :raises Exception: if m is neither int nor str
    """
    if type(m) == int:
        return m
    elif type(m) == str:
        try:
            return int(m)
        except ValueError:
            # Only catch parse failures; the previous bare `except` also
            # swallowed KeyboardInterrupt/SystemExit.
            return default
    else:
        raise Exception('error input %s, cannot parse' % m)
|
def reverse(domain):
    """Reverse the labels of a domain name.

    Usable for reverse lookups when the IP address should be reversed.
    """
    labels = domain.split(".")
    labels.reverse()
    return ".".join(labels)
|
def dump_observations(profiles):
    """
    Dump observations data into json format accepted by the plotting library
    :return: list of dicts representing observations in the profiles
    """
    data = []
    for profile in profiles:
        has_truth = profile.new_observation_true is not None
        for idx, prediction in enumerate(profile.new_observation_predictions):
            # Pair each variable name with the observation's values.
            record = dict(zip(profile.all_variable_names,
                              profile.new_observation.iloc[idx]))
            record['_yhat_'] = prediction
            record['_label_'] = profile._label
            record['_ids_'] = idx
            record['_y_'] = profile.new_observation_true[idx] if has_truth else None
            data.append(record)
    return data
|
def front_pad_string(string, length: int = 2, char: str = "0") -> str:
    """Left-pad *string* with *char* until it is at least *length* long."""
    padded = str(string)  # accept non-string input (ints etc.)
    while len(padded) < length:
        padded = char + padded
    return padded
|
def target_pattern_to_path(target_pattern):
    """
    Converts a bazel target pattern to a directory path.
    For example:
    //projects/libs/servicelibs/srpc/srpc-api:srpc-api -> projects/libs/servicelibs/srpc/srpc-api
    //projects/libs/servicelibs/srpc/... -> projects/libs/servicelibs/srpc
    """
    path = target_pattern
    # Drop the leading "//" (workspace-absolute) or "/" prefix.
    if path.startswith("//"):
        path = path[2:]
    elif path.startswith("/"):
        path = path[1:]
    # Drop the target name after the last ':'.
    colon = path.rfind(":")
    if colon != -1:
        path = path[:colon]
    # Drop a trailing "/..." recursive wildcard.
    if path.endswith("/..."):
        path = path[:-4]
    return path
|
def ngrams(words, n=1):
    """
    Return all the `n`-grams in the list `words` as a list of tuples,
    each tuple containing one n-gram.
    """
    window_count = len(words) - n + 1
    return [tuple(words[start:start + n]) for start in range(window_count)]
|
def imagej_shape(shape, rgb=None):
    """Return shape normalized to 6D ImageJ hyperstack TZCYXS.
    Raise ValueError if not a valid ImageJ hyperstack shape.
    >>> imagej_shape((2, 3, 4, 5, 3), False)
    (2, 3, 4, 5, 3, 1)
    """
    shape = tuple(int(i) for i in shape)
    ndim = len(shape)
    # The previous guard `1 > ndim > 6` could never be true (a chained
    # comparison requires both sides); per the error message the intent is
    # to reject anything outside 2..6 dimensions.
    if ndim < 2 or ndim > 6:
        raise ValueError("invalid ImageJ hyperstack: not 2 to 6 dimensional")
    if rgb is None:
        # Guess RGB-ness from a trailing sample axis of size 3 or 4.
        rgb = shape[-1] in (3, 4) and ndim > 2
    if rgb and shape[-1] not in (3, 4):
        raise ValueError("invalid ImageJ hyperstack: not a RGB image")
    if not rgb and ndim == 6 and shape[-1] != 1:
        raise ValueError("invalid ImageJ hyperstack: not a non-RGB image")
    if rgb or shape[-1] == 1:
        return (1, ) * (6 - ndim) + shape
    else:
        return (1, ) * (5 - ndim) + shape + (1,)
|
def isCollision(x: int, y: int, wallBuf: list) -> bool:
    """Check whether the bird at (x, y) collides with any wall in wallBuf."""
    for wall in wallBuf:
        # Horizontally inside the 40px-wide wall column...
        inside_column = wall[0] <= x < wall[0] + 40
        # ...but NOT inside the 100px gap between top and bottom wall.
        in_gap = wall[1] - 100 <= y < wall[1]
        if inside_column and not in_gap:
            return True
    return False
|
def handle_count(data):
    """
    Handle tick-count action.
    :param data: dict server response; expected to contain "status" and
        optionally a "value" dict with a "count" entry
    :return: dict with "cmd" and "msg" keys describing the next action
    :raises KeyError: if the server sent an unrecognized message shape
    """
    try:
        print("Data: {}".format(data))
        # Key was not found, so create new key
        if data["status"] == "error":
            return {"cmd": "put", "msg": 'put\ntick\n{"count": 0}\n'}
        # Handle only to messages which has value and count keys
        # If those keys are not found, then ask tick-key again
        if "value" in data and "count" in data["value"]:
            current_count = int(data["value"]["count"])
            # NOTE(review): the commented-out line below would write back an
            # incremented count; the live code only reports the current one.
            # return {"cmd": "tick", "msg": 'put\ntick\n{"count": '+str(current_count+1)+'}\n'}
            return {"cmd": "tick", "msg": current_count}
        else:
            return {"cmd": "get", "msg": "get\ntick\n"}
    except KeyError:
        raise KeyError("Server sent unknown message")
|
def decode(tuple_):
    """XOR the two hex-string elements of *tuple_* and return the result in hex."""
    first = int(tuple_[0], 16)
    second = int(tuple_[1], 16)
    return format(first ^ second, 'x')
|
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
|
def quotient(x, y, *args, **kwds):
    """
    Return the quotient object x/y, e.g., a quotient of numbers or of a
    polynomial ring x by the ideal generated by y, etc.
    EXAMPLES::
        sage: quotient(5,6)
        5/6
        sage: quotient(5.,6.)
        0.833333333333333
        sage: R.<x> = ZZ[]; R
        Univariate Polynomial Ring in x over Integer Ring
        sage: I = Ideal(R, x^2+1)
        sage: quotient(R, I)
        Univariate Quotient Polynomial Ring in xbar over Integer Ring with modulus x^2 + 1
    """
    # EAFP: rich algebraic objects (rings, modules) expose a .quotient()
    # method; plain numeric types do not, so fall back to ordinary division.
    # NOTE(review): an AttributeError raised *inside* x.quotient() is also
    # caught here and silently turned into x / y -- confirm that is intended.
    try:
        return x.quotient(y, *args, **kwds)
    except AttributeError:
        return x / y
|
def minimum(numbers):
    """Find the min of a sequence of numbers.

    Return None if the sequence is empty.
    """
    # min() performs the same pairwise `<` comparisons as the manual loop.
    return min(numbers) if numbers else None
|
def organize_by_chromosome(genes, transcripts):
    """Group genes and transcripts by chromosome.

    :param genes: dict mapping ID -> gene object with a .chromosome attribute
    :param transcripts: dict mapping ID -> transcript object with a
        .chromosome attribute
    :return: (gene_dict, transcript_dict), each mapping
        chromosome -> {ID: object}

    The previous version created a one-entry dict and then redundantly
    re-assigned the same key (twice in the transcript loop); setdefault
    removes the duplication without changing the result.
    """
    gene_dict = {}
    transcript_dict = {}
    for ID, gene in genes.items():
        gene_dict.setdefault(gene.chromosome, {})[ID] = gene
    for ID, transcript in transcripts.items():
        transcript_dict.setdefault(transcript.chromosome, {})[ID] = transcript
    return gene_dict, transcript_dict
|
def field_is_empty(field):
    """Check for an empty field value (after str().strip()).

    FIXME: field_is_empty not valid response on field bool=True !
    """
    # Empty when the field itself is falsy or it stringifies to pure
    # whitespace; str(x).strip() being truthy covers both '' checks of the
    # original chained condition.
    return not (field and str(field).strip())
|
def str2dicts(data):
    """
    Create a list of dictionaries from a whitespace and newline delimited text.
    For example, this:
    cpu 1100
    ram 640
    cpu 2200
    ram 1024
    becomes:
    [{'cpu': '1100', 'ram': '640'}, {'cpu': '2200', 'ram': '1024'}]
    """
    list_data = []
    list_data.append({})
    d = list_data[-1]
    lines = data.split('\n')
    for line in lines:
        line = line.strip()
        if not line:
            # Blank line: close the current record and start a new one.
            d = {}
            list_data.append(d)
            d = list_data[-1]
            continue
        whitespace = line.find(' ')
        if not whitespace:
            # Only triggers when the FIRST character is a space (find == 0).
            # NOTE(review): find() returns -1 when no space exists, which is
            # truthy, so a key-only line falls through below and is stored
            # as {line[:-1]: line} -- confirm whether that is intended.
            continue
        key = line[0:whitespace]
        value = line[whitespace + 1:]
        d.update({key: value})
    # Drop empty records produced by leading/consecutive blank lines.
    list_data = [val for val in list_data if val != {}]
    return list_data
|
def _get_eeg_sfreq(stream: dict) -> int:
"""Retrieve the nominal sampling rate from the stream."""
return int(stream["info"]["nominal_srate"][0])
|
def testNumeric(var):
"""
test if var is numeric
:params var: any variable
:returns: true if var is int or float, false otherwise
"""
if isinstance(var, int):
return True
elif isinstance(var, float):
return True
else:
return False
|
def compute_triangle_vol(node_coords):
    """Compute the triangle area via the shoelace formula."""
    p0, p1, p2 = node_coords[0], node_coords[1], node_coords[2]
    # Signed double area; same term order as the classic shoelace expansion.
    shoelace = (
        p0[0] * p1[1]
        - p0[1] * p1[0]
        + p1[0] * p2[1]
        - p1[1] * p2[0]
        + p2[0] * p0[1]
        - p2[1] * p0[0]
    )
    return 0.5 * abs(shoelace)
|
def format_gross_range(param, sensor_range, user_range, site, node, sensor, stream, source):
    """
    Create a dictionary object that can later be saved to a CSV formatted
    file for use in the Gross Range lookup tables.

    :param param: parameter name of the variable for the calculated user range
    :param sensor_range: default sensor, or fail range, usually referenced
        from the vendor documentation
    :param user_range: user range, or sensor range, calculated from the data
    :param site: Site designator, extracted from the first part of the
        reference designator
    :param node: Node designator, extracted from the second part of the
        reference designator
    :param sensor: Sensor designator, extracted from the third and fourth
        part of the reference designator
    :param stream: Stream name that contains the data of interest
    :param source: Notes or comments about how the Gross Range values were
        obtained
    :return qc_dict: dictionary with the sensor and user gross range values
        added in the formatting expected by the QC lookup tables
    """
    # Span bounds are serialized as strings for the lookup-table format.
    suspect_span = [f'{user_range[0]}', f'{user_range[1]}']
    fail_span = [f'{sensor_range[0]}', f'{sensor_range[1]}']
    return {
        'subsite': site,
        'node': node,
        'sensor': sensor,
        'stream': stream,
        'parameter': {
            'inp': param
        },
        'qcConfig': {
            'qartod': {
                'gross_range_test': {
                    'suspect_span': suspect_span,
                    'fail_span': fail_span
                }
            }
        },
        'source': source
    }
|
def count_items(dicionario):
    """Collect the distinct values and their occurrence counts from a column dict.

    INPUT:
        dicionario: column dictionary, e.g. {'foo11': 1, 'foo22': 2}
    OUTPUT:
        (item_types, count_items): the keys and their counts, in iteration
        order.  Also prints the grand total of occurrences.
    """
    item_types = list(dicionario.keys())
    count_items = list(dicionario.values())
    # Side effect kept from the original: report the total occurrence count.
    print(sum(count_items))
    return item_types, count_items
|
def to_int(rating_count):
    """Return a rating count (an int, or a string like "1,234 ratings") as an int."""
    if isinstance(rating_count, int):
        return rating_count
    first_token = rating_count.split()[0]
    # int() can't parse thousands separators, so strip any commas first
    # (replace is a no-op when none are present).
    return int(first_token.replace(',', ''))
|
def sign_string(value):
    """
    Return a string representing the sign of a numerical value: "+" for
    positive, "-" for negative, and "+/-" for 0.

    :param value: A numerical value.
    :return: The sign of that value as a string.
    """
    if value > 0:
        return "+"
    if value < 0:
        return "-"
    return "+/-"
|
def run_threaded(fx, *args, **kwargs):
    """
    Helper function to run a function in a separate thread
    :param fx: the function to run in a separate thread
    :param args: list arguments to pass to fx
    :param kwargs: dictionary keyword arguments to pass to fx
    :return: whatever fx returns
    """
    # NOTE(review): despite the name and docstring, this calls fx
    # synchronously on the CURRENT thread and returns its result directly.
    # No thread is created here -- confirm whether threading was intended
    # (e.g. threading.Thread(target=fx, ...)) or if this is a deliberate
    # drop-in stub.
    return fx(*args, **kwargs)
|
def lerp(a, b, t):
    """Linear interpolation from @a to @b as @t goes between 0 and 1."""
    # Same arithmetic as the classic (1-t)*a + t*b, kept bit-identical.
    weight_a = 1 - t
    return weight_a * a + t * b
|
def _convert_camel_to_pascal(camel_case_string: str) -> str:
"""
Convert a string provided in camelCase to PascalCase
"""
return camel_case_string[:1].upper() + camel_case_string[1:]
|
def get_additive_lane_count(non_zero_width_fields, side_filter=None):
    """Determine the number of through lanes on a street from a list of
    additive (field_name, field_value) tuples.

    :param non_zero_width_fields: list of tuples [(field_name, field_value), ...]
    :param side_filter: "right" or "left" to count one side only; None counts both.
        (Any other non-None value is treated as "left", matching the original.)
    :return: number of matching through-lane fields with a positive value
    """
    needle = "Through_Lane"
    if side_filter == "right":
        needle = "Right_" + needle
    elif side_filter is not None:
        needle = "Left_" + needle
    return sum(1 for field_name, field_value in non_zero_width_fields
               if needle in field_name and field_value > 0)
|
def get_tolerance_min_max(value, expected_tolerance):
    """
    Get minimum and maximum tolerance range.

    Args:
        value (int): value to find the minimum and maximum range for
        expected_tolerance (int): expected tolerance percentage
    Returns:
        (min_value, max_value) tuple of the tolerance band
    """
    value = int(value)  # accept numeric strings as well
    # Tolerance band as a fraction of the value.
    delta = (value * expected_tolerance) / 100
    return (abs(value - delta), abs(value + delta))
|
def Swap(xs, **unused_kwargs):
    """Return the first two elements of *xs* in swapped order."""
    first, second = xs[0], xs[1]
    return (second, first)
|
def find_longest_name(benchmark_list):
    """
    Return the length of the longest benchmark name in a given list of
    benchmark JSON objects (never less than 1).
    """
    # Seeding max() with 1 reproduces the original floor and also handles
    # an empty list.
    return max([1] + [len(bc['name']) for bc in benchmark_list])
|
def readonly_date_field_as_table_row(label, value):
    """Return a {'field_label', 'field_value'} row, formatting datetime
    values like "21st March 2020, 14:05"; other values pass through as-is.
    """
    import datetime
    if isinstance(value, datetime.datetime):
        day = value.day
        # Ordinal suffix: 1st/2nd/3rd with the 11th/12th/13th exceptions.
        if day % 10 == 1 and day != 11:
            suffix = 'st'
        elif day % 10 == 2 and day != 12:
            suffix = 'nd'
        elif day % 10 == 3 and day != 13:
            suffix = 'rd'
        else:
            suffix = 'th'
        value = value.strftime("%d" + suffix + " %B %Y, %H:%M")
    return {'field_label': label, 'field_value': value}
|
def _is_datetime(dict_):
"""Return True if dict is a JSON-LD datetime, False otherwise."""
return (len(dict_) == 2 and '@value' in dict_ and '@type' in dict_ and
dict_['@type'] == 'http://www.w3.org/2001/XMLSchema#dateTimeStamp')
|
def clean_int(s):
    """Parse an integer from *s*, ignoring surrounding spaces and trailing commas."""
    # The original's alternating strip loop is equivalent to removing all
    # leading spaces and all trailing spaces/commas in one go.
    return int(s.lstrip(' ').rstrip(' ,'))
|
def get_table_id(tablename):
    """Return an identifier for a model class.
    >>> get_table_id('items')
    'alkey:items#*'
    """
    return f'alkey:{tablename}#*'
|
def add_vat(price, vat):
    """
    Return (price including VAT, VAT amount).

    :param price: net price
    :param vat: VAT rate in percent
    """
    vat_amount = price * (vat / 100)
    gross = price + vat_amount
    return gross, vat_amount
|
def _set(data, name, value, unset_value=None):
"""Safely set data attribute or unset if user set it to a special value."""
if value is None:
return False
if unset_value is not None and value == unset_value:
data.pop(name, None)
elif value in ('-', ['-']):
data.pop(name, None)
else:
data[name] = value
return True
|
def parse_filters(filters=None):
    """Parse the `filters` CLI argument.

    Args:
        filters: A list of "key=value" strings (None is treated as empty).
    Returns:
        dict: Parsed filters mapping key -> value.  (The previous docstring
        said "list", but a dict has always been returned.)
    """
    # `filters=None` avoids the mutable-default-argument pitfall of the
    # previous `filters=[]` signature.
    parsed = {}
    for item in filters or []:
        # Split only on the first '=' so values may themselves contain '='
        # (the old unbounded split raised ValueError on "k=v=w").
        key, value = item.split("=", 1)
        parsed[key] = value
    return parsed
|
def format_variables_info_string(variables: list):
    """
    Return some textual information about the variables contained in this data source.
    Useful for CLI / REPL applications.

    :param variables: iterable of dicts carrying 'name', 'units',
        'long_name' and 'standard_name' keys.  (The previous ``dict``
        annotation was wrong: iterating a dict would yield plain string
        keys, which have no .get method.)
    :return: multi-line description string
    """
    if not variables:
        return 'No variables information available.'
    info_lines = []
    for variable in variables:
        info_lines.append('%s (%s):' % (variable.get('name', '?'), variable.get('units', '-')))
        info_lines.append(' Long name: %s' % variable.get('long_name', '?'))
        info_lines.append(' CF standard name: %s' % variable.get('standard_name', '?'))
        info_lines.append('')
    return '\n'.join(info_lines)
|
def time_per_line(desired_wpm, words_per_line):
    """
    Seconds to spend on each line to hit a target reading speed.

    Args:
        desired_wpm (int): the target words-per-minute value you wish
            to achieve
        words_per_line (int): how many words per line your test book
            contains on average
    Returns:
        seconds per line
    """
    # words_per_line / (3 * desired_wpm) minutes, converted to seconds.
    return (words_per_line / (3 * desired_wpm)) * 60
|
def marker_in_points(points, marker):
    """
    Check whether the marker appears in the list of points.

    :params points: a list of dicts that contain x, y, z
    :params marker: a dict that contains x, y, z
    :returns: index of the matching point, or None if absent
    """
    for idx, point in enumerate(points):
        if all(point[axis] == marker[axis] for axis in ("x", "y", "z")):
            return idx
    return None
|
def swap_short_telos_group_number(row):
    """
    Swap group 1 patients to group 2 and vice versa.

    Done purely for visualization reasons; has no impact whatsoever on
    interpretation of results -- just swapping numbers for readability.
    """
    if row == 1:
        return 2
    if row == 2:
        return 1
    return row
|
def rename_header(headers: list) -> list:
    """Replace the column names of an excel sheet with Type8 field names.

    Lowercases, joins words with underscores and drops '#'
    (e.g. 'Invoice#' -> 'invoice').  Mutates *headers* in place and
    returns it.
    """
    for idx, header in enumerate(headers):
        normalized = "_".join(header.lower().split())
        headers[idx] = normalized.replace("#", "")
    return headers
|
def generate_str_of_list_elements_with_indices(list_name, list_size):
    """
    Build a comma-separated string of indexed list elements.

    Parameters
    ----------
    list_name: str, name of the list
    list_size: int, number of list elements

    Returns
    -------
    str like "l[0], l[1], l[2]" (empty string for size 0)
    """
    # join() avoids the build-then-trim of the original concatenation loop.
    return ", ".join(f"{list_name}[{i}]" for i in range(list_size))
|
def oneHotEncode_4_evtypes_tau_decay_length_strack_length(x, r_vals):
    """
    This function one hot encodes the input for the event types
    cascade, tracks, doubel-bang, starting tracks
    :param x: numeric event-type code (0-9)
    :param r_vals: per-event reconstruction values; index 8 is used as a
        tau decay length and index 22 as a starting-track length
        (assumed from usage -- confirm against the r_vals layout/config)
    :return: list of four floats, the one-hot class vector
    """
    cascade = [1., 0., 0., 0.]
    track = [0., 1., 0., 0.]
    doublebang = [0., 0., 1., 0.]
    s_track = [0., 0., 0., 1.]
    # TODO: read these cut thresholds from the config, no hardcode
    # (translated from the original German note "aus config auslesen").
    cut_tau = 5.  # read from config, no hardcode
    cut_track = 75.  # read from config, no hardcode
    # map x to possible classes
    if int(x) in [5, 6]:
        # Tau-like types: long decay length shows as double-bang,
        # short as cascade.
        if r_vals[8] >= cut_tau:
            return doublebang
        else:
            return cascade
    elif int(x) in [3]:
        # Type 3: long enough track length marks a starting track.
        if r_vals[22] >= cut_track:
            return s_track
        else:
            return cascade
    else:
        # Remaining types map statically to a class.
        mapping = {0: cascade, 1: cascade, 2: track, 3: s_track, 4: track,
                   5: doublebang, 6: doublebang, 7: cascade, 8: track, 9: cascade}
        return mapping[int(x)]
|
def almost_zero(number):
    """
    Tell whether a number that should be zero is merely off due to
    floating-point error (within 1e-10 of zero).
    """
    return abs(number) < 1e-10
|
def str_filesize(size):
    """Format a byte count using binary units (K, M, G, T).

    >>> str_filesize(0)
    '0'
    >>> str_filesize(1023)
    '1023'
    >>> str_filesize(1024)
    '1K'
    >>> str_filesize(1024*2)
    '2K'
    >>> str_filesize(1024**2-1)
    '1023K'
    >>> str_filesize(1024**2)
    '1M'
    """
    import bisect
    d = [(1024-1, 'K'), (1024**2-1, 'M'), (1024**3-1, 'G'), (1024**4-1, 'T')]
    s = [x[0] for x in d]
    index = bisect.bisect_left(s, size) - 1
    if index == -1:
        # Below 1K: print the raw byte count.
        return str(size)
    b, u = d[index]
    # Floor division: under Python 3 the old `size / (b+1)` produced a
    # float ('1.0K'); the doctests expect whole numbers like '1K'.
    return str(size // (b + 1)) + u
|
def matrix_length(matrix):
    """
    Return the shortest value length found in >matrix< (a dict of sequences).
    """
    # The original built a set of lengths first; min over the raw lengths
    # gives the same answer without the intermediate collection.
    return min(len(values) for values in matrix.values())
|
def concat_filename(path: str, fname: str) -> str:
    """Concatenate a directory path and a filename.

    Arguments:
        path -- the path of the file (a trailing "/" is handled either way;
            an empty path returns just the filename -- the previous version
            raised IndexError on `path[-1]`)
        fname -- the filename
    """
    if not path:
        return fname
    if path.endswith("/"):
        return path + fname
    return path + "/" + fname
|
def dotprod(a, b):
    """Compute the dot product of two sparse vectors stored as dicts.

    Args:
        a (dict): first mapping of record/token -> value
        b (dict): second mapping of record/token -> value
    Returns:
        Sum of a[t] * b[t] over tokens present in both dicts (0 if none).
    """
    dotProd = 0
    for token, value in a.items():
        # `dict.has_key` was removed in Python 3; `in` is the replacement.
        if token in b:
            dotProd = dotProd + value * b[token]
    return dotProd
|
def int2bin(num, len_):
    """
    Convert non-negative int ``num`` into a list of ``len_`` bits (0/1).

    Only the ``len_`` least-significant bits are kept, MSB first.
    For e.g., ``num`` = `5`; `int2bin(num`, ``len_`` =4) = `[0, 1, 0, 1]`.
    For e.g., ``num`` = `12`; `int2bin(num`, ``len_`` =3) = `[1, 0, 0]`.

    Input
    -----
    num: int
        An integer to be converted into binary representation.
    len_: int
        An integer defining the length of the desired output.

    Output
    -----
    : list of int
        Binary representation of ``num`` of length ``len_``.
    """
    assert num >= 0, "Input integer should be non-negative"
    assert len_ >= 0, "width should be non-negative"
    # Extract bits by shifting, from most- to least-significant position.
    return [(num >> shift) & 1 for shift in range(len_ - 1, -1, -1)]
|
def calculate_individual_geo_mean(lines, index, geo_mean, num):
    """ Accumulate the geometric-mean product for one parse-results column.

    Multiplies ``geo_mean`` by every positive numeric value found at column
    ``index`` (after the first four tab-separated fields are dropped) and
    increments ``num`` for each.  Non-numeric values are tracked separately:
    if they all agree the common string is returned, otherwise "-1".
    """
    non_numeric = None
    for raw_line in lines:
        fields = raw_line.split("\t")[4:]
        value = fields[index]
        try:
            numeric = float(value)
        except ValueError:
            # Remember the first non-numeric string; flag disagreement.
            if not non_numeric:
                non_numeric = value
            elif value != non_numeric:
                non_numeric = "-1"
        else:
            if numeric > 0:
                geo_mean *= numeric
                num += 1
    return geo_mean, num, non_numeric
|
def cherrypick_dict(dictionary, keys):
    """
    Cherry-pick entries from a dictionary.
    Returns a new dictionary holding only the items whose key is in ``keys``.
    """
    return {key: value for key, value in dictionary.items() if key in keys}
|
def classification_result(y, y_pred):
    """
    Split sample indices by prediction outcome.

    :param y: sequence of true labels
    :param y_pred: sequence of predicted labels, same length as ``y``
    :return: tuple (indices predicted correctly, indices predicted wrongly)
    """
    assert len(y) == len(y_pred)
    correct, wrong = [], []
    for idx, (truth, prediction) in enumerate(zip(y, y_pred)):
        target = correct if truth == prediction else wrong
        target.append(idx)
    return correct, wrong
|
def condense_font_weight(css: str) -> str:
    """Condense keyword font weights into their shorter numeric equivalents.
    """
    substitutions = (
        ('font-weight:normal;', 'font-weight:400;'),
        ('font-weight:bold;', 'font-weight:700;'),
    )
    for keyword, numeric in substitutions:
        css = css.replace(keyword, numeric)
    return css
|
def get_volumes(container):
    """
    Retrieve container volume mount details.

    :param container: dict describing a container (Docker-inspect style);
        the 'Mounts' key may be missing or explicitly None.
    :return: list of dicts with 'source' and 'destination' per mount.
    """
    # "or []" guards both a missing key and an explicit None value;
    # the original raised TypeError iterating None in either case.
    mounts = container.get('Mounts') or []
    return [dict(source=mount['Source'], destination=mount['Destination'])
            for mount in mounts]
|
def readline_invisible(code):
    """
    Bracket ``code`` with readline's zero-width markers (\\001 / \\002) so
    the library knows it takes no space in the prompt.
    """
    return '\001' + str(code) + '\002'
|
def sortSELs(events):
    """
    sorts the sels by timestamp, then log entry number
    @param events: Dictionary containing events; the 'numAlerts' key and any
        key containing 'callout' are skipped.
    @return: list containing a list of the ordered log entries, and dictionary of keys
    """
    logNumList = []
    eventKeyDict = {}
    eventsWithTimestamp = {}
    for key in events:
        if key == 'numAlerts': continue
        if 'callout' in key: continue
        timestamp = events[key]['timestamp']
        # Bug fix: group on the dict itself.  The original tested membership
        # in an always-empty list, so events sharing a timestamp overwrote
        # each other instead of being grouped together.
        if timestamp not in eventsWithTimestamp:
            eventsWithTimestamp[timestamp] = [events[key]['logNum']]
        else:
            eventsWithTimestamp[timestamp].append(events[key]['logNum'])
        # map logNumbers to the event dictionary keys
        eventKeyDict[str(events[key]['logNum'])] = key
    # Order by timestamp, breaking ties by log entry number.
    for ts in sorted(eventsWithTimestamp):
        logNums = eventsWithTimestamp[ts]
        logNums.sort()
        logNumList = logNumList + logNums
    return [logNumList, eventKeyDict]
|
def num_successful_go_nogo_trials(eventcode):
    """
    Count successful trials in the go/no go tasks.

    :param eventcode: list of event codes from operant conditioning file
    :return: tuple of (number of successful go trials,
                       number of successful no-go trials)
    """
    go_trials = sum(1 for code in eventcode if code == 'SuccessfulGoTrial')
    nogo_trials = sum(1 for code in eventcode if code == 'SuccessfulNoGoTrial')
    return go_trials, nogo_trials
|
def make_users_groups_url(base_url, duke_unique_id):
    """
    Create url for fetching a users groups.
    :param base_url: base group manager url, with or without a trailing slash
        (eg. 'https://groups.oit.duke.edu/grouper-ws/servicesRest/json/v2_1_500/')
    :param duke_unique_id: str: unique id (number) of the user we want to build a url for
    :return: str: url we created
    """
    # rstrip avoids a double slash when base_url ends with '/', as the
    # documented example url does.
    return "{}/subjects/{}/groups".format(base_url.rstrip('/'), duke_unique_id)
|
def group_items(time_sheet):
    """
    Group time-sheet entries by their 'item' number.

    Returns a dict keyed by str(item number) whose values are the matching
    entries in their original order.  The 'item' field is removed from each
    entry (mutating the input dicts) once grouping is done.
    """
    grouped = {}
    for entry in time_sheet:
        grouped.setdefault(str(entry['item']), []).append(entry)
    # Drop the now-redundant grouping key from every entry.
    for entries in grouped.values():
        for entry in entries:
            del entry['item']
    return grouped
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.