def safe_append_in_dict(dictionary, key, val):
"""
Check if a list already exists for a given key. In that case append
val to that list, otherwise create a list with val as the first element
Parameters
==========
dictionary: target dictionary to be filled with new value
key: key for the dictionary
    val: value to append to the list which should exist or be created at the key
Returns
=======
dictionary: modified version of the input dictionary,
with added value to the target key
"""
existing_result = dictionary.get(key)
if existing_result is None:
dictionary[key] = [val]
else:
dictionary[key].append(val)
return dictionary
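# Illustrative usage (a minimal sketch, not part of the original snippet):
# the first call creates the list, the second appends to it.
_d = safe_append_in_dict({}, 'k', 1)
assert safe_append_in_dict(_d, 'k', 2) == {'k': [1, 2]}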
|
def get_id_by_link(link):
"""Takes link on vk profile and returns identificator of user, that can
be used in vkapi functions.
Vkontakte has to types of links:
1)vk.com/id[userid] ex.: vk.com/id1
2)vk.com/[user_alias] ex.: vk.com/anton21 , vk.com/vasya
We need to parse both of them.
"""
last_part = link.split("/")[-1]
    is_id = last_part[:2] == "id" and last_part[2:].isdigit()
return last_part[2:] if is_id else last_part
|
def str2bool(str_val):
"""
Convert string to boolean
:param str_val: boolean in str
:return: value in bool or raise error if not matched
"""
state = str_val.strip().lower()
if state in ('t', 'y', 'true', 'yes', '1'):
return True
elif state in ('f', 'n', 'false', 'no', '0'):
return False
else:
        raise ValueError('cannot interpret %r as a boolean' % str_val)
|
def extract_name_from_tags(cs, default_no_name="NO NAME"):
""" extract the host name from tags associated with an instance or vpc or subnet on Amazon EC2 """
    if cs is None:
        return default_no_name
    for d in cs:
        if d.get('Key') == 'Name' and 'Value' in d:
            return d['Value']
return default_no_name
|
def step_gradient(b_current, m_current, points, learning_rate):
"""One step of a gradient linear regression.
To run gradient descent on an error function, we first need to compute
its gradient. The gradient will act like a compass and always point us
downhill. To compute it, we will need to differentiate our error function.
Since our function is defined by two parameters (m and b), we will need
to compute a partial derivative for each.
Each iteration will update m and b to a line that yields slightly lower
error than the previous iteration.
The learning_rate variable controls how large of a step we take downhill
during each iteration. If we take too large of a step, we may step over
the minimum. However, if we take small steps, it will require many
iterations to arrive at the minimum.
"""
b_gradient = 0
m_gradient = 0
n = float(len(points))
for x, y in points:
b_gradient += -(2./n) * (y - ((m_current * x) + b_current))
m_gradient += -(2./n) * x * (y - ((m_current * x) + b_current))
new_b = b_current - (learning_rate * b_gradient)
new_m = m_current - (learning_rate * m_gradient)
return [new_b, new_m]
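# Worked example (illustrative; the two-point dataset is an assumption):
# for points lying on y = 2x and a start of b = 0, m = 0, one step moves m upward.
_points = [(1.0, 2.0), (2.0, 4.0)]
_new_b, _new_m = step_gradient(0.0, 0.0, _points, learning_rate=0.1)
assert _new_m > 0.0 and abs(_new_b) < 1.0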
|
def get_all_context_names(context_num):
"""Based on the nucleotide base context number, return
a list of strings representing each context.
Parameters
----------
context_num : int
number representing the amount of nucleotide base context to use.
Returns
-------
a list of strings containing the names of the base contexts
"""
if context_num == 0:
return ['None']
elif context_num == 1:
return ['A', 'C', 'T', 'G']
elif context_num == 1.5:
return ['C*pG', 'CpG*', 'TpC*', 'G*pA',
'A', 'C', 'T', 'G']
elif context_num == 2:
dinucs = list(set(
[d1+d2
for d1 in 'ACTG'
for d2 in 'ACTG']
))
return dinucs
elif context_num == 3:
trinucs = list(set(
[t1+t2+t3
for t1 in 'ACTG'
for t2 in 'ACTG'
for t3 in 'ACTG']
))
return trinucs
|
def checkTypes(args, **types):
"""Return **True** if types of all *args* match those given in *types*.
:raises: :exc:`TypeError` when type of an argument is not one of allowed
types
::
def incr(n, i):
'''Return sum of *n* and *i*.'''
checkTypes(locals(), n=(float, int), i=(float, int))
return n + i"""
    for arg, allowed in types.items():
        # isinstance() accepts a type or a tuple of types, so normalize lists
        if isinstance(allowed, list):
            allowed = tuple(allowed)
        if arg in args and not isinstance(args[arg], allowed):
val = args[arg]
if isinstance(allowed, (list, tuple)):
if len(allowed) > 1:
tstr = ', '.join([repr(tp.__name__) for tp in allowed[:-1]]
) + ', or ' + repr(allowed[-1].__name__)
else:
tstr = repr(allowed[0].__name__)
else:
tstr = repr(allowed.__name__)
raise TypeError('{0} must be an instance of {1}, not {2}'
.format(repr(arg), tstr, repr(type(val).__name__)))
return True
|
def get_intent(query):
"""Returns the intent associated with the client_response
:param query:
:return: [String]
"""
try:
return query.intent
except Exception as e:
print(e)
return None
|
def item_present_all_lists(item, lists: list):
"""Check if an item is present in all lists"""
for index in range(len(lists)):
if item not in lists[index]:
return False
return True
|
def sort_012(input_list):
"""Sort an array of 0s, 1s, and 2s in place.
Args:
input_list: A list of ints consisting of 0s, 1s, and 2s to be sorted
Returns:
input_list: The list of ints passed in, sorted.
"""
idx = 0
left = 0
right = len(input_list) - 1
while idx <= right:
if input_list[idx] == 0:
input_list[idx], input_list[left] = (
input_list[left],
input_list[idx],
)
left += 1
idx += 1
elif input_list[idx] == 2:
input_list[idx], input_list[right] = (
input_list[right],
input_list[idx],
)
right -= 1
else:
idx += 1
return input_list
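# Illustrative usage (sketch, not from the original source): one-pass
# Dutch-national-flag style sort of a mixed 0/1/2 list.
assert sort_012([2, 0, 1, 2, 0, 1]) == [0, 0, 1, 1, 2, 2]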
|
def _is_kpoint(line):
"""Is this line the start of a new k-point block"""
# Try to parse the k-point; false otherwise
toks = line.split()
# k-point header lines have 4 tokens
if len(toks) != 4:
return False
try:
# K-points are centered at the origin
xs = [float(x) for x in toks[:3]]
# Weights are in [0,1]
w = float(toks[3])
return all(abs(x) <= 0.5 for x in xs) and w >= 0.0 and w <= 1.0
except ValueError:
return False
|
def tag_name(name):
"""Removes expanded namespace from tag name."""
result = name[name.find('}') + 1:]
if result == 'encoded':
if name.find('/content/') > -1:
result = 'content'
elif name.find('/excerpt/') > -1:
result = 'excerpt'
return result
|
def flip_matrix(A):
"""
:param A:
:return:
"""
# 1. flip horizontally = reverse the elements in the sublists.
# reversed returns a iterator object vs .reverse() = inplace
# 2. 1-num gives us the compliment for a binary number
return [[1-num for num in A[i][::-1]] for i in range(len(A))]
|
def get_login_provider_items_from_database_cfgitems(_):
"""Return information for default login providers; present in all cluster
configurations.
"""
return {
"dcos-users": {
"authentication-type": "dcos-uid-password",
"description": "Default DC/OS login provider",
"client-method": "dcos-usercredential-post-receive-authtoken",
"config": {
"start_flow_url": "/acs/api/v1/auth/login"
}
},
"dcos-services": {
"authentication-type": "dcos-uid-servicekey",
"description": "Default DC/OS login provider",
"client-method": "dcos-servicecredential-post-receive-authtoken",
"config": {
"start_flow_url": "/acs/api/v1/auth/login"
}
}
}
|
def chi_par(x, A, x0, C):
"""
Parabola for fitting to chisq curve.
Arguments:
x -- numpy array of x coordinates of fit
    A -- quadratic coefficient (controls the curvature of the parabola)
x0 -- x coordinate of parabola extremum
C -- y coordinate of extremum
"""
return A*(x - x0)**2 + C
|
def genome_info(genome, info):
"""
return genome info for choosing representative
if ggKbase table provided - choose rep based on SCGs and genome length
- priority for most SCGs - extra SCGs, then largest genome
otherwise, based on largest genome
"""
try:
scg = info['#SCGs']
dups = info['#SCG duplicates']
length = info['genome size (bp)']
return [scg - dups, length, genome]
    except (KeyError, TypeError):
return [False, False, info['genome size (bp)'], genome]
|
def bearing_2_status(d):
""" Converts wind direction in degrees to a winddirection in letters
Used in wind devices
Args:
d (float): winddirection in degrees, 0 - 360
Returns:
        description of the wind direction, e.g. "NNE", "WNW", etc.
Ref:
Based on https://gist.github.com/RobertSudwarts/acf8df23a16afdb5837f
"""
dirs = ["N", "NNE", "NE", "ENE", "E", "ESE", "SE", "SSE",
"S", "SSW", "SW", "WSW", "W", "WNW", "NW", "NNW"]
count = len(dirs) # Number of entries in list
step = 360 / count # Wind direction is in steps of 22.5 degrees (360/16)
ix = int((d + (step / 2)) / step) # Calculate index in the list
return dirs[ix % count]
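# Illustrative usage (assumed inputs, not part of the original snippet):
assert bearing_2_status(0) == "N"
assert bearing_2_status(45) == "NE"
assert bearing_2_status(350) == "N"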
|
def B0_rel_diff(v0w, b0w, b1w, v0f, b0f, b1f, config_string, prefact, weight_b0, weight_b1):
"""
Returns the relative difference in the bulk modulus.
THE SIGNATURE OF THIS FUNCTION HAS BEEN CHOSEN TO MATCH THE ONE OF ALL THE OTHER FUNCTIONS
RETURNING A QUANTITY THAT IS USEFUL FOR COMPARISON, THIS SIMPLIFIES THE CODE LATER.
Even though several inputs are useless here.
"""
return prefact*2*(b0w-b0f)/(b0w+b0f)
|
def tr(s):
"""[If you need to transform a string representation of the output provide a function that takes a string as input and returns one]
Arguments:
s {[str]} -- [string representation of a ruamel config]
Returns:
[str] -- [string that has had all new lines replaced]
"""
return s.replace("\n", "<\n")
|
def from_cps(codepoints: str) -> str:
"""The unicode datafiles often contain entries in this format, which is super useful for
copy-pasting reference test cases.
"""
return ''.join(chr(int(cp, 16)) for cp in codepoints.split())
|
def score_to_rating_string(score):
"""
Convert score to rating
"""
if score < 1:
rating = "Terrible"
elif score < 2:
rating = "Bad"
elif score < 3:
rating = "OK"
elif score < 4:
rating = "Good"
else:
rating = "Excellent"
return rating
|
def transposed_lists(list_of_lists, default=None):
"""Like `numpy.transposed`, but allows uneven row lengths
Uneven lengths will affect the order of the elements in the rows of the transposed lists
>>> transposed_lists([[1, 2], [3, 4, 5], [6]])
[[1, 3, 6], [2, 4], [5]]
>>> transposed_lists(transposed_lists([[], [1, 2, 3], [4]]))
[[1, 2, 3], [4]]
>>> l = transposed_lists([range(4),[4,5]])
>>> l
[[0, 4], [1, 5], [2], [3]]
>>> transposed_lists(l)
[[0, 1, 2, 3], [4, 5]]
"""
    if default is None or default == [] or default == tuple():
        default = []
    elif default == 'None':
        default = [None]
    else:
        default = [default]
N = len(list_of_lists)
Ms = [len(row) for row in list_of_lists]
M = max(Ms)
ans = []
for j in range(M):
ans += [[]]
for i in range(N):
if j < Ms[i]:
ans[-1] += [list_of_lists[i][j]]
else:
ans[-1] += list(default)
return ans
|
def build_complement(a):
"""
    Return the complementary DNA strand for the input sequence.
    :param a: str, DNA sequence input by the user
    :return: ans: str, the complementary sequence
"""
ans = ''
for i in range(len(a)):
ch = a[i]
if ch == 'A':
ans += 'T'
if ch == 'T':
ans += 'A'
if ch == 'G':
ans += 'C'
if ch == 'C':
ans += 'G'
return ans
|
def _tf_polynomial_to_string(coeffs, var='s'):
"""Convert a transfer function polynomial to a string"""
thestr = "0"
# Compute the number of coefficients
N = len(coeffs) - 1
for k in range(len(coeffs)):
coefstr = '%.4g' % abs(coeffs[k])
power = (N - k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == '1':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == '1':
newstr = '%s^%d' % (var, power,)
else:
newstr = '%s %s^%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if coeffs[k] < 0:
thestr = "%s - %s" % (thestr, newstr)
else:
thestr = "%s + %s" % (thestr, newstr)
elif (k == 0) and (newstr != '') and (coeffs[k] < 0):
thestr = "-%s" % (newstr,)
else:
thestr = newstr
return thestr
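# Illustrative usage (a sketch; the coefficient lists are assumptions).
# Coefficients are ordered from the highest power down, as in numpy.poly1d.
assert _tf_polynomial_to_string([1, 2, 3]) == "s^2 + 2 s + 3"
assert _tf_polynomial_to_string([1, 0, -4], var='z') == "z^2 - 4"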
|
def getPrimeList(n):
""" Returns a list of primes < n """
sieve = [True] * n
for i in range(3,int(n**0.5)+1,2):
if sieve[i]:
sieve[i*i::2*i]=[False]*((n-i*i-1)//(2*i)+1)
return [2] + [i for i in range(3,n,2) if sieve[i]]
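# Illustrative usage (not part of the original snippet): primes below 20.
assert getPrimeList(20) == [2, 3, 5, 7, 11, 13, 17, 19]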
|
def lucas(n):
""" compute the nth Lucas number """
if n < 0:
return None
if n == 0:
return 2
elif n == 1:
return 1
else:
return lucas(n - 1) + lucas(n - 2)
|
def mode_dict(lst): # It returns a dict that contains frequencies of the input.
"""
>>> mode_dict([3,3,4,4,5,5,6])
{3: 2, 4: 2, 5: 2, 6: 1}
>>> mode_dict(["x","y","x"])
{'x': 2, 'y': 1}
"""
    unique_values = {i: 0 for i in set(lst)}
    for i in lst:
        unique_values[i] += 1
return unique_values
|
def is_comment(txt_row):
""" Tries to determine if the current line of text is a comment line.
Args:
txt_row (string): text line to check.
Returns:
True when the text line is considered a comment line, False if not.
"""
if (len(txt_row) < 1):
return True
if ((txt_row[0] == '(') and (txt_row[len(txt_row) - 1] == ')')):
return True
else:
return False
|
def asBoolean(s):
"""Convert a string value to a boolean value."""
ss = str(s).lower()
if ss in ('yes', 'true', 'on'):
return True
elif ss in ('no', 'false', 'off'):
return False
else:
raise ValueError("not a valid boolean value: " + repr(s))
|
def degrees(d):
"""Convert degrees to radians.
Arguments:
d -- Angle in degrees.
"""
import math
return d / 180 * math.pi
|
def move1(state,b1,dest):
"""
Generate subtasks to get b1 and put it at dest.
"""
return [('get', b1), ('put', b1,dest)]
|
def interpret_line(line, splitter=','):
"""
Split text into arguments and parse each of them to an appropriate format (int, float or string)
Args:
line: text line
splitter: value to split by
Returns: list of arguments
"""
parsed = list()
elms = line.split(splitter)
for elm in elms:
try:
# try int
el = int(elm)
except ValueError as ex1:
try:
# try float
el = float(elm)
except ValueError as ex2:
# otherwise just leave it as string
el = elm.strip()
parsed.append(el)
return parsed
|
def _get_spaces_array_helper(n, memo):
"""Recursively determine number of spaces at each height.
:param n: The height to find num spaces for.
:param memo: Memoization table. But that dynamic programming tho!
"""
# Base case.
if n == 1:
return memo[n-1]
else:
# Check if memo already has value.
if memo[n-1] != 0:
return memo[n-1]
else:
# Damn, gotta do some work.
prev = _get_spaces_array_helper(n-1, memo)
next_val = 2 * prev + 1
memo[n-1] = next_val
return next_val
|
def format_version(v):
"""
Return a PEP 440-compliant version number from VERSION.
Using "major.minor.micro" versioning.
"""
version = f'{v[0]}.{v[1]}.{v[2]}'
return version
|
def key_ratio(map):
"""
Determine the ratio of major keys to minor keys.
    A ratio of 1 means that each major key has exactly one minor key.
"""
map_keys = [len(map[key].keys()) for key in map.keys()]
if len(map_keys) == 0:
return 0
return sum(map_keys) / float(len(map_keys))
|
def exist_dict_in_list(d, ls):
"""Check if an identical dictionary exists in the list."""
return any([d == i for i in ls])
|
def tag_ocds(mapping, bind, value, args):
"""
Makes tags array according to OCDS specification
"""
def check_existing(data):
return any((
item.get('id')
for item in data
))
default = args.get('default')
tags = [default] if default else []
if 'contracts' in value and\
check_existing(value.get('contracts', [])):
tags.append('contract')
if 'awards' in value and\
check_existing(value.get('awards', [])):
tags.append('award')
value.setdefault('tags', [])
value['tags'].extend(tags)
return value
|
def case_folder_to_name(folder_name):
"""Convert name of case folder to case name."""
return folder_name.replace('_', '/')
|
def get_closest_point(x0, y0, a, b, c):
"""
Returns closest point from x0,y0 to
ax + by + c = 0
"""
x = (b * (b * x0 - a * y0) - a * c) / (a ** 2 + b ** 2)
y = (a * (-b * x0 + a * y0) - b * c) / (a ** 2 + b ** 2)
return x, y
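# Worked example (illustrative): projecting the origin onto the line
# x + y - 2 = 0 (a=1, b=1, c=-2) gives the point (1, 1).
assert get_closest_point(0, 0, 1, 1, -2) == (1.0, 1.0)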
|
def return_text_by_bounds(options, num, overall):
"""Find to which group value belongs"""
keys = list(options.keys())
key = keys[1]
if num < overall[0] - overall[1]:
key = keys[0]
elif num > overall[0] + overall[1]:
key = keys[2]
return options[key]
|
def insert_to_table(db, table, q):
""" insert record into table or update an existing record if given query """
if 'record_id' in q:
def update_record(record):
if record[q['record_id']['key']] == q['record_id']['value']:
for key in record:
if key in q['value']:
record[key] = q['value'][key]
return record
db[table] = list(map(update_record, db[table]))
return db
else:
db[table].append(q)
return db
|
def ami_version(ami_info):
"""
Finds source AMI version AMI tag.
Parameters
----------
ami_info : dict
AMI information.
Returns
-------
string
Version of source AMI.
"""
for tag in ami_info['Tags']:
if tag['Key'] == 'Version':
return tag['Value']
|
def as_chunks(l, num):
"""
:param list l:
:param int num: Size of split
:return: Split list
:rtype: list
"""
chunks = []
for i in range(0, len(l), num):
chunks.append(l[i:i + num])
return chunks
|
def parseValue(value, search: bool = False) -> str:
"""
Parses a value into a valid value to insert or search in a sqlite database.
    When the search flag is set, '%' wildcards are added to the start and end of a string value
    so that any string matching the pattern is found.
Examples for search=False:
- 'string' -> '"string"'
- 4 -> '4'
Examples for search=True:
- 'string' -> '"%string%"'
- 4 -> '4'
    :param search: whether the value should be converted for search or other database tasks.
:param value: to convert into a valid value for sqlite databases.
:return: valid value for sqlite databases.
"""
if value is None:
value = ""
    if isinstance(value, str):
search_str = "%" if search else ""
return '"' + search_str + value + search_str + '"'
else:
return str(value)
|
def sound_to_ts_K(sound, eq_type='gill_manual', e_a=None):
"""
    Convert speed of sound to sonic temperature (Ts) in Kelvin.
"""
if eq_type=="gill_manual":
return (sound ** 2) / 403
if eq_type=="s_audio":
return ((sound / 331.3) ** 2 - 1) * 273.15 + 273.15
else:
return None
|
def pearson_correlation_2(x,y):
"""incase pandas library is not allowed"""
xy = []
x2 = []
y2 = []
for i,j in zip(x,y):
xy.append(i*j)
x2.append(pow(i,2))
y2.append(pow(j,2))
n = len(x)
sxy = sum(xy)
sx = sum(x)
sy = sum(y)
sx2 = sum(x2)
sy2 = sum(y2)
top = (n*sxy) - (sx*sy)
mid = ((n*sx2)-pow(sx,2))*((n*sy2)-pow(sy,2))
bot = pow(mid,0.5)
return 1.0*top/bot
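# Illustrative usage (sketch): a perfectly linear relationship gives r = 1.
assert abs(pearson_correlation_2([1, 2, 3], [2, 4, 6]) - 1.0) < 1e-9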
|
def _prefix_linear(s, op, false, true):
"""Apply associative binary operator linearly.
@param s: container
@param op: operator
@param false, true: values if treating `op` as disjunction
"""
if not s:
return false
u = s[0]
for v in s[1:]:
# controlling value ?
if u == true:
break
if v == true:
u = true
break
u = op + ' ' + u + ' ' + v
return u
|
def merge(l, r):
""" Merge sort helper
@type l: list
@type r: list
"""
merged = []
while len(l) and len(r):
if l[0] <= r[0]:
merged.append(l[0])
l.pop(0)
else:
merged.append(r[0])
r.pop(0)
return merged + l + r
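# Illustrative usage (sketch, not from the original source): merging two
# already-sorted lists; note that the inputs are consumed by pop(0).
assert merge([1, 3, 5], [2, 4]) == [1, 2, 3, 4, 5]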
|
def collatz(x):
"""
    collatz(x)
    Compute the Collatz sequence starting at x.
    Parameters
    ----------
    x: int
        Number at which the series starts.
    Returns
    ----------
    output: list
        List of integers forming the Collatz series.
"""
lista = list()
n = x
    while n != 1:
        lista.append(n)
        if n % 2 == 0:
            n = int(n/2)
        else:
            n = 3*n+1
    lista.append(1)
return lista
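# Illustrative usage (not part of the original snippet): the Collatz series
# starting at 6.
assert collatz(6) == [6, 3, 10, 5, 16, 8, 4, 2, 1]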
|
def dumb_factor(x, primeset):
""" If x can be factored over the primeset, return the
set of pairs (p_i, a_i) such that x is the product
of p_i to the power of a_i.
If not, return []
"""
factors = []
for p in primeset:
exponent = 0
while x % p == 0:
exponent = exponent + 1
x = x//p
if exponent > 0:
factors.append((p,exponent))
return factors if x == 1 else []
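# Illustrative usage (assumed inputs): 12 factors completely over {2, 3},
# while 14 does not, because 7 is missing from the prime set.
assert dumb_factor(12, [2, 3]) == [(2, 2), (3, 1)]
assert dumb_factor(14, [2, 3]) == []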
|
def find_arguments_region(edit, view, location):
"""Returns the start, end index of the selected argument list."""
end = location
start = location - 1
open_brackets = 0
while start >= 0:
if view.substr(start) == '(':
open_brackets -= 1
if open_brackets < 0:
start += 1
break
if view.substr(start) == ')':
open_brackets += 1
start -= 1
if start < 0:
return 0, 0
open_brackets = 0
while end < view.size():
if view.substr(end) == ')':
open_brackets -= 1
if open_brackets < 0:
break
if view.substr(end) == '(':
open_brackets += 1
end += 1
if end >= view.size():
return 0, 0
return start, end
|
def truncate_descriptions(requested_user_rooms):
"""
Cut descriptions if too long
"""
for i in range(0, len(requested_user_rooms)):
if len(requested_user_rooms[i]['description']) >= 85:
requested_user_rooms[i]['description'] = requested_user_rooms[i]['description'][0:85] + "..."
return requested_user_rooms
|
def is_loc_search_trace(conf_list, text):
"""Determine if text is location search that should be included."""
for index in range(len(conf_list)):
if text.find(conf_list[index].ident_text) != -1:
return True
return False
|
def subtraction(a, b):
"""subtraction: subtracts b from a, return result c"""
a = float(a)
b = float(b)
    c = a - b
return c
|
def translate_sequence(rna_sequence, genetic_code):
"""Translates a sequence of RNA into a sequence of amino acids.
Translates `rna_sequence` into string of amino acids, according to the
`genetic_code` given as a dict. Translation begins at the first position of
the `rna_sequence` and continues until the first stop codon is encountered
or the end of `rna_sequence` is reached.
If `rna_sequence` is less than 3 bases long, or starts with a stop codon,
an empty string is returned.
"""
amino = ''
if len(rna_sequence) < 3:
return(amino)
else:
sequence=rna_sequence.upper()
#As long as the sequence is at least three characters, will put the
#first three in one string and save sequence as the remaining characters
#then looks up value in genetic_code
while len(sequence) >= 3:
codon = sequence[0:3]
sequence = sequence[3:]
#if the codon is a stop codon, returns current sequence and exits
if genetic_code[codon] =='*':
return(amino)
else:
amino = amino + genetic_code[codon]
return(amino)
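# Illustrative usage (the tiny genetic_code mapping below is an assumption,
# not the full standard table): translation stops at the '*' stop codon.
_code = {'AUG': 'M', 'UUU': 'F', 'UAA': '*'}
assert translate_sequence('auguuuuaa', _code) == 'MF'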
|
def find_bucket_key(s3_path):
"""
This is a helper function that given an s3 path such that the path is of
the form: bucket/key
It will return the bucket and the key represented by the s3 path
"""
s3_components = s3_path.split('/')
bucket = s3_components[0]
s3_key = ""
if len(s3_components) > 1:
s3_key = '/'.join(s3_components[1:])
return bucket, s3_key
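# Illustrative usage (sketch): the first path component is the bucket,
# the remainder is the key.
assert find_bucket_key('my-bucket/photos/2020/cat.jpg') == ('my-bucket', 'photos/2020/cat.jpg')
assert find_bucket_key('my-bucket') == ('my-bucket', '')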
|
def create_headers(bearer_token):
"""Twitter function for auth bearer token
Args:
bearer_token (string): bearer token from twitter api
Returns:
headers
"""
headers = {"Authorization": "Bearer {}".format(bearer_token)}
return headers
|
def contained_name(name1, name2):
"""
    Compares two space-separated name strings and checks to see if all the names in one string also appear in the other
    Parameters
    ----------
    name1: str
        space-separated names to be compared to name2
    name2: str
        space-separated names to be compared to name1
    Returns
    -------
    integer
        1 if contained, else 0
    Example
    --------
    >>> name1 = 'JOHN JOHN DAVE JIM'
    >>> name2 = 'JOHN DAVE'
>>> contained_name(name1, name2)
1
"""
# Common elements
common_elements = 0
# Compare unique values in both lists
words1 = set(name1.split())
for word in set(name2.split()):
if word in words1:
common_elements += 1
# Minimum no. of elements
    min_elements = min(len(name1.split()), len(name2.split()))
# Contained Indicator
if min_elements > 1 and min_elements == common_elements:
contained = 1
else:
contained = 0
return contained
|
def _simplify_circuit_string(circuit_str):
"""
Simplify a string representation of a circuit.
The simplified string should evaluate to the same operation label tuple
as the original.
Parameters
----------
circuit_str : string
the string representation of a circuit to be simplified.
(e.g. "Gx{}", "Gy^1Gx")
Returns
-------
string
the simplified string representation.
"""
s = circuit_str.replace("{}", "")
s = s.replace("^1G", "G")
s = s.replace("^1(", "(")
s = s.replace("^1{", "{")
if s.endswith("^1"): s = s[:-2]
return s if len(s) > 0 else "{}"
|
def parse_custom_types(types):
"""
    Parses custom types format as sent through the service.
    :param types: custom types JSON
:type types: list
:return: custom types dictionary
:rtype: dict
"""
model_types = {}
for typ in types:
name = typ['name']
model_types[name] = [
x.lower().strip() for x in typ['keywords'].split(',')
]
return model_types
|
def contains_repeats(str):
"""Determines if a string has repeat letters.
This function is worst case O(n) because it
must at worst check each character in the
string. n is the length of the string.
Arguments:
str: the string to examine for
repeat letters
Returns:
True if the string contains at least
one repeat, False otherwise
"""
for i in range(1, len(str)):
if str[i] == str[i-1]:
return True
return False
|
def get_thumbs_up(review):
"""
Gets the total thumbs up given.
Parameters
----------------
    review : BeautifulSoup object
The review from metacritic as a BeautifulSoup object.
Returns
----------------
thumbs_up : string
Returns the number of total thumbs up given as a string.
        If the review has not been rated yet, the returned string is empty.
"""
try:
thumbs_up = review.find("span", class_="yes_count").text
if len(thumbs_up) != 0:
return thumbs_up
else:
return ""
except:
return ""
|
def pkg_key(pkg, type_):
"""
generate a package key for a given type string
Generates a compatible "package key" for a unsanitized package name ``pkg``
of a specific key ``type``. The package string is "cleaned" to replaces
select characters (such as dashes) with underscores and becomes uppercase.
For example, consider the package name "my-awesome-module". For a package
key "VERSION", the complete key for this package is
"MY_AWESOME_MODULE_VERSION".
Args:
pkg: the package name
type_: the package key type
Returns:
the completed package key
"""
clean = pkg
for c in [' ', '*', '-', '.', ':', '?', '|']:
clean = clean.replace(c, '_')
return '{}_{}'.format(clean.upper(), type_)
|
def copy3(v):
"""
    Return a copy of the first three components of v as a tuple.
"""
return (v[0], v[1], v[2])
|
def listOfSetCommands(sdict):
""" Returns a list of vclient command line options,
readily compiled for the setVclientData method.
"""
clist = []
cmds = []
for key, value in sdict.items():
cmds.append("%s %s" % (key, value))
clist.append('-c')
clist.append('"%s"' % ','.join(cmds))
return clist
|
def encode_helper(obj, to_builtin):
"""Encode an object into a two element dict using a function
that can convert it to a builtin data type.
"""
return dict(__class_name__=str(obj.__class__), __dumped_obj__=to_builtin(obj))
|
def returnZ(x, mu, std):
"""
Usage for sampling distribution of the difference between two means:
z = mgt2001.samp.returnZ(x, mgt2001.samp.returnE(mu1, mu2), mgt2001.samp.returnStd(std1, std2, n1, n2))
"""
return (x - mu) / std
|
def create_typo_table(typo_chars, score=3):
"""Create a dictionary mapping typographically similar characters to each
other.
Input is a list (of even length) of characters. The characters are
assummed to be listed in pairs, and their presence states that the first
character is typographically similar to the second.
Each pairing of similar characters is given the value (the 'score'
argument). The Jaro-Winkler routines use this score to increase the
amount of characters matching in their input strings.
An example:
>>> typo_chars = ['B', '8', '0', 'O', '0', 'Q', 'I', 'l']
>>> typo_table = create_typo_table(typo_chars, score=2)
This function returns a symmetrical dictionary of dictionaries:
>>> typo_table['B']['8'] == typo_table['8']['B']
True
... but note that this symmetry is not carried through in future
assignments:
>>> typo_table['8']['B'] = 5
>>> print_typo_table(typo_table)
08BIOQl
+-------+
0|....22.|0
8|..5....|8
B|.2.....|B
I|......2|I
O|2......|O
Q|2......|Q
l|...2...|l
+-------+
08BIOQl
"""
typo_table = {}
typo_chars = [s for s in typo_chars]
for i in range(len(typo_chars) // 2):
row_char = typo_chars[i*2]
col_char = typo_chars[i*2+1]
# Create the symmetric mappings from row_char to col_char,
# and vice versa.
for row, col in [(row_char, col_char), (col_char, row_char)]:
try:
row_dict = typo_table[row]
except KeyError:
typo_table[row] = row_dict = {}
if col in row_dict:
msg = u"Redundant entry (%s) in typo_chars." % col
raise ValueError(msg)
row_dict[col] = score
return typo_table
|
def has_abba(code):
"""Checks for existence of a 4-letter palindromic substring within `code`
The palindromic substring must contain 2 unique characters
"""
palindrome = None
for i in range(len(code) - 4 + 1):
# substring = code[i:i + 4]
if code[i] == code[i + 3] and code[i + 1] == code[i + 2]:
palindrome = code[i:i + 4]
break
result = palindrome and len(set([c for c in palindrome])) == 2
return result
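# Illustrative usage (not part of the original snippet): 'abba' qualifies,
# but 'aaaa' does not because it uses only one unique character.
assert has_abba('xyabba')
assert not has_abba('aaaa')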
|
def section_type(jats_content, section_map):
"""determine the section type of the jats_content looking at the section_map"""
content_section_type = None
for section_type, section_match in list(section_map.items()):
if jats_content.startswith(section_match):
content_section_type = section_type
return content_section_type
|
def get_bounding_box_volume(bb):
"""
    :param bb: bounding box given as a pair of (x, y, z) corner points (min corner, max corner)
    :return: the volume of the box
"""
width = bb[1][0] - bb[0][0]
depth = bb[1][1] - bb[0][1]
height = bb[1][2] - bb[0][2]
return width * depth * height
|
def list_response(response_list, cursor=None, more=False, total_count=None):
"""Creates response with list of items and also meta data used for pagination
Args : response_list (list) : list of items to be in response
cursor (Cursor, optional) : ndb query cursor
more (bool, optional) : whether there's more items in terms of pagination
total_count(int, optional): Total number of items
Returns : (dict) : the response to be serialized and sent to client
"""
return { 'list' : response_list
, 'meta' : { 'nextCursor': cursor.urlsafe() if cursor else ''
, 'more' : more
, 'totalCount': total_count
} }
|
def cls_token(idx):
"""
Function helps in renaming cls_token weights
"""
token = []
token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
return token
|
def format_value(value):
"""
Convert a list into a comma separated string, for displaying
select multiple values in emails.
"""
if isinstance(value, list):
value = ", ".join([v.strip() for v in value])
return value
|
def dunderkey(*args):
"""Produces a nested key from multiple args separated by double
underscore
>>> dunderkey('a', 'b', 'c')
    'a__b__c'
:param *args : *String
:rtype : String
"""
return '__'.join(args)
|
def contain_subset(actual, expected):
"""Recursively check if actual collection contains an expected subset.
This simulates the containSubset object properties matcher for Chai.
"""
if expected == actual:
return True
if isinstance(expected, list):
if not isinstance(actual, list):
return False
return all(any(contain_subset(actual_value, expected_value)
for actual_value in actual) for expected_value in expected)
if not isinstance(expected, dict):
return False
if not isinstance(actual, dict):
return False
for key, expected_value in expected.items():
try:
actual_value = actual[key]
except KeyError:
return False
if callable(expected_value):
try:
if not expected_value(actual_value):
return False
except TypeError:
if not expected_value():
return False
elif not contain_subset(actual_value, expected_value):
return False
return True
|
def keys_exist(element: dict, *keys):
"""
Check if *keys (nested) exists in `element` (dict).
"""
if not isinstance(element, dict):
        raise AttributeError('keys_exist() expects dict as first argument.')
if len(keys) == 0:
raise AttributeError(
            'keys_exist() expects at least two arguments, one given.')
_element = element
for key in keys:
try:
_element = _element[key]
except KeyError:
return False
return True
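# Illustrative usage (the nested dict below is an assumption):
_cfg = {'server': {'port': 8080}}
assert keys_exist(_cfg, 'server', 'port') is True
assert keys_exist(_cfg, 'server', 'host') is False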
|
def by_bag(bag, tiddlers):
"""
    Return those tiddlers whose bag is `bag`.
"""
return [tiddler for tiddler in tiddlers if tiddler.bag == bag]
|
def str2atom(a):
""" Helper function to parse atom strings given on the command line:
resid, resname/resid, chain/resname/resid, resname/resid/atom,
chain/resname/resid/atom, chain//resid, chain/resname/atom """
a = a.split("/")
if len(a) == 1: # Only a residue number:
return (None, None, int(a[0]), None)
if len(a) == 2: # Residue name and number (CYS/123):
return (None, a[0], int(a[1]), None)
if len(a) == 3:
if a[2].isdigit(): # Chain, residue name, residue number
return (None, a[1], int(a[2]), a[0])
else: # Residue name, residue number, atom name
return (a[2], a[0], int(a[1]), None)
return (a[3], a[1], int(a[2]), a[0])
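# Illustrative usage (assumed command-line strings); the returned tuple is
# (atom, resname, resid, chain).
assert str2atom("123") == (None, None, 123, None)
assert str2atom("A/CYS/123/SG") == ("SG", "CYS", 123, "A")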
|
def largest_prime_factor(n: int) -> int:
"""
Returns the largest prime factor of n
"""
i = 2
while i * i <= n:
if n % i:
i += 1
else:
n //= i
return n
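# Illustrative usage (not part of the original snippet):
# 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.
assert largest_prime_factor(13195) == 29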
|
def convertArr(arr):
"""convert string array into an integer array.
Input: arr(list)
Output: result(list)
"""
result = []
for i, val in enumerate(arr):
result.append([])
for j in val:
result[i].append(int(j))
return result
|
def clear_object_store(
securityOrigin: str, databaseName: str, objectStoreName: str
) -> dict:
"""Clears all entries from an object store.
Parameters
----------
securityOrigin: str
Security origin.
databaseName: str
Database name.
objectStoreName: str
Object store name.
"""
return {
"method": "IndexedDB.clearObjectStore",
"params": {
"securityOrigin": securityOrigin,
"databaseName": databaseName,
"objectStoreName": objectStoreName,
},
}
|
def last(iterator, default=None):
"""Return last member of an `iterator`
Example:
>>> def it():
... yield 1
... yield 2
... yield 3
...
>>> last(it())
3
"""
last = default
for member in iterator:
last = member
return last
|
def organize_combinations(attribute_names, attribute_combinations):
"""Organise the generated combinations into list of dicts.
Input:
attribute_name: ["latency", "reliability"]
attribute_combinations: [[1,99.99], [2,99.99], [3,99.99], [1,99.9], [2,99.9], [3,99.9]]
Output:
combinations = [{'latency': 1, 'reliability': 99.99},
{'latency': 2, 'reliability': 99.99},
{'latency': 3, 'reliability': 99.99},
{'latency': 1, 'reliability': 99.9},
{'latency': 2, 'reliability': 99.9},
{'latency': 3, 'reliability': 99.9}
]
"""
combinations = []
for combination in attribute_combinations:
comb = {}
for (name, value) in zip(attribute_names, combination):
comb[name] = value
combinations.append(comb)
return combinations
|
def is_input_port(prog, inp, tlsa):
"""Set the Tlsa object's port data if the input is 'port-like'.
Args:
prog (State): not changed.
inp (str): the input to check if it is a port number.
tlsa (Tlsa): the Tlsa object to set with the port number if 'inp'
is an integer.
Returns:
bool: 'True' if the port data in 'tlsa' was set to 'inp', 'False'
if not.
"""
try:
port_num = int(inp)
        if port_num < 1 or port_num > 65535:
return False
except ValueError:
return False
tlsa.port = inp
return True
|
def tail(array):
"""Return all but the first element of `array`.
Args:
array (list): List to process.
Returns:
list: Rest of the list.
Example:
>>> tail([1, 2, 3, 4])
[2, 3, 4]
.. versionadded:: 1.0.0
.. versionchanged:: 4.0.0
Renamed from ``rest`` to ``tail``.
"""
return array[1:]
|
def merge_dict(*args):
""" Merges any number of dictionaries into a single dictionary.
# Notes
In Python 3.5+, you can just do this:
```python
r = {**x, **y}
```
But if you want a single expression in Python 3.4 and below:
```python
r = merge_dict(x, y)
```
"""
result = {}
for x in args:
result.update(x)
return result
|
def getNumFavorited(msg):
"""Counts the number of favorites the mssage received."""
num_favorited = msg['favorited_by']
return len(num_favorited)
|
def get_onet_occupation(job_posting):
"""Retrieve the occupation from the job posting
First checks the custom 'onet_soc_code' key,
then the standard 'occupationalCategory' key,
and falls back to the unknown occupation
"""
return job_posting.get('onet_soc_code', job_posting.get('occupationalCategory', '99-9999.00'))
|
def fill_na(symbols_map, symbols_list):
"""Fill symbol map with 'N/A' for unmapped symbols."""
filled_map = symbols_map.copy()
for s in symbols_list:
if s not in filled_map:
filled_map[s] = 'N/A'
return filled_map
|
def helper_int(val):
"""
Helper function for use with `dict_reader_as_geojson`. Returns `None` if
the input value is an empty string or `None`, otherwise the input value is
cast to an int and returned.
"""
if val is None or val == '':
return None
else:
return int(val)
|
def dataset_word_frequencies(nodes):
"""
Get frequency of words from an extracted dataset.
"""
freqs = {}
for node in nodes.values():
for t in node.tokens:
freqs[t.lower()] = freqs.get(t.lower(), 0) + 1
return freqs
|
def _preppin_nums(in_list):
"""Function to getting in_list ready for sorting."""
max_size = 0
output = []
for item in in_list:
breakdown = [int(d) for d in str(item)]
output.append(breakdown)
if len(breakdown) > max_size:
max_size = len(breakdown)
return [max_size, output]
|
def cuda_tpb_bpg_1d(x, TPB = 256):
"""
Get the needed blocks per grid for a 1D CUDA grid.
Parameters :
------------
x : int
Total number of threads
TPB : int
Threads per block
Returns :
---------
BPG : int
Number of blocks per grid
TPB : int
Threads per block.
"""
# Calculates the needed blocks per grid
BPG = int(x/TPB + 1)
return BPG, TPB
|
def try_convert_comparision_token(token):
"""
This method tries to convert the given token to be a desired
    comparison token which can be accepted by the Pyomo model or
FSL file. Return None if failure.
Args:
token (str): the given token to be converted.
Returns:
        Return the converted comparison token if the conversion
succeeds. Otherwise, return None.
"""
COMPARISION_TOKENS = {
'=': '==',
}
return COMPARISION_TOKENS.get(token, None)
|
def is_valid_file(ext, argument):
""" Checks if file format is compatible """
formats = {
'input_pdb_path': ['pdb'],
'output_pockets_zip': ['zip'],
'output_summary': ['json'],
'input_pockets_zip': ['zip'],
'input_summary': ['json'],
'output_filter_pockets_zip': ['zip'],
'output_pocket_pdb': ['pdb'],
'output_pocket_pqr': ['pqr']
}
return ext in formats[argument]
|
def get_routes_len(routes_list):
"""return list of length of routes"""
routes_length = []
    for this_route in routes_list:
routes_length.append(this_route.get_length())
return routes_length
|
def has_body(headers):
"""
:param headers: A dict-like object.
"""
return 'content-length' in headers
|
def get_crater_tuple(line_str):
"""Build crater tuple from line_str."""
line_list=[]
for item in line_str.split('\t'):
line_list.append(item)
try:
t=(line_list[0],line_list[1],float(line_list[2]),float(line_list[3]),float(line_list[4]))
except ValueError:
t=("","",0,0,0)
return(t)
|
def apply_to_dict_recursively(d, f):
"""Recursively apply function to a document
This modifies the dict in place and returns it.
Parameters
----------
d: dict
e.g. event_model Document
f: function
any func to be performed on d recursively
"""
for key, val in d.items():
if hasattr(val, 'items'):
d[key] = apply_to_dict_recursively(d=val, f=f)
d[key] = f(val)
return d
|