content
stringlengths 42
6.51k
|
---|
def cur_mkt_score_a(a, ds, fs):
    """
    Given a bunch of participants' open-interests and fees,
    calculate the total current market score.

    Each participant contributes d**(1-a) * f**a; the score is the sum.
    """
    return sum((oi ** (1 - a)) * (fee ** a) for oi, fee in zip(ds, fs))
|
def topic_name_to_collection_name(topic_name):
    """
    Converts the fully qualified name of a topic into a legal mongodb collection name.
    """
    # Turn every separator into an underscore, then drop the leading one.
    sanitized = topic_name.replace("/", "_")
    return sanitized[1:]
|
def image_id_to_prefix(image_id):
    """
    `AOI_3_Paris_img585` -> `AOI_3_Paris`
    """
    # Everything before the 'img' marker, minus the trailing separator.
    head = image_id.partition('img')[0]
    return head[:-1]
|
def get_keys_from_dict_list(elements):
    """Find all distinct keys used in a list of dictionaries.

    Order of first appearance is preserved, matching the original
    append-if-missing behaviour but without the O(n) list membership
    test per key (dict insertion is O(1) and keeps insertion order).

    :param elements: iterable of dicts
    :return: list of unique keys in first-seen order
    """
    seen = {}
    for element in elements:
        for key in element:
            seen[key] = None
    return list(seen)
|
def percentof(value1, value2):
    """Return value1 as a percentage of value2.

    Falsy inputs fall back to 0 (numerator) and 1 (denominator), so a
    zero/None denominator never divides by zero.
    """
    numerator = value1 if value1 else 0
    denominator = value2 if value2 else 1
    return (numerator / denominator) * 100
|
def mue(X):
    """Mean molecular mass per electron, given hydrogen mass fraction X."""
    return 2. / (1 + X)
|
def _Flatten(values):
"""Returns a simple list without sub-lists."""
ret = []
for entry in values:
if isinstance(entry, list):
ret.extend(_Flatten(entry))
else:
ret.append(entry)
return ret
|
def setbits(val, base, length, new):
    """ Set particular bits, used to update particular fields in MSR values"""
    # Clear the `length`-bit field at `base`, then add the new value in place.
    field_mask = ((1 << length) - 1) << base
    return (val & ~field_mask) + (new << base)
|
def rk4_alt_step_func(func, t, dt, y, k1=None):
    """Smaller error with slightly more compute.

    One step of the RK4 "3/8 rule" variant, applied elementwise to a tuple
    state.  Returns the increment dt * (k1 + 3*k2 + 3*k3 + k4) / 8 (NOT the
    new state); the caller adds it to ``y``.

    :param func: right-hand side, ``func(t, y) -> tuple`` of slopes
    :param t: current time
    :param dt: step size
    :param y: current state as a tuple of components
    :param k1: optional precomputed ``func(t, y)`` to reuse
    """
    if k1 is None: k1 = func(t, y)
    # Intermediate stages at t + dt/3 and t + 2*dt/3 (3/8-rule tableau).
    k2 = func(t + dt / 3, tuple(y_ + dt * k1_ / 3 for y_, k1_ in zip(y, k1)))
    k3 = func(t + dt * 2 / 3, tuple(y_ + dt * (k1_ / -3 + k2_) for y_, k1_, k2_ in zip(y, k1, k2)))
    k4 = func(t + dt, tuple(y_ + dt * (k1_ - k2_ + k3_) for y_, k1_, k2_, k3_ in zip(y, k1, k2, k3)))
    # Combine stages with weights (1, 3, 3, 1) / 8.
    return tuple((k1_ + 3 * k2_ + 3 * k3_ + k4_) * (dt / 8) for k1_, k2_, k3_, k4_ in zip(k1, k2, k3, k4))
|
def create_user(p_dict):
    """Creates a user: fills in defaults in-place and strips the password.

    Mutates and returns ``p_dict``.  Raises KeyError if "password" is absent.
    """
    p_dict["active"] = True
    # Permission flags default to False unless the caller supplied them.
    for flag in ("site_admin", "site_manager", "site_spectator"):
        p_dict.setdefault(flag, False)
    p_dict["created_at"] = "2015-05-23"
    p_dict["deleted_at"] = None
    del p_dict["password"]
    return p_dict
|
def extract_relation(res,resource_type):
    """
    this function takes a unique resource and create the entries for relation.csv
    Logic:
    Profile:
    Bound (Req) = element.binding[strength = required].valueset
    Bound (Ext) = element.binding[strength = extensible].valueset
    Bound (Pref) = element.binding[strength = preferred].valueset
    Bound (Exam) = element.binding[strength = example].valueset
    Extension = element.type[code = extension].profile
    ValueSet:
    valuesFrom = compose.include.system
    valuesFrom = expansion.contains.system
    includes = compose.include.valueSet

    :param res: parsed FHIR resource (dict-like JSON)
    :param resource_type: "Profile", "Data type" or "ValueSet"; anything else
        yields an empty result
    :return: list of {"source", "target_url", "relation"} dicts
    """
    dict_relat=[]
    # Map FHIR binding strengths to the relation labels used in relation.csv.
    relation_type_data={"required":"Bound_Req","extensible":"Bound_Ext","preferred":"Bound_Pref","example":"Bound_Exam"}
    # if res.get("id")=="be-ext-laterality":
    #     print(resource_type,res.get("id"))
    if resource_type in ["Profile","Data type"]:
        # Scan both the snapshot and the differential element lists; the two
        # loops below are intentionally parallel.
        elements=res.get('snapshot', {}).get('element',[] )
        for element in elements:
            binding=element.get("binding",{}).get("strength")
            value=element.get("binding",{}).get("valueSet")
            if binding:
                # NOTE(review): assumes a bound element always carries a
                # binding.valueSet — a strength without one raises here.
                # print(value)
                stripped = value.split("|", 1)[0] #remove pipes
                # if res.get("id")=="be-allergyintolerance":
                #     print(stripped)
                #print(resource_type,"binding -> ",binding,value)
                dict_relat.append({"source":res.get("id"),"target_url":stripped,"relation":relation_type_data[binding]})
            for l in element.get("type",[]):
                if l.get("code",{})=="Extension":
                    #pass
                    if l.get("profile"):
                        # Only the first profile URL is recorded.
                        dict_relat.append({"source":res.get("id"),"target_url":l.get("profile")[0],"relation":"extension"})
                for target_profile in l.get("targetProfile",[]):
                    dict_relat.append({"source":res.get("id"),"target_url":target_profile,"relation":"references"})
        # print()
        elements=res.get('differential', {}).get('element', [])
        for element in elements:
            binding=element.get("binding",{}).get("strength")
            value=element.get("binding",{}).get("valueSet")
            if binding:
                # print(res.get("id"),value)
                # print(value,res.get("id"))
                stripped = value.split("|", 1)[0] #remove pipes
                #print(resource_type,"binding -> ",binding,value)
                dict_relat.append({"source":res.get("id"),"target_url":stripped,"relation":relation_type_data[binding]})
            for l in element.get("type",[]):
                if l.get("code",{})=="Extension":
                    #pass
                    if l.get("profile"):
                        # print(l.get("profile")[0],res.get("id"))
                        dict_relat.append({"source":res.get("id"),"target_url":l.get("profile")[0],"relation":"extension"})
                for target_profile in l.get("targetProfile",[]):
                    dict_relat.append({"source":res.get("id"),"target_url":target_profile,"relation":"references"})
            # print()
    elif resource_type=="ValueSet":
        for s in res.get("compose",{}).get("include",[]):
            #print(s)
            if s.get("system"):
                dict_relat.append({"source":res.get("id"),"target_url":s.get("system"),"relation":"valuesFrom"})
            if s.get("valueSet"):
                # print(s.get("valueSet"))
                # Only the first included valueSet URL is recorded.
                dict_relat.append({"source":res.get("id"),"target_url":s.get("valueSet")[0],"relation":"includes"})
    #print(res.get("expansion",{}).get("contains",[]))
    return dict_relat
|
def repair_ids(shared_state, original_id_to_label, label_to_id):
    """Given an optimizer state and some new ids for the parameters,
    creates a new state with the new ids linked to the same tensors.
    Useful when using an optimizer state used on some old model on a
    new one.
    """
    # Old id -> label -> new id; state objects are shared, not copied.
    return {
        label_to_id[original_id_to_label[old_id]]: state
        for old_id, state in shared_state.items()
    }
|
def five_bet_labouchere_bankroll(target):
    """
    Pick a target $-amount you want to win. and then make a sequence of bets that follows the ratio output below.
    Each wager must be at odds such that the profit (payout) is the sum of the first and last number in your list.
    If bet 1 wins 20 (.1 + .1) then cross off the first and last numbers.
    If you lose add your stake to the list below.
    :param target: Integer. How much do you want to win?
    :return: List. Sequence of $ amount bets to make.
    """
    # Fixed 5-bet Labouchere split: 10% / 20% / 40% / 20% / 10% of target.
    return [target * ratio for ratio in (.1, .2, .4, .2, .1)]
|
def context_string(context=None, with_comma=True):
###############################################################################
    """Return a context string if <context> is not None otherwise, return
    an empty string.
    if with_comma is True, prepend string with ', at ' or ', in '.
    >>> context_string()
    ''
    >>> context_string(with_comma=True)
    ''
    >>> context_string(context= ParseContext(linenum=32, filename="source.F90"), with_comma=False)
    'source.F90:33'
    >>> context_string(context= ParseContext(linenum=32, filename="source.F90"), with_comma=True)
    ', at source.F90:33'
    >>> context_string(context= ParseContext(linenum=32, filename="source.F90"))
    ', at source.F90:33'
    >>> context_string(context= ParseContext(filename="source.F90"), with_comma=False)
    'source.F90'
    >>> context_string(context= ParseContext(filename="source.F90"), with_comma=True)
    ', in source.F90'
    >>> context_string(context= ParseContext(filename="source.F90"))
    ', in source.F90'
    """
    # NOTE(review): the doctests build ParseContext(linenum=...) while the
    # code reads context.line_num — presumably the attribute name differs
    # from the constructor keyword; confirm against ParseContext.
    if context is None:
        cstr = ""
    elif with_comma:
        # A negative line_num appears to mean "no line info": file only.
        if context.line_num < 0:
            cstr = ", in {}".format(context)
        else:
            cstr = ", at {}".format(context)
        # End if
    else:
        # Bare context string, no comma prefix.
        cstr = "{}".format(context)
    # End if
    return cstr
|
def tool_dependency_is_orphan_in_tool_shed( type, name, version, tools ):
    """
    Determine if the combination of the received type, name and version is defined in the <requirement> tag for at least one tool in the received list of tools.
    If not, the tool dependency defined by the combination is considered an orphan in it's repository in the tool shed.
    """
    if not tools:
        return True
    if type == 'package' and name and version:
        # A package requirement must match on name, version and type.
        for tool_dict in tools:
            for requirement_dict in tool_dict.get( 'requirements', [] ):
                if ( requirement_dict.get( 'name', None ) == name
                        and requirement_dict.get( 'version', None ) == version
                        and requirement_dict.get( 'type', None ) == type ):
                    return False
    elif type == 'set_environment' and name:
        # An environment requirement matches on name and type only.
        for tool_dict in tools:
            for requirement_dict in tool_dict.get( 'requirements', [] ):
                if ( requirement_dict.get( 'name', None ) == name
                        and requirement_dict.get( 'type', None ) == type ):
                    return False
    return True
|
def delist(l, result=None):
    """
    returns tuple of lines from the recursive list of tuples (item, [sub-item-tuples])
    """
    result = result if result else []
    for item, children in l:
        # Depth-first: emit the item, then everything beneath it.
        result.append(item)
        delist(children, result=result)
    return tuple(result)
|
def pad_or_truncate_list(some_list, target_len=21, pad_value=""):
"""Pads (with a value) or truncates list to a target length.
Parameters
----------
some_list: list
Input list to pad or truncate.
target_len: int
Length of the desired output list.
pad_value: anything
Value to pad with if the input length needs to be extended.
Returns
-------
list
Padded or truncated list of target length.
"""
return some_list[:target_len] + [pad_value] * (target_len - len(some_list))
|
def merge_to_flat_list(_list):
    """ [(1,2), (3,4)] -> [1, 2, 3, 4]"""
    return [element for item in _list for element in item]
|
def translit(locallangstring):
    """
    Transliterate Cyrillic characters to their Latin equivalents.

    Characters without a mapping (Latin letters, digits, punctuation)
    are passed through unchanged.

    :param locallangstring: original string
    :return: transliteration of locallangstring
    """
    conversion = {
        u'\u0410': 'A',
        u'\u0430': 'a',
        u'\u0411': 'B',
        u'\u0431': 'b',
        u'\u0412': 'V',
        u'\u0432': 'v',
        u'\u0413': 'G',
        u'\u0433': 'g',
        u'\u0414': 'D',
        u'\u0434': 'd',
        u'\u0415': 'E',
        u'\u0435': 'e',
        u'\u0401': 'Yo',
        u'\u0451': 'yo',
        u'\u0416': 'Zh',
        u'\u0436': 'zh',
        u'\u0417': 'Z',
        u'\u0437': 'z',
        u'\u0418': 'I',
        u'\u0438': 'i',
        u'\u0419': 'Y',
        u'\u0439': 'y',
        u'\u041a': 'K',
        u'\u043a': 'k',
        u'\u041b': 'L',
        u'\u043b': 'l',
        u'\u041c': 'M',
        u'\u043c': 'm',
        u'\u041d': 'N',
        u'\u043d': 'n',
        u'\u041e': 'O',
        u'\u043e': 'o',
        u'\u041f': 'P',
        u'\u043f': 'p',
        u'\u0420': 'R',
        u'\u0440': 'r',
        u'\u0421': 'S',
        u'\u0441': 's',
        u'\u0422': 'T',
        u'\u0442': 't',
        u'\u0423': 'U',
        u'\u0443': 'u',
        u'\u0424': 'F',
        u'\u0444': 'f',
        u'\u0425': 'H',
        u'\u0445': 'h',
        u'\u0426': 'Ts',
        u'\u0446': 'ts',
        u'\u0427': 'Ch',
        u'\u0447': 'ch',
        u'\u0428': 'Sh',
        u'\u0448': 'sh',
        u'\u0429': 'Sch',
        u'\u0449': 'sch',
        u'\u042a': '',
        u'\u044a': '',
        u'\u042b': 'Y',
        u'\u044b': 'y',
        u'\u042c': '',
        u'\u044c': '',
        u'\u042d': 'E',
        u'\u044d': 'e',
        u'\u042e': 'Yu',
        u'\u044e': 'yu',
        u'\u042f': 'Ya',
        u'\u044f': 'ya',
    }
    # .get() is a pure lookup; the original used .setdefault(c, c), which
    # needlessly INSERTED every unmapped character into the table.
    return ''.join(conversion.get(c, c) for c in locallangstring)
|
def vector_add(v, w):
    """adds corresponding elements"""
    return [a + b for a, b in zip(v, w)]
|
def distribute(items, n):
    """
    Distributes items into n (almost) equally large batches, round-robin.
    Example: items = [1,2,3,4,5,6,7,8,9], n = 4 gives batches = [[1,5,9], [2,6], [3,7], [4,8]]
    Args:
        items ([any]): List of items of any type
        n (int): Maximum number of batches
    Returns:
        [[any]]: List of non-empty lists of items
    """
    batches = [[] for _ in range(n)]
    for index, item in enumerate(items):
        batches[index % n].append(item)
    # Drop batches that received nothing (when len(items) < n).
    return [batch for batch in batches if batch]
|
def closing_paren(sentence, open_paren_index):
    """
    Return the index of the ')' that matches the '(' at ``open_paren_index``.

    :param sentence: the sentence to search through
    :param open_paren_index: index of the opening parenthesis
    :raises Exception: if no matching closing parenthesis exists
    """
    open_nested_count = 0
    position = open_paren_index + 1
    # BUG FIX: the loop previously ran while position <= len(sentence),
    # indexing one past the end and raising IndexError on unbalanced input
    # instead of the intended Exception below.
    while position < len(sentence):
        char = sentence[position]
        if char == "(":
            open_nested_count += 1
        elif char == ")":
            if open_nested_count == 0:
                return position
            open_nested_count -= 1
        position += 1
    raise Exception("No closing parenthesis :(")
|
def processPoseLine(line):
    """Process a text line in the pose format
    # timestamp x y z roll pitch yaw id
    input: line
    output: float x, float y, string id, float timeStamp"""
    # Collect only the fields we care about: 0=timestamp, 1=x, 2=y, 7=id.
    fields = {0: "", 1: "", 2: "", 7: ""}
    space_count = 0
    for char in line:
        if char == ",":
            char = "."  # tolerate decimal commas
        if char == " ":
            space_count += 1
        elif space_count in fields and not (space_count == 7 and char == "\n"):
            fields[space_count] += char
    return float(fields[1]), float(fields[2]), fields[7], float(fields[0])
|
def get_python(spec='general'):
    """
    contains skills required for python developers

    :param spec: topic to look up.  'general' returns the (skills,
        advanced_skills) tuple; None returns only skills; any other string
        is normalised (lowercased, spaces stripped) and matched as a
        SUBSTRING of a category name below.  An unrecognised topic falls
        through and implicitly returns None.
    :return: skills and advanced_skills, a single category list, or None
    """
    # user can ask for specific info or general info
    # general
    skills = ["Flask","Django","Machine Learning","Artificial Intelligence","SQL","MongoDB",
              "REST","Deep Learning","Selenium","Tkinter"]
    advanced_skills = ['Blockchain', 'AWS', 'Big Data', 'Automation', 'Microservices']
    # specific info ==================================================================================
    python_build_tools = ['BuildBot', 'PyMake']
    web_tech = ['HTML', 'CSS', 'Javascript']
    web_frameworks = ['Flask','Django']
    # NOTE(review): 'python-kuebrnetes-client' looks like a typo for
    # 'python-kubernetes-client' — fixing it would change returned data.
    app_containers = ['python-docker-client','python-kuebrnetes-client']
    automation_tools = ['Selenium','Robot','Pytest']
    machine_learning = ['Regression','Classification','Clustering','ANN','CNN','RNN']
    code_version_control = ['GitHub']
    # ===============================================================================================
    name_var_dict = [
        {'pythonbuildtools': python_build_tools},
        {'webtechnologies': web_tech},
        {'webframeworks': web_frameworks},
        {'applicationcontainers': app_containers},
        {'pythonautomationtools': automation_tools},
        {'machinelearning': machine_learning},
        {'codeversioncontrol': code_version_control}
    ]
    if spec is None:
        return skills # if nothing else is passed
    if spec.lower() == 'general':
        return skills, advanced_skills
    else:
        for key in name_var_dict:
            temp = spec.lower().strip().replace(" ", "")
            # Substring containment, e.g. 'frameworks' matches 'webframeworks'.
            if temp in list(key.keys())[0]:
                idx = name_var_dict.index(key)
                return name_var_dict[idx][list(key.keys())[0]]
|
def is_string_like(maybe):
    """Test value to see if it acts like a string"""
    # Duck-typing: only string-like values support concatenation with "".
    try:
        maybe + ""
    except TypeError:
        return False
    else:
        return True
|
def sanitize_message(message):
    """Remove all characters which might break a commit message
    :param message: string containing the AccuRev promote message
    :return: the string without any non-ascii characters
    """
    if not message:
        return 'empty AccuRev promote message'
    trimmed = message.strip('"').lstrip('\n')
    # remove non-ascii characters
    return ''.join(ch for ch in trimmed if ord(ch) < 128)
|
def checkOperatorPrecedence(a, b):
    """
    0 if a's precedence is more than b operator
    1 otherwise
    """
    precedence = {'(': 3, '^': 2, '*': 2, '/': 2, '-': 1, '+': 1}
    return 1 if precedence[a] <= precedence[b] else 0
|
def FirstRest(tokens):
    """List utility; splits a /list/ robustly into first & rest.
    Args:
      tokens: the list of tokens.
    Returns:
      Tuple of first token, and list of remaining.
    """
    assert isinstance(tokens, list)
    if tokens:
        return tokens[0], tokens[1:]
    return None, []
|
def nova_except_format_assert(logical_line):
    """Check for 'assertRaises(Exception'.
    nova HACKING guide recommends not using assertRaises(Exception...):
    Do not use overly broad Exception type
    N202
    """
    too_broad = logical_line.startswith("self.assertRaises(Exception")
    if too_broad:
        return 1, "NOVA N202: assertRaises Exception too broad"
|
def combine_usub_files(file_list):
    """ Combine the user subroutine files in file_list to one file. Any
    `!DEC$ FREEFORM` in the files are commented and `!DEC$ FREEFORM` is
    added to the top of the file. Includes can still be used in these
    files, so only files containing Abaqus subroutines should be in this
    list.
    :param file_list: List of files to be combined
    :type file_list: List[ str ]
    :returns: Name of the combined user subroutine file
    :rtype: str
    """
    combined_file_name = 'usubs_combined.for'
    with open(combined_file_name, 'w') as out:
        # A single FREEFORM directive at the very top covers the whole file.
        out.write('!DEC$ FREEFORM\n')
        for sub_file in file_list:
            with open(sub_file, 'r') as src:
                contents = src.read().replace('!DEC$ FREEFORM', '!!DEC$ FREEFORM')
            out.write('\n! Contents from ' + sub_file + '\n')
            out.write(contents)
    return combined_file_name
|
def isInitialWord(syllableCounter, phonemeCounter):
    """
    :param syllableCounter: syllable position counter
    :param phonemeCounter: phoneme position counter
    :return: True iff both counters are at their first position (== 1)
    """
    return syllableCounter == 1 and phonemeCounter == 1
|
def parse_key_value_pairs(kv_pairs: str):
    """
    Convert key value pairs from "k1=v1,k2=v2" into a dict {k1: v1, k2: v2}.

    Entries without an "=" are ignored.  Only the FIRST "=" in an entry
    separates key from value, so values may themselves contain "=" (the
    original split on every "=" and silently dropped everything after a
    second one).

    :param kv_pairs: comma-separated key=value string
    :return: dict mapping keys to values
    """
    result = {}
    for pair in kv_pairs.split(","):
        if "=" in pair:
            key, _, value = pair.partition("=")
            result[key] = value
    return result
|
def scale_edge_weights(edge_weights, w):
    """ Scales all the edge-weights described by a dictionary. """
    return {pair: w * weight for pair, weight in edge_weights.items()}
|
def split_filename_into_subdirs(filename, length=3):
    """Split `filename` into a path hierarchy, each component at most
    `length` characters long, and return `(paths, basename, n_subdirs)`.

    :param filename: path, optionally with directories and an extension
    :param length: maximum characters per generated sub-directory
    :return: tuple (complete_paths, complete_basename, n_subdirs)
    """
    parts = filename.split("/")
    if len(parts) >= 2:
        prefix = "/".join(parts[:-1]) + "/"
    else:
        prefix = ""
    filename = parts[-1]
    dot_parts = filename.split(".")
    if len(dot_parts) >= 2:
        ext = dot_parts[-1]
        filename = ".".join(dot_parts[:-1])
    else:
        ext = ""
    # BUG FIX: the chunk slice previously used a hard-coded 3 instead of
    # `length`, so the parameter was silently ignored.
    chunks = [filename[x:x + length] for x in range(0, len(filename), length)]
    paths = "/".join(chunks[:-1])
    basename = chunks[-1]
    complete_basename = basename + "." + ext if ext else basename
    n_subdirs = len(chunks) - 1
    complete_paths = prefix + paths + "/" if paths else prefix
    return (complete_paths, complete_basename, n_subdirs)
|
def get_value(specs, name):
    """ Unpack metric specification
    Parameters
    ----------
    specs: dict
        a dict with keys value and name
    name: str
        the spec name
    Return
    ------
    value: float or None
        value of the spec if exists, None otherwise
    """
    for spec in specs:
        if spec['name'] == name:
            return spec['value']
    return None
|
def get_colour(series_options, series_name):
    """Return the graph colour for the series specified."""
    if series_options is None:
        return None
    if series_name not in series_options:
        return None
    series = series_options[series_name]
    if "colour" not in series:
        return None
    return series["colour"]
|
def _get_lim(q, qlim):
"""Gets the limits for a coordinate q."""
if qlim == 'range':
return min(q), max(q)
else:
return qlim
|
def compute_all_squares(of_range):
    """Compute the squares of all the natural numbers in a range."""
    return [value * value for value in range(of_range)]
|
def reaction_class(tra):
    """ string describing the reaction class

    Unpacking enforces that ``tra`` is a 3-tuple; only the first entry
    (the class) is returned.
    """
    cls, _second, _third = tra
    return cls
|
def model_has_predict_function(model):
    """Return True when ``model`` exposes a ``predict`` attribute."""
    try:
        model.predict
    except AttributeError:
        return False
    return True
|
def union(actual, predicted):
    """
    This function computes the union between the ground truth list
    and the predicted list
    :param actual: list containing ground truths
    :param predicted: list containing predictions
    :return : union between actual and predicted lists as a set"""
    return set(actual) | set(predicted)
|
def utf8(string):
    """
    Make sure string is utf8 encoded bytes.
    """
    return string if isinstance(string, bytes) else string.encode('utf8')
|
def xor_hex_strings(str1, str2):
    """
    Return xor of two hex strings.
    An XOR of two pieces of data will be as random as the input with the most randomness.
    We can thus combine two entropy sources in this way as a safeguard against one source being
    compromised in some way.
    For details, see http://crypto.stackexchange.com/a/17660
    returns => <string> in hex format
    """
    if len(str1) != len(str2):
        raise Exception("tried to xor strings of unequal length") # pragma: no cover
    xored = int(str1, 16) ^ int(str2, 16)
    # Zero-pad back out to the original width.
    return "{:0{}x}".format(xored, len(str1))
|
def delete_tags(metadata, key):
    """Delete the ``key`` tag from ``metadata`` if present.

    :param metadata: mapping-like object supporting item deletion
    :param key: tag name to remove
    :return: 1 when the tag was absent (after printing a notice),
        None when deletion succeeded
    """
    try:
        # `del metadata[key]` is the idiomatic spelling of the former
        # explicit dunder call metadata.__delitem__(key).
        del metadata[key]
    except KeyError:
        print("There's not a %s tag in this image, exiting..." % key)
        return 1
|
def get_lemma_pos_of_sensekey(sense_key):
    """
    lemma and pos are determined for a wordnet sense key
    >>> get_lemma_pos_of_sensekey('life%1:09:00::')
    ('life', 'n')
    :param str sense_key: wordnet sense key
    :rtype: tuple
    :return: (lemma, n | v | r | a | u)
    """
    if '%' not in sense_key:
        return '', 'u'
    lemma, information = sense_key.split('%')
    # WordNet ss_type digit -> POS tag; 3 and 5 are both adjectives.
    pos_by_digit = {'1': 'n', '2': 'v', '3': 'a', '5': 'a', '4': 'r'}
    return lemma, pos_by_digit.get(information[0], 'u')
|
def get_nFalsePositive(TP, atrank):
    """ the number of documents we should not have retrieved """
    # Retrieved count at rank `atrank` is atrank + 1 (0-based rank).
    n_retrieved = atrank + 1
    return n_retrieved - TP
|
def pathToString(filepath):
    """
    Coerces pathlib Path objects to a string (only python version 3.6+)
    any other objects passed to this function will be returned as is.
    This WILL NOT work with on Python 3.4, 3.5 since the __fspath__ dunder
    method did not exist in those verisions, however psychopy does not support
    these versions of python anyways.
    :Parameters:
        filepath : str or pathlib.Path
            file system path that needs to be coerced into a string to
            use by Psychopy's internals
    :Returns:
        filepath : str or same as input object
            file system path coerced into a string type
    """
    # Objects without __fspath__ fall through unchanged.
    try:
        return filepath.__fspath__()
    except AttributeError:
        return filepath
|
def check_status(result, numvals, errduples=None):
    """Check the status of a 'get' or 'set' operation

    :param result: response dict; only result['status'] is examined
    :param numvals: number of returned values; valid range for each
        status entry's 'index' field
    :param errduples: expected (object, attr) error pairs, or None to
        skip checking entirely
    :return: None when the status matches expectations, otherwise a
        string describing the first mismatch found
    """
    # If no errduples at all, don't test
    if errduples is None:
        return None
    # If there are no errors expected, status must be None
    numerrs = len(errduples)
    if numerrs == 0:
        if result['status'] is not None:
            return "expected 'status' is None"
        return None
    # Otherwise, status must be a list with numerrs elements
    if not isinstance(result['status'], list):
        return "result['status'] is not a list"
    if len(result['status']) != numerrs:
        return "expected {} status entries".format(numerrs)
    # Check all of the status values against the expected list
    idx = 0
    for stat in result['status']:
        # Each status must contain these keys
        if not "object" in stat:
            return "'object' not found in status[{}]".format(idx)
        if not "attr" in stat:
            return "'attr' not found in status[{}]".format(idx)
        if not "index" in stat:
            return "'index' not found in status[{}]".format(idx)
        if not "error" in stat:
            return "'error' not found in status[{}]".format(idx)
        # The error code should not be zero
        if stat['error'] == 0:
            return "status[{}]['error'] == 0".format(idx)
        # There is no guarantee of the order in which values are returned
        # because there is a mismatch between our pwrcmd implementation
        # (a serialized list of objects in the group) and the actual
        # group itself, which has an internal sorting order that may be
        # different from the order in which we built the group. So we
        # have to search the list of expected errors to find one that
        # matches.
        errdup = (stat['object'], stat['attr'])
        for dup in errduples:
            # If we find it, good
            if dup == errdup:
                break
        else:
            # We finished the for loop, not found
            return "unexpected error in status[{}]".format(idx)
        # Index location must be within range of returned values
        index = stat['index']
        if index not in range(0, numvals):
            return "status[{}]['index'] out of range".format(idx)
        idx += 1
    return None
|
def vectorize_edge(edge, lemma_index, pos_index, dep_index, dir_index):
    """
    Return a vector representation of the edge: concatenate lemma/pos/dep and add direction symbols
    :param edge: the string representation of an edge ("lemma/pos/dep/dir")
    :param lemma_index: index to lemma dictionary (unknown lemmas map to 0)
    :param pos_index: index to POS dictionary
    :param dep_index: index to dependency label dictionary
    :param dir_index: index to edge direction dictionary
    :return: (lemma, pos, dep, direction) index tuple, or None when the edge
        cannot be parsed or looked up
    """
    try:
        lemma, pos, dep, direction = edge.split('/')
        # BUG FIX: the bare `except:` here also swallowed KeyboardInterrupt
        # and SystemExit; catch only the failures this lookup can produce.
        lemma = lemma_index.get(lemma, 0)
        pos, dep, direction = pos_index[pos], dep_index[dep], dir_index[direction]
    except (ValueError, KeyError, AttributeError, TypeError):
        # ValueError: wrong number of '/'-separated fields;
        # KeyError: unknown pos/dep/direction label;
        # Attribute/TypeError: edge is not a string or indexes not mappings.
        return None
    return (lemma, pos, dep, direction)
|
def restaurant_bouncer(age: int, fully_vaxed: bool = False) -> bool:
    """
    checks if the person is allowed to enter restaurant or not.
    raises exception if age <= 0 and not int
    raises exception if fully_vaxed is not a bool
    :param age: (int) age of the individual
    :param fully_vaxed: (bool) vaccination status, default is False
    :return: (bool) True if allowed else False
    """
    assert isinstance(age, int), 'age must be an int'
    assert isinstance(fully_vaxed, bool), 'fully_vaxed must be a bool'
    assert age >= 0, 'age cannot be negative'
    # Vaccinated adults and all minors may enter.
    allowed = fully_vaxed or age < 18
    if allowed:
        print('Allowed to enter restaurant')
    else:
        print('Sorry, not allowed in')
    return allowed
|
def format_time(t: float):
    """Return the string representation of t. t must be in *seconds*"""
    if t < 60:
        return f"{round(t, 2)} secs"
    return f"{round(t / 60.0, 2)} mins"
|
def is_select(status):
    """Returns true if the first word in status is 'select'."""
    if status:
        first_word = status.split(None, 1)[0]
        return first_word.lower() == "select"
    return False
|
def obj_assert_check(cls):
    """
    The body of the assert check for an accessor
    We allow all versions of add/delete/modify to use the same accessors
    """
    flow_mod_classes = {
        "of_flow_modify", "of_flow_modify_strict",
        "of_flow_delete", "of_flow_delete_strict",
        "of_flow_add",
    }
    if cls in flow_mod_classes:
        return "IS_FLOW_MOD_SUBTYPE(obj->object_id)"
    return "obj->object_id == %s" % cls.upper()
|
def handle_club(input: str) -> int:
    """
    Map a club tier name to its numeric rank:
    DIAMANTE -> 1, OURO -> 2, PRATA -> 3, BRONZE -> 4.

    Raises ValueError for any unknown club name.
    """
    club_ranks = {"DIAMANTE": 1, "OURO": 2, "PRATA": 3, "BRONZE": 4}
    if input in club_ranks:
        return club_ranks[input]
    raise ValueError(f"Club ({input}) desconhecido, verifique")
|
def upper_frequency(center, order=1):
    """
    Upper frequency of frequency band given a center frequency and order.
    :param center: Center frequencies.
    :param order: Fraction of octave (band is 1/order octave wide).
    .. math:: f_u = f_c \cdot 2^{\\frac{+1}{2N}}
    """
    return center * 2.0**(+1.0/(2.0*order))
|
def _precursor_to_interval(mz: float, charge: int, interval_width: int) -> int:
"""
Convert the precursor m/z to the neutral mass and get the interval index.
Parameters
----------
mz : float
The precursor m/z.
charge : int
The precursor charge.
interval_width : int
The width of each m/z interval.
Returns
-------
int
The index of the interval to which a spectrum with the given m/z and
charge belongs.
"""
hydrogen_mass, cluster_width = 1.00794, 1.0005079
neutral_mass = (mz - hydrogen_mass) * charge
return round(neutral_mass / cluster_width) // interval_width
|
def replace(string, args):
    """
    Provide a standard Python string replace in templates
    Usage:
        {{ my_variable|replace:"/foo/bar/" }}
    """
    # The first character of `args` is the delimiter (sed-style).
    delimiter = args[0]
    pieces = args.split(delimiter)
    return string.replace(pieces[1], pieces[2])
|
def _int(data):
"""Convert byte data to big-endian int."""
return int.from_bytes(data, byteorder="big")
|
def _censor_password(connection_string):
"""
example connection string:
postgres://user:[email protected]:5439/dev
returns:
postgres://user:*****@host.com:5439/dev
"""
if (not connection_string) or ("@" not in connection_string):
return connection_string
split1 = connection_string.split("@")
split2 = split1[0].split(":")
if len(split2) != 3:
return connection_string
split2[-1] = "*****"
split2_join = ":".join(split2)
split1[0] = split2_join
split1_join = "@".join(split1)
return split1_join
|
def constrain_string(string, minlen, maxlen):
    """Validation function constrains minimal and maximal lengths of string.

    Args:
        string (string): String to be checked
        minlen (int): Minimal length
        maxlen (int): Maximal length
    Returns:
        string: Returns given string
    Raises:
        ValueError: If string doesn't fit into min/max constrains
    """
    n = len(string)
    if n < minlen:
        raise ValueError('Input need to be at least %s characters long' % minlen)
    if n > maxlen:
        raise ValueError('Input need to be maximum %s characters long' % maxlen)
    return string
|
def as_list(obj):
    """Wrap non-lists in lists.
    If `obj` is a list, return it unchanged. Otherwise, return a
    single-element list containing it.
    """
    return obj if isinstance(obj, list) else [obj]
|
def _fullname(o):
""" Get the fully qualified class name of an object. """
module = o.__class__.__module__
if module is None or module == str.__class__.__module__:
return o.__class__.__name__
return module + '.' + o.__class__.__name__
|
def get_columns(rows, headers):
    """Takes rows and length of headers to get columns based on 2D list index from rows"""
    if not headers:
        raise Exception("Could not parse columns because of irregular headers")
    # One column per header, gathered across all rows.
    return [[row[j] for row in rows] for j in range(len(headers))]
|
def get_bytes_used(current_time, process_durations):
    """
    Return bytes used at given time.
    >>> get_bytes_used(12, [2, 3, 4])
    13
    >>> get_bytes_used(14, [2, 3, 4])
    14
    :type current_time: int
    :param current_time: Array index
    :type process_durations: list
    :param process_durations: List containing process durations to exhaust 1 byte.
    :return: bytes_used
    :rtype: int
    """
    total = 0
    for duration in process_durations:
        if duration > current_time:
            # Input is sorted, so every later duration is larger too.
            break
        total += current_time // duration
    return total
|
def search_string_from_terms(include_terms, exclude_terms):
    """Build a search string: include terms verbatim, exclude terms
    prefixed with '-'.  ``exclude_terms`` may be None.
    """
    terms = list(include_terms)
    for term in exclude_terms or []:
        terms.append('-' + term)
    return ' '.join(terms)
|
def choose(n: int, k: int) -> int:
    """
    Simple implementation of math.comb for python 3.7 compatibility.
    """
    if k < 0 or k > n:
        return 0
    # Multiply the k largest factors of n!/(n-k)! against k!.
    numerator, denominator = 1, 1
    for step in range(1, min(k, n - k) + 1):
        numerator *= n
        denominator *= step
        n -= 1
    return numerator // denominator
|
def toscalar(t): # use on python scalars/pytorch scalars
    """Converts Python scalar or PyTorch tensor to Python scalar.

    :param t: a float/int, a tensor-like with .item(), or a length-1 sequence
    :return: the contained Python scalar
    """
    if isinstance(t, (float, int)):
        return t
    if hasattr(t, 'float'):
        t = t.float()  # half not supported on CPU
    if hasattr(t, 'item'):
        return t.item()
    else:
        # Fallback: a length-1 sequence holding the scalar.
        # BUG FIX: previously asserted len(t) == 0, which contradicted the
        # t[0] access on the next line (an empty sequence has no t[0]).
        assert len(t) == 1
        return t[0]
|
def format_reference(in_tuple):
    """
    Format a spatial reference given as a tuple.
    Examples
    --------
    >>> format_reference(('MNI152Lin', {'res': 1}))
    'MNI152Lin_res-1'
    >>> format_reference(('MNIPediatricAsym:cohort-2', {'res': 2}))
    'MNIPediatricAsym_cohort-2_res-2'
    """
    space, spec = in_tuple
    parts = space.split(':')
    # Either spelling of the resolution key is accepted.
    resolution = spec.get('res') or spec.get('resolution')
    if resolution:
        parts.append('res-{}'.format(resolution))
    return '_'.join(parts)
|
def warning_on_one_line(message, category, filename, lineno, file=None, line=None):
    """User warning formatter"""
    # pylint: disable=unused-argument
    return f'{filename}:{lineno}: {category.__name__}: {message}\n'
|
def stringifyList(rowVal):
    """
    Convert a raw address value to a display string.

    Lists are rendered as a ", "-joined string of their elements;
    any other value is returned unchanged.

    :param rowVal: value that may be a list
    :return: joined string for lists, otherwise ``rowVal`` itself
    """
    # isinstance is the idiomatic check (also accepts list subclasses,
    # which the original `type(...) is list` rejected).
    if isinstance(rowVal, list):
        return ", ".join(str(x) for x in rowVal)
    return rowVal
|
def still_has_slot(selected_recs, slots, group_id):
    """
    judge whether a group still has slot
    """
    used = sum(1 for _item, group in selected_recs if group == group_id)
    return used < slots[group_id]
|
def get_nested_keys(data):
    """Return key list for nested structure.

    Dict keys are joined with '.', list positions appear as '[i]', e.g.
    {'a': [{'b': 1}]} -> ['a[0].b'].  Scalars at the top level raise
    ValueError.

    See: https://github.com/ducdetronquito/scalpl/issues/21#issue-700007850
    """
    # Sentinel returned by the inner helper for leaf (non-container) values.
    _null_key = object()
    def nested_key(data):
        if isinstance(data, list):
            keys = []
            for i in range(len(data)):
                result = nested_key(data[i])
                if isinstance(result, list):
                    # Container child: prefix each of its paths with [i].
                    if isinstance(data[i], dict):
                        keys.extend(["[%d].%s" % (i, item) for item in result])
                    elif isinstance(data[i], list):
                        keys.extend(["[%d]%s" % (i, item) for item in result])
                elif result is _null_key:
                    # Leaf element: the index itself is the path.
                    keys.append("[%d]" % i)
            return keys
        elif isinstance(data, dict):
            keys = []
            for key, value in data.items():
                result = nested_key(value)
                if isinstance(result, list):
                    # Dict children get a '.' separator; list children do not.
                    if isinstance(value, dict):
                        keys.extend(["%s.%s" % (key, item) for item in result])
                    elif isinstance(value, list):
                        keys.extend(["%s%s" % (key, item) for item in result])
                elif result is _null_key:
                    keys.append("%s" % key)
            return keys
        else:
            return _null_key
    keys = nested_key(data)
    if keys is not _null_key:
        return keys
    # Top-level value was neither list nor dict.
    raise ValueError("invalid data type")
|
def remove_number(s):
    """Replace any numeric string with the word "number".

    Returns "number" when ``s`` parses as a number, otherwise returns
    ``s`` unchanged.
    """
    try:
        # float() accepts every string int() does, so a single check suffices;
        # the previous nested int() fallback was unreachable.
        float(s)
        return "number"
    except ValueError:
        return s
|
def remove_unicode(s):
    """Strip every non-ASCII character (emoji included) from ``s``."""
    return s.encode("ascii", errors="ignore").decode("ascii")
|
def GCD(x, y):
    """GCD(x:long, y:long): long
    Return the GCD of x and y.
    """
    # Euclid's algorithm on absolute values; terminates when the first
    # operand reaches zero.
    a, b = abs(x), abs(y)
    while a:
        a, b = b % a, a
    return b
|
def _langs_with_no_scripts(lang_script_data):
"""Return a set of langs with no scripts in lang_script_data."""
return set([k for k in lang_script_data
if not (lang_script_data[k][0] or lang_script_data[k][1])])
|
def get_typename(cls):
    """Finds the typename string of the given ``cls``.
    :param cls: the class object to find its typename
    :type cls: :class:`type`
    :returns: the typename
    :rtype: :class:`basestring`
    """
    module = cls.__module__
    # Classes from the main script or Py2 builtins get the bare name;
    # everything else is fully qualified.
    if module not in ('__main__', '__builtin__'):
        return f'{module}.{cls.__name__}'
    return cls.__name__
|
def isiterable(obj, exclude=None):
    """Return True if ``obj`` is iterable, False otherwise.

    Any type listed in ``exclude`` is treated as not iterable.
    """
    excluded_types = tuple(exclude) if exclude else ()
    if isinstance(obj, excluded_types):
        return False
    try:
        iter(obj)
    except TypeError:
        return False
    return True
|
def make_noisy_params_sweep_fname(tag: str, param_str: str) -> str:
    """Return a filename for noisy parameter sweep inputs.
    Args:
        tag: Some identifier string for this simulation result.
        param_str: String encoding the swept parameter values.
    Returns:
        The ``.npy`` filename for this tag/parameter combination.
    """
    return f"noisy_parameter_sweep_{tag}_{param_str}.npy"
|
def exp_map(val, lo, hi):
    """Exponential mapping."""
    # Maps val=0 -> lo and val=1 -> hi along a geometric curve.
    ratio = hi / lo
    return ratio ** val * lo
|
def build_ranges(page_list):
    """Group consecutive page numbers into range strings.

    Takes a list of page numbers (ints or numeric strings) and returns a
    string such as ``"1-3,5-5"``, where each ``a-b`` span covers one run of
    consecutive values. A single page still renders as ``"n-n"``.

    Unlike the previous version, the input list is never mutated (the old
    append/pop sentinel trick modified the caller's list in place) and an
    empty list returns an empty string instead of raising IndexError.
    """
    if not page_list:
        return ""
    pages = [int(p) for p in page_list]
    runs = []
    start = prev = pages[0]
    for page in pages[1:]:
        if page != prev + 1:
            # Gap found: close the current run and open a new one.
            runs.append((start, prev))
            start = page
        prev = page
    runs.append((start, prev))  # flush the final run
    return ",".join(f"{a}-{b}" for a, b in runs)
|
def compare_diversity(avg_dissim_algo, avg_dissim_control):
    """
    Return the percent change of diversity between normal recommendations
    and diverse recommendations, rounded to two decimal places.

    Parameters:
        avg_dissim_algo: computed diversity of diverse recommendations
        avg_dissim_control: computed diversity of normal recommendations
            (must be non-zero)

    Returns:
        Percent change, rounded to 2 decimals.
    """
    delta = avg_dissim_algo - avg_dissim_control
    return round((delta / avg_dissim_control) * 100, 2)
|
def egg_drop_problem(num_eggs: int, num_floors: int) -> int:
    """Minimum worst-case number of drops to find the critical floor.

    Classic dynamic program over (eggs, floors).
    Time Complexity: O(n*k*k)
    Space Complexity: (n*k)
    """
    table = [[0] * (num_floors + 1) for _ in range(num_eggs + 1)]
    # Base cases: a single egg forces a bottom-up linear scan,
    # and a single floor always needs exactly one drop.
    for floor in range(1, num_floors + 1):
        table[1][floor] = floor
    for eggs in range(1, num_eggs + 1):
        table[eggs][1] = 1
    for eggs in range(2, num_eggs + 1):
        for floor in range(2, num_floors + 1):
            best = num_floors
            for pivot in range(1, floor):
                # Drop at `pivot`: if the egg breaks, search below it with
                # one egg fewer; if it survives, search the floors above.
                broken = table[eggs - 1][pivot - 1]
                intact = table[eggs][floor - pivot]
                best = min(best, 1 + max(broken, intact))
            table[eggs][floor] = best
    return table[num_eggs][num_floors]
|
def type2tag(target_type):
    """Given the target type, return the list of TARGET_TAGS to parameterize Freedom E SDK"""
    # FPGA boards are checked first; order mirrors the original cascade.
    if any(fpga in target_type for fpga in ("arty", "vc707", "vcu118")):
        return "fpga openocd"
    if "hifive1-revb" in target_type:
        return "board jlink"
    if "rtl" in target_type:
        return "rtl"
    if "spike" in target_type:
        return "spike"
    if "qemu" in target_type:
        return "qemu"
    return "board openocd"
|
def get_force_datakeeper(self):
    """
    Generate DataKeepers to store by default results from force module
    Parameters
    ----------
    self: VarLoadFlux object
    Returns
    -------
    dk_list: list
        list of DataKeeper
    """
    # No default DataKeeper is currently defined for the force module.
    return []
|
def join_positions(pos1, pos2):
    """ Merge two positions and return as a list of strings
    pos1: iterable object containing the first positions data
    pos2: iterable object containing the second positions data
    Example:
    >>> join_positions('ABCD','1234')
    ['A1', 'B2', 'C3', 'D4']
    """
    merged = []
    for left, right in zip(pos1, pos2):
        merged.append(''.join((left, right)))
    return merged
|
def _is_temp_garbage(filename: str):
"""Is this a Microsoft Office temp file?"""
return filename.startswith("~$") or filename.endswith(".tmp")
|
def get_argument_tabstops(arguments):
    """str: Convert the given function arguments into an UltiSnips-style snippet."""
    # UltiSnips reserves the "0" tabstop for its own use, so user tabstops start at 1.
    return [
        '${{{tabstop}:{name}}}'.format(tabstop=tabstop, name=name)
        for tabstop, name in enumerate(arguments, start=1)
    ]
|
def parse_user_id_from_banner(banner_url: str) -> str:
    """Parses the users id from the users banner photo url.
    The user id can only be parsed from the banner photos url.
    Example:
    ```
    /pic/profile_banners%2F2474416796%2F1600567028%2F1500x500 -> 2474416796
                         ^          ^
                         |          |
                          ----------
                  user id section in banner link
    ```
    Args:
        banner_url: URL of the profiles banner photo.
    Returns:
        The target profiles user id.
    """
    # The id is the segment between the first two URL-encoded slashes ("%2F").
    segments = banner_url.split("%2F", 2)
    return segments[1]
|
def str_username(attending_username):
    """Determine if the input username is valid and meaningful.

    Check if the attending's username contains any digit and return the
    corresponding result.

    Args:
        attending_username(str): the username of target attending

    Returns:
        A string that states the attending's username is invalid
        due to the numeric elements it contains, or the bool
        value True that indicates the username is valid.
    """
    import re
    # re.search returns a truthy Match when a digit is present;
    # no need for the `bool(...) is False` comparison.
    if re.search(r'\d', attending_username):
        return "Please enter a valid username string with no numbers!"
    return True
|
def _rca_division(val1, val2, val3, val4):
"""Multi-step division."""
return (val1 / val2) / (val3 / val4)
|
def first_not_in_set(seq, items):
    """Return the first element of ``seq`` not contained in ``items``, or None.

    Args:
        seq: iterable scanned in order.
        items: container supporting ``in`` (a set gives O(1) lookups).

    Returns:
        The first element of ``seq`` absent from ``items``, or ``None`` when
        every element is present (or ``seq`` is empty).
    """
    # `s not in items` is the idiomatic form of `not s in items`;
    # next() with a default makes the None result explicit.
    return next((s for s in seq if s not in items), None)
|
def get_auto_scaling_group_name(cluster):
    """Get the name for a cluster's auto scaling group.
    Args:
        cluster
            The name of a cluster.
    Returns:
        The auto scaling group's name.
    """
    suffix = "--ecs-cluster-auto-scaling-group"
    return str(cluster) + suffix
|
def findPoisonedDuration(timeSeries, duration):
    """
    :type timeSeries: List[int]
    :type duration: int
    :rtype: int
    """
    if not timeSeries:
        return 0
    # Each attack poisons for `duration`, but when two attacks are closer
    # than `duration` apart only the gap between them adds new time.
    # The final attack always contributes a full `duration`.
    total = duration
    for prev, cur in zip(timeSeries, timeSeries[1:]):
        total += min(cur - prev, duration)
    return total
|
def integer_root(n,k):
    """integer kth root of n: floor(n ** (1/k)) computed in pure integers."""
    # from https://stackoverflow.com/questions/15978781/how-to-find-integer-nth-roots/15979957:
    # Newton's method to find kth root of n.
    # Iterates u_{t+1} = ((k-1)*u_t + n // u_t^(k-1)) // k starting from u=n.
    # The sequence decreases monotonically until it first fails to shrink;
    # `s` then holds the last (smallest) value, which is the integer root.
    u, s = n, n+1
    while u < s:
        s = u
        t = (k-1) * s + n // pow(s, k-1)
        u = t // k
    return s
|
def to_literal(typ, always_tuple=False):
    """Convert a typestruct item to a simplified form for ease of use."""
    def _union(members):
        # Simplify every member, then sort by string form for a
        # deterministic ordering.
        simplified = tuple(sorted((to_literal(m) for m in members), key=str))
        # A singleton union collapses to its sole member unless the caller
        # asked for tuples unconditionally.
        if len(simplified) == 1 and not always_tuple:
            return simplified[0]
        return simplified
    tag, params = typ
    if tag == 'prim':
        return params
    if tag == 'tuple':
        return (tag,) + tuple(to_literal(p) for p in params)
    if tag == 'map':
        keys, values = params
        return (tag, _union(keys), _union(values))
    return (tag, _union(params))
|
def idx2subs(idx_list):
    """Given a list idx_list of index values for each dimension of an array,
    idx2subs() returns a list of the tuples of subscripts for all of the
    array elements specified by those index values.
    Note: This code adapted from that posted by jfs at
    https://stackoverflow.com/questions/533905/get-the-cartesian-product-
    of-a-series-of-lists"""
    # Iterative cartesian product: extend every accumulated prefix with each
    # index value of the next dimension, preserving left-to-right order.
    subs = [()]
    for dim_values in idx_list:
        subs = [prefix + (value,) for prefix in subs for value in dim_values]
    return subs
|
def _compute_fans(shape):
"""Computes the number of input and output units for a weight shape.
Args:
shape: Integer shape tuple or TF tensor shape.
Returns:
A tuple of scalars (fan_in, fan_out).
"""
if len(shape) < 1: # Just to avoid errors for constants.
fan_in = fan_out = 1
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
else:
# Assuming convolution kernels (2D, 3D, or more).
# kernel shape: (..., input_depth, depth)
receptive_field_size = 1.
for dim in shape[:-2]:
receptive_field_size *= dim
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return fan_in, fan_out
|
def transfer_ncdm(k, alpha, beta, gamma=5.):
    """The non-cold dark matter transfer function as in Murgia (2018) and Stucker (2021)."""
    base = 1. + (alpha * k) ** beta
    return base ** (-gamma)
|
def create_query_str(keywords, sources):
    """
    Creates the query string to search for `keywords` from `sources`.

    Each source contributes an ``inurl:<source>`` term, with ``OR`` placed
    between consecutive source terms. Neither input list is modified.

    Arguments
    ----------
    keywords: ([string]), list of keywords to search for.
    sources: ([string]), list of news sources to find articles from.
    """
    source_terms = []
    for source in sources:
        source_terms.extend(('inurl:' + source, 'OR'))
    if source_terms:
        # Drop only the trailing 'OR' we appended ourselves. (The old code
        # inspected the combined list, so a final keyword that happened to
        # be the literal string 'OR' could be deleted by mistake.)
        source_terms.pop()
    return ' '.join(list(keywords) + source_terms)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.