def _int_from_bool(boolean_value):
"""Converts a boolean value to an integer of 0 or 1."""
if boolean_value:
return 1
return 0
|
def _mesh_export_format_from_filename(filename):
"""
Determine a mesh output format based on a file name. This inspects the file extension.
Parameters
----------
filename: string
A file name, may start with a full path. Examples: 'brain.obj' or '/home/myuser/export/brain.ply'. If the file extension is not a recognized extension for a supported format, the default format 'obj' is returned.
Returns
-------
format: string
A string defining a supported mesh output format. One of ('ply', 'obj').
matched: Boolean
Whether the file name ended with a known extension. If not, the returned format was chosen because it is the default format.
"""
if filename.endswith('.ply'):
return 'ply', True
elif filename.endswith('.obj'):
return 'obj', True
else:
return 'obj', False
|
def convert_option_name_to_environment_variable_name(option_name: str) -> str:
"""Convert given option name to uppercase, replace hyphens with underscores, and add "BARRIER_" prefix."""
return f"BARRIER_{option_name.upper().replace('-', '_')}"
|
def non_FA(alt_img_root):
"""Wrapper for the ``tbss_non_FA`` command.
e.g.: ``tbss_non_FA("L2")``
"""
return ["tbss_non_FA", alt_img_root]
|
def get_compatible_name(name_str):
"""
Converts given string to a valid Maya string
:param name_str: str
:return: str
"""
return ''.join([c if c.isalnum() else '_' for c in name_str])
|
def csnl_to_list(csnl_str):
"""Converts a comma-separated and new-line separated string into
a list, stripping whitespace from both ends of each item. Does not
return any zero-length strings.
"""
s = csnl_str.replace('\n', ',')
return [it.strip() for it in s.split(',') if it.strip()]
|
def is_route_bfs(graph, node1, node2):
"""Determine if there is a route between two nodes."""
queue = [node1]
visited_nodes = set()
while queue:
current_node = queue.pop(0)
for child_node in graph[current_node]:
if child_node not in visited_nodes:
if child_node == node2:
return True
queue.append(child_node)
visited_nodes.add(current_node)
return False
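A minimal usage sketch, not part of the original snippet; the adjacency-dict graph is hypothetical example data and assumes is_route_bfs above is in scope:
# hypothetical directed graph as an adjacency dict
graph = {'a': ['b'], 'b': ['c'], 'c': [], 'd': ['a']}
assert is_route_bfs(graph, 'a', 'c')      # path a -> b -> c exists
assert not is_route_bfs(graph, 'c', 'a')  # no edges leave 'c'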
|
def sort_string(text: str) -> str:
"""Sort string punctuation, lowercase and then uppercase."""
return ''.join(sorted([x for x in text], key=str.swapcase))
|
def recall_at_n(asins: list, predicted_asins: list) -> float:
"""
Compute recall@N: the fraction of relevant items that were retrieved.
Args:
asins (list): The relevant (actual) item identifiers.
predicted_asins (list): The predicted item identifiers (top N).
Returns:
float: Number of relevant items retrieved divided by the total number of relevant items.
"""
# number of relevant items
set_actual = set(asins)
set_preds = set(predicted_asins)
num_relevant = len(set_actual.intersection(set_preds))
# calculating recall@N - relevant / total relevant items
recall_at_n = num_relevant / len(asins)
return recall_at_n
|
def find_char(char, secret, lst):
"""Takes a character from given string,
shifts it secret times and return the
character from lst depending on case of
the taken character."""
pos = lst.index(char)
offset = (pos + secret) % 26
ch = lst[offset]
return ch
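A hedged usage sketch, not from the original: it assumes find_char above is used as a Caesar-shift helper over a lowercase alphabet list (the alphabet and inputs here are illustrative):
import string
alphabet = list(string.ascii_lowercase)    # illustrative 26-letter list
assert find_char('a', 3, alphabet) == 'd'  # shift 'a' by 3
assert find_char('z', 1, alphabet) == 'a'  # wraps around via % 26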
|
def make_null_array(n):
"""
Return list of n empty lists.
"""
a = []
for i in range(n):
a.append([])
return a
|
def signbit(x):
""" Returns 1 if x<0"""
return int(x < 0)
|
def bisect_right(a, x, lo=0, hi=None):
"""Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e <= x, and all e in
a[i:] have e > x. So if x already appears in the list, a.insert(i, x) will
insert just after the rightmost x already there.
Optional args lo (default 0) and hi (default len(a)) bound the
slice of a to be searched.
"""
if lo < 0:
raise ValueError('lo must be non-negative')
if hi is None:
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x < a[mid]: hi = mid
else: lo = mid+1
return lo
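A minimal usage sketch, not part of the original, assuming the bisect_right above:
a = [1, 2, 2, 4]           # example sorted list
i = bisect_right(a, 2)     # i == 3, just past the rightmost 2
a.insert(i, 2)             # a becomes [1, 2, 2, 2, 4], order preserved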
|
def total_intake_calories(bmr, activity_level):
"""
Calculate the Total Intake Calories based
on activity level
:param bmr: value from bmr function
:param activity_level: reference level, 0 - sedentary, 1 - low, 2 - moderate, 3 - high
:return: average target intake calories
"""
ref_activities = [1.2, 1.4, 1.65, 1.95]
return bmr * ref_activities[activity_level]
|
def func_h(b, n, p, x_i):
"""
b_(i+1) = func_h(b_i, x_i)
"""
if x_i % 3 == 2:
return (b + 1) % n
elif x_i % 3 == 0:
return 2*b % n
elif x_i % 3 == 1:
return b
else:
print("[-] Something's wrong!")
return -1
|
def tuple_diff(t1, t2):
"""Elementwise addition of two tuples"""
return tuple( t1[i] - t2[i] for i in range(len(t1)) )
|
def parse_masked_mac(s): # pylint: disable=invalid-name
"""
parse mac and mask string. (xx:xx:..:xx/yy:yy:...:yy)
"""
items = s.split("/", 1)
if len(items) == 1:
return items[0], "ff:ff:ff:ff:ff:ff"
elif len(items) == 2:
return items[0], items[1]
else:
raise SyntaxError(s)
|
def _ShouldGenerateVM(options):
"""Returns true if we will need a VM version of our images."""
# This is a combination of options.vm and whether or not we are generating
# payloads for vm testing.
return options.vm and (options.basic_suite or options.full_suite)
|
def correct(old_txt):
"""
correct() sets to 0 the atom valence indication from the mol block in old_text and
returns the corrected text
"""
new_lines = []
# initialize list of corrected lines in old_txt
old_lines = old_txt.split('\n')
# get lines from old_txt
creator_line = old_lines[1].strip()
# line 2, indexed 1 in old_txt, starts with text producer
creator = creator_line.split()[0]
# get creator of old_txt
if creator != 'RDKit':
# creator of old_txt is not RDKit
return ''
# denies text correction
count_line = old_lines[3].rstrip()
# line 4, indexed 3 in old_txt, gives the number of atom lines and the file type (V2000 or V3000)
if 'V2000' not in count_line:
# file type is not V2000
return ''
# denies text correction
numAtom = int(count_line[0:3])
# number of atoms is the first field of the count line
for i in range(0, 4):
# loop over lines of old_txt preamble
new_lines.append(old_lines[i])
# no change for preamble lines
for i in range(4, 4+numAtom):
# loop over atom lines
line = old_lines[i]
# get current atom line
pieces = line.split()
# put current atom line into pieces
valence = pieces[9]
# valence in the 10th field, indexed 9
if valence != '0':
# a non-zero valence was set, atom is charged
line = line[0:48]+' 0'+line[51:]
# line surgery to remove valence indication
new_lines.append(line)
# accounts for the modified atom line
for i in range(4+numAtom, len(old_lines)):
# loop over the lines that follow the atom lines
new_lines.append(old_lines[i])
# no change for the lines that follow the atom lines
new_txt = '\n'.join(new_lines)
# rebuild the text with modified atom lines
return new_txt
|
def separate(sep, extractions):
"""Extracts information from an existing dictionary and returns a list of
separate single-key dictionaries, one per extracted key.
"""
extracted = []
for k in extractions:
tmp_d = {}
tmp_d[k]=sep[k]
extracted.append(tmp_d)
return extracted
|
def lcm(num1, num2):
"""Returns the lowest common multiple of two given integers."""
temp_num = num1
while (temp_num % num2) != 0:
temp_num += num1
return temp_num
|
def error_analysis(err_train, err_test, threshold=0.2):
"""
Using a threshold to flag when there is over/under-fitting.
Default: 0.20
"""
if (err_test - err_train) > threshold:
print("WARNING: OVER-fitting")
return False
if err_train > threshold:
print("WARNING: UNDER-fitting")
return False
return True
|
def reverse(text: str):
"""
Returns the `text` in reverse order.
"""
return text[::-1]
|
def skipExport(item, all, metadataProperty):
"""
Determine whether a particular item should be exported as part of this export run.
:param item: the item we're checking
:param all: whether or not we're exporting all or recent items
:param metadataProperty: the metadata property that acts as a flag to check previous exports
"""
return not all and item.get('meta', {}).get(metadataProperty)
|
def nested_dict_to_list_of_tuples(d, separator='.', extra_nesting=False):
""" Take a 2-level nested dict of dict to list of tuples.
to preserve some structure, the keys from the two levels are
joined with the separator string.
If extra_nesting is true, the inner items are wrapped in an extra
tuple.
This function is mainly to support use with my black box tuning code,
which represents the search space as key-tuple(value) pairs."""
l = []
for outer_key, outer_value in d.items():
assert separator not in outer_key, 'Please try another separator!'
for inner_key, inner_value in outer_value.items():
assert separator not in inner_key, 'Please try another separator!'
if extra_nesting:
inner_value = [inner_value]
l.append((outer_key + separator + inner_key, inner_value))
return l
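A small illustrative call, not part of the original; the dict contents are hypothetical:
d = {'optimizer': {'lr': 0.01, 'momentum': 0.9}}  # hypothetical 2-level search space
nested_dict_to_list_of_tuples(d)
# -> [('optimizer.lr', 0.01), ('optimizer.momentum', 0.9)]
nested_dict_to_list_of_tuples(d, extra_nesting=True)
# -> [('optimizer.lr', [0.01]), ('optimizer.momentum', [0.9])]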
|
def in_nested_list(my_list, item):
"""
Determines if an item is in my_list, even if nested in a lower-level list.
"""
if item in my_list:
return True
else:
return any(in_nested_list(sublist, item) for sublist in my_list if isinstance(sublist, list))
|
def normalize_path(path):
"""
Normalizes provided path to: /something/something/something
:param path: string path to normalize
:return: string normalized path
"""
while path[len(path) - 1] == ' ' or path[len(path) - 1] == '/':
path = path[:len(path) - 1]
return path
|
def _build_key(host, port):
"""Build a key based upon the hostname or address supplied."""
return "%s:%s" % (host, port)
|
def minRemoveToMakeValid(s):
"""
:type s: str
:rtype: str
"""
opens, closes = [], []
for i in range(len(s)):
c = s[i]
if c == '(':
opens.append(i)
elif c == ')':
if len(opens) == 0:
closes.append(i)
else:
opens.pop()
hs = set(opens+closes)
return ''.join(s[i] for i in range(len(s)) if i not in hs)
|
def to_str(text, session=None):
"""
Try to decode a bytestream to a python str, using encoding schemas from settings
or from Session. Will always return a str(), also if not given a str/bytes.
Args:
text (any): The text to decode to str. If a str, return it. If also not bytes, convert
to str using str() or repr() as a fallback.
session (Session, optional): A Session to get encoding info from. Will try this before
falling back to settings.ENCODINGS.
Returns:
decoded_text (str): The decoded text.
Note:
If `text` is already str, return it as is.
"""
if isinstance(text, str):
return text
if not isinstance(text, bytes):
# not bytes, convert directly to str
try:
return str(text)
except Exception:
return repr(text)
default_encoding = (
session.protocol_flags.get("ENCODING", "utf-8") if session else "utf-8"
)
try:
return text.decode(default_encoding)
except (LookupError, UnicodeDecodeError):
for encoding in ["utf-8", "latin-1", "ISO-8859-1"]:
try:
return text.decode(encoding)
except (LookupError, UnicodeDecodeError):
pass
# no valid encoding found. Replace unconvertable parts with ?
return text.decode(default_encoding, errors="replace")
|
def group_instance_count_json(obj):
"""
Test lifecycle parser
>>> group_instance_count_json(json.loads('{ "AutoScalingGroups": [ { "Instances": [ { "LifecycleState": "InService" } ] } ] }'))
1
"""
if not obj['AutoScalingGroups']:
return 0
instances = obj['AutoScalingGroups'][0]['Instances']
in_service = 0
for instance in instances:
if instance['LifecycleState'] == 'InService':
in_service += 1
return in_service
|
def lfff_name(lt):
"""Create mch-filename for icon ctrl run for given leadtime.
Args:
lt (int): leadtime
Returns:
str: filename of icon output simulation in netcdf, following mch-convention
"""
hour = int(lt) % 24
day = (int(lt) - hour) // 24
remaining_s = round((lt - int(lt)) * 3600)
sec = int(remaining_s % 60)
mm = int((remaining_s - sec) / 60)
return f"lfff{day:02}{hour:02}{mm:02}{sec:02}.nc"
|
def set_pairing_bit_on_device_type(pairing_bit: bool, device_type: int):
"""
Shifts the pairing bit (True = 1, False = 0) 7 bits to the left and adds
the device type
"""
return (pairing_bit << 7) + device_type
|
def cmp_dicts(dict1, dict2):
"""
Returns True if dict2 has all the keys and matching values as dict1.
List values are converted to tuples before comparing.
"""
result = True
for key, v1 in dict1.items():
result, v2 = key in dict2, dict2.get(key)
if result:
v1, v2 = (tuple(x) if isinstance(x, list) else x for x in [v1, v2])
result = (v1 == v2)
if not result:
break # break for key, v1
return result
|
def unpack_libraries(libraries):
""" Given a list of libraries returns url """
if libraries:
return libraries[0].get('url')
|
def isstr(obj):
"""
Return True if obj is a str instance.
"""
return isinstance(obj, str)
|
def convert_to_signed_int_16_bit(hex_str):
"""
Utility function to convert a hex string into a 16-bit signed integer value
:param hex_str: hex String
:return: signed 16 bit integer
"""
val = int(hex_str, 16)
if val > 0x7FFF:
val = ((val+0x8000) & 0xFFFF) - 0x8000
return val
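A few illustrative conversions, not part of the original, assuming the function above:
assert convert_to_signed_int_16_bit("7FFF") == 32767   # largest positive 16-bit value
assert convert_to_signed_int_16_bit("8000") == -32768  # wraps to most negative value
assert convert_to_signed_int_16_bit("FFFF") == -1      # all bits set -> -1 in two's complement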
|
def calc_share_ratios(list_prio, cores):
""" Basic Shares calculator"""
# Shares calculation logic
max_shares_per_app = 100
# collect all high priority apps
shares_per_app = None
if len(list_prio) <= cores:
total_shares = sum([r[2] for r in list_prio])
shares_per_app = [r[2]/total_shares for r in list_prio]
print(total_shares, shares_per_app)
elif len(list_prio) > cores:
# we have more apps to run than cores
# Option 1 assume all High priority apps have the same shares
# Hence all of them will run under the same limit and
# linux scheduler can take care of them
shares_per_app = [max_shares_per_app/(max_shares_per_app*cores)]*cores
# we are done we can return shares_per_app
return shares_per_app
|
def dissoc_deep(obj, *ks):
""" Return a copy of obj without k """
from copy import deepcopy
obj = deepcopy(obj)
for k in ks:
try: # pragma: no cover
del obj[k]
except (KeyError, IndexError): # pragma: no cover
pass
return obj
|
def ex_euclid(a, b):
"""
The extended Euclidean algorithm yields the
gcd of inputs a and b, and also two numbers
x and y such that a*x + b*y = gcd(a,b).
"""
last_remainder = a
current_remainder = b
last_s = 1
current_s = 0
last_t = 0
current_t = 1
while current_remainder > 0:
quotient, new_remainder = divmod(last_remainder, current_remainder)
new_s = last_s - quotient*current_s
new_t = last_t - quotient*current_t
current_remainder, last_remainder = new_remainder, current_remainder
current_s, last_s = new_s, current_s
current_t, last_t = new_t, current_t
return last_remainder, last_s, last_t
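A minimal check of the Bezout identity, not part of the original, assuming ex_euclid above:
g, x, y = ex_euclid(240, 46)              # example inputs
assert g == 2 and 240 * x + 46 * y == g   # here x == -9, y == 47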
|
def filter_fn(filename):
"""Filter interesting coverage files.
>>> filter_fn('z3c.coverage.__init__.cover')
True
>>> filter_fn('z3c.coverage.tests.cover')
False
>>> filter_fn('z3c.coverage.tests.test_foo.cover')
False
>>> filter_fn('z3c.coverage.ftests.test_bar.cover')
False
>>> filter_fn('z3c.coverage.testing.cover')
True
>>> filter_fn('z3c.coverage.testname.cover')
True
>>> filter_fn('something-unrelated.txt')
False
>>> filter_fn('<doctest something-useless.cover')
False
"""
parts = filename.split('.')
return (filename.endswith('.cover') and
not filename.startswith('<') and
'tests' not in parts and
'ftests' not in parts)
|
def reverse(insts):
"""
Reverse the order of instances.
This function should be passed to :meth:`InstanceSet.select` without any
argument.
"""
return list(reversed(insts))
|
def is_valid_xvg(ext):
""" Checks if file is XVG """
formats = ['xvg']
return ext in formats
|
def parse_headers(arg):
"""Parse headers argument"""
if not arg:
return None
return dict(val.split(':', 1) for val in arg.split(','))
|
def term_precision(reference, test):
"""
Given a set of reference values and a set of test values, return
the fraction of test values that appear in the reference set.
In particular, return card(``reference`` intersection ``test``)/card(``test``).
If ``test`` is empty, then return None.
:type reference: set
:param reference: A set of reference values.
:type test: set
:param test: A set of values to compare against the reference set.
:rtype: float or None
"""
if (not hasattr(reference, 'intersection') or
not hasattr(test, 'intersection')):
raise TypeError('reference and test should be sets')
if len(test) == 0:
return None
else:
return round(len(reference.intersection(test)) / len(test), 3)
|
def transpose_loggraph(loggraph_dict):
"""Transpose the information in the CCP4-parsed-loggraph dictionary
into a more useful structure."""
columns = loggraph_dict["columns"]
data = loggraph_dict["data"]
results = {}
# FIXME column labels are not always unique - so prepend the column
# number - that'll make it unique! PS counting from 1 - 01/NOV/06
new_columns = []
j = 0
for c in columns:
j += 1
col = "%d_%s" % (j, c)
new_columns.append(col)
results[col] = []
nc = len(new_columns)
for record in data:
for j in range(nc):
results[new_columns[j]].append(record[j])
return results
|
def format_seconds(s):
"""
Format a seconds value into a human-readable form
"""
years, s = divmod(s, 31556952)
min, s = divmod(s, 60)
h, min = divmod(min, 60)
d, h = divmod(h, 24)
return '%sy, %sd, %sh, %sm, %ss' % (years, d, h, min, s)
|
def _normalize(line):
"""
Deal with lines in which spaces are used rather than tabs.
"""
import re
SPC = re.compile(' +')
return re.sub(SPC, '\t', line)
|
def get_function_object(obj):
"""
Objects that wraps function should provide a "__numba__" magic attribute
that contains a name of an attribute that contains the actual python
function object.
"""
attr = getattr(obj, "__numba__", None)
if attr:
return getattr(obj, attr)
return obj
|
def combine_counts(counts1, counts2):
"""
Combine two counts dictionaries.
"""
ret = counts1
for key, val in counts2.items():
if key in ret:
ret[key] += val
else:
ret[key] = val
return ret
|
def so_to_dict(sqlobj):
"""Convert SQLObject to a dictionary based on columns."""
d = {}
if sqlobj is None:
return d # stops recursion
for name in sqlobj.sqlmeta.columns.keys():
d[name] = getattr(sqlobj, name)
d['id'] = sqlobj.id # id must be added explicitly
if sqlobj._inheritable:
d.update(so_to_dict(sqlobj._parent))
d.pop('childName')
return d
|
def parse_performance_timing(p_timing):
"""Changes performance timing results to deltas."""
ns = p_timing["navigationStart"]
return {k: v - ns if v else 0 for k, v in p_timing.items()}
|
def int_to_alpha(num):
"""
Encode an integer (0-25) into an alpha character, useful for sequences of
axes/figures.
Parameters
-----------
num : :class:`int`
Integer to encode.
Returns
---------
:class:`str`
Alpha-encoding of a small integer.
"""
alphas = [chr(i).lower() for i in range(65, 65 + 26)]
return alphas[num]
|
def validate_number(x) -> bool:
"""Validates that the input is a number"""
if x.isdigit():
return True
elif x == "":
return True
else:
return False
|
def to_mb(val, update_interval=None):
"""Convert bytes to MB with 2 decimals"""
tmp = 1
if update_interval:
tmp = 1/update_interval
return "{:0.2f}".format((val / 1024 / 1024) * tmp)
|
def allcap_differential(words):
"""
Check whether just some words in the input are ALL CAPS
:param list words: The words to inspect
:returns: `True` if some but not all items in `words` are ALL CAPS
"""
is_different = False
allcap_words = 0
for word in words:
if word.isupper():
allcap_words += 1
cap_differential = len(words) - allcap_words
if cap_differential > 0 and cap_differential < len(words):
is_different = True
return is_different
|
def ccw(x, y, xm, ym, nx, ny):
"""
Determines if redrawn points are counter clockwise in cross-section
Parameters
----------
x : x-coordinates of cross-section
y : y-coordinates of cross-section
xm : x[i-1]
ym : y[i-1]
nx : new x-coordinate
ny : new y-coordinate
Returns
-------
ccw : Array of bools indicating which new points are counter clockwise
"""
return (x - xm) * (ny - ym) > (y - ym) * (nx - xm)
|
def ArgumentCountException(function: str, args: int, count: int) -> bool:
"""
Checks if the correct number of arguments were given to a specific command.
:param function: The display name given to the function making the call.
:param args: The number of arguments passed into the command.
:param count: The expected number of arguments for this command.
:return: Returns a boolean as a result of the count lookup.
"""
name = ArgumentCountException.__name__
if args != count:
print(f'{function} {name}: Incorrect Number of Arguments. Expected {count}, got {args}.')
return True
return False
|
def _is_boolish(b):
"""
Check if b is a 1, 0, True, or False.
"""
if (b == 1) or (b == 0) or (b == True) or (b == False):
return True
else:
return False
|
def test_if_between(a, b, test_val):
"""Returns True is test_val is between a and b"""
if a < b:
return a <= test_val <= b
else:
return b <= test_val <= a
|
def run_args_from_settings(settings):
"""
Build valid list of run arguments for spk_run.py based on a settings dict.
"""
# basic settings
run_args = [
settings["script"],
settings["mode"],
settings["representation"],
settings["dataset"],
settings["dbpath"],
settings["modeldir"],
"--split",
*settings["split"],
"--property",
settings["property"],
"--max_epochs",
settings["max_epochs"],
"--checkpoint_interval",
settings["checkpoint_interval"],
"--keep_n_checkpoints",
settings["keep_n_checkpoints"],
]
# optional settings
if settings["derivative"] is not None:
run_args += ["--derivative", settings["derivative"]]
if settings["negative_dr"]:
run_args += ["--negative_dr"]
if settings["contributions"] is not None:
run_args += ["--contributions", settings["contributions"]]
if settings["output_module"] is not None:
run_args += ["--output_module", settings["output_module"]]
# string cast
run_args = [str(arg) for arg in run_args]
return run_args
|
def count_leading(data, char):
"""Count number of character (char) in the beginning of string (data) """
for i in range(len(data)):
if data[i] != char:
return i
return len(data)
|
def calculate_distance(a, b):
"""Helper function to calculate the distance between node a and b."""
(x1, y1) = a
(x2, y2) = b
return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
|
def cut_lines_to_n(string, n = 200):
"""
Takes a string and breaks it into lines with <= n characters per line.
Useful because psql queries have to have < 212 chars per line.
"""
out = []
lines = string.split("\n")
for line in lines:
newline = ""
words = line.split(" ")
for word in words:
if len(newline + word) + 1 > n:
out.append(newline.strip())
newline = word
else:
newline += " " + word
out.append(newline.strip())  # strip the leading space added when joining words
return "\n".join(out)
|
def services_by_user(account, days_back, user_arn, user_name):
""" Returns query for eventsource (service) / actions performed by user. """
query_string = f"""SELECT DISTINCT eventsource, eventname FROM behold
WHERE account = '{account}'
AND (useridentity.arn = '{user_arn}')
AND from_iso8601_timestamp(eventtime) > date_add('day', -{days_back}, now())
ORDER BY eventsource, eventname;"""
return (query_string, f"athena_results/services_by_user/{account}/{user_name}")
|
def filter_current_symbol(view, point, symbol, locations):
"""
Filter the point specified from the list of symbol locations. This
results in a nicer user experience so the current symbol doesn't pop up
when hovering over a class definition. We don't just skip all class and
function definitions for the sake of languages that split the definition
and implementation.
"""
def match_view(path, view):
fname = view.file_name()
if fname is None:
if path.startswith('<untitled '):
path_view = view.window().find_open_file(path)
return path_view and path_view.id() == view.id()
return False
return path == fname
new_locations = []
for l in locations:
if match_view(l[0], view):
symbol_begin_pt = view.text_point(l[2][0] - 1, l[2][1])
symbol_end_pt = symbol_begin_pt + len(symbol)
if point >= symbol_begin_pt and point <= symbol_end_pt:
continue
new_locations.append(l)
return new_locations
|
def solve(N):
"""Solve an individual problem."""
if N==0:
return 0
elif N<=4:
return 1
else:
return (N-4)//2 + 1 + N%2
|
def get_topic(domain_class: type) -> str:
"""
Returns a string describing a class.
:param domain_class: A class.
:returns: A string describing the class.
"""
return (
domain_class.__module__
+ "#"
+ getattr(domain_class, "__qualname__", domain_class.__name__)
)
|
def l_out(l_in: int, padding: int, dilation: int, kernel: int, stride: int) -> int:
"""
Determine the L_out of a 1d-CNN model given parameters for the 1D CNN
:param l_in: length of input
:param padding: number of units to pad
:param dilation: dilation for CNN
:param kernel: kernel size for CNN
:param stride: stride size for CNN
:return:
"""
return (l_in + 2 * padding - dilation * (kernel - 1) - 1) // stride + 1
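A quick illustrative calculation, not from the original, assuming l_out above (the conv settings are hypothetical):
# length-100 input through a kernel-3, stride-2 conv with padding 1 and dilation 1
assert l_out(100, padding=1, dilation=1, kernel=3, stride=2) == 50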
|
def b(s, encoding="utf-8"):
""" bytes/str/int/float type to bytes """
if isinstance(s, bytes):
return s
elif isinstance(s, (str, int, float)):
return str(s).encode(encoding)
else:
raise TypeError(type(s))
|
def reduce(l):
"""
Args:
Rangelist: generic list of tuples (can be either float or int tuple)
Return:
List of floats, one per input tuple, each equal to half the absolute difference of the tuple's two values
"""
result = []
for s in l:
midVal = abs(float(s[0]) - float(s[1])) / 2.0
result.append(midVal)
return result
|
def count_hosts(meetings):
""" Dedicated fuction to only count
specific hosts
"""
# Create an empty dictionary for hosts
host_dict = dict()
for item in meetings['items']:
# Check if the host is already counted
if item['hostEmail'] not in host_dict.keys():
host_dict[item['hostEmail']] = 1
else:
host_dict[item['hostEmail']] += 1
return host_dict
|
def pop_listtype(args, fname, ptype):
""" Reads argument of type from args """
if len(args) > 0 and isinstance(args[0], list):
if args[0][0] == fname:
p = ptype()
p.parse(args.pop(0)[1:])
return p
return None
|
def convert_trunc(raw):
"""
Convert the trunc value to a tuple of booleans indicating whether it is 5'
or 3' truncated.
"""
if isinstance(raw, tuple):
return raw
if raw == "no":
return (False, False)
if raw == "5'":
return (True, False)
if raw == "3'":
return (False, True)
if raw == "5'&3'":
return (True, True)
raise Exception("Unknown yes/no %s" % raw)
|
def organiser_name(meta):
"""Get name from organiser text."""
v = meta.get('organiser') or ''
assert type(v) in (dict, str), f"{type(v)} found"
if type(v) == dict:
return v.get('name', '')
return v
|
def meter_reading(m3,dl):
"""
Parameters:
-----------
m3: int
dl: list of int
Return:
-------
vol_m3: float
volume in m3 from the dial and gauges values
/!\ The nature of the gauges means a dial can show the next value early;
converting requires taking 1 away from a digit when the next smaller gauge reads 9.
Example: reading gauges in order 2* 1/10 m3 + 9 *1/100 m3 = 0.19m3
"""
#see comments above for logic
dl = [str(x[0] if x[1]<9 else x[0]-1) for x in zip(dl, dl[1:] + [0])]
dl = int("".join(dl))/(10**len(dl))
return m3+dl
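A minimal usage sketch mirroring the docstring example, not part of the original, assuming meter_reading above:
# the 1/10-m3 gauge shows 2 but the 1/100-m3 gauge is on 9,
# so the first digit is corrected down to 1: 123 m3 + 0.19 m3
assert abs(meter_reading(123, [2, 9]) - 123.19) < 1e-9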
|
def note_to_freq(notenum):
"""Converts a MIDI note number to a frequency.
See https://en.wikipedia.org/wiki/MIDI_Tuning_Standard
"""
return (2.0 ** ((notenum - 69) / 12.0)) * 440.0
|
def round2int(value, boundary=-1):
"""Function that is used to obtain rounding value
:param value: float32, an original value
:param boundary: int, an object that represents the boundary of rounded value
:return: int, the value rounded half-up, capped at `boundary` unless boundary == -1
"""
if value - int(value) >= 0.5:
if boundary == -1 or (int(value)+1) <= boundary:
return int(value)+1
else:
return boundary
else:
return int(value)
|
def get_most_popular_talks_by_views(videos):
"""Return the pycon video list sorted by viewCount"""
return sorted(videos, key = lambda x: int(x.metrics['viewCount']), reverse=True)
|
def read_file(filename, **kwargs):
"""Read file and return contents as string"""
if not filename:
return
MODE = kwargs.get('mode', 'r')
content = ''
with open(filename, MODE) as reader:
content = reader.read()
return content
|
def get_processed_row(data, key):
"""
Utility function that picks either the custom overridden value or the default
value for the row.
"""
if data[key+'_r']:
return data[key+'_r']
else:
return data[key]
|
def false_negative(y_true, y_pred):
"""Function to false negative
Arguments:
y_true {list} -- list of true values
y_pred {list} -- list of predicted values
"""
fn = 0
for yt, yp in zip(y_true, y_pred):
if yt == 1 and yp == 0:
fn += 1
return fn
|
def remove_redundant(group):
"""
:param group: a list of lists
:return: a list of lists with duplicate items removed from each sub-list, order preserved
"""
new_group = []
for j in group:
new = []
for i in j:
if i not in new:
new.append(i)
new_group.append(new)
return new_group
|
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
|
def total_amount_needed(amount, duration, frequency):
"""Calculates the total amount of the drug that is needed to send home with the client.
Params:
amount(Float) - Could be in mls or the number of tablets per dose.
duration(Integer) - The number of days to give the medication for
frequency(Integer) - The interval between each dosing in hrs.
:returns:
Total amount of the drug(float) (either in mls or number of tablets) to be sent home.
>>> total_amount_needed(0.5, 7, 12)
7.0
>>> total_amount_needed(1.5, 14, 8)
63.0
>>> total_amount_needed(2, 60, 48)
60.0
"""
# Number of times per day * days of treatment * amount per each dose
return 24 / frequency * duration * amount
|
def get_image_ranges(experiments):
"""Get image ranges for a list of experiments (including scanless exp.)"""
# Note, if set to (1, 1) for scanless experiments then the first batch offset in
# _calculate_batch_offsets is zero below, bad!
return [e.sequence.get_image_range() if e.sequence else (0, 0) for e in experiments]
|
def make_friends_list(json_data: dict) -> list:
"""
Returns a list of tuples: [(screen_name, location), ...]
from json data (.json object)
>>> make_friends_list({\
"users": [\
{\
"id": 22119703,\
"id_str": "22119703",\
"name": "Niki Jennings",\
"screen_name": "nufipo",\
"location": "Kyiv"\
}]})
[('nufipo', 'Kyiv')]
"""
friends = []
for friend in json_data['users']:
location = friend['location']
if location != '':
friends.append( (friend['screen_name'], location) )
return friends
|
def targets_key(measurement_uuid: str, agent_uuid: str) -> str:
"""The name of the file containing the targets to probe."""
return f"targets__{measurement_uuid}__{agent_uuid}.csv"
|
def add_integers(*args: int) -> int:
"""Sum integers.
Arguments:
*args: One or more integers.
Returns:
Sum of integers.
Raises:
TypeError: No argument was passed or a passed argument is not of type
`int`.
Example:
>>> add_integers(3, 4)
7
"""
if not len(args):
raise TypeError(
f"{add_integers.__name__}() requires at least one argument"
)
for i in args:
if not isinstance(i, int):
raise TypeError(
f"Passed argument of type '{type(i).__name__}', but only "
f"integers allowed: {i}"
)
return sum(args)
|
def valid_heart_json(input_json):
"""
Checks to make sure input JSON is valid and contains the appropriate keys.
Will also check to make sure the number of dates passed in matches the number
of readings.
Returns True if all checks passed, False otherwise.
"""
if(type(input_json) is dict):
if('hrDates' in input_json and 'hrValues' in input_json):
if(len(input_json['hrDates']) == len(input_json['hrValues'])):
return True
print("The JSON file passed in is missing data -- please check the output from Shortcuts.")
return False
|
def create_confusion_matrix(row):
"""
Reports the number of false positives, false negatives, true positives, and true negatives
to describe the performance of the model
Args:
row: A row in the input dataframe
Returns:
str: One of 'TP', 'FN', 'FP' or 'TN', classifying the row's predicted label against its actual label
"""
if row['actual_label'] == 1:
if row['predicted_label'] == 1:
return 'TP'
else:
return 'FN'
else:
if row['predicted_label'] == 1:
return 'FP'
else:
return 'TN'
|
def compute_auto_betweenclass_weighting(tests, betweenclass_weighting=True):
"""
Finds the automatic weighting used between classes of test, e.g., a
"top level" Bonferroni correction, or no correction, etc.
"""
if betweenclass_weighting:
betweenclass_weighting = {test: 1 / len(tests) for test in tests}
else:
betweenclass_weighting = {test: 1 for test in tests}
return betweenclass_weighting
|
def Naive_Tokenizer(sentence):
"""
Tokenizes sentence, naively splitting by space only.
This is only for cleaning; a real tokenizer is supposed to be applied
later in the process.
"""
return sentence.split()
|
def fibonacci(n):
"""
Generates the first n Fibonacci numbers.
Adopted from: https://docs.python.org/3/tutorial/modules.html
"""
result = []
a, b = 0, 1
while len(result) < n:
result.append(b)
a, b = b, a + b
return result
|
def isqrt(n):
"""Returns the integer square root of n; i.e. r=isqrt(n) is the greatest
integer such that r**2<=n. Code taken directly from "Integer square root in
python" at http://stackoverflow.com/a/15391420."""
x = n
y = (x + 1) // 2
while y < x:
x = y
y = (x + n // x) // 2
return x
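A minimal usage sketch, not part of the original, assuming isqrt above:
assert isqrt(24) == 4   # floor of sqrt(24)
assert isqrt(25) == 5   # exact square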
|
def print_execution_time(start, end):
"""Helper function to print execution times properly formatted."""
hours, rem = divmod(end-start, 3600)
minutes, seconds = divmod(rem, 60)
return ("{:0>2}:{:0>2}:{:0>2}".format(int(hours),int(minutes),int(seconds)))
|
def is_same_class(obj, a_class):
"""checks if an object is exactly an instance of a given class"""
return (type(obj) is a_class)
|
def dec_to_float(dec):
"""
Decimal to Float
"""
if dec:
return float(dec)
else:
return None
|
def is_new(urn):
""" returns the last segment of the given urn"""
idx = urn.find("$");
if idx >= 0 :
return True
else:
return False
|
def state2int(points_earned, distance_reward):
"""
Helper function returns total reward for snake_game
ate_food -> reward=2
no_collision -> reward=distance_reward
collision -> reward=-2
:param points_earned: Points earned reported by SnakeGame
:param distance_reward: Calculated with dp_reward
:type points_earned: float
:type distance_reward: float
:return: Total reward
:rtype: float or int
"""
if points_earned == 0:
return distance_reward
elif points_earned < 0:
return -2
elif points_earned > 0:
return 2
return 0
|