content
stringlengths 42
6.51k
|
---|
def acc(FN, TN, TP, FP):
    """Compute classification accuracy from confusion-matrix counts.

    Returns:
        accuracy measure, (TP + TN) / (TP + TN + FP + FN)
    """
    correct = TP + TN
    total = correct + FP + FN
    return 1.0 * correct / total
|
def get_iou(bb1, bb2):
    """
    Calculate the Intersection over Union (IoU) of two bounding boxes.

    Parameters
    ----------
    bb1, bb2 : dict
        Keys: {'x1', 'x2', 'y1', 'y2'}.
        (x1, y1) is the top-left corner, (x2, y2) the bottom-right corner.

    Returns
    -------
    float
        IoU in [0, 1].
    """
    for bb in (bb1, bb2):
        assert bb['x1'] < bb['x2']
        assert bb['y1'] < bb['y2']

    # Corners of the intersection rectangle (may be empty).
    left = max(bb1['x1'], bb2['x1'])
    top = max(bb1['y1'], bb2['y1'])
    right = min(bb1['x2'], bb2['x2'])
    bottom = min(bb1['y2'], bb2['y2'])
    if right < left or bottom < top:
        return 0.0

    # Intersection of two axis-aligned boxes is itself axis-aligned.
    intersection = (right - left) * (bottom - top)

    # Union = sum of both areas minus the shared intersection area.
    area1 = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
    area2 = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
    iou = intersection / float(area1 + area2 - intersection)
    assert 0.0 <= iou <= 1.0
    return iou
|
def trimstring(string):
    """Performs basic string normalization: lowercase and strip whitespace."""
    return string.strip().lower()
|
def comparer(ses_hash, usr_hash):
    """Compare initially hashed flight times with doubly hashed stored
    flight times; returns a score in [0, 1] (1 meaning a close match)."""
    deviation_sum = 0
    matched = 0
    for key, usr_val in usr_hash.items():
        if key in ses_hash:
            matched += 1
            # 24 is the tolerated deviation off normal (in ms-hash units).
            deviation_sum += abs(int(ses_hash[key]) - int(usr_val)) - 24
    # 4.8 ms minus average deviation off normal.
    score = 4.8 - deviation_sum / matched
    if score > 4.8:
        return 1
    if score < 0:
        return 0
    return abs(score) / 4.8
|
def rev_transcribe(dna):
    """
    Return the reverse complement of a DNA string (A<->T, G<->C, reversed).
    """
    complement = {"T": "A", "G": "C", "A": "T", "C": "G"}
    return "".join(complement[base] for base in reversed(dna))
|
def _closest_index(dcl, absofs):
""":return: index at which the given absofs should be inserted. The index points
to the DeltaChunk with a target buffer absofs that equals or is greater than
absofs.
:note: global method for performance only, it belongs to DeltaChunkList"""
lo = 0
hi = len(dcl)
while lo < hi:
mid = (lo + hi) / 2
dc = dcl[mid]
if dc.to > absofs:
hi = mid
elif dc.rbound() > absofs or dc.to == absofs:
return mid
else:
lo = mid + 1
# END handle bound
# END for each delta absofs
return len(dcl)-1
|
def interpret(block: str):
    """
    Interprets an element block, breaking it into element and number of that element.

    :param block: string block describing an element
    :return: composition dictionary
    :rtype: dict
    """
    if block[0].isdigit():  # isotope notation: keep the block verbatim
        return {block: 1}
    element = block[0]
    digits = ''
    for char in block[1:]:
        if char.isdigit():  # accumulate the count
            digits += char
        else:  # accumulate the element symbol
            element += char
    return {element: int(digits) if digits else 1}
|
def filter_connection_params(queue_params):
    """
    Filters the queue params to keep only the connection related params.

    :param queue_params: mapping of queue configuration options
    :return: dict containing only the connection-related entries
    """
    NON_CONNECTION_PARAMS = ('DEFAULT_TIMEOUT',)
    # Plain dict comprehension: the dict((k, v) ...) workaround existed only
    # for Python 2.6 compatibility and is no longer needed.
    return {p: v for p, v in queue_params.items() if p not in NON_CONNECTION_PARAMS}
|
def area(box):
    """Calculates area of a given bounding box ((x0, y0), (x1, y1))."""
    width = box[1][0] - box[0][0]
    height = box[1][1] - box[0][1]
    assert width >= 0
    assert height >= 0
    return float(width * height)
|
def filter_devices(devices, wanted):
    """
    Return a list of desired Device objects.

    :param devices:
        list of Device dicts
    :param wanted:
        list of hostnames you want
    """
    # Build the set once so each membership test is O(1) rather than a
    # linear scan of `wanted` per device.
    wanted_set = set(wanted)
    return [d for d in devices if d['hostname'] in wanted_set]
|
def strip_parameters(target: str) -> str:
    """Remove trailing ALGOL-style parameters from a target name;
    e.g. foo(bar, baz) -> foo."""
    if target.endswith(")"):
        open_paren = target.rfind("(")
        if open_paren != -1:
            return target[:open_paren]
    return target
|
def MakeTuple(object_):
    """
    Return *object_* unchanged if it already is a tuple; otherwise wrap it
    in a one-element tuple.

    @param: Any object or tuple
        The object to tupleIZE
    """
    return object_ if isinstance(object_, tuple) else (object_,)
|
def dict_to_str_sorted(d):
    """Return a str containing each key and value in dict d.

    Keys and values are separated by a comma; key-value pairs are separated
    by a newline and sorted in ascending order by key, so
    dict_to_str_sorted({1:2, 0:3, 10:5}) returns "0, 3\n1, 2\n10, 5".

    Fixes two defects of the previous implementation, which contradicted
    this documented contract: pairs were sorted as *strings* (so key 10
    sorted before key 2) and a trailing newline was appended.
    """
    return "\n".join("{}, {}".format(key, d[key]) for key in sorted(d))
|
def quick_sort(arr):
    """
    Sort a list with quicksort, using the first element as pivot.

    time complexity: O(n*log n) on average
    :param arr: list
    :return: new sorted list
    :raises TypeError: if arr is not a list
    """
    if not isinstance(arr, list):
        raise TypeError
    if len(arr) <= 1:
        return arr
    pivot, rest = arr[0], arr[1:]
    smaller = quick_sort([x for x in rest if x < pivot])
    larger = quick_sort([x for x in rest if x >= pivot])
    return smaller + [pivot] + larger
|
def tokenize_message(message):
    """return a list of normalized words."""
    normalized = message.lower()
    # Pad punctuation with a leading space so split() isolates it.
    for token in (".", ",", "?", "!", ":"):
        normalized = normalized.replace(token, " " + token)
    normalized = normalized.replace("'s", " 's")
    return normalized.split()
|
def list_replace(values, old, new):
    """Replace any occurrence of a given value, in a given list.

    Limitation: fails to replace np.nan, since == yields False on nan, and
    computed nan fails "is" test with np.nan.

    Arguments:
        values (list) : list (or iterable) of input values
        old (...) : old value to be replaced
        new (...) : new value
    Returns:
        (list) : values after replacement
    """
    return [new if value == old else value for value in values]
|
def metric_or_imperial(query, lang, us_ip=False):
    """Decide which unit system to use and record it on *query*.

    Explicit use_metric/use_imperial flags in the query win; otherwise the
    'us' language or a US IP selects imperial, everything else metric.
    Returns the (mutated) query.
    """
    wants_metric = query.get('use_metric', False)
    wants_imperial = query.get('use_imperial', False)
    if wants_metric and not wants_imperial:
        imperial = False
    elif wants_imperial and not wants_metric:
        imperial = True
    elif lang == 'us':
        # slack uses metric by default; to override it specify us.wttr.in
        imperial = True
    else:
        imperial = bool(us_ip)
    query['use_imperial'] = imperial
    query['use_metric'] = not imperial
    return query
|
def total_letters(transcript: str) -> int:
    """
    Sums the total amount of non-space characters in the transcript.

    :param transcript: A string containing the contents of the transcribed audio file.
    :return: Returns the number of letters in the file.
    """
    return sum(1 for char in transcript if char != " ")
|
def remove_items_from_dict(a_dict, bad_keys):
    """
    Remove every item from a_dict whose key is in bad_keys.

    :param a_dict: The dict to have keys removed from.
    :param bad_keys: The keys to remove from a_dict.
    :return: A copy of a_dict with the bad_keys items removed.
    """
    return {k: v for k, v in a_dict.items() if k not in bad_keys}
|
def is_not_a_debit_line(line):
    """Return True for lines that should be removed from the debits section
    automatically — the system won't even ask whether to ignore them."""
    # Each tuple lists substrings that must ALL appear for the line to match.
    patterns = (
        ("str1", "another"),
        ("str1", "another", "another"),
        ("str1", "another", "another"),
    )
    if any(all(token in line for token in pattern) for pattern in patterns):
        return True
    # No removable pattern matched.
    return False
|
def make_descriptor_string(attribute, value):
    """ Create a key-value string to form part of a GTF entry.
        Example: gene_id and ENSG00000117676.13
            becomes
            gene_id "ENSG00000117676.13";
    """
    return '{} "{}";'.format(str(attribute), str(value))
|
def to_metric(amount, unit):
    """
    Used to convert common (amount, unit) pairs to metric versions, e.g.,
    (5, 'GALLON') -> (18.9270589, 'LITER')
    """
    if unit == 'POUND':
        kilograms = amount * 0.45359237
        # Report sub-kilogram weights in grams for readability.
        if kilograms < 1.0:
            return (kilograms * 1000, 'GRAM')
        return (kilograms, 'KILOGRAM')
    conversion = {
        'OUNCE': (28.3495231, 'GRAM'),
        'GALLON': (3.78541178, 'LITER'),
        'TEASPOON': (0.00492892159, 'LITER'),
        'TABLESPOON': (0.0147867648, 'LITER'),
    }
    if unit in conversion:
        factor, metric_unit = conversion[unit]
        return (amount * factor, metric_unit)
    # Unknown units pass through unchanged.
    return (amount, unit)
|
def get_color_tag(counter):
    """returns color tag based on parity of the counter"""
    return 'warning' if counter % 2 == 0 else 'info'
|
def calc_pad(pad, in_siz, out_siz, stride, ksize):
    """Calculate padding size.

    Args:
        pad: padding, "SAME", "VALID" or manually specified tuple [P, Q].
        ksize: kernel size, [I, J].

    Returns:
        pad_: Actual padding width.
    """
    if pad == 'VALID':
        return 0
    if pad == 'SAME':
        # Total padding needed so the strided output covers the input.
        return (out_siz - 1) * stride + ksize - in_siz
    # Manually specified padding passes straight through.
    return pad
|
def number_to_string(number):
    """
    Converts given number to a String(32) representing
    an objectID primary key. Makes too long strings if
    the number is over 32 digits.
    >>> number_to_string(1)
    '00000000000000000000000000000001'
    >>> number_to_string(0xdeadbeef)
    '000000000000000000000000deadbeef'
    """
    # hex() output minus the '0x' prefix, left-padded with zeros to 32 chars.
    return hex(int(number))[2:].zfill(32)
|
def sanitize_referers(url):
    """Given a url sanitize it to a referrer

    Arguments:
    - `url`: An url (or None); only google/facebook referrers pass through.
    """
    if url is not None and ("google." in url or "facebook." in url or "fb." in url):
        return url
    return "Unknown"
|
def gen_run_entry_str(query_id, doc_id, rank, score, run_id):
    """A simple function to generate one run entry.

    :param query_id: query id
    :param doc_id: document id
    :param rank: entry rank
    :param score: entry score
    :param run_id: run id
    """
    fields = (query_id, 'Q0', doc_id, rank, score, run_id)
    return ' '.join(str(field) for field in fields)
|
def printSeparator(separatorSize, printToScreen=True):
    """
    Description:
        Prints out a horizontal separator of varying sizes
    Params:
        separatorSize [INT]: size of separator (number of 20-char segments)
        printToScreen [BOOL]: print to stdout when True, else return the string
    Output:
        NONE when printed; otherwise the separator string
    """
    separator = "####################" * separatorSize
    if printToScreen:
        print(separator)
    else:
        return separator
|
def _any_instance(var, classes):
"""Check if var is an instance of any class in classes."""
for cl in classes:
if isinstance(var, cl):
return True
return False
|
def get_colour(progress, colours):
    """
    Interpolates between colours by a given percent.

    :param progress: Value 0 <= x <= 1 of how far to interpolate
    :param colours: List of 2 colours (3-component tuples) to interpolate between
    :return str: Hex code of final colour ('#000000' when progress is out of range)
    """
    if not 0 <= progress <= 1:
        return '#000000'
    start, end = colours[0], colours[1]
    # Linear interpolation per channel, in the order the components appear.
    channels = [s + (e - s) * progress for s, e in zip(start, end)]
    return '#%02x%02x%02x' % tuple(round(c) for c in channels)
|
def search(array, key, value):
    """Generic function to find a particular dictionary in a list of dictionaries,
    based on one key:value pair in each dict. Returns None when nothing matches.
    """
    return next((item for item in array if item[key] == value), None)
|
def get_forge_url(version: str) -> str:
    """Ensures a constant and streamlined string creation for forge urls"""
    base = "https://files.minecraftforge.net/net/minecraftforge/forge"
    return "{}/index_{}.html".format(base, version)
|
def json_ld_get_activities_list_from_rawdata(data):
    """Return list of processes from raw data."""
    processes = data["processes"]
    return list(processes.values())
|
def merge_dicts(*dicts, **kwargs):
    """Merges dicts and kwargs into one dict (later sources win; kwargs last)."""
    merged = {}
    for source in dicts + (kwargs,):
        merged.update(source)
    return merged
|
def rescale(OldValue, OldMin, OldMax, NewMin, NewMax):
    """Linearly map a value from one range onto another.

    inputs:
        1-OldValue: [float] value to be transformed
        2-OldMin:   [float] min old value
        3-OldMax:   [float] max old value
        4-NewMin:   [float] min new value
        5-NewMax:   [float] max new value
    output:
        1-NewValue: [float] transformed new value
    """
    old_range = OldMax - OldMin
    new_range = NewMax - NewMin
    return ((OldValue - OldMin) * new_range) / old_range + NewMin
|
def _get_leading(o):
"""
Parameters
----------
o : any
"""
return ((len(o) - len(o.lstrip(o[0]))), o[0])
|
def non_empty_datapoint_values(data):
    """
    From a graphite like response, return the values of the
    non-empty datapoints
    """
    if not data:
        return []
    datapoints = data[0]['datapoints']
    # Each datapoint is (value, timestamp); keep only truthy values.
    return [point[0] for point in datapoints if point[0]]
|
def is_chinese_char(char):
    """Checks whether the character's codepoint lies in a CJK Unicode block.

    This defines a "chinese character" as anything in the CJK Unified
    Ideographs blocks (plus the compatibility-ideograph blocks):
    https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)

    Note that these blocks do NOT cover all Japanese and Korean characters,
    despite the name: modern Korean Hangul, Japanese Hiragana and Katakana
    are separate blocks. Those scripts write space-separated words, so they
    are not treated specially and are handled like all other languages.
    """
    cjk_ranges = (
        (0x4E00, 0x9FFF),
        (0x3400, 0x4DBF),
        (0x20000, 0x2A6DF),
        (0x2A700, 0x2B73F),
        (0x2B740, 0x2B81F),
        (0x2B820, 0x2CEAF),
        (0xF900, 0xFAFF),
        (0x2F800, 0x2FA1F),
    )
    cp = ord(char)
    return any(lo <= cp <= hi for lo, hi in cjk_ranges)
|
def search_steps_with_states(steps, states):
    """Search for steps that are in the given states.

    Arguments:
        steps -- An iterator over Step like objects.
        states -- A list of states allowed by the Step like objects e.g., Step.WAIT.

    Returns:
        *steps* itself when *states* is the empty list; otherwise a lazy
        iterator yielding the steps whose state is in the given list.
    """
    if states == []:
        return steps
    return filter(lambda step: step.state in states, steps)
|
def is_gzip_file(file_name):
    """Test for a gzip-compressed file extension (.gz / .gzip), case-insensitively.

    :param file_name: path or file name to inspect
    :return: True when the name ends with a gzip extension, else False
    """
    # str.endswith accepts a tuple of suffixes: one call replaces the old
    # `or` chain of endswith calls.
    return file_name.lower().endswith(('.gz', '.gzip'))
|
def get_params(space):
    """Unpack a raw search-space dict into plain Python parameter values.

    :param space: dict of raw parameter entries; 'feature', 'lang' and
        'rnn_type' are doubly-nested sequences — presumably hyperparameter
        sampler output (TODO confirm against caller)
    :return: tuple (hidden_size, cell_size, feature, lang, dataset_start,
        window_size, learning_window, embedding_size, rnn_type, num_layers,
        dropout, output_dropout)
    """
    int_keys = ('hidden_size', 'cell_size', 'window_size', 'learning_window',
                'embedding_size', 'num_layers')
    (hidden_size, cell_size, window_size, learning_window,
     embedding_size, num_layers) = (int(space[k]) for k in int_keys)
    # Doubly-nested entries: take the first element of the first sequence.
    feature = space['feature'][0][0]
    lang = space['lang'][0][0]
    rnn_type = space['rnn_type'][0][0]
    dataset_start = space['dataset_start']
    dropout = float(space['dropout'])
    output_dropout = float(space['output_dropout'])
    return (hidden_size, cell_size, feature, lang, dataset_start, window_size,
            learning_window, embedding_size, rnn_type, num_layers, dropout,
            output_dropout)
|
def pcc_vector(v1, v2):
    """Pearson Correlation Coefficient for 2 vectors.

    Returns None when the vectors differ in length.
    """
    if len(v1) != len(v2):
        return None
    mean1 = 1.0 * sum(v1) / len(v1)
    mean2 = 1.0 * sum(v2) / len(v2)
    # Covariance numerator and the two variance terms of the denominator.
    cov = sum((a - mean1) * (b - mean2) for a, b in zip(v1, v2))
    var1 = sum((a - mean1) ** 2 for a in v1)
    var2 = sum((b - mean2) ** 2 for b in v2)
    return cov / (var1 * var2) ** 0.5
|
def _linkify(value):
"""create link format"""
out = []
for link in value:
out.append(','.join(list(link)))
return '^'.join(out)
|
def rindex(lst, elem):
    """Get the last position of elem in lst, or 0 when elem is absent.

    :param lst: sequence to search
    :param elem: element to look for
    :return: index of the last occurrence, or 0 if not found
    """
    try:
        return len(lst) - lst[::-1].index(elem) - 1
    except ValueError:
        # Only "not found" is expected here; the old bare except also
        # swallowed KeyboardInterrupt and genuine bugs (e.g. bad types).
        return 0
|
def convert_ascii_code_to_decimal(ascii_code: int) -> int:
    """
    Converts ASCII code to six-bit decimal number.

    :raises ValueError: when ascii_code is outside the valid ranges
        48..87 and 96..119.
    """
    valid = 48 <= ascii_code <= 87 or 96 <= ascii_code <= 119
    if not valid:
        raise ValueError(f'Invalid ASCII {ascii_code}.')
    decimal = ascii_code - 48
    # Codes 96..119 map onto 40..63: skip the 88..95 gap.
    if decimal > 40:
        decimal -= 8
    return decimal
|
def clean_uid(uid):
    """
    Return a uid with all unacceptable characters replaced with underscores.

    Accepts bytes (decoded as UTF-8 first) or str.
    """
    try:
        uid = uid.decode('utf-8')
    except AttributeError:
        # Already a str: no decode() method.
        pass
    return uid.replace("/", "_").replace(":", "_")
|
def rename_with_schema(file_name):
    """
    This method is used to rename the file with schema postfix.

    :param file_name: type str
    :return: file name with '.csv' replaced by '_schema.json'
    """
    CSV_SUFFIX = '.csv'
    SCHEMA_SUFFIX = '_schema.json'
    return file_name.replace(CSV_SUFFIX, SCHEMA_SUFFIX)
|
def safe_col_name(args_pair):
    """Ensure that the column name is safe for SQL (unique value, no spaces, no trailing punctuation).

    Typically called with `df.columns = [*map(safe_col_name, enumerate(df.columns.to_list()))]`

    Args:
        args_pair: tuple of arguments from map function in `(idx, col)`

    Returns:
        string: safely formatted string for SQLite
    """
    idx, col = args_pair
    cleaned = col.strip()
    for char in (' ', '.', ','):
        cleaned = cleaned.replace(char, '_')
    # Fall back to the column index when nothing printable remains.
    return cleaned if cleaned else str(idx)
|
def parts(doc_urn):
    """A flat list of all "parts", which are defined as top-level colon-delimited parts, further
    split by dot-delimited parts, e.g.:
    urn:cts:copticLit:psathanasius.matthew20.budge:1:56
    ->
    ['urn', 'cts', 'copticLit', 'psathanasius', 'matthew20', 'budge', '1', '56']
    """
    # Dots and colons both delimit parts, so normalise dots to colons first.
    return doc_urn.replace(".", ":").split(":")
|
def url_to_unc(url: str):
    """Convert an smb:// or file:// URL to a UNC path."""
    replacements = (
        ("smb://", "\\\\"),
        ("file://", "\\\\"),
        ("/", "\\"),
        ("%20", " "),
    )
    result = url.strip()
    for old, new in replacements:
        result = result.replace(old, new)
    return result
|
def _get_regular_TYC_name(*args):
"""Convert a TYC name in *Tycho-2 Catalogue* to its regular form
`"TYC NNN-NNN-N"`.
"""
if len(args) == 1 and isinstance(args[0], str):
name = args[0].strip()
return 'TYC ' + name[3:].strip()
elif len(args) == 3:
return 'TYC '+'-'.join([str(args[0]), str(args[1]), str(args[2])])
else:
return None
|
def fixbreaks(value):
    """
    fixes line breaks to make markdown happy.
    Be careful. It won't play nice with para breaks.
    """
    # NOTE(review): markdown hard breaks conventionally need two trailing
    # spaces before '\n' — confirm the single space here is intentional.
    return value.replace('\n', ' \n')
|
def glyph_order(keys, draw_order=()):
    """
    Orders a set of glyph handles using regular sort and an explicit
    sort order. The explicit draw order must take the form of a list
    of glyph names while the keys should be glyph names with a custom
    suffix. The draw order may only match subset of the keys and any
    matched items will take precedence over other entries.

    Note: the default is an immutable empty tuple (the old mutable-list
    default is a classic shared-default pitfall).
    """
    keys = sorted(keys)
    # Precompute positions so the key function avoids an O(n) list.index
    # call per glyph.
    key_pos = {key: i for i, key in enumerate(keys)}

    def order_fn(glyph):
        matches = [item for item in draw_order if glyph.startswith(item)]
        if matches:
            return (draw_order.index(matches[0]), glyph)
        # Unmatched glyphs sort after all matched ones, in sorted order.
        return (1e9 + key_pos[glyph], glyph)

    return sorted(keys, key=order_fn)
|
def _c_string_literal(s):
"""
Convert a python string into a literal suitable for inclusion into C code
"""
# only these three characters are forbidden in C strings
s = s.replace('\\', r'\\')
s = s.replace('"', r'\"')
s = s.replace('\n', r'\n')
return '"{}"'.format(s)
|
def _is_executable_product(product):
"""Returns a boolean indicating whether the specified product dictionary is an executable product.
Args:
product: A `dict` representing a product from package description
JSON.
Returns:
A `bool` indicating whether the product is an executable.
"""
return "executable" in product["type"]
|
def command_deploy_file(ifname, ofname):
    """Returns shell command that deploys a file with calibration constants
    in the calib store."""
    return 'cat {} > {}'.format(ifname, ofname)
|
def _is_number(x):
"""Checker function for numeric inputs that include float."""
try:
float(x)
return True
except ValueError:
return False
|
def _cb_dist(ctx, param, value):
    """
    Click callback to ensure `--distance` can be either a float or a field name.

    :param ctx: click context (unused)
    :param param: click parameter object (unused)
    :param value: raw option value from the command line
    :return: float(value) when the value parses as a number, otherwise the
        original string (treated as a field name by the caller)
    """
    try:
        return float(value)
    except ValueError:
        # Not numeric: pass the value through as a field name.
        return value
|
def bisect_right(arr, target):
    """Returns the rightmost position that `target` should
    go to such that the sequence remains sorted.

    Delegates to the C-implemented stdlib `bisect` module instead of a
    hand-rolled binary search (same semantics, less code to maintain).
    """
    import bisect as _bisect
    return _bisect.bisect_right(arr, target)
|
def get_field_and_dtype_from_attribute_field(attribute_field) -> tuple:
    """
    Returns both the field and data type parts of attribute_field (e.g. 'file_size, int').
    If no dtype is given, a dtype of "str" is returned.

    Args:
        attribute_field: the name of the attribute field; either a string
            ("field" or "field, dtype") or a dict wrapping the same value
            under a "$const" or "$field" key.

    Returns:
        tuple: contains the (field, data type)
    """
    # isinstance replaces the old `type(x) == type({})` check: idiomatic and
    # also accepts dict subclasses.
    if isinstance(attribute_field, dict):
        if "$const" in attribute_field:
            ## NB: RECURSIVE CALL
            field, dtype = get_field_and_dtype_from_attribute_field(
                attribute_field["$const"]
            )
        elif "$field" in attribute_field:
            ## NB: RECURSIVE CALL
            field, dtype = get_field_and_dtype_from_attribute_field(
                attribute_field["$field"]
            )
        else:
            field, dtype = attribute_field, "str"
    elif "," in attribute_field:  # e.g., "file_size_bytes, int"
        field, dtype = attribute_field.split(",")
        field, dtype = field.strip(), dtype.strip()
    else:  # default to string datatype
        field, dtype = attribute_field.strip(), "str"
    return field, dtype
|
def compare_payload(json_payload, idrac_attr):
    """
    :param json_payload: json payload created for update operation; keys
        look like "Users.1#Password" — the attribute name follows '#'
    :param idrac_attr: idrac user attributes
    case1: always skip password for difference
    case2: as idrac_attr returns privilege in the format of string so
    convert payload to string only for comparision
    :return: bool -- True when an update is required
    """
    copy_json = json_payload.copy()
    for key, val in dict(copy_json).items():
        # Attribute name is the part after '#'.
        split_key = key.split("#")[1]
        if split_key == "Password":
            # A password in the payload always forces an update.
            is_change_required = True
            break
        if split_key == "Privilege":
            # Normalise Privilege to str so the set comparison below matches
            # the string form iDRAC reports.
            copy_json[key] = str(val)
        else:
            # Recomputed on each non-Privilege key; the value surviving the
            # loop comes from the last such key. NOTE(review): if the payload
            # is empty or contains only Privilege keys, is_change_required is
            # never bound and the return raises NameError — confirm callers
            # always include at least one other attribute.
            is_change_required = bool(list(set(copy_json.items()) - set(idrac_attr.items())))
    return is_change_required
|
def _find_missing_and_invalid_fields( required_fields, json_data ):
"""
Look for missing or invalid fields, and return them.
"""
missing = []
invalid = []
for req, types in required_fields.items():
if req not in json_data.keys():
missing.append( req )
if type(json_data[req]) not in types:
invalid.append( req )
return missing, invalid
|
def get_item_from_obj(config: dict, name_path: str) -> object:
    """
    Follow the dot-separated *name_path* down into *config* and return the
    value found there.

    For example, with
    config = {'dataset': {'kwargs': {'segments': {'train': (t0, t1)}}}}
    and name_path = "dataset.kwargs.segments.train.1", t1 is returned.

    Parameters
    ----------
    config : dict
        Nested configuration of dicts and indexable sequences.
    name_path : str
        e.g. "dataset.kwargs.segments.train.1"

    Returns
    -------
    object
        the retrieved object

    Raises
    ------
    ValueError
        when a path component is neither a dict key nor a digit index.
    """
    node = config
    for part in name_path.split("."):
        if isinstance(node, dict):
            node = node[part]
        elif part.isdigit():
            node = node[int(part)]
        else:
            raise ValueError(f"Error when getting {part} from cur_cfg")
    return node
|
def straight_line(x, m=1, c=0):
    """
    straight line equation: y = m*x + c
    """
    return m * x + c
|
def build_insert(table: str, to_insert: list):
    """
    Build an insert request.

    Parameters
    ----------
    table : str
        Table where query will be directed.
    to_insert: iterable
        The list of columns where the values will be inserted.

    Returns
    -------
    str
        Built query string (named-parameter placeholders, one per column).
    """
    columns = ", ".join("{}".format(column) for column in to_insert)
    placeholders = ", ".join(":{}".format(column) for column in to_insert)
    return 'INSERT INTO "{}" ({}) VALUES ({})'.format(table, columns, placeholders)
|
def convert_bool_value_to_yes_no_string(value):
    """Convert a boolean value into a 'Yes' or 'No' string representation.

    Parameters
    ----------
    value: Boolean value.

    Returns
    -------
    String "Yes" when truthy, otherwise "No" (default).
    """
    return "Yes" if value else "No"
|
def getmtime(pathname):
    """Modification timestamp of file or directory, or 0 if it does not exist."""
    from os.path import getmtime as _getmtime
    try:
        return _getmtime(pathname)
    except OSError:
        # Missing file / permission errors; the old bare except also
        # swallowed KeyboardInterrupt and genuine bugs.
        return 0
|
def num_in_numbits(num, numbits):
    """Does the integer `num` appear in `numbits`?

    `numbits` is indexed as a byte sequence of bit flags: bit (num % 8) of
    byte (num // 8) marks membership.

    Returns:
        A bool, True if `num` is a member of `numbits`.
    """
    byte_index, bit_index = divmod(num, 8)
    # Bits beyond the end of the byte string are simply absent.
    if byte_index >= len(numbits):
        return False
    return bool(numbits[byte_index] & (1 << bit_index))
|
def _create_postgres_url(db_user, db_password, db_name, db_host,
db_port=5432, db_ssl_mode=None,
db_root_cert=None):
"""Helper function to construct the URL connection string
Args:
db_user: (string): the username to connect to the Postgres
DB as
db_password: (string): the password associated with the
username being used to connect to the Postgres DB
db_name: (string): the name of the Postgres DB to connect
to
db_host: (string): the host where the Postgres DB is
running
db_host: (number, optional): the port to connect to the
Postgres DB at
db_ssl_mode: (string, optional): the SSL mode to use when
connecting to the Postgres DB
db_root_cert: (string, optional): the root cert to use when
connecting to the Postgres DB
Returns:
[string]: Postgres connection string
"""
ssl_mode = ''
if db_ssl_mode:
# see
# https://www.postgresql.org/docs/11/libpq-connect.html#
# LIBPQ-CONNECT-SSLMODE
ssl_mode = '?sslmode=%s' % (db_ssl_mode)
if db_root_cert:
ssl_mode += '&sslrootcert=%s' % (db_root_cert)
return ('postgresql://%(user)s:%(password)s@%(host)s:%(port)s/'
'%(db)s%(ssl)s' % {
'user': db_user,
'password': db_password,
'db': db_name,
'host': db_host,
'port': db_port,
'ssl': ssl_mode})
|
def parse_path(s3_datapath):
    """
    Return bucket and prefix from full s3 path.

    Parameters
    ----------
    s3_datapath : str
        path to a bucket.
        Should be of the form s3://bucket/prefix/.

    Returns
    -------
    tuple
        bucket and prefix.
    """
    # Drop the scheme ("s3:"), then split bucket from the rest.
    no_scheme = str(s3_datapath).split("//")[1]
    bucket, _, prefix = no_scheme.partition("/")
    return bucket.strip("/"), prefix
|
def circumference_area(radius):
    """Return the area of a circle with the given radius (pi * r**2).

    Uses math.pi rather than the former hard-coded 3.14159 approximation,
    which lost precision after the fifth decimal.
    """
    from math import pi
    return pi * radius * radius
|
def pto(pct):
    """Percentage to odds converter. Take a number like 35, return what
    odds it represents (x-to-one odds).
    """
    probability = pct / 100.0
    return (1 - probability) / probability
|
def common_sub(data):
    """(internal) Finds longest common substring for a list of strings.
    """
    best = ''
    if len(data) > 1 and len(data[0]) > 0:
        first = data[0]
        # Try every start/length slice of the first string, keeping the
        # longest one present in all strings (first-found wins ties).
        for start in range(len(first)):
            for length in range(len(first) - start + 1):
                candidate = first[start:start + length]
                if length > len(best) and all(candidate in s for s in data):
                    best = candidate
    return best
|
def is_retweet(tweet: dict):
    """Return True if the passed tweet is a retweet.

    :param tweet: JSON dict for tweet
    """
    inner = tweet["tweet"]
    try:
        text = inner["full_text"]
    except KeyError:
        # Older archive format stores the body under "text".
        text = inner["text"]
    return text.startswith("RT @")
|
def py3round(number):
    """Unified rounding in all python versions (round-half-to-even)."""
    nearest = round(number)
    # Exactly-half values: rounding number/2 and doubling gives the same
    # half-to-even result on both Python 2 and Python 3.
    if abs(nearest - number) == 0.5:
        return int(2.0 * round(number / 2.0))
    return int(nearest)
|
def extract_gff3_record_id_from_info_field(info):
    """Helper function to extract GFF3 record ID from info string.

    Returns None when no 'ID=' tag is present.
    """
    return next((tag[3:] for tag in info.split(';') if tag.startswith('ID=')), None)
|
def _print_field(field_name, field_value, capitals=False):
    """
    Print a field in bib format if value is not none.
    :param field_name: name of the field
    :param field_value: value of the field
    :param capitals: whether to add an extra pair of braces around the value
        (BibTeX convention to preserve capitalisation)
    :return: field in bib format or blank if field is None
    """
    if field_value is not None:
        # field_value = str(u' '.join((field_value, '')).encode('utf-8').strip())
        # Escape characters special to BibTeX/LaTeX. NOTE(review): "\_" is
        # not a recognised Python escape (it stays backslash+underscore but
        # warns on newer Pythons), and the "\\\\_" / "\\\\#" patterns can
        # only match input already containing two literal backslashes before
        # the character — confirm these chains do what was intended.
        field_value = str(field_value).replace("_", "\_")
        field_value = str(field_value).replace("\\\\_", "\_")
        field_value = str(field_value).replace("#", "\#")
        field_value = str(field_value).replace("\\\\#", "\#")
        field_value = str(field_value).replace("$", "")
        if capitals:
            return "\t%s = {{%s}},\n" % (field_name, field_value)
        else:
            return "\t%s = {%s},\n" % (field_name, field_value)
    return ""
|
def linspace(start, stop, np):
    """
    Emulate Matlab linspace: np evenly spaced samples from start to stop inclusive.
    """
    span = stop - start
    denom = np - 1
    return [start + span * i / denom for i in range(np)]
|
def sanitize_branch_name(branch_name):
    """Replace punctuation that cannot be in semantic version from a branch name with dashes."""
    # Single-pass character translation of '/' and '_' to '-'.
    return branch_name.translate(str.maketrans("/_", "--"))
|
def varify_label(temp_symbols):
    """
    Map a segment's per-beat annotation symbols to a single class label.

    Parameters
    ----------
    temp_symbols : list
        temp_symbols contains the label of the segment that is for every
        heart beats of the segment.

    Returns
    -------
    varified_label : str
        One of 'N', 'S', 'V', 'F', 'Q' (or the majority non-normal symbol
        unchanged when it falls outside the known groups).
    """
    NORMAL_SYMBOLS = {'N', 'L', 'R', 'e', 'j'}
    abnormal = [sym for sym in temp_symbols if sym not in NORMAL_SYMBOLS]
    if len(temp_symbols) == 0:
        label = 'Q'
    elif len(abnormal) == 0:
        label = 'N'
    else:
        # Majority vote among the non-normal symbols (first wins ties).
        label = max(abnormal, key=abnormal.count)
    # Collapse equivalent symbols into the five output classes. Each mapped
    # output ('N', 'S', 'V', 'F', 'Q') is a fixed point, so a single lookup
    # matches the original sequential if-chain.
    remap = {
        '~': 'N', 's': 'N', 'T': 'N',
        'A': 'S', 'a': 'S', 'J': 'S', 'S': 'S',
        'V': 'V', 'E': 'V',
        'F': 'F',
        '/': 'Q', 'f': 'Q', 'Q': 'Q', 'P': 'Q',
    }
    return remap.get(label, label)
|
def _look_ahead (index_sentence,context, maximum) :
"""Generate the look ahead context starting with the sentence index and looking no more
than max number of sentences"""
context_pairs = []
for i in range(1, context+1):
s_index = index_sentence+i
if s_index<=maximum:
context_pairs.append((index_sentence, s_index))
return context_pairs
|
def ele_Q(w, q, n):
    """
    Impedance of a constant phase element (CPE): Z = 1 / (q * (j*w)**n).

    :param
        w: Angular frequency [1/s], (s:second)
        q: CPE coefficient, Constant phase element [s^n/ohm]
           or named as CPE_T
        n: Constant phase element exponent [-]
           or named as CPE_P
    :return:
        Zcpe: Impedance of a Constant phase element
    """
    jw_to_n = (1j * w) ** n
    return 1 / (q * jw_to_n)
|
def check_int(school, international):
    """
    Checks if a school is an international school based on a school list and other criteria

    Parameters
    ----------
    school: current school being checked; dict with 'name', 'degree',
        'field_of_study' and 'grades' entries (the last three may be None)
    international: AP/IB school list; dict with 'AP' and 'IB' lists of names

    Returns
    -------
    Y or N -- 'Y (AP)' / 'Y (IB)' for exact list matches, 'Y' for keyword
    matches, otherwise 'N'
    """
    # list of criteria that could qualify a school as an international school even if it is not on the school lists
    int_criteria = ["International Baccalaureate", "IBDP", "IB", "IB Score", "A Level",
                    "A-Level", "French Baccalaureate", "A Levels", "A*"]
    school_name = school['name']
    degree_name = school['degree']
    # Normalise missing fields to '' so the .lower() substring checks are safe.
    if degree_name is None: degree_name = ''
    field_of_study = school['field_of_study']
    if field_of_study is None: field_of_study = ''
    grades = school['grades']
    if grades is None: grades = ''
    # Exact (case-insensitive) name match against the curated AP/IB lists.
    if any(element.lower() == school_name.lower() for element in international["AP"]):
        int_hs = 'Y (AP)'
    elif any(element.lower() == school_name.lower() for element in international["IB"]):
        int_hs = 'Y (IB)'
    # Otherwise look for international keywords in any of the text fields.
    elif any(element.lower() in school_name.lower() for element in int_criteria) or \
        any(element.lower() in degree_name.lower() for element in int_criteria) or \
        any(element.lower() in field_of_study.lower() for element in int_criteria) or \
        any(element.lower() in grades.lower() for element in int_criteria):
        int_hs = 'Y'
    else:
        int_hs = 'N'
    return int_hs
|
def not_none(itemlist):
    """Return true only if no values of None are present (recurses into
    nested lists and tuples)."""
    if itemlist is None:
        return False
    if not isinstance(itemlist, (tuple, list)):
        return True
    return all(not_none(item) for item in itemlist)
|
def is_right_bracket(token):
    """Return True when the given token is a closing parenthesis."""
    return ")" == token
|
def _text_has_spaces(text):
"""
:param text:
:type text: str
:return: True if text has any space or false otherwise.
:rtype: bool
"""
words = text.split(" ")
if len(words) > 1:
return True
else:
return False
|
def get_interval(value, num_list):
    """
    Find the interval of a sorted list that brackets `value`.

    Out-of-range values collapse to a degenerate interval at the nearest
    endpoint; exact endpoint hits return the first/last real interval.
    """
    lowest = num_list[0]
    highest = num_list[-1]
    # Clamp values that fall outside the list to a degenerate interval.
    if value < lowest:
        return (lowest, lowest)
    if value > highest:
        return (highest, highest)
    # Exact endpoint matches get the adjacent interval.
    if value == lowest:
        return (num_list[0], num_list[1])
    if value == highest:
        return (num_list[-2], num_list[-1])
    # Strictly interior: first element >= value closes the interval.
    for pos, bound in enumerate(num_list):
        if value <= bound:
            return (num_list[pos - 1], bound)
|
def convert_string_to_int_keys(input_dict: dict) -> dict:
    """
    Returns the dict with integer keys instead of string keys
    Parameters
    ----------
    input_dict: dict
    Returns
    -------
    dict
    """
    return dict((int(key), value) for key, value in input_dict.items())
|
def all_equal(t):
    """
    True if all elements in a table are equal.

    NOTE: also requires the first element not to be a space character;
    this quirk is preserved from the original implementation.
    """
    if t[0] == ' ':
        return False
    # A sequence equals its own one-step shift iff all elements match.
    return t[1:] == t[:-1]
|
def isfloat(s):
    """Return True when `s` can be converted to a float, else False."""
    try:
        float(s)
    except (ValueError, TypeError):
        return False
    return True
|
def float_padding(length, val, decimals=2):
    """Zero-pad `val` on the left to total width `length` with `decimals` precision."""
    spec = '0>{}.{}f'.format(length, decimals)
    return format(float(val), spec)
|
def dss_is_geo(dss_schema):
    """
    Check if the input dataset contains a geopoint (DSS storage type) column
    If so, specific processing will be applied later on
    :param dss_schema: schema of a dss dataset
    >>> dss_schema = {"columns": [{"name": "customer_id", "type": "bigint"}]}
    :return Boolean{input dataset contains at least one geopoint column}
    """
    return any(col['type'] == 'geopoint' for col in dss_schema['columns'])
|
def ExpandPattern(pattern, it):
    """Return list of expanded pattern strings.
    Each string is created by replacing all '%' in |pattern| with element of |it|.
    """
    return [pattern.replace("%", substitution) for substitution in it]
|
def get_counts_and_averages(ID_and_ratings_tuple):
    """Given a tuple (bookID, ratings_iterable)
    returns (bookID, (ratings_count, ratings_avg))

    Accepts any iterable of ratings (not only sized containers) by
    materializing it once.  An empty ratings iterable yields (0, 0.0)
    instead of raising ZeroDivisionError.
    """
    book_id, ratings = ID_and_ratings_tuple
    # Materialize so generators can be both counted and summed.
    ratings = list(ratings)
    nratings = len(ratings)
    if nratings == 0:
        return book_id, (0, 0.0)
    return book_id, (nratings, float(sum(ratings)) / nratings)
|
def format_dict(adict, prefix='', indent='  ', bullets=('* ', '* '),
                excludes=('_', ), linelen=79):
    """Return pretty-print of nested dictionary.

    Keys are sorted case-insensitively; keys starting with any prefix in
    *excludes* are skipped.  Nested dicts are rendered recursively with
    *indent* appended to the prefix; non-dict lines are truncated to
    *linelen* characters.  bullets[1] marks dict-valued keys, bullets[0]
    marks leaf values.
    """
    result = []
    for k, v in sorted(adict.items(), key=lambda x: x[0].lower()):
        if any(k.startswith(e) for e in excludes):
            continue
        if isinstance(v, dict):
            # Bug fix: forward indent/bullets/linelen to the recursive call
            # (previously they silently reset to defaults for nested levels).
            v = '\n' + format_dict(v, prefix=prefix+indent, indent=indent,
                                   bullets=bullets, excludes=excludes,
                                   linelen=linelen)
            result.append(prefix + bullets[1] + '%s: %s' % (k, v))
        else:
            result.append(
                (prefix + bullets[0] + '%s: %s' % (k, v))[:linelen].rstrip())
    return '\n'.join(result)
|
def is_similar_mag(a, b, small=1E-4):
    """
    Return True when `a` and `b` differ by no more than `small`.
    """
    difference = a - b
    if difference < 0:
        difference = -difference
    return difference <= small
|
def gcd(x, y):
    """Calculate the greatest common divisor of two numbers (Euclid's algorithm)."""
    a, b = x, y
    while b:
        a, b = b, a % b
    return a
|
def createYZlabels(y, z, ylabel, yunits, zlabel, zunits):
    """
    Checks that y and z labels are appropriate and tries to make some if they are not.

    When a coordinate array is None, fall back to index labels ('j'/'k');
    otherwise use physical labels (Latitude / Elevation) with units.
    Only None labels/units are replaced; supplied values pass through.
    """
    if y is None:
        ylabel = 'j' if ylabel is None else ylabel
        yunits = '' if yunits is None else yunits
    else:
        ylabel = 'Latitude' if ylabel is None else ylabel
        yunits = r'$\degree$N' if yunits is None else yunits
    if z is None:
        zlabel = 'k' if zlabel is None else zlabel
        zunits = '' if zunits is None else zunits
    else:
        zlabel = 'Elevation' if zlabel is None else zlabel
        zunits = 'm' if zunits is None else zunits
    return ylabel, yunits, zlabel, zunits
|
def check_cross_turn_repetition(context, pred, is_cn=False):
    """Check the cross-turn repetition.
    Calculate tri-gram repetition.
    Args:
        context: Words or tokens or token_ids.
        pred: Words or tokens or token_ids.
        is_cn: Chinese version repetition detection. If true, calculate repetition on characters.
    Returns:
        Whether the cross-turn repetition is detected.
    """
    # Lower-case string tokens so comparison is case-insensitive.
    if isinstance(pred[0], str):
        context = [[word.lower() for word in turn] for turn in context]
        pred = [word.lower() for word in pred]
    # For Chinese, compare on joined character sequences instead of tokens.
    if is_cn:
        context = ["".join(turn) for turn in context]
        pred = "".join(pred)
    seen_tri_grams = {tuple(pred[start:start + 3])
                      for start in range(len(pred) - 2)}
    return any(tuple(turn[start:start + 3]) in seen_tri_grams
               for turn in context
               for start in range(len(turn) - 2))
|
def _replacer(x, pad_size):
"""Replace a number with its padded hexadecimal representation. Used to tag
temporary variables with their calling scope's id.
"""
# get the hex repr of the binary char and remove 0x and pad by pad_size
# zeros
try:
hexin = ord(x)
except TypeError:
# bytes literals masquerade as ints when iterating in py3
hexin = x
return hex(hexin).replace('0x', '').rjust(pad_size, '0')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.