def postprocess_output(model_output, narrative_token, dialog_token, eos_token):
"""
    Performs the reverse of preprocess_input. Removes narrative, dialog, and eos special tokens from
model output after tokenizer decoding. Text between a narrative_token and eos_token gets
surrounded with '***'.
"""
#Replace those eos tokens which immediately follow a narrative_token with "***"
narrative_token_idx = -len(narrative_token)
while True:
narrative_token_idx = model_output.find(narrative_token,
narrative_token_idx + len(narrative_token))
if narrative_token_idx == -1:
break
eos_token_idx = model_output.find(eos_token, narrative_token_idx)
if eos_token_idx > -1:
model_output = (model_output[:eos_token_idx] + "***" +
model_output[(eos_token_idx + len(eos_token)):])
#Substitute all the remaining special tokens
model_output = (model_output.replace(narrative_token, " ***")
.replace(dialog_token, " ")
.replace(eos_token, "")
.strip())
return model_output
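# A sanity check of the round trip; the token spellings "<nar>", "<dlg>", "<eos>"
# are hypothetical placeholders, not the tokenizer's actual special tokens:
out = postprocess_output("Hi<dlg>he said<eos><nar>waves<eos>", "<nar>", "<dlg>", "<eos>")
assert out == "Hi he said ***waves***"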
|
def getKeys(dct, merge=False):
"""
    Get dictionary (or list) keys for Python 3
    Input:
    > dct: A dictionary or list
    > merge: If True, return all keys in one list (dicts only)
    Output:
    > The first key, all other keys
    """
    if not isinstance(dct, (dict, list)):
        print("Argument is not a dictionary or list")
        return None
    if isinstance(dct, list):
        fKey = dct[0]
        lKeys = dct[1:]
    else:
        allKeys = list(dct.keys())
        if merge is True:
            return allKeys
        fKey = allKeys[0]
        lKeys = allKeys[1:]
return fKey, lKeys
|
def load_patents(file):
""" Read and split patent file.
Parameters
----------
file : str, file-like
Path to, or file-handle to file containing patents in stupid
text format.
Returns
-------
list[str]
List of patents in stupid text-format.
"""
if isinstance(file, str):
try:
with open(file, encoding='utf-8') as f:
contents = f.read()
except OSError:
contents = file
else:
contents = file.read()
if isinstance(contents, bytes):
contents = contents.decode('utf-8')
if '\r\n' in contents:
patents = contents.split('PATENT\r\n')
else:
patents = contents.split('PATENT\n')
if not patents[0]:
patents = patents[1:]
return patents
|
def date_str(year, month, day, hour=0, minute=0, second=0., microsecond = None):
"""
Creates an ISO 8601 string.
"""
# Get microsecond if not provided
if microsecond is None:
if type(second) is float:
microsecond = int((second - int(second)) * 1000000)
else:
microsecond = 0
# Convert types
year = int(year)
month = int(month)
day = int(day)
hour = int(hour)
minute = int(minute)
second = int(second)
microsecond = int(microsecond)
    # ISO 8601 template (zero-padded; without padding the string is not valid ISO 8601)
    tmp = '{year:04d}-{month:02d}-{day:02d}T{hour:02d}:{minute:02d}:{second:02d}.{microsecond:06d}'
    return tmp.format(year=year, month=month, day=day,
                      hour=hour, minute=minute, second=second, microsecond=microsecond)
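# Example (with the zero-padded template, fractional seconds carry into microseconds):
assert date_str(2021, 3, 5, 7, 8, 9.25) == "2021-03-05T07:08:09.250000"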
|
def xor(arr1, arr2):
"""XOR two byte arrays. Shorter array should be passed second."""
if len(arr2) < len(arr1):
        l1, l2 = len(arr1), len(arr2)
        arr2 = arr2 * (l1 // l2) + arr2[:l1 % l2]
return [c1 ^ c2 for c1, c2 in zip(arr1, arr2)]
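# A quick usage check of the recycling behavior (illustrative only):
assert xor([1, 2, 3, 4], [1, 1]) == [0, 3, 2, 5]  # the 2-byte key is cycled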
|
def ngrams(seq, min_n, max_n):
"""
Return min_n to max_n n-grams of elements from a given sequence.
"""
text_len = len(seq)
res = []
for n in range(min_n, min(max_n + 1, text_len + 1)):
for i in range(text_len - n + 1):
res.append(seq[i: i + n])
return res
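# Works on any sliceable sequence; for example, on a string:
assert ngrams("abc", 1, 2) == ['a', 'b', 'c', 'ab', 'bc']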
|
def makeFields(prefix, n):
"""Generate a list of field names with this prefix up to n"""
    return [prefix + str(i) for i in range(1, n + 1)]
|
def file_size_format(size, suffix="B"):
"""Given a file size, return a string with its standard compressed form."""
for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
if size < 1024:
break
size /= 1024
    else:
        unit = "Y"  # keep the unit label consistent with the shorter prefixes
return f"{size:.1f} {unit}{suffix}"
|
def index_containing_substring(list_str, substring):
"""For a given list of strings finds the index of the element that contains the
substring.
Parameters
----------
list_str: list of strings
substring: substring
Returns
-------
    index: index of the first element containing the substring, or -1 if none does
"""
for i, s in enumerate(list_str):
if substring in s:
return i
return -1
|
def make_album(artist, title, tracks=0):
"""Build a dictionary containing information about an album."""
album_dict = {
'artist': artist.title(),
'title': title.title(),
}
if tracks:
album_dict['tracks'] = tracks
return album_dict
|
def badness(page_width, word_lengths, i, j):
"""
Get the "badness" of a line of text
given the width of the page you are
trying to fit it on
"""
total_width = sum(word_lengths[i-1:j]) + j - i + 1
if page_width < total_width:
return float('inf')
return (page_width - total_width) ** 3
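# Worked check of the 1-indexed convention: words 1..2 have lengths [3, 2];
# with the j - i + 1 space accounting the line is 7 wide, leaving slack 3:
assert badness(10, [3, 2, 4], 1, 2) == 27  # (10 - 7) ** 3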
|
def get_folded_phase(t,best_period):
"""Fold the observation times with the best period of the variable signal."""
# light curve over period, take the remainder (i.e. the "phase" of one period)
phase = (t / best_period) % 1
return phase
|
def get_train_valid_test_split_(splits_string, size):
""" Get dataset splits from comma or '/' separated string list."""
splits = []
if splits_string.find(',') != -1:
splits = [float(s) for s in splits_string.split(',')]
elif splits_string.find('/') != -1:
splits = [float(s) for s in splits_string.split('/')]
else:
splits = [float(splits_string)]
while len(splits) < 3:
splits.append(0.)
splits = splits[:3]
splits_sum = sum(splits)
assert splits_sum > 0.0
splits = [split / splits_sum for split in splits]
splits_index = [0]
for index, split in enumerate(splits):
splits_index.append(splits_index[index] +
int(round(split * float(size))))
diff = splits_index[-1] - size
for index in range(1, len(splits_index)):
splits_index[index] -= diff
assert len(splits_index) == 4
assert splits_index[-1] == size
return splits_index
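# For example, a "98,2,0" split of a 1000-sample dataset yields index boundaries
# train [0, 980), valid [980, 1000), test [1000, 1000) (empty):
assert get_train_valid_test_split_('98,2,0', 1000) == [0, 980, 1000, 1000]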
|
def boolean(val):
"""Convert the given value to a boolean True/False value, if it isn't already.
True values are '1', 'yes', 'true', and 'on' (case insensitive), everything
else is False."""
return str(val).lower() in ('1', 'yes', 'true', 'on')
|
def UnderscoreToCamelCase(under_score):
"""Helper function which converts under_score names to camelCase.
In proto buffers, fields have under_scores. In Javascript, fields
have camelCase.
Args:
under_score: A name, segmented by under_scores.
Returns:
A name, segmented as camelCase.
"""
segments = under_score.split('_')
return '%s%s' % (segments[0], ''.join([s.title() for s in segments[1:]]))
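# e.g. "foo_bar_baz" -> "fooBarBaz"; note that str.title() lowercases the tail
# of each segment, so "request_URL" would become "requestUrl":
assert UnderscoreToCamelCase("foo_bar_baz") == "fooBarBaz"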
|
def default_context(plugin, context):
"""
Return the default context for plugins rendered with a template, which
simply is a single variable named ``plugin`` containing the plugin
instance.
"""
return {"plugin": plugin}
|
def raw_unicode(raw):
"""Make a unicode string from a raw string.
This function takes a string containing unicode escape characters,
and returns the corresponding unicode string. Useful for writing
unicode string literals in your python source while being upwards-
compatible with Python 3. For example, instead of doing this:
s = u"hello\u2149" # syntax error in Python 3
Or this:
s = "hello\u2149" # not what you want in Python 2.x
You can do this:
s = raw_unicode(r"hello\u2149") # works everywhere!
"""
return raw.encode("utf8").decode("unicode-escape")
|
def _maybe_timed(x):
"""Unpack (est, fit_time) tuples if provided"""
return x if isinstance(x, tuple) and len(x) == 2 else (x, 0.0)
|
def _fixh5path(h5path):
"""Fix an h5path for use in h5py file objects"""
if h5path in [None, '', ' ', '/']:
h5path = '/'
if h5path[0] != '/':
h5path = '/' + h5path
if h5path[-1] != '/':
h5path = h5path + '/'
return h5path
|
def singleInstanceFromTimingRef(dictOfDF):
"""
Parameters
----------
dictOfDF : TYPE
DESCRIPTION.
Returns
-------
dictOfDF_NoTiming : Dictionary of DataFrames
Removes timing reference chirp at beginning of sweep.
"""
dictOfDF_NoTiming = {}
for count, key in enumerate(dictOfDF):
# finds index where the timing signal ends/begins for the front/rear timing signal respectively
timingSig_index_front = int(dictOfDF.get(key)[dictOfDF.get(key)['V_elec-'].gt(0.3)].index[0])#+dictOfDF.get(key).attrs['fs']*.5)
timingSig_index_back = dictOfDF.get(key)['V_elec-'].shape[0] - int(dictOfDF.get(key)[dictOfDF.get(key)['V_elec-'].iloc[::-1].reset_index(drop=True)
.gt(0.3)].index[0])#+dictOfDF.get(key).attrs['fs']*.5)
#create a dict of df with timing signals removed from beginning and end of the signal.
dictOfDF_NoTiming[key] = dictOfDF.get(key)[timingSig_index_front:timingSig_index_back].reset_index(drop=True)
# #find exact location of beginning of sweep ( .gt commands are the cutoff voltages)
# SweepStart = int(dictOfDF_NoTiming.get(key)[dictOfDF_NoTiming.get(key)['V_elec-'].gt(0.5)].index[0])
# SweepEnd = dictOfDF_NoTiming.get(key)['V_elec-'].shape[0] - int(dictOfDF_NoTiming.get(key)[dictOfDF_NoTiming.get(key)['V_elec-'].iloc[::-1].reset_index(drop=True)
# .gt(0.5)].index[0])
# dictOfDF_NoTiming[key] = dictOfDF_NoTiming.get(key)[SweepStart:SweepEnd].reset_index(drop=True)
# dictOfDF_NoTiming.get(key)['Time'] = dictOfDF_NoTiming.get(key)['Time'] - dictOfDF_NoTiming.get(key)['Time'][0]
print('SingleInstanceFromRef {} of {}'.format(count+1, len(dictOfDF)))
return dictOfDF_NoTiming
|
def calc_dx(x):
"""Computes the grid spacing."""
dx = x[1] - x[0]
return dx
|
def print_grad(named_parameters):
"""
visualize grad
"""
total_norm = 0
param_to_norm = {}
param_to_shape = {}
for n, p in named_parameters:
if p.grad is not None:
param_norm = p.grad.data.norm(2)
total_norm += param_norm ** 2
param_to_norm[n] = param_norm
param_to_shape[n] = p.size()
total_norm = total_norm ** (1. / 2)
print('---Total norm {:.3f} -----------------'.format(total_norm))
for name, norm in sorted(param_to_norm.items(), key=lambda x: -x[1]):
print("{:<50s}: {:.3f}, ({})".format(name, norm, param_to_shape[name]))
print('-------------------------------', flush=True)
return total_norm
|
def determine_anisotropy_classification(branch_classification: str) -> int:
"""
Return value based on branch classification.
Only C-C branches have a value, but this can be changed here.
Classification can differ from 'C - C', 'C - I', 'I - I' (e.g. 'C - E') in
which case a value (0) is still returned.
:param branch_classification: Branch classification string.
:return: Classification encoded as integer.
E.g.
>>> determine_anisotropy_classification("C - C")
1
>>> determine_anisotropy_classification("C - E")
0
"""
    return 1 if branch_classification == "C - C" else 0
|
def humanReadable(size_in_bytes):
"""Returns sizes in human-readable units."""
    try:
        size_in_bytes = int(size_in_bytes)
    except (ValueError, TypeError):
        size_in_bytes = 0
    units = [(" KB", 10**6), (" MB", 10**9), (" GB", 10**12), (" TB", 10**15)]
    for suffix, limit in units:
        if size_in_bytes > limit:
            continue
        return str(round(size_in_bytes / float(limit / 2**10), 1)) + suffix
    # Anything beyond the last limit falls back to TB instead of returning None
    suffix, limit = units[-1]
    return str(round(size_in_bytes / float(limit / 2**10), 1)) + suffix
|
def calculate_change(old_val, new_val):
"""
Return a float representing the decimal change between old_val and new_val.
"""
if old_val == 0 and new_val == 0:
return 0.0
if old_val == 0:
return float(new_val - old_val) / (float(old_val + new_val) / 2)
return float(new_val - old_val) / abs(old_val)
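# Two illustrative cases: an ordinary relative change, and the old_val == 0 case,
# which falls back to a percent-difference style denominator (the mean of both values):
assert calculate_change(50, 75) == 0.5
assert calculate_change(0, 10) == 2.0  # 10 / ((0 + 10) / 2)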
|
def capitalize_word(word):
"""
Capitalize the first letter of the word.
:param word: a string input.
:return: the word in title form.
"""
return word.title()
|
def handle033a(tokens):
"""
Processes the 033A (Ort und Verlag) field. Currently, only subfield p and n are supported.
For details (in German), see: https://www.gbv.de/bibliotheken/verbundbibliotheken/02Verbund/01Erschliessung/02Richtlinien/01KatRicht/4030.pdf
:param tokens: a list of tokens of the field 033A
:return: a tuple in form of (<publisher>,<place(s)>)
"""
ort=""
verlag=""
for token in tokens:
if token.startswith("p"):
ort=token[1:].replace("@","").strip()
elif token.startswith("n"):
verlag=token[1:].replace("@","").strip()
return((verlag,ort))
|
def writeBenchScript(dir, bench):
"""
This method creates a script in dir which will be eventually
passed to the simulated system (to run a specific benchmark
at bootup).
"""
    file_name = '{}/run_{}'.format(dir, bench)
    with open(file_name, "w+") as bench_file:
        bench_file.write('/home/gem5/NPB3.3-OMP/bin/{} \n'.format(bench))
        # sleeping for some time (5 seconds here) makes sure
        # that the benchmark's output has been
        # printed to the console
        bench_file.write('sleep 5 \n')
        bench_file.write('m5 exit \n')
return file_name
|
def div(n,length):
"""
"""
result = []
for i in range(length+1):
if i % n == 0 or i == length:
result.append(i)
return result
|
def sizeof_fmt(num, suffix="B"):
"""
Returns the filesize as human readable string.
https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-
readable-version-of-file-size
"""
for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, "Yi", suffix)
|
def occurrences(resp_json, return_obj, options):
"""Taxa in time and space."""
# from ..elc import ages
# Currently SEAD does not support age parameterization
# The database also does not include age in the occurrence response
# factor = ages.set_age_scaler(options=options, db='pbdb')
for rec in resp_json:
data = dict()
data.update(occ_id='sead:occ:{0:d}'.format(rec.get('occ_id')))
data.update(taxon_id='sead:txn:{0:d}'.format(rec.get('taxon_id')))
data.update(locale_id='sead:dst:{0:d}'.format(rec.get('locale_id')))
# Family, Genus or Species name
if rec.get('taxon'):
name = rec.get('taxon').split()
if len(name) == 1:
data.update(taxon=name[0].capitalize())
else:
gs_name = ' '.join(name[1:])
data.update(taxon=gs_name)
else:
data.update(taxon=None)
# Source information
source = ''
if rec.get('sample_name'):
source = 'Sample: {0:s}'.format(str(rec.get('sample_name')))
if rec.get('locale_name'):
source = '{0:s}, Site: {1:s}'.format(source,
rec.get('locale_name'))
data.update(source=source)
# Ages not yet available from SEAD
data.update(max_age=None)
data.update(min_age=None)
# Geography (modern coordinates)
data.update(lat=rec.get('lat'))
data.update(lon=rec.get('lon'))
data.update(data_type=None)
data.update(elevation=None)
return_obj.append(data)
return return_obj
|
def get_numerical_value(expression):
"""
    Recursively strip enclosing parentheses and check if the expression
    can be treated as a numerical value.
    :param expression: the expression string
    :return: the float value, or None if the expression is not numerical
"""
if expression.startswith("(") and expression.endswith(")") and (len(expression) > 2):
new_expression = expression[1:-1]
return get_numerical_value(new_expression)
else:
try:
return float(expression)
except ValueError:
return None
|
def _is_manager_secured(response_history):
""" Checks if the manager is secured (ssl enabled)
The manager is secured if the request was redirected to https
"""
if response_history:
first_response = response_history[0]
return first_response.is_redirect \
and first_response.headers['location'].startswith('https')
return False
|
def compute_artist_rating(ratings):
"""Returns an artist rating based on the ratings of its albums."""
return float(sum(ratings)) / max(len(ratings), 1)
|
def merge_reduce(source_dict, other_dict, func):
"""
    merge the reduce results that share the same key
    Args:
        source_dict: dict merged into (modified in place)
        other_dict: dict merged from
        func: the function used to combine two values that share a key
    Returns:
        dict1: result after merge
"""
dict1_keys = source_dict.keys()
for k, v in other_dict.items():
if k in dict1_keys:
source_dict[k] = func(source_dict[k], v)
else:
source_dict[k] = v
return source_dict
|
def add_phasing_err(chrA, chrB, snp_pos, errors_positions):
"""
Given two haplotypes with the snp positions (as in the msmc input format)
as well as the positions of some phasing errors, this method will output
two new chromosomes switched at positions where phasing errors happen.
"""
snp_pos = [int(p) for p in snp_pos]
if errors_positions == []:
return (chrA, chrB)
newA = []
newB = []
A = chrA
B = chrB
start_pos = 0
curr_pos = 0
err_index = 0
while curr_pos < len(snp_pos):
if (snp_pos[curr_pos] >= errors_positions[err_index]):
# if phasing error
newA.append(A[start_pos:curr_pos])
newB.append(B[start_pos:curr_pos])
start_pos = curr_pos
# Invert the chromosomes as many times as there are phasing errors
while (snp_pos[curr_pos] >= errors_positions[err_index]):
(A, B) = (B, A)
err_index += 1
if err_index == len(errors_positions):
curr_pos = len(snp_pos) # make sure that it will not enter
# the first while next time
break # quit current while
curr_pos +=1
# Copy the rest of the haplotypes to the new chromosomes
newA.append(A[start_pos:])
newB.append(B[start_pos:])
return(''.join(newA), ''.join(newB))
|
def interpolate(t: float, a: float, b: float, fa: float, fb: float) -> float:
"""interpolate between a and b"""
return (t-a) * (fa - fb)/(b-a) + fa
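# Endpoints pinned (t = a gives fa, t = b gives fb); the midpoint lands halfway:
assert interpolate(0.5, 0.0, 1.0, 2.0, 4.0) == 3.0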
|
def subtract_hour(string):
"""Subtracts an hour from the string - 24 hour format."""
if string == "0":
return "23"
hour = int(string)
return str(hour - 1)
|
def Cornfeld_mean(*variables):
"""Return the mean of a sample using Cornfeld's method.
Input: *float (all variables as arguments)
Output: int
"""
return (max(variables) + min(variables)) / 2
|
def iterable(item):
"""If item is iterable, returns item. Otherwise, returns [item].
Useful for guaranteeing a result that can be iterated over.
"""
try:
iter(item)
return item
except TypeError:
return [item]
|
def sources(capacity_factor: bool = True):
"""
This function provides the links to the sources used
    for obtaining certain information. Each argument
    can be set to the appropriate boolean depending
    on which sources are required.
Parameters:
-----------
capacity_factor: bool
This argument determines whether the sources for the
capacity factor data will be returned. Default is True.
"""
if not isinstance(capacity_factor, bool):
raise TypeError(
"Argument 'capacity_factor' must be of type 'bool'."
)
if capacity_factor is True:
print(
'Capacity Factor Sources:'
)
print(
'https://www.statista.com/statistics/183680/us-aver' +
'age-capacity-factors-by-selected-energy-source-since-1998/'
)
print(
'https://www.eia.gov/electricity/monthly/epm_table_grapher.ph' +
'p?t=epmt_6_07_a'
)
print(
'https://www.hydrogen.energy.gov/pdfs/review16/tv016_saur_2016' +
'_p.pdf'
)
return None
|
def size_fmt(num: float, suffix='B') -> str:
"""Returns readable str form of given size number (e.g. bytes) in more readable higher form, e.g. 1_000_000 -> 976.6KiB"""
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
|
def is_valid_ascii(path):
"""Check if string is valid ascii"""
try:
path.decode('ascii')
valid= True
except UnicodeDecodeError:
valid= False
return valid
|
def is_sublist(l1, l2, pos=False):
"""
    Check if l1 is a contiguous sublist of l2 (l2 contains all elements from l1, consecutively and in the same order).
    If pos is True, return the position where the sublist starts (or -1), not a boolean value.
"""
if l1 == []:
return 0 if pos else True
elif l1 == l2:
return 0 if pos else True
elif len(l1) > len(l2):
return -1 if pos else False
for i in range(len(l2)):
if l2[i] == l1[0]:
n = 1
while (n < len(l1)) and (i + n < len(l2)) and (l2[i + n] == l1[n]):
n += 1
if n == len(l1):
return i if pos else True
return -1 if pos else False
|
def parse_arguments(*args):
"""
    Parse arguments into a dictionary.
    Dict arguments are merged in; any other argument is
    inserted as both key and value.
    Returns
    -------
    dict
        Dictionary built from all arguments.
    Examples
    >>> parse_arguments({"A": 1, "B": 2}, 100)
    {'A': 1, 'B': 2, 100: 100}
"""
d = {}
for arg in args:
if isinstance(arg, dict):
d.update(arg)
else:
d.update({arg: arg})
return d
|
def mil(val):
"""convert mil to mm"""
return float(val) * 0.0254
|
def _extract_value_index(line, start=0):
"""Returns position of last non-whitespace character
"""
shortline = line.rstrip()
return len(shortline)
|
def character_talents(talent_data):
"""Accepts a JSON object containing a players talents
and returns the players current active specalization."""
talents = talent_data["talents"]
# Starts empty just incase the player hasn't got a spec selected.
active_spec = ""
for talent in talents:
# The API returns the selected key only if it's selected, therefore this check
# makes sure we're not looking for something that doesn't exist.
if "selected" in talent.keys():
if talent["selected"] == True:
active_spec = talent["spec"]["name"]
talent_data = {"active_spec": active_spec}
return talent_data
|
def extract_section_by_name(path, section_name):
"""
:param path:
:type path: list
:param section_name:
:type section_name: str
:return:
:rtype: list|None
"""
section = section_name.split(" ")
predicate = lambda x, section: section == x[:len(section)]
try:
return next(x[len(section):] for x in path if predicate(x, section))
except StopIteration:
return None
|
def _string_tolist(s):
"""Convert the authorization comma separated string to list
"""
return [v.strip() for v in s.split(',') if v]
|
def returnCentre(tlbr):
"""This function returns centre of bounding box.
Args:
tlbr (list) : list of values in str [topleft_x,topleft_y,bottomright_x,bottomright_y]
Returns:
list: Centre cordinates of bounding box
"""
int_tlbr = [int(tlbr[0]),int(tlbr[1])],[int(tlbr[2]),int(tlbr[3])]
topleft,bottomright = [int(tlbr[0]),int(tlbr[1])],[int(tlbr[2]),int(tlbr[3])]
centre=[0,0]
centre[0] =int(topleft[0] + (bottomright[0]-topleft[0])/2)
centre[1] = int(topleft[1] + (bottomright[1]-topleft[1])/2)
return centre
|
def Bingham(x, ystress=1.0, eta_bg=0.1):
"""Bingham model
Note:
.. math::
\sigma=\sigma_y+\eta_{bg}\cdot\dot\gamma
Args:
ystress: yield stress [Pa]
eta_bg : Background viscosity [Pa s]
Returns:
stress : Shear Stress, [Pa]
"""
return ystress + eta_bg * x
|
def uniques(some_list):
"""
Get unique values of a list
"""
new_list = []
for item in some_list:
if item not in new_list:
new_list.append(item)
return new_list
|
def _strip_plugin_path(path):
"""remove "plugins." prefix if it exist"""
return path[8:] if path.startswith("plugins.") else path
|
def findElementFromDict(dictionary,key):
""" Function to find a element into a dictionary for the key """
element = None
try:
element = dictionary[key]
return element
except KeyError:
return None
|
def get_url(package, version=None):
"""
Return homepage, repo, bugtracker URLs for a package.
All PEARs in pear.php.net are centrally controlled and hosted.
"""
package = package.replace('PEAR2_','')
urls = {
'homepage' : 'https://github.com/pear2/%s' % package,
'repository' : 'git://github.com/pear2/%s.git' % package,
'bugtracker' : 'https://github.com/pear2/%s/issues/%%d' % package,
}
return urls
|
def compute_vat_vd(rut):
"""
    :param rut(str): rut to check
    Computes the verification digit ("digito de verificacion") of a rut
    :return result(str): the verification digit
"""
result = None
factores = [3, 7, 13, 17, 19, 23, 29, 37, 41, 43, 47, 53, 59, 67, 71]
rut_ajustado=str(rut).rjust( 15, '0')
s = sum(int(rut_ajustado[14-i]) * factores[i] for i in range(14)) % 11
if s > 1:
result = 11 - s
else:
result = s
return str(result)
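# Check against a publicly known pair: the prime-weight scheme above matches the
# Colombian DIAN algorithm, and DIAN's own NIT 800197268 has verification digit 4:
assert compute_vat_vd("800197268") == "4"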
|
def cache_data(path, generator, regen=False):
"""
This function caches data from the passed function.
- If the specified path does not exist, then the function is called to generate the data and the data is then saved for future use.
- If the specified path does exist, the function is ignored and the cached data is immediately loaded.
"""
import os
import json
    global _RETRIEVED_CACHES
    if '_RETRIEVED_CACHES' not in globals():
        _RETRIEVED_CACHES = set()
assert path.endswith('.json'), 'file name must end with .json'
if regen or not os.path.exists(path):
# GENERATE DATA & CACHE
if os.path.dirname(path):
os.makedirs(os.path.dirname(path), exist_ok=True)
print(f'[\033[93mGENERATING\033[0m]: {path}')
data = generator()
with open(path, 'w') as file:
json.dump(data, file)
print(f'[\033[92mSAVED\033[0m]: {path}')
else:
# LOAD DATA IF EXISTS
with open(path, 'r') as file:
data = json.load(file)
        if path not in _RETRIEVED_CACHES:
            print(f'[\033[92mLOADED\033[0m]: {path}')
            _RETRIEVED_CACHES.add(path)
return data
|
def trim_lost_U(seq_F, qual_F, LOSTUSEQS):
""" test for lost U at the 3' end of the PCR primer sequence """
    for lostuseq in LOSTUSEQS:
        if len(seq_F) < len(lostuseq):
            return seq_F, qual_F
        if seq_F[:len(lostuseq)] == lostuseq:
            seq_F = seq_F[len(lostuseq):]
            qual_F = qual_F[len(lostuseq):]
            # if LOSTUSEQS[0] found, also look for LOSTUSEQS[1] etc.
        else:
            break
    return seq_F, qual_F
|
def concat(s1, s2):
"""
Helper function to reduce expressions
    :param s1: Something we can iterate over: a binary sequence with '_'
    :param s2: Something we can iterate over: a binary sequence with '_'
    :return: Merged version of the inputs; the single differing position is replaced by '_', or False if the inputs differ in more (or fewer) than one position
"""
w = ""
lz = 0
for z1, z2 in zip(s1, s2):
if z1 == z2:
w += z1
else:
lz += 1
w += "_"
if lz == 1:
return w
return False
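# Illustrative merges (the '_' wildcard convention matches Quine-McCluskey-style
# implicant reduction; these calls are examples, not from the original codebase):
assert concat("1101", "1100") == "110_"  # exactly one bit differs
assert concat("1101", "1010") is False   # more than one bit differs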
|
def create_bullet_camera_targets(
position, should_save: bool, should_send: bool,
):
""" Creates bullet camera targets.
    Args:
        position: The target position, in the bullet world coordinate frame.
        should_save: Whether the camera target should be saved.
        should_send: Whether the camera target should be sent.
Returns:
bullet_camera_targets: A dictionary of camera targets in the bullet
world coordinate frame, with the following format:
{
<target_id: int>: {
"position": <List[float]>,
"should_save": <bool>,
"should_send": <bool>,
}
}
"""
bullet_camera_targets = {
0: {
"position": position,
"should_save": should_save,
"should_send": should_send,
}
}
return bullet_camera_targets
|
def find_parameter_groups(columns):
"""
Find parameter groups within a list
This finds all the parameters which should be grouped together into tuples rather
than being separated. For example, if you have a list like
``["CORE_CLIMATESENSITIVITY", "RF_BBAER_DIR_WM2", "OUT_ZERO_TEMP_PERIOD_1", "OUT_ZERO_TEMP_PERIOD_2"]``,
this function will return
``{"OUT_ZERO_TEMP_PERIOD": ["OUT_ZERO_TEMP_PERIOD_1", "OUT_ZERO_TEMP_PERIOD_2"]}``
which tells you that the parameters
``["OUT_ZERO_TEMP_PERIOD_1", "OUT_ZERO_TEMP_PERIOD_2"]`` should be grouped
together into a tuple with the name ``"OUT_ZERO_TEMP_PERIOD"`` while all the other
columns don't belong to any group.
Parameters
----------
    columns : list of str
List of strings to sort
Returns
-------
dict of str: list of str
Dictionary where the keys are the 'group names' and the values are the list of
parameters which belong to that group name.
"""
cols_to_merge = {}
for c in columns:
toks = c.split("_")
start = "_".join(toks[:-1])
if start.lower() in ["file_emisscen", "out_keydata", "file_tuningmodel"]:
continue
try:
int(toks[-1]) # Check if the last token is an integer
if start not in cols_to_merge:
cols_to_merge[start] = []
cols_to_merge[start].append(c)
except (ValueError, TypeError):
continue
return {k: sorted(v) for k, v in cols_to_merge.items()}
|
def get_star_column_number(line):
""" For a line in a star file describing a column entry (e.g., '_rlnEstimatedResolution #5'), retrieve the value of that column (e.g. 5)
"""
column_num = int(line.split()[1].replace("#",""))
return column_num
|
def partition(a_list):
"""assumes a_list is a list of numbers
modifies a_list by internally moving its elements,
such that the rightmost element (the pivot)
has to its right only greater or equal elements,
and has to its left only lesser or equalelements
returns an int, the index of the pivot
"""
#partition list around pivot
pivot = a_list[-1]
list_lenght = len(a_list)
for i in range(list_lenght-1,-1, -1):
if a_list[i] > pivot:
a_list.insert(list_lenght, a_list.pop(i))
#find pivot index
return a_list.index(pivot)
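# One pass on a small list: elements greater than the pivot (3) are moved
# behind it, and the pivot's final index is returned (illustrative example):
lst = [5, 2, 8, 3]
assert partition(lst) == 1
assert lst == [2, 3, 8, 5]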
|
def take(n, iterator):
    """Returns first n values from the given iterator."""
    result = []
    try:
        for i in range(n):
            result.append(next(iterator))
except StopIteration:
pass
return result
|
def check_for_age_range(child_age, age_range):
"""
check if input child age is in the query's age range
:param child_age: the age to check (int)
:param age_range: the age range (list)
:return: True if the age is in the range, else False
"""
    return min(age_range) <= child_age <= max(age_range)
|
def _HasOption(_, option):
"""Validate the option exists in the config file.
Args:
option: string, the config option to check.
Returns:
bool, True if test is not in the option name.
"""
return 'test' not in option
|
def calc_AIC(k, maxlogL):
"""
| Calculate Akaike Information Criterion.
| k = number of parameters
| maxlogL = maximum log likelihood
"""
aic = 2 * (k - maxlogL)
return aic
|
def co_code_findloadednames(co):
"""Find in the code of a code object, all loaded names.
(by LOAD_NAME, LOAD_GLOBAL or LOAD_FAST) """
import dis
from opcode import HAVE_ARGUMENT, opmap
hasloadname = (opmap['LOAD_NAME'],
opmap['LOAD_GLOBAL'], opmap['LOAD_FAST'])
insns = dis.get_instructions(co)
len_co_names = len(co.co_names)
indexset = {}
for insn in insns:
if insn.opcode >= HAVE_ARGUMENT:
if insn.opcode in hasloadname:
indexset[insn.argval] = 1
if len(indexset) >= len_co_names:
break
for name in co.co_varnames:
try:
del indexset[name]
except KeyError:
pass
return indexset
|
def descope_message_ids_set(msgset_key):
"""Descope messages set with '.'
:returns: (queue, project)
"""
tokens = msgset_key.split('.')
return tokens[1] or None, tokens[0] or None
|
def enum_dict_aslist(d):
"""Summarize a dictionary with enums as keys as a shortened
list with only the names of each enum member.
Args:
d: Dictionary with enums as keys.
Returns:
List of tuples for all dictionary key-value pairs, with each
tuple containing the enum member name in place of the full
enum as the key.
"""
return [(key.name, val) for key, val in d.items()]
|
def do_some_work(x: int) -> int:
"""Efficiently computes a simple polynomial just for kicks
5 + 3x + 4x^2
"""
return 5 + x * (3 + x * (4))
|
def sub_string_exists_in(substring_list, string):
"""Return true if one of the substring in the list is found in |string|."""
for substring in substring_list:
if substring in string:
return True
return False
|
def dict_is_song(info_dict):
"""Determine if a dictionary returned by youtube_dl is from a song (and not an album for example)."""
if "full album" in info_dict["title"].lower():
return False
if int(info_dict["duration"]) > 7200:
return False
return True
|
def _css_escape(text: str) -> str:
""" @see https://developer.mozilla.org/en-US/docs/Web/CSS/string """
if not text:
return ''
# -- probably not needed.
# text = text.encode('unicode-escape').decode('ascii').replace('\\u', '\\')
    # The trailing space terminates the hex escape so a following hex digit is not swallowed.
    return text.replace("'", "\\27 ")
|
def home(request):
"""
Home page.
"""
return {'location':'home'}
|
def alma_query(ra, dec, mirror="almascience.eso.org", radius=1, extra=''):
"""
Open browser with ALMA archive query around central position
"""
#ra, dec = self.get('pan fk5').strip().split()
url = (f"https://{mirror}/asax/?result_view=observation"
f"&raDec={ra}%20{dec},{radius}{extra}")
#os.system(f'open "{url}"')
return url
|
def build_tracker_url(portal_url=None):
"""
Creates a url that can be used to open Tracker for ArcGIS
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
portal_url Optional :class:`String` The portal that should be used when tracker
is launched via the url scheme.
================== ====================================================================
:return: :class:`String`
"""
url = "arcgis-tracker://"
if portal_url is not None:
url += "?portalURL={}".format(portal_url)
return url
|
def YCoCgtoRGB(Y, Co, Cg):
""" convert YCoCg to RGB color
    The YCoCg color model was developed to increase the effectiveness of image compression.
This color model comprises the luminance (Y) and two color difference components
(Co - offset orange, Cg - offset green).
:param Y: Y value (0;255)
:param Co: Co value (0;255)
:param Cg: Cg value (0;255)
:return: RGB tuple (0;255) """
R = Y + Co - Cg
G = Y + Cg
B = Y - Co - Cg
return R, G, B
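# For reference, a forward transform consistent with the inverse above
# (a hypothetical helper, not part of the original source); it round-trips exactly:
def RGBtoYCoCg(R, G, B):
    # Inverting the three equations above yields these weights.
    Y = (R + 2 * G + B) / 4
    Co = (R - B) / 2
    Cg = (-R + 2 * G - B) / 4
    return Y, Co, Cg

assert YCoCgtoRGB(*RGBtoYCoCg(120, 64, 32)) == (120.0, 64.0, 32.0)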
|
def reverse(lyst):
""" Returns a reversed list. """
return [lyst[p] for p in range(len(lyst) - 1, -1, -1)]
|
def extended_gcd(n_1, n_2):
""" Returns (bezout_a, bezout_b, gcd) using the extended euclidean algorithm.
Params
    n_1: int
    n_2: int
Returns
bezout_a: int
bezout_b: int
gcd: int """
x = 0
x_old = 1
y = 1
y_old = 0
while n_2 != 0:
q = n_1 // n_2 #quotient
n_1, n_2 = n_2, n_1%n_2
x, x_old = x_old - q*x, x
y, y_old = y_old - q*y, y
bezout_a = x_old
bezout_b = y_old
gcd = n_1
return (bezout_a, bezout_b, gcd)
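# Bezout identity check: -9 * 240 + 47 * 46 == 2 == gcd(240, 46)
assert extended_gcd(240, 46) == (-9, 47, 2)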
|
def ipstr(barray):
"""Print a string of ip digits"""
return ".".join(f'{x}' for x in barray)
|
def sdb_latitude(latitude):
"""Return an 8 character, zero padded string version of the
latitude parameter.
**Arguments:**
* *latitude* -- Latitude.
"""
adjusted = (90 + float(latitude)) * 100000
return str(int(adjusted)).zfill(8)
|
def enet_params_from_sum(pen_val_1, pen_val_2):
"""
Computes the elastic net pen_val and mix_val from the two penalty values.
pen_val_1 = pen_val * mix_val
pen_val_2 = pen_val * (1 - mix_val )
Parameters
----------
pen_val_1, pen_val_2: float
The two penalty values in the sum.
Output
------
pen_val, mix_val: float
The elastic net parameters.
"""
pen_val = pen_val_1 + pen_val_2
mix_val = pen_val_1 / (pen_val_1 + pen_val_2)
return pen_val, mix_val
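# Round trip: pen_val = 10 with mix_val = 0.3 gives summands 3 and 7,
# which are recovered here (illustrative example):
assert enet_params_from_sum(3.0, 7.0) == (10.0, 0.3)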
|
def left_to_right_check(input_line: str, pivot: int):
"""
Check row-wise visibility from left to right.
    Return True if the number of buildings visible looking from the left
    matches the left-most hint, False otherwise.
    input_line - string representing a board row.
pivot - number on the left-most hint of the input_line.
>>> left_to_right_check("412453*", 4)
True
>>> left_to_right_check("452453*", 5)
False
"""
maximum = 0
count = 0
for char in input_line[1:-1]:
height = int(char)
if height > maximum:
maximum = height
count += 1
return count == pivot
|
def errors_sq1_mean(y_pred, y_real):
"""
    like sq_mean but with one always added to the err difference so the square of the error won't
    get smaller if the difference is less than 1
:param y_pred: list of predicted
:param y_real: list of real
:return:
"""
if len(y_pred) != len(y_real):
print("Error, unmatched number of ys")
return None
tot_err = 0.0
for i in range(len(y_pred)):
tot_err += (abs(y_pred[i]-y_real[i]) + 1) ** 2
mean_tot_err = tot_err/len(y_pred)
# print("total error: "+str(tot_err))
# print("mean error: "+str(mean_tot_err))
return mean_tot_err
|
def short_uuid4(uid):
"""When seeing a glimpse of the UID is enough."""
return str(uid)[:8]
|
def append_node(node, symbol):
"""
Return the appended node. Append a symbol to the node.
:param node: The node that will be appended to
:type node: list
:param symbol: The symbol that is appended
:type symbol: str
:return: The new node
:rtype: list
"""
# Create a list with the symbol and append it to the node
new_node = [symbol]
node.append(new_node)
return new_node
|
def calc_cost(cl1_len, cl2_len, num_cl1, num_cl2):
"""
    Assign a cost to a pattern based on whether the majority of its occurrences are observed
    in regions of a time series that are annotated with the same binary label.
    The cost calculation takes into account a possible difference in the total lengths of the segments.
    Args:
        cl1_len: Total length of the time series that belong to class 1.
        cl2_len: Total length of the time series that belong to class 2.
        num_cl1: Number of occurrences of the pattern in regions that belong to cl1.
        num_cl2: Number of occurrences of the pattern in regions that belong to cl2.
    Return: The cost of the pattern, together with the length-normalized occurrence
        counts for each class (None for degenerate inputs).
"""
if (num_cl1 + num_cl2 <= 2):
return 1.0, None, None
if (cl1_len == 0 or cl2_len == 0):
return 1.0, None, None
f = cl1_len / cl2_len
norm_cl1 = num_cl1 / f
norm_cl2 = num_cl2
cost = 1 - (abs(norm_cl1 - norm_cl2) / (norm_cl1 + norm_cl2))
return cost, norm_cl1, norm_cl2
|
def kth_to_last(head, k):
""" 2.2 Return Kth to Last: Implement an algorithm to find the kth to last
element of a singly linked list.
"""
    def advance(node, n):
        if node is None or n == 0:
            return (node, n)
        else:
            return advance(node.next, n - 1)
    (runner, index) = advance(head, k)
    if index != 0 or runner is None:
        return None
    node = head
    runner = runner.next
    while runner is not None:
runner = runner.next
node = node.next
return node
|
def has_smach_interface(obj):
"""Returns True if the object has SMACH interface accessors."""
return (hasattr(obj, 'get_registered_input_keys') and
hasattr(obj, 'get_registered_output_keys') and
hasattr(obj, 'get_registered_outcomes'))
|
def fibonacci(n):
    """this is probably the stupidest way to calculate a
    fibonacci number - but it is a nice recursion test.."""
if n < 2:
return n
return fibonacci(n-2) + fibonacci(n-1)
|
def file_get_contents(filename):
"""Fetches textfile"""
with open(filename) as file:
return file.read()
|
def binary_search0(xs, x):
"""
Perform binary search for a specific value in the given sorted list
:param xs: a sorted list
:param x: the target value
:return: an index if the value was found, or None if not
"""
lft, rgt = 0, len(xs) - 1
while lft <= rgt:
mid = (lft + rgt) // 2
if xs[mid] == x:
return mid
if xs[mid] < x:
lft = mid + 1
else:
rgt = mid - 1
return None
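# Typical hit and miss (illustrative):
assert binary_search0([1, 3, 5, 7, 9], 7) == 3
assert binary_search0([1, 3, 5, 7, 9], 4) is None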
|
def get_bubble_mask(im_list, min_size):
"""
Computes the standard deviation of a series of images of the video. The parts of the video which are
not moving in time (no particles) could be a bubble or particles which are stuck. We are especially interested in
bubble regions or dried particle regions which should be avoided or ignored by the detection.
    :param im_list: A list of frames
    :param min_size: Minimum region size (in pixels) to count as a bubble
    :return: A mask which shows the larger (larger than min_size) zero-movement regions (bubbles?)
"""
mask = 0.0
return mask
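# The body above is a placeholder. Below is a minimal sketch of the computation
# the docstring describes, assuming NumPy/SciPy, grayscale float frames, and a
# near-zero std threshold (the helper name and threshold are assumptions):
import numpy as np
from scipy import ndimage

def get_bubble_mask_sketch(im_list, min_size, std_thresh=1e-3):
    # Pixel-wise standard deviation across frames: ~0 where nothing moves.
    std = np.std(np.stack(im_list, axis=0), axis=0)
    static = std < std_thresh
    # Keep only connected static regions with at least min_size pixels.
    labels, n_regions = ndimage.label(static)
    sizes = ndimage.sum(static, labels, range(1, n_regions + 1))
    keep = [i + 1 for i, s in enumerate(sizes) if s >= min_size]
    return np.isin(labels, keep)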
|
def cipher(text, shift, encrypt=True):
"""
The function allows encipher your message.
In short, each letter is replaced by a letter some fixed number of positions down the alphabet.
Parameters
----------
text: a string of the word, sentence or paragraph to encipher
shift: an integer of fixed number of positions down the alphabet
encrypt: a boolean that identify whether to encrypt or decrypt
Returns
----------
    The new text, ciphered according to the input shift under the Caesar cipher
Examples
----------
>>> import cipher_zh2387
>>> cipher_zh2387.cipher("All about us", 3)
'Doo derxw xv'
"""
alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
new_text = ''
for c in text:
index = alphabet.find(c)
if index == -1:
new_text += c
else:
new_index = index + shift if encrypt == True else index - shift
new_index %= len(alphabet)
new_text += alphabet[new_index:new_index+1]
return new_text
|
def is_bioproject(text):
"""
Pattern for project numbers.
"""
return text.startswith("PRJ")
|
def standardize_sample_duration(sample_duration):
"""
sample_duration: a string description of duration at which devices sample and measure pollutants
    returns a standardized string - '24-hour', '8-hour' or '1-hour' - or 'unknown' if no match
"""
durations = ['24','8','1']
for duration in durations:
if duration in sample_duration:
return duration+'-hour'
return 'unknown'
|
def classification_string(classification_idx):
""" converts classification idx to string """
    classifications = ['T-Shirt/top',
                       'Trouser',
                       'Pullover',
                       'Dress',
                       'Coat',
                       'Sandal',
                       'Shirt',
                       'Sneaker',
                       'Bag',
                       'Ankle boot']
return classifications[classification_idx]
|
def _parse_compute_full_uri_to_partial_uri(full_uri, version='v1'):
"""
Take a GCP Compute object's self_link of the form
`https://www.googleapis.com/compute/{version}/projects/{project}/{location specifier}/{subtype}/{resource name}`
and converts it to its partial URI `{project}/{location specifier}/{subtype}/{resource name}`.
This is designed for GCP compute_objects that have compute/{version specifier}/ in their `self_link`s.
    :param full_uri: The full URI
:param version: The version number; default to v1 since at the time of this writing v1 is the only Compute API.
:return: Partial URI `{project}/{location specifier}/{subtype}/{resource name}`
"""
return full_uri.split(f'compute/{version}/')[1]
|