content
stringlengths 42
6.51k
|
---|
def queryMaker(readList, outLoc="queryTemp.txt"):
    """
    Write a FASTA-style BLAST query file from a counts object.

    :param readList: nested list in the form [[read, total, unique], ...];
        only the read sequence (element 0) of each entry is written.
    :param outLoc: file location; temporary place to store the query file.
    :return: True on success (kept because existing callers may check it).
    """
    # Context manager guarantees the file is closed even if a write fails
    # (the old version leaked the handle on error).
    with open(outLoc, 'w') as f:
        for index, entry in enumerate(readList):
            # Each record: ">N" header line followed by the read sequence.
            f.write(">" + str(index) + "\n")
            f.write(entry[0] + "\n")
    return True
|
def traverse_fields(entry, ignore_fields=None):
    """
    Returns a list of sets of nested fields (one set per level of nesting)
    of a JSON data entry produced by a benchmark analysis script.
    Ignores the dashboard-appended 'timestamp' and 'tvm_hash' fields at the top level by default.
    Also ignores the 'detailed' field by default (as old data files will not have detailed summaries).
    Set ignore_fields to a non-None value to avoid the defaults.
    """
    # Default fields to skip; replaced wholesale when ignore_fields is given.
    ignore_set = {'timestamp', 'tvm_hash', 'detailed',
                  'start_time', 'end_time', 'time_delta', 'success',
                  'run_cpu_telemetry', 'run_gpu_telemetry'}
    if ignore_fields is not None:
        ignore_set = set(ignore_fields)
    # Fields present at this level, minus the ignored ones.
    level_fields = {field for field in entry.keys()
                    if field not in ignore_set}
    # Recurse only into dict-valued fields.
    # NOTE(review): the recursive call below does not forward ignore_fields,
    # so the *default* ignore set applies at every nested level even when a
    # custom one was passed — confirm this is intended.
    values_to_check = [entry[field] for field in level_fields
                       if isinstance(entry[field], dict)]
    tail = []
    max_len = 0
    for value in values_to_check:
        next_fields = traverse_fields(value)
        tail.append(next_fields)
        if len(next_fields) > max_len:
            max_len = len(next_fields)
    # combine all the field lists (union of each level's sets)
    final_tail = []
    for i in range(max_len):
        u = set({})
        final_tail.append(u.union(*[fields_list[i]
                                    for fields_list in tail
                                    if len(fields_list) > i]))
    return [level_fields] + final_tail
|
def get_memory_in_GB(memory_str):
    """Convert a memory string given in kB (e.g. '2000000kB') to a '<x> GB' string.

    Returns '' when the input cannot be parsed.
    """
    try:
        # Drop the trailing two-character unit suffix before converting.
        kilobytes = int(memory_str[:-2])
    except (ValueError, TypeError):
        return ''
    return '{0} GB'.format(kilobytes / 1000000)
|
def getWordsUnder(wordList, targetLength):
    """Return the words in wordList that are strictly shorter than targetLength."""
    return list(filter(lambda word: len(word) < targetLength, wordList))
|
def deposit_money(amount, card_balance):
    """Return the card balance after adding the deposited amount."""
    new_balance = card_balance + amount
    # save new balance to the database
    return new_balance
|
def _filter_subs_packages(initial, subs):
"""Rename and filter package list with subsitutions; for similar systems.
"""
final = []
for p in initial:
try:
new_p = subs[p]
except KeyError:
new_p = p
if new_p:
final.append(new_p)
return sorted(final)
|
def linear(iteration: int, frequency: int = 10) -> bool:
    """Decide whether to optimize hyperparameters at this iteration.

    Args:
        iteration (int): current iteration.
        frequency (int, optional): Spacing between the True outputs. Defaults to 10.
    Returns:
        bool: True on the first iteration and whenever iteration is an exact
        multiple of frequency.
    """
    return iteration == 1 or iteration % frequency == 0
|
def bai_path(bamfilepath):
    """Return the path of the BAI index sitting next to the given BAM file."""
    return "{}.bai".format(bamfilepath)
|
def scale(num, base):
    """
    Multiply num by base when num is a float; return num unchanged for any
    other type.
    """
    return num * base if isinstance(num, float) else num
|
def RPL_WHOISOPERATOR(sender, receipient, message):
    """Reply Code 313 (RPL_WHOISOPERATOR).

    receipient is unused but kept so all reply handlers share one signature.
    """
    return "<{}>: {}".format(sender, message)
|
def model(fr, p):
    """
    Drag-divergence Mach model chosen for its asymptotic behavior: as the
    fineness ratio fr goes to infinity the value tends to 1, and as fr goes
    to 0 it tends to a reasonable value below about 0.6 (author's intuition).
    """
    correction = (p["a"] / (fr + p["b"])) ** p["c"]
    return 1 - correction
|
def golden_section_search(function, a, b, tol=1e-3):
    """Golden section search for the extremum of a strictly unimodal function
    by successively narrowing the bracketing interval.

    Parameters
    ----------
    function : Function
        Evaluation function.
    a : float
        Lower bound of the initial search interval.
    b : float
        Upper bound of the initial search interval.
    tol : float, optional
        A tolerance for convergence.
        Default is ``1e-3``.

    Returns
    -------
    float
        The abscissa of the minimum (or maximum) value.

    Notes
    -----
    Adapted after [1]_. This version caches the two interior function values,
    halving the number of evaluations per iteration compared with the naive
    form (the previous code re-evaluated both points every loop; the old
    docstring also wrongly stated the default tolerance as 1e-6).

    References
    ----------
    .. [1] Wikipedia *Golden section search*.
        Available at: https://en.wikipedia.org/wiki/Golden-section_search
    .. [2] Kiefer, J. (1953). Sequential minimax search for a maximum. In *Proceedings of the American Mathematical Society* 4(3).
    """
    gr = (5 ** 0.5 + 1) / 2
    c = b - (b - a) / gr
    d = a + (b - a) / gr
    fc = function(c)
    fd = function(d)
    while abs(c - d) > tol:
        if fc < fd:
            # Minimum lies in [a, d]; old c becomes the new d.
            b = d
            d, fd = c, fc
            c = b - (b - a) / gr
            fc = function(c)
        else:
            # Minimum lies in [c, b]; old d becomes the new c.
            a = c
            c, fc = d, fd
            d = a + (b - a) / gr
            fd = function(d)
    return (b + a) / 2
|
def get_tag_text(tag):
    """Return the tag text without its leading '#' marker character."""
    return tag[len("#"):]
|
def get_latest_sudo_policies(sudo_policies_list):
    """Keep only the newest policy row per hostname.

    Each row is [hostname, path, version, timestamp]; for duplicate hostnames
    the row with the largest integer timestamp (index 3) wins. Order of first
    appearance per hostname is preserved.
    """
    newest = {}
    for policy in sudo_policies_list:
        host = policy[0]
        current = newest.get(host)
        if current is None or int(current[3]) < int(policy[3]):
            newest[host] = policy
    return list(newest.values())
|
def validate_nb_custom_field(custom_field):
    """Validate that the NetBox custom_field environment variable is present.

    Prints a warning and returns False when the value is None or empty.
    """
    if custom_field is None or custom_field == "":
        print("Missing custom_field to update")
        return False
    return True
|
def get_data_list_name(name):
    """ get plural form
    city -> cities
    unit -> units
    box  -> boxes
    leaf -> leaves
    boy  -> boys

    The previous version had several dead branches: 'y' was always in its own
    vowel check (so every -y word became -ies, e.g. boy -> boies), and the
    two-character suffixes 'ss'/'sh'/'ch' and 'fe' were compared against a
    single character, so they never matched.

    :param str name: name of unit
    :return: pluralized name
    """
    vowels = 'aeiou'
    if name.endswith('y'):
        # vowel + y -> just add s (boy -> boys); consonant + y -> ies
        if len(name) > 1 and name[-2] in vowels:
            return name + 's'
        return name[:-1] + 'ies'
    if name.endswith('fe'):
        return name[:-2] + 'ves'     # knife -> knives
    if name.endswith('f'):
        return name[:-1] + 'ves'     # leaf -> leaves
    if name.endswith(('s', 'x', 'z', 'sh', 'ch', 'o')):
        return name + 'es'           # box -> boxes, hero -> heroes
    return name + 's'
|
def _le_from_ge(self, other):
    """Return a <= b. Computed by @total_ordering from (not a >= b) or (a == b)."""
    # Delegate to the class's own __ge__ so the derived ordering stays
    # consistent with it.
    op_result = self.__ge__(other)
    if op_result is NotImplemented:
        # Propagate so Python can fall back to the reflected operation.
        return NotImplemented
    # a <= b  <=>  (not a >= b) or (a == b)
    return not op_result or self == other
|
def parse_int(text: str) -> bool:
    """
    Report whether *text* is a plain (possibly negative) decimal integer.

    str.isnumeric/isdecimal reject a leading minus sign, so a single leading
    '-' is removed before checking. The previous ``text.strip("-")`` stripped
    dashes from *both* ends and in any number, wrongly accepting strings such
    as ``"5-"`` or ``"--5"`` on which ``int(text)`` would fail.

    :param text: the text to check
    :return: True if the text is a decimal integer, otherwise False
    """
    unsigned = text[1:] if text.startswith("-") else text
    return unsigned.isdecimal()
|
def _get_yield_and_servings(value):
"""Returns a tuple consisting of the yield value and servings value.
Only one of these has a meaningful value while the other is set to a
default value. If there is an integer followed optionally by the word
'serving' or 'servings' then return this integer value as the number
of servings, otherwise return the input value as the yield.
"""
if value.lower().endswith('servings'):
servings = value[:-8].strip()
elif value.lower().endswith('serving'):
servings = value[:-7].strip()
else:
servings = value
try:
return '', int(servings)
except ValueError:
return value, 0
|
def unsnake(field):
    """Replace underscores with spaces and capitalize the first letter."""
    words = str(field).split('_')
    return ' '.join(words).capitalize()
|
def get_group_gn(dim, dim_per_gp, num_groups):
    """Compute the number of GroupNorm groups from the channel count.

    Exactly one of dim_per_gp / num_groups must be -1: specify either the
    channels per group (C/G) or the group count (G), never both.
    """
    assert dim_per_gp == -1 or num_groups == -1, \
        "GroupNorm: can only specify G or C/G."
    if dim_per_gp > 0:
        assert dim % dim_per_gp == 0, \
            "dim: {}, dim_per_gp: {}".format(dim, dim_per_gp)
        return dim // dim_per_gp
    assert dim % num_groups == 0, \
        "dim: {}, num_groups: {}".format(dim, num_groups)
    return num_groups
|
def get_size_string(size):
    """Return a human-readable B/KB/MB/GB string for a byte count."""
    # Largest unit first; fall through to plain bytes.
    for shift, unit in ((30, 'GB'), (20, 'MB'), (10, 'KB')):
        if size >= 1 << shift:
            return '%d %s' % (size >> shift, unit)
    return '%d B' % size
|
def _GetChannelIndicesInChannelDataIdList(id_list):
"""Returns a list of all the unique channel indices requested."""
channel_indices = []
for ch in id_list:
if ch.HasField('bipolar_channel'):
request_indices = [
ch.bipolar_channel.index, ch.bipolar_channel.referential_index
]
else:
request_indices = [ch.single_channel.index]
channel_indices = channel_indices + request_indices
return list(set(channel_indices))
|
def success_responses_filter(responses):
    """Return the responses whose HTTP status code is 200.

    The previous version wrapped a ``filter`` in a pointless list
    comprehension; a single comprehension is the idiomatic form.

    :param responses: iterable of response objects with a status_code attribute
    :return: list of the responses with status_code == 200
    """
    return [response for response in responses if response.status_code == 200]
|
def join_url(url, *paths):
    """
    Joins individual URL strings together, and returns a single string.

    Usage::
        >>> join_url("example.com", "index.html")
        'example.com/index.html'

    Called with no extra paths the url is returned unchanged (the previous
    version raised IndexError on ``paths[-1]`` in that case).
    """
    if not paths:
        return url
    # Strip trailing '/' from every part except the last, then leading '/'
    # from every part, so exactly one '/' separates each pair.
    to_rstrip = [url] + list(paths[:-1])
    parts = [path.rstrip('/') for path in to_rstrip] + [paths[-1]]
    return "/".join(path.lstrip('/') for path in parts)
|
def merge_two_dicts(x, y):
    """Return a new dict with x's entries updated by y's (y wins on clashes)."""
    return {**x, **y}
|
def recursive_dict_update(d, u):
    """Merge mapping u into d in place, recursing into nested dicts; return d.

    Nested sub-dictionaries of d are copied before merging so the original
    sub-dictionary objects are never mutated; non-dict values from u simply
    overwrite d's.
    """
    for key, new_value in u.items():
        if not isinstance(new_value, dict):
            d[key] = new_value
            continue
        try:
            # Copy so the pre-existing sub-dictionary object stays untouched.
            merged = d[key].copy()
        except KeyError:
            merged = {}
        d[key] = recursive_dict_update(merged, new_value)
    return d
|
def dfttk_config_to_espei(config):
    """Convert a DFTTK configuration, e.g. [['Fe', 'Ni'], ['Fe']], to ESPEI's
    form [['FE', 'NI'], 'FE']: a one-species (endmember) sublattice collapses
    to an uppercase string, an interacting sublattice stays an uppercase list.
    """
    def _convert(sublattice):
        upper = [component.upper() for component in sublattice]
        return upper[0] if len(upper) == 1 else upper
    return [_convert(sublattice) for sublattice in config]
|
def error_413(error):
    """Payload Too Large handler; the error argument is ignored."""
    return ("Uploaded file is too large", 413)
|
def format_date(match_date):
    """
    Return the date portion (YYYY-MM-DD) of an ISO timestamp string.

    Param: match_date (str) like "2019-08-11T13:00:00Z"
    Returns: "2019-08-11"
    """
    date_part = match_date[:10]
    return date_part
|
def is_substring(subseq, seq):
    """Check if `subseq` occurs as a contiguous substring of `seq`.

    The previous implementation never backtracked the scan position after a
    partial match, so e.g. is_substring("aab", "aaab") wrongly returned False
    (and an empty subseq crashed with IndexError). This version compares a
    sliding window at every start position; an empty subseq is trivially a
    substring.
    """
    n = len(seq)
    m = len(subseq)
    if m > n:
        return False
    for start in range(n - m + 1):
        if seq[start:start + m] == subseq:
            return True
    return False
|
def generate_section(inputfiles, align_sizes):
    """Build the command list for the 'all' section.

    Each input file is appended, optionally followed by its --sectionalign
    value. NOTE(review): align_sizes is indexed at index + 1 because its
    first entry is a None placeholder — confirm against the caller.
    """
    cmd = ["tmp.all"]
    if align_sizes == [None]:
        cmd.extend(inputfiles)
        return cmd
    for index, infile in enumerate(inputfiles):
        cmd.append(infile)
        cmd.extend(["--sectionalign", align_sizes[index + 1]])
    return cmd
|
def traverse(dictionary, keys, default=None):
    """Follow a sequence of keys into nested dictionaries.

    Args:
        dictionary (dict): The mapping to descend into.
        keys (tuple|str): The hierarchical list of keys (a bare string is
            treated as a one-element path).
        default: The value returned when any key is missing (or keys is None).
    Returns:
        The value found at the end of the path, or `default`.
    """
    if keys is None:
        return default
    key_path = (keys,) if type(keys) == str else keys
    node = dictionary
    for key in key_path:
        try:
            node = node[key]
        except KeyError:
            return default
    return node
|
def check_mandatory_keys(config, section, keys):
    """Check that mandatory keys are present in the configuration.

    :param dict config: the dictionary containing the configuration
    :param string section: the section in which to check for the keys
    :param list keys: the keys to check for existence
    :return: True
    :rtype: bool
    :raises KeyError: when the section or any key is missing
    """
    if section not in config:
        raise KeyError("No '%s' section provided in config file" % section)
    section_cfg = config[section]
    missing = [name for name in keys if name not in section_cfg]
    if missing:
        # Report the first missing key, matching iteration order of keys.
        raise KeyError("No '%s' found in section '%s'" % (missing[0], section))
    return True
|
def CRRAutilityPP(c, gam):
    """
    Evaluate the constant relative risk aversion (CRRA) marginal marginal
    (second-derivative) utility of consumption.

    Parameters
    ----------
    c : float
        Consumption value
    gam : float
        Risk aversion

    Returns
    -------
    float
        Marginal marginal utility: -gam * c**(-gam - 1)
    """
    exponent = -gam - 1.0
    return -gam * c ** exponent
|
def zfp_expert_opts(minbits, maxbits, maxprec, minexp):
    """Create compression options for ZFP in "expert" mode.

    See the ZFP docs for the meaning of the parameters; the second tuple
    element is a reserved slot that is always 0.
    """
    ZFP_MODE_EXPERT = 4
    return (ZFP_MODE_EXPERT, 0, minbits, maxbits, maxprec, minexp)
|
def extract_type_for_type_or_null_property(property_):
    """Extract the real type definition from a serpyco optional property.

    Serpyco models optional properties as ``"anyOf": [<actual-type>,
    {"type": "null"}]``; this returns the non-null alternative.

    :param property_: property dict to extract from
    :return: the real property definition
    :raises TypeError: if property_ is not a two-alternative anyOf
    """
    alternatives = property_.get("anyOf")
    if alternatives is not None and len(alternatives) == 2:
        for alternative in alternatives:
            if alternative.get("type") != "null":
                return alternative
    raise TypeError("Can't extract type because this is not a type_or_null_property")
|
def generate_file_name(file_name="playlist", resolution="SD", extention="m3u8"):
    """Build a playlist file name that includes the resolution.

    ex: generate_file_name('output', 'HD') -> 'output_HD.m3u8'

    Fix: the extention argument was previously accepted (and defaulted) but
    ignored — '.m3u8' was hard-coded in the result. It is now honored;
    defaults are unchanged, so existing callers get the same output.
    """
    if file_name is None:
        file_name = "playlist"
    if extention is None:
        extention = "m3u8"
    return f"{file_name}_{resolution}.{extention}"
|
def pickfirst(files):
    """Return the first filename when given a list of files.

    Parameters
    ----------
    files : list of filenames (or a single filename)

    Returns
    -------
    The first list element, or `files` itself when it is not a list.
    """
    return files[0] if isinstance(files, list) else files
|
def safe_cast(value, __class, default=None):
    """Cast value with __class; on failure print the error and return default."""
    try:
        result = __class(value)
    except (ValueError, TypeError) as err:
        print(f'Error casting {value} as {__class}: {err}')
        return default
    return result
|
def _format_csv(record: dict) -> str:
"""formats the values of the record as comma separated values """
return ','.join([str(val) for val in record.values()])
|
def application_error(e):
    """Return a custom 500 error response tuple."""
    return f'Sorry, unexpected error: {e}', 500
|
def remove_block_head(block, head):
    """Strip the given head prefix (and a directly following '.') from each
    line of the block that starts with it; other lines pass through."""
    stripped = []
    for raw_line in block:
        line = raw_line
        if line.startswith(head):
            line = line[len(head):]
            if line.startswith('.'):
                line = line[1:]
        stripped.append(line)
    return stripped
|
def profbfiles(date):
    """Observation file locator stub.

    .. note:: User-specified stub; the date argument is currently ignored.

    :param date: The verification date in string format ``'%Y%m%d'``
    :returns: List of files
    """
    return ['profb.nc']
|
def next_page(page_list, page):
    """Return the page number following `page` in `page_list` (for pagination).

    :param page_list: A list of available page numbers.
    :param page: Page to get the next page from.
    :return: The next page number, or None when `page` is absent or last.
    :rtype: int or None
    """
    try:
        position = page_list.index(page)
    except ValueError:
        return None
    if position == len(page_list) - 1:
        return None
    return page_list[position + 1]
|
def timeBlock(block, dur):
    """Adjust an ISI block for the timing interval: the first entry is kept,
    and every later entry becomes isi + dur + previous isi."""
    timed = []
    for index, isi in enumerate(block):
        if index == 0:
            timed.append(isi)
        else:
            timed.append(isi + dur + block[index - 1])
    return timed
|
def map_ids(elems):
    """Map a list of objects to their Spotify ids via each element's spotify_id()."""
    return list(map(lambda elem: elem.spotify_id(), elems))
|
def full_kwargs(state, kwargs):
    """
    Merge command state with keyword arguments; state takes precedence.

    :param state: The current command state. Can be ``None``.
    :param kwargs: The base keyword arguments.
    :returns: All of ``kwargs`` with ``state`` values overriding; when state
        is None, ``kwargs`` itself is returned (not a copy).
    """
    if state is None:
        return kwargs
    merged = kwargs.copy()
    merged.update(state)
    return merged
|
def gettags(comment):
    """Parse documentation strings into JavaDoc-like tokens.

    Returns a list of (lineno, tag, datatype, name, text) tuples. The first
    entry always covers the free text before the first @tag line (with tag
    None); each @tag line is split into at most four parts:
    "@tag datatype name trailing-text".
    """
    tags = []
    tag = None
    datatype = None
    name = None
    tag_lineno = lineno = 0
    tag_text = []
    for line in comment.split('\n'):
        line = line.strip()
        if line.startswith("@"):
            # Flush the previous tag (or the leading free text).
            tags.append((tag_lineno, tag, datatype, name, '\n'.join(tag_text)))
            # Split into at most 4 whitespace-separated parts.
            parts = line.split(None, 3)
            if len(parts) == 1:
                datatype = ''
                name = ''
                tag_text = []
            elif len(parts) == 2:
                datatype = parts[1]
                name = ''
                tag_text = []
            elif len(parts) == 3:
                datatype = parts[1]
                name = parts[2]
                tag_text = []
            elif len(parts) == 4:
                datatype = parts[1]
                name = parts[2]
                tag_text = [parts[3].lstrip()]
            tag = parts[0][1:]  # tag name without the leading '@'
            tag_lineno = lineno
        else:
            # Non-tag, non-empty lines accumulate as the current tag's text.
            if line:
                tag_text.append(line)
        lineno += 1
    # Flush the final pending tag.
    tags.append((tag_lineno, tag, datatype, name, '\n'.join(tag_text)))
    return tags
|
def convert_lines_to_object(lines: list) -> dict:
    """
    Convert a list of lines into a dict indexed by zero-padded line number.

    Parameters
    ----------
    lines : list
        The list of lines to convert.

    Returns
    -------
    dict
        {"count": N, "line0": ..., "line1": ...}; the numeric part is padded
        with zeros to the width of the line count, indexing from 0.
    """
    width = len(str(len(lines)))
    result = {"count": len(lines)}
    for number, line in enumerate(lines):
        result["line" + str(number).rjust(width, "0")] = line
    return result
|
def humanized_class_name(obj, *args, **kwargs):
    """
    Add spaces to obj's camel-case class name ("FooBar" -> "Foo Bar").
    """
    name = obj.__class__.__name__
    pieces = []
    # Pair every character with its successor (padding the end) so a space
    # can be inserted at each lower->upper transition.
    for current, following in zip(name, name[1:] + ' '):
        pieces.append(current)
        if current.islower() and following.isupper():
            pieces.append(' ')
    return ''.join(pieces)
|
def _convert_to_string(input_value):
"""Converts input value to string.
:param input_value: Input value.
:return: output_value: Float value.
"""
try:
return str(input_value).strip().upper()
except:
return ''
|
def nested_contains(list_: list, value: object) -> bool:
    """
    Return whether list_, or any nested sub-list of list_, contains value.

    >>> nested_contains(["how", "now", "brown", 1], "now")
    True
    >>> nested_contains(["how", "now", "brown", 1], 3)
    False
    >>> nested_contains(["how", ["now", "brown"], 1], "now")
    True
    >>> nested_contains([], 5)
    False
    >>> nested_contains([5], 5)
    True
    """
    # Non-list nodes are leaves: compare directly.
    if not isinstance(list_, list):
        return list_ == value
    return any(nested_contains(element, value) for element in list_)
|
def bubble_sort(array):
    """
    Sort a list of integers in ascending order using bubble sort.

    The list is sorted in place and also returned. The previous version
    wrapped the swap pass in a pointless third nested loop (whose variable
    was never used), making it O(n^3); this is the standard O(n^2) form with
    a shrinking pass range and early exit on a swap-free pass.
    """
    n = len(array)
    for pass_num in range(n - 1):
        swapped = False
        # After each pass the largest unsorted element sits at the end,
        # so the inner range shrinks by one each time.
        for i in range(n - 1 - pass_num):
            if array[i] > array[i + 1]:
                array[i], array[i + 1] = array[i + 1], array[i]
                swapped = True
        if not swapped:
            break  # already sorted
    return array
|
def gen_workspace_tfvars_files(environment, region):
    """Generate candidate Terraform workspace tfvars filenames, most specific first."""
    explicit = f"{environment}-{region}.tfvars"   # environment-region file wins
    fallback = f"{environment}.tfvars"            # environment-only fallback
    return [explicit, fallback]
|
def _tail_avg(timeseries, tidx=0, vidx=1):
"""
This is a utility function used to calculate the average of the last three
datapoints in the series as a measure, instead of just the last datapoint.
It reduces noise, but it also reduces sensitivity and increases the delay
to detection.
"""
try:
t = (timeseries[-1][vidx] + timeseries[-2]
[vidx] + timeseries[-3][vidx]) / 3
return t
except IndexError:
return timeseries[-1][vidx]
|
def it(item):
    """Collapse whole-number values to int; return non-numeric items untouched."""
    try:
        as_int = int(item)
        as_float = float(item)
    except ValueError:  # item is not numerical
        return item
    return as_int if as_int == as_float else as_float
|
def replace_sanalinkki(text, words):
    """
    Fix word links: replace each '#sanalinkki_<key>' placeholder in text with
    '#<word>' using the words mapping.
    """
    for key, word in words.items():
        text = text.replace("#sanalinkki_{}".format(key), "#" + word)
    return text
|
def getObjectName(objects):
    """Create a string representing the objects name.

    objects - list of one or more DejaVu geom. objects

    Collects the fullName of objects whose name starts with 'root' but whose
    last path component is not 'root' itself, dropping the 5-character
    'root|' prefix and joining with ';'.
    NOTE(review): the break below fires after the first matching object, so
    at most one name is ever collected despite the ';'-join logic — confirm
    whether that is intended.
    """
    name = ""
    for object in objects:
        try:
            objectName = object.fullName
            if objectName.split("|")[-1] != 'root':
                if objectName.find('root') == 0:
                    if not len(name):
                        name = objectName[5:]
                    else:
                        name = name + ";" + objectName[5:]
                    break
        # bare except: objects lacking a usable fullName are silently skipped
        except:
            pass
    return name
|
def punct(w_list):
    """
    :param w_list: word list to be processed
    :return: w_list with punctuation and number tokens filtered out
    """
    return [token for token in w_list if token.isalpha()]
|
def dictmerge(*dicts):
    """Merge any number of dicts left to right; later dicts win on key clashes."""
    merged = {}
    for mapping in dicts:
        merged = {**merged, **mapping}
    return merged
|
def get_hashtags(hashtag_entity):
    """
    Extract the 'text' of every hashtag in a tweet's hashtag entity list.
    Returns an empty list when the entity is None.
    """
    if hashtag_entity is None:
        return []
    return [hashtag['text'] for hashtag in hashtag_entity]
|
def _class_is_reference(cls):
"""
Checks if the current model entity is in fact a (data) reference
class, for external elements reference.
The data reference classes are used to refer external components of
the model, for a loose connection.
:rtype: bool
:return: If the current class is a data reference to an entity model.
"""
# checks if the class is a data reference, by testing it
# for the presence of the data reference value and by
# testing the (possible) existing value against true validation
if hasattr(cls, "data_reference") and cls.data_reference == True:
# returns valid (the model class is in fact
# a data reference model class)
return True
# returns invalid (the model class is not
# a data reference)
return False
|
def partition(pred, iterable):
    """Return (trues, falses): items of 'iterable' satisfying 'pred', then
    all the rest, both in original order."""
    trues = []
    falses = []
    for item in iterable:
        (trues if pred(item) else falses).append(item)
    return trues, falses
|
def equation(x: float) -> float:
    """
    Evaluate 10 - x * x.

    :param x: input value (float)
    :return: the result of 10 - x * x (float)

    >>> equation(5)
    -15
    >>> equation(0.1)
    9.99
    """
    square = x * x
    return 10 - square
|
def det3(u, v, w):
    """Determinant of the 3x3 matrix with rows ``u``, ``v``, ``w``
    (the scalar triple product u . (v x w))."""
    cross = (v[1] * w[2] - v[2] * w[1],
             v[2] * w[0] - v[0] * w[2],
             v[0] * w[1] - v[1] * w[0])
    return u[0] * cross[0] + u[1] * cross[1] + u[2] * cross[2]
|
def get_source_id(sl):
    """Return (ns, id) parsed from the last 'ns:id' entry of the source list."""
    ns, ident = sl[-1].split(':')
    return (ns, ident)
|
def vars_are_encrypted(vars):
    """Return True if any string value in the dict carries the AWX
    '$encrypted$' marker prefix."""
    return any(
        isinstance(value, str) and value.startswith('$encrypted$')
        for value in vars.values()
    )
|
def qb_account(item_title):
    """
    Map a Ticketleap item title to its QuickBooks (class, account) pair.

    Titles mentioning 'Northern' or 'NLC' get the 'NLC' class, others 'CCC';
    'Competitor' items map to amateur registration income, everything else
    to advance ticket sales. Only guaranteed for Ticketleap sales, not
    PayPal invoices.
    """
    is_nlc = 'Northern' in item_title or 'NLC' in item_title
    item_class = 'NLC' if is_nlc else 'CCC'
    if 'Competitor' in item_title:
        item_account = ('Competition Income:Competitors:'
                        'Amateur Registration Fees')
    else:
        item_account = 'Competition Income:Sales:Tickets:Advance Tickets'
    return item_class, item_account
|
def encodeHTML(string):
    """
    Encode [&<>'"] as HTML entities, making the text safe to print anywhere.
    '&' must be replaced first so generated entities are not double-escaped.
    """
    replacements = (
        ('&', '&amp;'),
        ('<', '&lt;'),
        ('>', '&gt;'),
        ('"', '&quot;'),
        ("'", '&#39;'),
    )
    for char, entity in replacements:
        string = string.replace(char, entity)
    return string
|
def get_type(value):
    """
    Mirror the config-file structure, replacing each leaf value with the
    string form of its type.
    """
    if not isinstance(value, dict):
        return str(type(value))
    return {key: get_type(subvalue) for key, subvalue in value.items()}
|
def blank_if_zero(n):
    """Return str(n), or "" when n equals zero."""
    return "" if n == 0 else str(n)
|
def make_breakable(keyword):
    """Insert a zero-width space (U+200B) after each underscore of keywords
    longer than 30 characters, letting HTML wrap them; shorter keywords pass
    through unchanged."""
    if len(keyword) <= 30:
        return keyword
    return keyword.replace('_', '_\u200b')
|
def find_endurance_tier_iops_per_gb(volume):
    """Find the tier (IOPS per GB) for the given endurance volume.

    The first word of the storage tier description must be one of
    '0.25', '2', '4' or '10'.

    :param volume: The volume for which the tier level is desired
    :return: float IOPS per GB for the volume
    :raises ValueError: when the description does not name a known tier
    """
    tier_map = {'0.25': 0.25, '2': 2.0, '4': 4.0, '10': 10.0}
    words = volume['storageTierLevel']['description'].split()
    if words:
        tier = tier_map.get(words[0])
        if tier is not None:
            return tier
    raise ValueError("Could not find tier IOPS per GB for this volume")
|
def extract_ratings(txt):
    """Extract the rating histogram from embedded Javascript code.

    Looks for the first ``renderRatingGraph([c5, c4, c3, c2, c1])`` call in
    the joined text and returns {stars: count} keyed 5 down to 1, or None
    when no such call is present.
    """
    statements = "".join(txt).split(";")
    for statement in statements:
        statement = statement.strip()
        if "renderRatingGraph" not in statement:
            continue
        inner = statement[statement.index("[") + 1 : statement.index("]")]
        counts = [int(piece) for piece in inner.split(",")]
        return {5 - position: count for position, count in enumerate(counts)}
    return None
|
def only_digit(s: str) -> str:
    """Return s with every non-digit character removed."""
    return ''.join(filter(str.isdigit, s))
|
def ProfitArrayToHierarchy(oneway, prune=None):
    """
    Fold one-way trade rows into a nested hierarchy:

        fromId
            toId
                commodityId = trade row

    Rows whose AbaseId equals BbaseId are discarded as anomalous, and an
    existing (fromId, toId, commodityId) entry is never overwritten. Pass an
    existing hierarchy as ``prune`` to add to it.
    """
    if prune is None:
        prune = dict()
    for row in oneway:
        src = row["AbaseId"]
        dst = row["BbaseId"]
        if src == dst:  # anomalous data, discard
            continue
        commodity_map = prune.setdefault(src, {}).setdefault(dst, {})
        commodity = row["commodityId"]
        # profitPh (profit / hours) is now computed on the db side
        if commodity not in commodity_map:
            commodity_map[commodity] = row
    return prune
|
def get_dict_list(data_array):
    """Turn a 2-D array whose first row holds the column headers into a list
    of per-row dictionaries keyed by those headers."""
    headers = data_array[0]
    return [
        {headers[i]: row[i] for i in range(len(headers))}
        for row in data_array[1:]
    ]
|
def get_lr_run_identifier(lrs):
    """Build the run identifier for the given learning rates.

    Args:
        lrs (dict): contains the learning rate for every searched rate.
    Returns:
        str: unique "name-value" pieces joined with '_'; w_search/alpha/beta
        are taken in order until the first missing one, then w_eval if given.
    """
    pieces = []
    for name in ('w_search', 'alpha', 'beta'):
        if name not in lrs:
            break  # stop at the first missing searched rate
        pieces.append(f"{name}-{lrs[name]}")
    if 'w_eval' in lrs:
        pieces.append(f"w_eval-{lrs['w_eval']}")
    return "_".join(pieces)
|
def Yagi13_fitcoefs(ell):
    r"""
    Coefficients of the Yagi 2013 fits for the multipolar tidal parameters
    $\bar{\lambda}_\ell = 2 k_\ell/(C^{2\ell+1} (2\ell-1)!!)$,
    Tab. I (NS), http://arxiv.org/abs/1311.0872.

    Returns [] for unsupported ell.
    """
    coefficients = {
        3: [-1.15, 1.18, 2.51e-2, -1.31e-3, 2.52e-5],
        4: [-2.45, 1.43, 3.95e-2, -1.81e-3, 2.8e-5],
    }
    return coefficients.get(ell, [])
|
def list_diff_want_only(want_list, have_list):
    """
    Return the values that appear only in want_list.

    :param want_list:
    :param have_list:
    :return: None when want_list is empty/None but have_list is not;
        want_list itself when have_list is empty/None; otherwise the list of
        items present in want_list and absent from have_list.
    """
    if have_list and not want_list:
        return None
    if not have_list:
        return want_list
    combined = have_list + want_list
    return [item for item in combined
            if item in want_list and item not in have_list]
|
def has_progressed_validly(status_old, status_new):
    """
    If there happened to be progress between the two statuses we check if it was valid.
    Thus, we return False if there was invalid progress, and True otherwise. (even if there was no progress)
    stage change counts as progress, but has to be done in the right order.
    also if both runs are in the same stage, we compare progress.
    :param status_old: json describing a run status as returned from the '/projects/{project_id}/runs/{run_id}/status'
    endpoint
    :param status_new: same as above
    :return: False if the progress was not valid
    """
    old_stage = status_old['current_stage']['number']
    new_stage = status_new['current_stage']['number']
    # Moving forward a stage is always valid; moving backward never is.
    if old_stage < new_stage:
        return True
    elif old_stage > new_stage:
        return False
    # both in the same stage then
    if 'progress' in status_old['current_stage'] and 'progress' in status_new['current_stage']:
        # NOTE(review): these asserts are sanity checks on server-reported
        # values and are stripped under `python -O`.
        assert 0 <= status_new['current_stage']['progress']['relative'] <= 1.0, "{} not between 0 and 1".format(status_new['current_stage']['progress']['relative'])
        assert 0 <= status_old['current_stage']['progress']['relative'] <= 1.0, "{} not between 0 and 1".format(status_old['current_stage']['progress']['relative'])
        # Within one stage, relative progress may stay equal but not decrease.
        if status_new['current_stage']['progress']['relative'] < status_old['current_stage']['progress']['relative']:
            return False
        else:
            return True
    else:  # same stage, no progress info
        return True
|
def changeFrom12To24(aString):
    """
    Convert a pm hour string ("1".."11") to its 24-hour equivalent
    ("13".."23").

    A lookup table replaces the previous chain of eleven sequential if
    statements. As before, any other value — including "12" — is returned
    unchanged.

    :param aString: String representing an hour value.
    :return: the converted hour string
    """
    pm_hours = {str(hour): str(hour + 12) for hour in range(1, 12)}
    return pm_hours.get(aString, aString)
|
def gene_to_filename(gene):
    """
    Make a formal gene name suitable for file names: lowercase, no spaces.
    """
    return gene.replace(' ', '').lower()
|
def win_percent(num_games, game_wins):
    """Return each game's win fraction (wins / num_games) rounded to 2 places."""
    return [round(wins / num_games, 2) for wins in game_wins]
|
def _flatten_dict(original_dict):
"""Flatten dict of dicts into a single dict with appropriate prefixes.
Handles only 2 levels of nesting in the original dict.
Args:
original_dict: Dict which may contain one or more dicts.
Returns:
flat_dict: Dict without any nesting. Any dicts in the original dict have
their keys as prefixes in the new dict.
Raises:
ValueError if the original dict has more than two levels of nesting.
"""
flat_dict = {}
for key, value in original_dict.items():
if isinstance(value, dict):
for name, tensor in value.items():
if isinstance(tensor, dict):
raise ValueError("flatten_dict only handles 2 levels of nesting.")
flat_key = "__" + key + "_" + name
flat_dict[flat_key] = tensor
else:
flat_dict[key] = value
return flat_dict
|
def _validate_positive_int(value):
"""Validate value is a natural number."""
try:
value = int(value)
except ValueError as err:
raise ValueError("Could not convert to int") from err
if value > 0:
return value
else:
raise ValueError("Only positive values are valid")
|
def magtoflux(marr, fzero):
    """Convert from magnitude to flux.
    marr--input array in mags
    fzero--zero point for the conversion
    """
    # Standard Pogson relation: f = f0 * 10^(-0.4 * m).
    exponent = -0.4 * marr
    return fzero * 10 ** exponent
|
def find_primes(n):
    """
    Find all primes from 2 up to and including n using the sieve of
    Eratosthenes.

    :param n: inclusive upper bound; values below 2 yield an empty list
    :return: list of primes in ascending order
    """
    if n < 2:
        return []
    # Boolean sieve avoids the original's mutate-while-iterating pattern
    # (it removed from `candidates` inside loops over that same list) and
    # runs in O(n log log n) instead of O(n^2).
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(n ** 0.5) + 1):
        if is_prime[p]:
            # Start at p*p: smaller multiples were struck by smaller primes.
            for multiple in range(p * p, n + 1, p):
                is_prime[multiple] = False
    return [i for i, prime in enumerate(is_prime) if prime]
|
def normalize(name: str) -> str:
    """Normalize a value name to a valid Python (PEP8 compliant) identifier.
    Args:
      name: The name of a value returned by MPD.
    Returns:
      The normalized name, in all lowercase with - replaced by _.
    """
    # Replacing before lowercasing is equivalent: lower() never touches
    # '-' or '_', and the replacement never touches letters.
    return name.replace('-', '_').lower()
|
def child_support_payor_c(responses, derived):
    """ Return who the payor is depends on the monthly amount from Factsheet C

    :param responses: mapping of questionnaire answers; the two amount
        fields are expected to hold numeric strings
    :param derived: unused here; kept for interface consistency with callers
    :return: 'you', 'spouse', or 'both' when the amounts are equal
    """
    def _amount(key):
        # Treat missing, blank, or non-numeric answers as 0. TypeError is
        # caught as well as ValueError so a None answer (key present but
        # unanswered) behaves like an invalid string instead of crashing.
        try:
            return float(responses.get(key, 0))
        except (ValueError, TypeError):
            return 0

    amount_1 = _amount('your_child_support_paid_c')
    amount_2 = _amount('your_spouse_child_support_paid_c')
    if amount_1 > amount_2:
        payor = 'you'
    elif amount_1 < amount_2:
        payor = 'spouse'
    else:
        payor = 'both'
    return payor
|
def table_append(row, category='default'):
    """ Take any number of string args and put them together as a
    HTML table row.

    :param row: iterable of cell contents (assumed already HTML-safe)
    :param category: 'header' renders <th> cells instead of <td>
    :return: the assembled '<tr>...</tr>' string
    """
    # Build with the correct tag up front instead of string-replacing
    # 'td>' with 'th>' afterwards, which also corrupted any cell text
    # that happened to contain 'td>'.
    tag = 'th' if category == 'header' else 'td'
    cells = f'</{tag}><{tag}>'.join(row)
    return f'<tr><{tag}>{cells}</{tag}></tr>'
|
def all_lowercase(title_words):
    """
    if the words are all in lowercase
    >>> all_lowercase(["a", "b", "c"])
    True
    >>> all_lowercase(["a", "12b", "c12"])
    True
    >>> all_lowercase(["a", "12b", "C12"])
    False
    """
    # Join once, then check that lowercasing is a no-op.
    merged = ''.join(title_words)
    lowered = merged.lower()
    return lowered == merged
|
def boundaries_intersection(boundaries):
    """
    compute the intersections inside a boundary
    :param boundaries: list of list of vertex indices corresponding to the path
    :return: list of [index1, index2, common vertices] for each pair of
        boundaries sharing at least one vertex
    """
    # Convert each boundary to a set once, instead of rebuilding both sets
    # inside the inner loop for every pair as the original did.
    vertex_sets = [set(bound) for bound in boundaries]
    bound_conn = []
    for ind1 in range(len(vertex_sets) - 1):
        for ind2 in range(ind1 + 1, len(vertex_sets)):
            common = vertex_sets[ind1] & vertex_sets[ind2]
            if common:
                bound_conn.append([ind1, ind2, list(common)])
    return bound_conn
|
def merge(base, overrider):
    """ Override base dict with an overrider dict, recursively. """
    for k, v in overrider.items():
        if not isinstance(v, dict):
            # Plain values simply overwrite.
            base[k] = v
        else:
            # Descend into nested dicts, creating the sub-dict if absent.
            merge(base.setdefault(k, {}), v)
    return base
|
def escape_dn_chars(s):
    """
    Escape all DN special characters found in s
    with a back-slash (see RFC 4514, section 2.4)
    """
    if s:
        # Backslash must be escaped first so later escapes aren't doubled.
        s = s.replace('\\','\\\\')
        s = s.replace(',' ,'\\,')
        s = s.replace('+' ,'\\+')
        s = s.replace('"' ,'\\"')
        s = s.replace('<' ,'\\<')
        s = s.replace('>' ,'\\>')
        s = s.replace(';' ,'\\;')
        s = s.replace('=' ,'\\=')
        # RFC 4514 requires NUL to be escaped as the hex pair \00;
        # the original emitted backslash + a raw NUL byte instead.
        s = s.replace('\000' ,'\\00')
        # A leading '#' or space must be escaped...
        if s[0]=='#' or s[0]==' ':
            s = ''.join(('\\',s))
        # ...as must a trailing space.
        if s[-1]==' ':
            s = ''.join((s[:-1],'\\ '))
    return s
|
def divergence_index(constant, iterations):
    """
    :param constant: A complex number (the position)
    :param iterations: The number of iterations to run at the given constant
    :return: n = the number of stable iterations
    """
    # Iterate z -> z^2 + c, counting steps while |z| stays within the
    # bail-out radius (4 here) and the iteration budget is not exhausted.
    count = 0
    z = 0
    while count < iterations and abs(z) <= 4:
        z = z * z + constant
        count += 1
    return count
|
def ensure_scripts(linux_scripts):
"""
Creates the proper script names required for each platform
(taken from 4Suite)
"""
from distutils import util
if util.get_platform()[:3] == 'win':
scripts_ = linux_scripts + [script + '.bat'
for script in linux_scripts]
else:
scripts_ = linux_scripts
return scripts_
|
def reducer_cumsum(event, data):
    """Parse an int from the event text and add it to the running 'cumsum'.

    :param event: tuple whose second element is the text to parse
    :param data: reducer state dict; the running total lives under 'cumsum'
    :return: (action, new_state) tuple; on a parse failure the action is
        ('ERROR', 'not an int') and the returned state is {}
    """
    text = event[1]
    try:
        val = int(text)
    except (ValueError, TypeError):
        # Narrowed from the original bare `except Exception as e` (e was
        # unused): only parse failures are expected here.
        # NOTE(review): returning {} discards any accumulated sum on a bad
        # input — this matches the original behavior, but confirm it is
        # intentional.
        return ('ERROR', 'not an int'), {}
    data.setdefault('cumsum', 0)
    data['cumsum'] += val
    return ("SEND", f"Sum is {data['cumsum']}"), data
|
def write_select_dimensions(dims_dict):
"""Create a string for a SELECT part of an SQL query for dimension tables
Given a dictionary key-value pairs, this function outputs a string to be
used as part of an SQL SELECT section. This is meant to be used when
flatenning a table, and the given dict should contain all Key-Title pairs
of the relevant dimensions.
For more details: https://www.cbs.nl/-/media/statline/documenten/handleiding-cbs-opendata-services.pdf?la=nl-nl
"""
string = ""
for i, (key, title) in enumerate(dims_dict.items()):
if i == 0:
string += (
f" {key}.Key AS {title.lower().replace(' ', '_')}_code" # no comma for first item
f"\n , {key}.Title AS {title.lower().replace(' ', '_')}"
)
else:
string += (
f"\n , {key}.Key AS {title.lower().replace(' ', '_')}_code"
f"\n , {key}.Title AS {title.lower().replace(' ', '_')}"
)
return string
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.