def filter_set(unknown_set: set):
"""
    Currently no filtering is done. Previously, this filtered the set based on
    length and the presence of digits or punctuation.
"""
# unk_filter = filter(lambda x: len(x)<30, unknown_set)
# search_pattern = r'[0-9!#$%&()*+,\-./:;<=>?@\[\\\]^_{|}~]'
# unknown_set = set(filter(lambda x: not re.search(search_pattern, x), unk_filter))
return unknown_set
|
def count_leaf_items(item_list):
"""Recursively counts and returns the
number of leaf items in a (potentially
nested) list.
"""
print(f"List: {item_list}")
count = 0
for item in item_list:
if isinstance(item, list):
print("Encountered sublist")
count += count_leaf_items(item)
else:
print(f"Counted leaf item \"{item}\"")
count += 1
print(f"-> Returning count {count}")
return count
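# A small usage sketch (illustrative; assumes count_leaf_items is in scope):
# leaves are counted across all nesting levels, with a printed trace.
assert count_leaf_items([1, [2, 3], [4, [5, 6]]]) == 6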
|
def is_list_of_list(item):
"""
check whether the item is list (tuple)
and consist of list (tuple) elements
"""
if (
type(item) in (list, tuple)
and len(item)
and isinstance(item[0], (list, tuple))
):
return True
return False
|
def subst_env_arg(lst, var):
"""Returns a copy of the list with elements starting with 'var=' changed to
literally 'var='. Used in tests where an environment variable argument to
env(1) contains a system-specific path."""
find = var + "="
new = []
for i in lst:
if i.startswith(find):
i = find
new.append(i)
return new
|
def parse_where(where, prefix):
"""Helper method to parse a dict to SQL where clause query
Parameters
----------
where : dict (key, value)
Key may be field name optionally appended with filtering operator:
name_eq be equal to the value
    name_ne not be equal to the value
name_lt less than the value
name_le less than or equal to the value
name_gt greater than the value
name_ge greater than or equal to the value
name_in be included in the value
name_notin not be included in the value
prefix : str in {'AND','WHERE'}
    Whether this is a continuation of a longer where clause
"""
def parse(name, value):
"""helper method to process filtering operator, if any"""
if name.endswith('_eq'):
return f"{name[:-3]} = '{value}'"
elif name.endswith('_ne'):
return f"{name[:-3]} != '{value}'"
elif name.endswith('_le'):
return f"{name[:-3]} <= '{value}'"
elif name.endswith('_lt'):
return f"{name[:-3]} < '{value}'"
elif name.endswith('_ge'):
return f"{name[:-3]} >= '{value}'"
elif name.endswith('_gt'):
return f"{name[:-3]} > '{value}'"
elif name.endswith('_in'):
value = "','".join(value)
return f"{name[:-3]} in ('{value}')"
elif name.endswith('_notin'):
value = "','".join(value)
return f"{name[:-6]} not in ('{value}')"
else:
return f"{name} = '{value}'"
if where:
if isinstance(where, dict):
where = " AND ".join(f"{parse(k, v)}'" for k,v in where.items())
return " " + prefix + " " + where
return ''
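# A small usage sketch (illustrative; assumes parse_where is in scope). Values
# are interpolated directly into the SQL string, so only use trusted input.
clause = parse_where({'age_ge': 18, 'status': 'active'}, 'WHERE')
# clause == " WHERE age >= '18' AND status = 'active'"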
|
def get_apt_package_name(ecr_package_name):
"""
Few packages have different names in the ecr scan and actual apt. This function returns an
apt name of an ecr package.
:param ecr_package_name: str, name of the package in ecr scans
:param apt_package_name: str, name of the package in apt
"""
name_mapper = {"cyrus-sasl2": "libsasl2-2", "glibc": "libc6", "libopenmpt": "libopenmpt-dev", "fribidi": "libfribidi-dev", }
return name_mapper.get(ecr_package_name, ecr_package_name)
|
def _get_available_memory(mem_feat, constraints=None):
"""Get available memory
If constraints are given, parse constraint string into array of
constraints and compare them to active features. Currently only
handles comma-separated strings and not the more advanced
constructs described in the slurm manual.
Else, the minimum memory for a given partition is returned.
"""
if constraints is None:
return min([int(x['mem']) for x in mem_feat])
try:
constraint_set = set(constraints.split(","))
for x in mem_feat:
if constraint_set.intersection(x["features"]) == constraint_set:
return int(x["mem"])
except Exception as e:
print(e)
raise
|
def eval_code(stmts, locals_=None, globals_=None):
"""
    a helper function to ignore incomplete-syntax errors when evaluating code
    while typing incomplete lines, e.g.: j.clien...
"""
if not stmts:
return
try:
code = compile(stmts, filename="<kosmos>", mode="eval")
except SyntaxError:
return
try:
return eval(code, globals_, locals_)
    except Exception:
return
|
def version_check(version):
"""Checks if OpenFlow version is compatible and returns the version if it is
Parameters
----------
version: hex
The OpenFlow version taken from the SDN switch 'hello' message
Returns
-------
    The OpenFlow version name (e.g. 'OF13') if matched, else 0 for an unknown version
"""
return {
1: 'OF10', # 0x01 -> OF1.0
3: 'OF12', # 0x03 -> OF1.2
4: 'OF13', # 0x04 -> OF1.3
5: 'OF14', # 0x05 -> OF1.4
6: 'OF15', # 0x06 -> OF1.5
}.get(version, 0)
|
def ObtainTaskDict(actionDefine):
"""
Obtain task dict
:param actionDefine: a dictionary from imitationLearning.json
:return: the task list, action dictionary for each task,
dictionary of action name for each task.
"""
# obtain task List for multiTask (default is 0)
taskList = list()
    for actionDefineTmp in actionDefine:
taskList = taskList + actionDefineTmp["task"]
taskList = list(set(taskList))
taskActionDict = dict()
actionNameDict = dict()
for task in taskList:
taskActionDict[task] = list()
actionNameDict[task] = list()
    for actionDefineTmp in actionDefine:
for task in actionDefineTmp["task"]:
taskActionDict[task].append(actionDefineTmp)
    for key in taskActionDict:
        actionNameDict[key] = [action["name"] for action in taskActionDict[key]]
return taskList, taskActionDict, actionNameDict
|
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
|
def StripNewlines(text):
"""Replaces newlines and tabs with a single space."""
return ' '.join(text.strip().split())
|
def hsv_2_rgb(h, s, v):
""" http://stackoverflow.com/questions/24852345/hsv-to-rgb-color-conversion """
    if s == 0.0:
        return [v, v, v]
    i = int(h * 6.)  # XXX assume int() truncates!
    f = (h * 6.) - i
    p, q, t = v * (1. - s), v * (1. - s * f), v * (1. - s * (1. - f))
    i %= 6
if i == 0: return [v, t, p]
if i == 1: return [q, v, p]
if i == 2: return [p, v, t]
if i == 3: return [p, q, v]
if i == 4: return [t, p, v]
if i == 5: return [v, p, q]
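# A small usage sketch (illustrative): h, s, v are in [0, 1] and the result
# is [r, g, b] in [0, 1].
assert hsv_2_rgb(0.0, 1.0, 1.0) == [1.0, 0.0, 0.0]  # red
assert hsv_2_rgb(0.5, 1.0, 1.0) == [0.0, 1.0, 1.0]  # cyan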
|
def find_root_nodes(digraph):
"""Return a set of nodes having no incoming edges in a directed graph.
Args:
digraph (dict): The directed graph.
Returns:
    A set of root nodes.
"""
root_nodes = set()
for ni in digraph:
if all(ni not in digraph[nj] for nj in digraph):
root_nodes.add(ni)
return root_nodes
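# A small usage sketch (illustrative): the digraph maps each node to its
# successors; 'a' and 'd' have no incoming edges.
g = {'a': ['b'], 'b': ['c'], 'c': [], 'd': ['c']}
assert find_root_nodes(g) == {'a', 'd'}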
|
def clean_up(subjects, attributes):
"""
This function takes a list of subject ids which correspond to attributes
in the second list, and forms a dictionary out of them, with the unique
subject id as the key and a list of their attributes as the corresponding
value.
"""
# and as a one-liner:
# return {sub : attributes[subjects.index(sub):subjects.index(sub)+subjects.count(sub)] for sub in set(subjects)}
# create the empty dict that we will keep adding the stuff into one by one
subject_dict = dict()
    # idx is the position counter over the subjects list
    # using enumerate saves us the line: subj_id = subjects[idx]
for idx, subj_id in enumerate(subjects):
# if this is the first time we encounter this subject id, add it to the dict
# the value is an empty list for now, we will now add all the attributes
if subj_id not in subject_dict:
subject_dict[subj_id] = []
# add the current attribute to the list of the subject
subject_dict[subj_id].append(attributes[idx])
return subject_dict
|
def invalid_comment_body_with_non_existent_request(valid_staff_model):
"""
    A fixture for creating a comment body with a non-existent request id.
Args:
valid_staff_model (Model): a valid staff model created by a fixture.
"""
return {
'message': 'I will be working on this soon',
'request_id': 1,
'staff_id': 1,
}
|
def _check_shape_aligned(shape1, shape2):
"""Checks shape1 and shape2 are valid shapes to perform inner product"""
if shape1[-1] == shape2[-1]:
return True
raise ValueError(
f'shapes {shape1} {shape2} not aligned: {shape1[-1]} (dim 0) != {shape2[-1]} (dim 0)')
|
def fix_items_ref(vba_code):
"""
Change Scripting.Dictionary.Items() references to
Scripting.Dictionary.Items.
"""
# Do we need to do this?
if (".Items()(" not in vba_code):
return vba_code
r = vba_code.replace(".Items()(", ".Items(")
return r
|
def is_abstract_function(func):
"""
Whether the given function is abstract in its class.
:param func: The function to check.
:return: True if the function is abstract,
False if not.
"""
return getattr(func, '__isabstractmethod__', False)
|
def watermark(x, w):
"""Watermarks the image by replacing the least significant bits of the image."""
x = format(x, '#010b')
w = format(w, '#010b')
toadd = w[2:4]
temp = x[:-2] + toadd
result = int(temp, 2)
return result
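# A small usage sketch (illustrative): the two most significant bits of the
# 8-bit watermark value replace the two least significant bits of x.
assert watermark(0b10101010, 0b11000000) == 0b10101011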
|
def get_freq_as_keys(freqs_dict):
"""
    Convert a {word: word_freq} dictionary into a {word_freq: freq_occurrence} dictionary
Parameters
----------
freqs_dict: dict
input dictionary {word: word_freq}
Returns
-------
freq2freq_occurence : dict
output dictionary {word_freq : freq_occurrence}
"""
freq2freq_occurence = {}
for _, freq in freqs_dict.items():
if freq not in freq2freq_occurence:
freq2freq_occurence[freq] = 1
else:
freq2freq_occurence[freq] += 1
return freq2freq_occurence
|
def sort_intersection_points(intersections):
"""
sort intersection points from top left to bottom right
"""
y_sorted = sorted(intersections, key=lambda x: x[1])
p12 = y_sorted[:2]
p34 = y_sorted[2:]
p12 = sorted(p12, key=lambda x: x[0])
p34 = sorted(p34, key=lambda x: x[0])
return p12 + p34
|
def buildGeneMap(identifiers, separator="|"):
"""build map of predictions to genes.
Use an identifier syntax of species|transcript|gene. If none is
given, all transcripts are assumed to be from their own gene.
"""
map_id2gene, map_gene2ids = {}, {}
for id in identifiers:
f = id.split(separator)
if len(f) < 3:
gene = id
else:
gene = f[0] + separator + f[2]
map_id2gene[id] = gene
if gene not in map_gene2ids:
map_gene2ids[gene] = []
map_gene2ids[gene].append(id)
return map_id2gene, map_gene2ids
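# A small usage sketch (illustrative) using species|transcript|gene identifiers:
id2gene, gene2ids = buildGeneMap(["hs|t1|g1", "hs|t2|g1", "orphan"])
assert id2gene["hs|t1|g1"] == "hs|g1"
assert gene2ids["hs|g1"] == ["hs|t1|g1", "hs|t2|g1"]
assert gene2ids["orphan"] == ["orphan"]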
|
def indexAll(inputList=None, value=None):
"""
find the index of a given value in a given list
:param inputList: input as a list
:param value: the value that targeted to index
:return: the index of value lies in a list
"""
if not isinstance(inputList, list):
raise TypeError('Input list must be a list object.')
return [i for i, x in enumerate(inputList) if x == value]
|
def action_to_json(action_internal):
"""
Lua indexes starts from 1!
local ACTION_MOVE = 0
local ACTION_ATTACK_HERO = 1
local ACTION_ATTACK_CREEP = 2
local ACTION_USE_ABILITY = 3
local ACTION_ATTACK_TOWER = 4
local ACTION_MOVE_DISCRETE = 5
local ACTION_DO_NOTHING = 6
:param action_internal: action numeric code
:return: bot-compatible JSON action message
"""
bot_action = 6
params = []
if 0 <= action_internal < 16:
# move
bot_action = 5
params.append(int(action_internal))
elif 16 == action_internal:
# attack the nearest creep
bot_action = 2
params.append(1)
elif 17 == action_internal:
        # attack the nearest hero
bot_action = 1
action_response = {
'action': bot_action,
'params': params
}
return action_response
|
def format_register_value(val):
"""
Encode each byte by two hex digits in little-endian order.
"""
result = ""
mask = 0xff
shift = 0
for i in range(0, 8):
x = (val & mask) >> shift
result += format(x, '02x')
mask <<= 8
shift += 8
return result
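# A small usage sketch (illustrative): the 64-bit value is emitted as eight
# little-endian bytes, two hex digits each.
assert format_register_value(0x1234) == '3412000000000000'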
|
def smooth_color(idx, niter):
"""Helper for creating color transitions in loops.
Examples
--------
>>> # change color smoothly blue -> red
>>> from pwtools import mpl
>>> N = 10
>>> for ii in range(N):
... z = mpl.smooth_color(ii,N)
... plot(rand(20)+ii, color=(z,0,1-z))
"""
return float(idx) / float(niter - 1)
|
def _get_rise_fall_coeff_(normalized_time):
"""
This is a smooth function that goes from 0 to 1. It is a 5th order polynomial because it satisfies the following
5 conditions
    value = 0 at t=0 and value = 1 at t=1 (2 conditions)
first derivative = 0 at t=0 and t=1 (2 conditions)
value = 0.5 in the middle
:param normalized_time:
:return:
"""
coeff = (
6.0 * pow(normalized_time, 5.0)
- 15.0 * pow(normalized_time, 4.0)
+ 10.0 * pow(normalized_time, 3.0)
)
return coeff
|
def int_from_bytes(b) -> int:
"""
int_from_bytes - converts bytes to int
Args:
b: bytes
Returns:
int: result
"""
return int.from_bytes(b, 'big')
|
def part1(captcha):
"""
>>> part1("91212129")
9
>>> part1(read_input())
1029
"""
    prev = captcha[0]
    captcha += prev
    total = 0
    for c in captcha[1:]:
        if c == prev:
            total += int(c)
        prev = c
return total
|
def get_status(mal, anilist, kitsu_attr):
"""
Get the airing status of the search.
:param mal: The MAL search result.
:param anilist: The anilist search result.
:param kitsu_attr: The attributes of kitsu search result.
:return: the airing status of the search.
"""
mal_status = mal.get('status')
if mal_status:
return mal_status
anilist_status = anilist.get('status')
if anilist_status:
return anilist_status
kitsu_status = kitsu_attr.get('status')
if kitsu_status:
return kitsu_status
|
def validate_option(option, min_value, max_value):
"""
Validates whether an option number is in the accepted interval (between
min_value and max_value)
"""
if (option >= min_value and option <= max_value):
return True
else:
print("\nNot a valid option!")
print(f'Only numbers between {min_value} and {max_value} are valid.\n')
return False
|
def _clean_varscan_line(line):
"""Avoid lines with non-GATC bases, ambiguous output bases make GATK unhappy.
"""
if not line.startswith("#"):
parts = line.split("\t")
alleles = [x.strip() for x in parts[4].split(",")] + [parts[3].strip()]
for a in alleles:
if len(set(a) - set("GATCgatc")) > 0:
return None
return line
|
def MockPreprocessor(text: str) -> str:
"""A mock preprocessor."""
del text
return "PREPROCESSED"
|
def check_lat(lat):
"""
Checks whether the input latitude is within range and correct type
Parameters
----------
lat : float or int
latitude (-90 to 90) in degrees
Returns
-------
    None. Raises an exception in case of invalid input.
"""
if isinstance(lat, (int, float)):
if abs(lat) > 90:
raise ValueError('latitude should be -90 <= latitude <= 90')
else:
raise TypeError('latitude should be "float" or "int"')
return None
|
def massage_ip(ip: str) -> str:
"""
Prepend 10.10.10 to a string. Allow the user to pass just the last segment of an ipv4 address.
"""
dots = len([c for c in ip if c == "."])
if dots == 0:
return f"10.10.10.{ip}"
return ip
|
def csv_safe(value):
"""Remove any beginning character '=-+@' from string value.
Change None to empty string.
See http://georgemauer.net/2017/10/07/csv-injection.html
"""
if isinstance(value, str):
while len(value) and value[0] in '=-+@':
value = value[1:]
elif value is None:
value = ''
return value
|
def tohtml(filename):
"""
Gets the html file for an rst
"""
return './_build/html/'+filename[:-4]+'.html'
|
def sample_and_log_weights(lnpdf, q_logprob, q_sample, num_samples=100):
""" sample from distribution and produce importance weights """
X = q_sample(num_samples)
llx_q = q_logprob(X)
llx_pi = lnpdf(X, 0)
lnW = llx_pi - llx_q
return X, lnW
|
def get_safe_name(name: str) -> str:
"""Returns the safe version of a username."""
return name.lower().replace(' ', '_')
|
def combine_dictionaries(a, b):
"""
returns the combined dictionary. a's values preferentially chosen
"""
    c = dict(b)
    c.update(a)
    return c
|
def instance_name(msg_name, id):
"""Generate the name of an instance from a msg name"""
return "_" + msg_name.lower() + str(id)
|
def is_even(value):
"""
Checks if a given value is even.
If value is even the function returns True, otherwise it returns False.
"""
return value % 2 == 0
|
def center(s, width):
"""center(s, width) -> string
    Return a centered version of s, in a field of the specified
    width, padded with spaces as needed. The string is never
truncated.
"""
n = width - len(s)
if n <= 0: return s
    half = n//2
if n%2 and width%2:
# This ensures that center(center(s, i), j) = center(s, j)
half = half+1
return ' '*half + s + ' '*(n-half)
|
def is_Tuple_ellipsis(tpl):
"""Python version independent function to check if a typing.Tuple object
contains an ellipsis."""
try:
return tpl.__tuple_use_ellipsis__
except AttributeError:
try:
if tpl.__args__ is None:
return False
# Python 3.6
if tpl.__args__[-1] is Ellipsis:
return True
except AttributeError:
pass
return False
|
def format_agg_rois(rois):
"""
Helper function to format MultiImageMaths command.
Parameters
----------
rois : `list` of `str`s
List of files
Returns
-------
first_image
op_files
op_string
"""
return rois[0], rois[1:], ("-add %s " * (len(rois) - 1)).strip()
|
def allowed_file(filename, extensions):
"""
    Check that the file has an allowed (image) extension
:param filename: string
:param extensions: list
:return bool:
"""
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in extensions
|
def shiftBits(bits, neighborBits, shiftAmount):
"""
Given a set of packet bits, and either bits that
came before or after the normal packet bits, multiply
each neighbor bit by a percentage and add it to the
packet bit.
This is the magic behind better error correction. Since we are
sampling at the Nyquist rate, we can either be sampling
near the optimum sampling points, or the worst sampling
points. We use neighbor samples to influence where we
sample and provide alternate set of values to try to
error correct closer to the optimum sampling point.
For really strong signals, this doesn't have much effect,
most packets will error correct well. But for weaker signals,
this can provide close to a doubling of packets error corrected.
Shift amounts are found in ``SHIFT_BY_PROBABILITY`` and
are ordered by what shift values will produce the quickest
error corrections (using lots of empirical samples). The first shift
is always 0, since this is most likely to match. Well over 99%
of all matches will match within two tries.
You can also get a good first approximation by determining
zero-crossing values, but using the shift table is
essentially just as fast.
Args:
bits (nparray): Sample integers array. Int32.
neighborBits (nparray): Sample integers array of samples either
before (``bitsBefore``) or after (``bitsAfter``) the sample. Int32.
shiftAmount (float): Amount to shift bits toward the
``neighborBits`` as a percentage.
Returns:
nparray: ``bits`` array after shifting.
"""
# Add a percentage of a neighbor bit to the sample bit.
# Samples are positive and negative numbers, so this
# will either raise or lower the sample point.
shiftedBits = (bits + (neighborBits * shiftAmount))/2
# This an alternative formula for shifted bits, works at the
# same speed and produces near identical results. Not sure
# which is the absolute best one to use.
#
# One test running against 110,000 errors, this formula resulted
# in 188 more decodes (5772 to 5564). Over about an 11 minute
# run, this method was only 5 seconds slower.
#
# In a regular run of normal packets, this formula gets the
# same to slightly lesser number of packets.
#
#shiftedBits = ((neighborBits - bits) * shiftAmount) + bits
return shiftedBits
|
def divisible_by(num: int, divisor: int) -> bool:
"""Checks whether a number is evenly divisible by a divisor"""
return num % divisor == 0
|
def deep_reverse_copy(L):
""" assumes L is a list of lists whose elements are ints
Mutates L such that it reverses its elements and also
reverses the order of the int elements in every element of L.
It does not return anything.
"""
# Your code here
R = L[::-1]
mSize = len(L)
for i in range(mSize):
R[i] = R[i][::-1]
return R
|
def _isSameWord(str1: str, str2: str) -> bool:
"""
Tries to figure if given strings are the same words in different forms.
Returns True or False.
:param str1: str
:param str2: str
:return: Bool
"""
return (len(set(str1).difference(set(str2))) < len(str1) / 2) and (
len(set(str2).difference(set(str1))) < len(str2) / 2) and (
str1[0:2] == str2[0:2] if len(str1) < 4 else str1[0:3] == str2[0:3])
|
def arr_to_js_str(arr, converter):
"""Convert a python array to a string represent Javascript array"""
result = str([converter(i) for i in arr])
if converter is bool:
return result.replace('T', 't').replace('F', 'f')
return result
|
def polygon_under_graph(xlist, ylist):
"""Construct the vertex list which defines the polygon filling the space
under
the (xlist, ylist) line graph. Assumes the xs are in ascending order."""
return [(xlist[0], 0.), *zip(xlist, ylist), (xlist[-1], 0.)]
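# A small usage sketch (illustrative), e.g. for feeding matplotlib's
# PolyCollection: the graph line is closed with two points on the x-axis.
assert polygon_under_graph([0, 1, 2], [1, 2, 1]) == [(0, 0.), (0, 1), (1, 2), (2, 1), (2, 0.)]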
|
def test_outer(cond):
"""
>>> test_outer(True)
{}
>>> test_outer(False)
Traceback (most recent call last):
...
UnboundLocalError: local variable 'a' referenced before assignment
"""
if cond:
a = {}
def inner():
return a
return a
|
def strip_brackets(JSON_string):
"""Strips the square brackets from a JSON string
Parameters
----------
JSON_string : str, mandatory
The JSON string to strip the leading and trailing end square brackets from
"""
result = JSON_string.strip("[]")
return result
|
def dateFormattingAnchor (date):
"""Aids in ensuring date format is correct by checking for comma.
.. note::
Integral to dateValidation as of current version; subject to change.
:param str date: The date string which should be in MMMM DD, YYYY format.
    :return: The index of the first comma or -1 if none is found
:rtype: int
"""
    return date.find(",")
|
def get_value_from_dict(data, key):
""" Retrieves a value from a dict based on a given key """
if key:
return data.get(key)
|
def create_feature_names(prefix, n_features):
"""Generate numbered feature names
Example
-------
from seasalt import create_feature_names
from verto.dim1 import trans, meta
from datasets.demo1 import X_train
trans.set_params(**{'pca__n_components': 3})
X_new = trans.fit_transform(X_train)
names = create_feature_names(
meta['feature_names_prefix'], X_new.shape[1])
print(names)
import pandas as pd
df = pd.DataFrame(data=X_new, columns=names)
df
"""
return [prefix + "_" + str(i) for i in range(n_features)]
|
def merge(paragraphs):
"""
Merges the list-inside-list paragraph format back into one string
"""
paragraphs = ["\n".join(sentences) for sentences in paragraphs]
return "\n\n".join(filter(lambda paragraph: len(paragraph) != 0, paragraphs))
|
def dist_cat(u,v):
"""Measure distance between two values. Tested."""
if u == v:
return 0
else:
return 1
|
def getListIndex(diagName,fldLst):
"""Return index of diagName in fldlst (list of diagnostics names);
if diagName is not in fldlist, return -1
"""
    # membership is checked against the list itself (checking against
    # str(fldLst) would match substrings and then crash in .index)
    if diagName in fldLst:
j = fldLst.index(diagName)
else:
j = -1
return j
|
def identify_significant_pairs(list_pairs):
"""Takes in a a list of lists, which include the pairs and the third element of each list is L.O.S.
Args:
list_pairs (list of lists): First element is crypto pair 1, second element crypto pair 2 and third element is L.O.S.
"""
return [x for x in list_pairs if x[2] <= 0.01]
|
def get_acph2_m2_min(m1: float) -> float:
"""
Get minimum value of m2 (second moment) for ACPH(2) fitting.
According to [1], M2 has only lower bound since pow(CV, 2) should be
greater or equal to 0.5.
If m1 < 0, then `ValueError` is raised.
Parameters
----------
m1 : float
Returns
-------
m2_min : float
    Minimum eligible value of the second moment.
"""
if m1 < 0:
        raise ValueError(f"Expected m1 >= 0, but m1 = {m1}")
return 1.5 * m1**2
|
def median(lst):
""" calculate the median in a list of data
Args:
lst (list): list of numeric data
Returns:
    median (float); note the input list is sorted in place
"""
lst.sort()
length = len(lst)
if length % 2 == 0:
median = 0.5 * (lst[length//2-1] + lst[length//2])
else:
        median = lst[length//2]
return median
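# A small usage sketch (illustrative); note the input list is sorted in place.
assert median([3, 1, 2]) == 2
assert median([4, 3, 1, 2]) == 2.5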
|
def is_atom_in_str(species_data_str):
""" searches for an atom in the str
"""
    return 'Atom' in species_data_str
|
def initials(forename, surname, stream):
"""Print initials of the name
Method is memoized, so we show an actual call by writing to stream
"""
print(" ".join(("Call:", str(forename), str(surname))), file=stream)
return "%s%s" % (forename[0], surname[0])
|
def nickname(json):
"""Extract the nickname of the user from a TikTok url.
:param json json:
Parsed JSON of the TikTok video's HTML page.
:rtype: str
:returns:
TikTok user's nickname.
"""
return json["props"]["pageProps"]["itemInfo"]["itemStruct"]["author"]["nickname"]
|
def cmpJSONRR(rr, exp_rr):
"""
Comparing the RR in response with the expected one. Both in JSON format.
- RRs with different TTLs with the expected ones are considered true.
- SOA RRs with different Serial number with the expected ones are considered true.
rr: dict
the RR in response
exp_rr: dict
the expected RR
"""
    for key in rr.keys():
        if rr[key] == exp_rr[key] or key == "TTL":
            continue
        if key == "data" and rr["type"] == "SOA":
            rr1_values = rr[key].split(" ")
            rr2_values = exp_rr[key].split(" ")
            for i in range(len(rr1_values)):
                # field 2 is the SOA serial number, which may differ
                if i != 2 and rr1_values[i] != rr2_values[i]:
                    return False
        else:
            return False
    return True
|
def _default_ret(name):
"""
Set the default response values.
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
return ret
|
def str_to_indices(string):
"""Expects a string in the format '32-123, 256, 280-321'"""
assert not string.endswith(","), "provided string '{}' ends with a comma, pls remove it".format(string)
subs = string.split(",")
indices = []
for sub in subs:
subsubs = sub.split("-")
assert len(subsubs) > 0
if len(subsubs) == 1:
indices.append(int(subsubs[0]))
else:
rang = [j for j in range(int(subsubs[0]), int(subsubs[1]))]
indices.extend(rang)
return sorted(indices)
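# A small usage sketch (illustrative). As written, the upper bound of a range
# is exclusive, so '3-6' expands to 3, 4, 5.
assert str_to_indices("3-6, 9") == [3, 4, 5, 9]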
|
def openmpi_worker_service(count, image, worker_memory):
"""
    :type count: int
    :type image: str
    :type worker_memory: int
    :rtype: dict
"""
service = {
'name': "mpiworker",
'docker_image': image,
'monitor': False,
'required_resources': {"memory": worker_memory},
'ports': [],
'environment': [],
'volumes': [],
'command': '',
'total_count': count,
'essential_count': count,
'startup_order': 0
}
return service
|
def greeter(name):
"""Greeter route callback"""
return f"Hello {name}!"
|
def cmpid(a, b):
"""Compare two objects by their Python id."""
if id(a) > id(b): return 1
if id(a) < id(b): return -1
return 0
|
def square_area(side):
"""Returns the area of a square"""
area = side * side
return area
|
def get_school_round(a_in, n_in):
"""
https://stackoverflow.com/questions/33019698/how-to-properly-round-up-half-float-numbers-in-python
:param a_in:
:param n_in:
:return: float
"""
if (a_in * 10 ** (n_in + 1)) % 10 == 5:
return round(a_in + 1 / 10 ** (n_in + 1), n_in)
else:
return round(a_in, n_in)
|
def merge_parts(compressed):
"""
    Reduces the number of elements when repetitions exceed the maximum length;
    remember to widen the length type from 'uint16' to 'uint32'.
"""
compressed_new = [list(compressed[0])]
for compressed_i, (offset, length, data) in enumerate(compressed[1:]):
if (
data == compressed_new[-1][2] and
offset != 0 and (
(compressed_i + 1) < len(compressed) and
length > compressed[compressed_i + 1][0]
) and (
compressed_new[-1][0] <= compressed_new[-1][1]
) and offset <= compressed_new[-1][1]
):
compressed_new[-1][1] += length
else:
compressed_new.append([offset, length, data])
return [tuple(element) for element in compressed_new]
|
def convert_login_customer_id_to_str(config_data):
"""Parses a config dict's login_customer_id attr value to a str.
Like many values from YAML it's possible for login_customer_id to
either be a str or an int. Since we actually run validations on this
value before making requests it's important to parse it to a str.
Args:
config_data: A config dict object.
Returns:
The same config dict object with a mutated login_customer_id attr.
"""
login_customer_id = config_data.get('login_customer_id')
if login_customer_id:
config_data['login_customer_id'] = str(login_customer_id)
return config_data
|
def get_permissions(self, user):
"""Mixin method to collect permissions for a model instance"""
pre, suf = 'allows_', '_by'
pre_len, suf_len = len(pre), len(suf)
methods = (
m for m in dir(self)
if m.startswith(pre) and m.endswith(suf)
)
perms = dict(
( m[pre_len:0-suf_len], getattr(self, m)(user) )
for m in methods
)
return perms
|
def encode_sentences(sentences, vocab=None, invalid_label=-1, invalid_key='\n', start_label=0):
"""Encode sentences and (optionally) build a mapping
from string tokens to integer indices. Unknown keys
will be added to vocabulary.
Parameters
----------
sentences : list of list of str
A list of sentences to encode. Each sentence
should be a list of string tokens.
vocab : None or dict of str -> int
Optional input Vocabulary
invalid_label : int, default -1
Index for invalid token, like <end-of-sentence>
invalid_key : str, default '\\n'
Key for invalid token. Use '\\n' for end
of sentence by default.
start_label : int
lowest index.
Returns
-------
result : list of list of int
encoded sentences
vocab : dict of str -> int
result vocabulary
"""
idx = start_label
if vocab is None:
vocab = {invalid_key: invalid_label}
new_vocab = True
else:
new_vocab = False
res = []
for sent in sentences:
coded = []
for word in sent:
if word not in vocab:
if not new_vocab:
coded.append(invalid_label)
continue
else:
if idx == invalid_label:
idx += 1
vocab[word] = idx
idx += 1
coded.append(vocab[word])
res.append(coded)
return res, vocab
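# A small usage sketch (illustrative): the vocabulary is built on the fly,
# starting from start_label=0, with '\n' reserved as the invalid token.
res, vocab = encode_sentences([['a', 'b'], ['b', 'c']])
assert res == [[0, 1], [1, 2]]
assert vocab == {'\n': -1, 'a': 0, 'b': 1, 'c': 2}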
|
def complete_list_with(dest, source, update):
"""
Calls *update()* for every missing element in *dest* compared to *source*.
The update function will receive the respective element from *source* as
an argument.
Modifies and returns *dest*.
"""
if len(dest) >= len(source): return dest
while len(dest) < len(source):
dest.append(update(source[len(dest)]))
return dest
|
def safe_hasattr(obj, attr, _marker=object()):
"""Does 'obj' have an attribute 'attr'?
Use this rather than built-in hasattr, as the built-in swallows exceptions
in some versions of Python and behaves unpredictably with respect to
properties.
"""
return getattr(obj, attr, _marker) is not _marker
|
def index_list(s, item, i=0):
"""
    Return the index list of the occurrences of 'item' in string/list 's'.
Optional start search position 'i'
"""
i_list = []
while True:
try:
i = s.index(item, i)
i_list.append(i)
i += 1
        except ValueError:
break
return i_list
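# A small usage sketch (illustrative): works on strings and lists alike.
assert index_list('banana', 'a') == [1, 3, 5]
assert index_list([1, 2, 1, 2], 2) == [1, 3]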
|
def size_to_human(size):
"""Humane readable size, with 1/10 precision"""
if size < 1024:
return str(size)
if size < 1024 * 1024:
return str(round(size / 1024.0, 1)) + ' KiB'
if size < 1024 * 1024 * 1024:
return str(round(size / (1024.0 * 1024), 1)) + ' MiB'
return str(round(size / (1024.0 * 1024 * 1024), 1)) + ' GiB'
|
def get_list_inds(my_ls):
"""Function to find the indexes for points based on 1:4,5,7:9 type notation.
Takes a string.
Returns a list"""
ls = my_ls.split(",")
me = [range(int(x.split(":")[0]), int(x.split(":")[1]) + 1) for x in ls if ":" in x]
import itertools
me_out = list(itertools.chain(*me))
me_out.extend([int(x) for x in ls if ":" not in x])
return me_out
|
def verify_sam_file(sam_file):
"""
Verify that the SAM file is valid
Parameters:
sam_file (str): Path to SAM file
Returns:
(bool): Whether the SAM file is valid or not
"""
    with open(sam_file) as fn:
        for line in fn:
            if line.startswith("@"):
                continue
            # The first non-header line decides: a SAM alignment line must
            # have at least 11 tab-separated fields.
            return len(line.split("\t")) >= 11
    # Header-only (or empty) files are considered invalid.
    return False
|
def get_rgb_from_16(data):
"""Construct an RGB color from 16 bit of data.
Args:
second_byte (bytes): the first bytes read
first_byte (bytes): the second bytes read
Returns:
tuple(int, int, int): the RGB color
"""
# Args are inverted because is little endian
c_r = (data & 0b1111100000000000) >> 11
c_g = (data & 0b0000011111000000) >> 6
c_b = (data & 0b111110) >> 1
return (c_r, c_g, c_b)
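# A small usage sketch (illustrative): each channel occupies 5 bits.
assert get_rgb_from_16(0xFFFF) == (31, 31, 31)
assert get_rgb_from_16(0b1111100000000000) == (31, 0, 0)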
|
def get_list_duplicates(seq):
"""
a = [1,2,3,2,1,5,6,5,5,5]
list_duplicates(a) # yields [1, 2, 5]
"""
seen = set()
seen_add = seen.add
# adds all elements it doesn't know yet to seen and all other to seen_twice
seen_twice = set(x for x in seq if x in seen or seen_add(x))
return list(seen_twice)
|
def news_dict(cleaned_articles):
"""
For the cleaned_articles data, extract only the headline and summary.
:param cleaned_articles: List of dictionaries, extract only the target information.
:return: dictionary of Headlines to Summary.
"""
temp_dict = {}
for i in cleaned_articles:
temp_dict[i['title']] = i['content']
return temp_dict
|
def vidgen_to_multiclass(label: str) -> str:
"""
Map Vidgen labels to multiclass.
:label (str): Raw label.
:return (str): Mapped label.
"""
if label == 'entity_directed_hostility':
return label
elif label == 'counter_speech':
return 'discussion_of_eastasian_prejudice'
elif label == 'discussion_of_eastasian_prejudice':
return label
elif label == 'entity_directed_criticism':
return label
else:
return 'negative'
|
def frequency(lst, search_term):
"""Return frequency of term in lst.
>>> frequency([1, 4, 3, 4, 4], 4)
3
>>> frequency([1, 4, 3], 7)
0
"""
d = {}
for x in lst:
if x in d:
d[x] = d[x] + 1
else:
d[x] = 1
if search_term in d:
return d[search_term]
return 0
|
def GenererCodeBinaire(longueur):
""" Retourne une liste de 2^longueur codes binaires """
if longueur == 1:
return [[False], [True]]
else:
liste = GenererCodeBinaire(longueur - 1)
new_liste = []
for code in liste:
copie = code[:]
code.append(False)
new_liste.append(code)
copie.append(True)
new_liste.append(copie)
return new_liste
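# A small usage sketch (illustrative): all 2^n codes of length n.
assert GenererCodeBinaire(2) == [[False, False], [False, True],
                                 [True, False], [True, True]]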
|
def count_dict_differences(d1, d2, keys=None, raise_on_missing_key=True, print_differences=False):
"""
Return the number of differences between two dictionaries. Useful to compare two policies stored as dictionaries.
Does not properly handle floats that are approximately equal. Mainly use for int and objects with __eq__
Optionally raise an error on missing keys (otherwise missing keys are counted as differences)
Args:
d1 (dict): Dictionary to compare
d2 (dict): Dictionary to compare
keys (list): Optional list of keys to consider for differences. If None, all keys will be considered
raise_on_missing_key (bool): If true, raise KeyError on any keys not shared by both dictionaries
print_differences (bool): If true, print all differences to screen
Returns:
int: Number of differences between the two dictionaries
"""
if keys is None:
keys = d1.keys() | d2.keys()
else:
# Coerce into a set to remove duplicates
keys = set(keys)
differences = 0
for k in keys:
try:
if d1[k] != d2[k]:
if print_differences:
print(f'{k}: {d1.get(k, None)} != {d2.get(k, None)}')
differences += 1
except KeyError:
if raise_on_missing_key:
raise KeyError("Dictionaries do not have the same keys")
else:
differences += 1
return differences
|
def shallow_compare_dict(first_dict, second_dict, exclude=None):
"""
Return a new dictionary with the different key/value pairs found between the first
and the second dicts. Only the equality of the first layer of keys/values is
checked. `exclude` is a list or tuple of keys to be ignored. The values from the
second dict are used in the return value.
"""
difference = {}
for key in second_dict:
if first_dict.get(key) != second_dict[key]:
if isinstance(exclude, (list, tuple)) and key in exclude:
continue
difference[key] = second_dict[key]
return difference
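# A small usage sketch (illustrative): values from the second dict win.
assert shallow_compare_dict({'a': 1, 'b': 2}, {'a': 1, 'b': 3, 'c': 4}) == {'b': 3, 'c': 4}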
|
def _make_isotope_string(element_name, isotope_num):
"""Form a string label for an isotope."""
if isotope_num == 0:
return element_name
else:
return '%s[%d]' % (element_name, isotope_num)
|
def prep_results(s_results):
"""
Will format the search results into a dictionary that can be serialized into JSON
:param s_results:
:return:
"""
result = []
for s_result in s_results:
entry = {'src_tenant': s_result.src_tenant_name,
'src_app_profile': s_result.src_app_profile,
'src_app_profile_type': s_result.src_app_profile_type,
'sourceEpg': s_result.source_epg,
'src_epg_type': s_result.src_epg_type,
'sip': [],
'dip': [],
'dst_tenant': s_result.dst_tenant_name,
'dst_app_profile': s_result.dst_app_profile,
'dst_app_profile_type' : s_result.dst_app_profile_type,
'destEpg': s_result.dest_epg,
'dst_epg_type': s_result.dst_epg_type,
'contract_tenant': s_result.contract_tenant,
'contract': s_result.contract}
for address in sorted(s_result.dip):
entry['dip'].append(str(address))
entry['sip'] = []
for address in sorted(s_result.sip):
entry['sip'].append(str(address))
entry['filter'] = []
for aci_filter in s_result.protocol_filter:
entry['filter'].append(str(aci_filter))
result.append(entry)
return result
|
def quote_val(value):
"""Double quote a string value, if it's a string"""
quote_char = '"'
if isinstance(value, str):
# Is it already quoted?
if len(value) >= 2:
if value[0] == quote_char and value[-1] == quote_char:
# Yes, already quoted
return value
# Nope, quote it
return quote_char + value + quote_char
return value
|
def _get_caching_device(job_name, task_id):
""" Returns the local caching device for a given job name / task id """
if job_name != 'learner':
return None
return '/job:%s/task:%d' % (job_name, task_id)
|
def parse_literal(x):
"""
return the smallest possible data type for a string or list of strings
Parameters
----------
x: str or list
a string to be parsed
Returns
-------
int, float or str
the parsing result
Examples
--------
>>> isinstance(parse_literal('1.5'), float)
True
>>> isinstance(parse_literal('1'), int)
True
>>> isinstance(parse_literal('foobar'), str)
True
"""
if isinstance(x, list):
return [parse_literal(y) for y in x]
elif isinstance(x, (bytes, str)):
try:
return int(x)
except ValueError:
try:
return float(x)
except ValueError:
return x
else:
raise TypeError('input must be a string or a list of strings')
|
def check_slice_list(slice_list):
"""
"""
if not slice_list:
raise ValueError('[Error] Encountered input error for slice_list. ' +
'slice_list is empty')
if isinstance(slice_list, slice):
slice_list = [slice_list]
if not isinstance(slice_list, (tuple, list)):
raise ValueError('[Error] Encountered input error for slice_list. ' +
'Acceptable input types include list or tuple')
if not all([isinstance(s, slice) for s in slice_list]):
raise ValueError('[Error] Encountered non-slices in slice_list.')
if not all([isinstance(s.start, int) for s in slice_list]):
raise ValueError('[Error] Encountered non-integer values in slice_list.')
if not all([isinstance(s.stop, int) for s in slice_list]):
raise ValueError('[Error] Encountered non-integer values in slice_list.')
if not all([s.start >= 0 for s in slice_list]):
raise ValueError('[Error] Encountered negative values in slice_list.')
if not all([s.stop >= 0 for s in slice_list]):
raise ValueError('[Error] Encountered negative values in slice_list.')
return slice_list
|
def my_tokenizer(s):
"""
Split a string s by spaces.
Parameters
----------
s : The string.
Returns
-------
s.split() : A list with the resulting substrings from the split
"""
return s.split()
|