def getCycleNodeStamp(cycle, node):
"""
Returns a CCCNNN stamp for this cycle and node.
Useful for comparing the current cycle/node with requested snapshots in the settings
See Also
--------
isRequestedDetailPoint : compares a cycle,node to the dumpSnapshots list.
extractCycleNodeFromStamp : does the opposite
"""
snapText = "{0:03d}{1:03d}".format(cycle, node)
return snapText
|
def factorial(number):
"""Function that finds the factorial number of the number introduced
Args:
number (int): Number that you want to find the factorial of
Raises:
ValueError: if the value of number is less than 0 it raises this error with a warning
Returns:
int: Factorial of {number}
"""
if number < 0:
raise ValueError(
'Introdujo un numero negativo [' + str(number) + '] intente de nuevo')
if number <= 0:
return 1
else:
return number * factorial(number - 1)
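# Illustrative usage (added example, not part of the original snippet):
assert factorial(5) == 120
assert factorial(0) == 1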
|
def construct_names(paths):
""" """
names = []
for p in paths:
names.append("-".join([e["name"] for e in p]))
return names
|
def score(G, results):
"""The score is the number of edges we got correctly, divided by the number of edges we guessed."""
points = 0
for src, (_timestamp, detected_node) in results.items():
if detected_node in G[src]:
points += 1
return points / float(len(results))
|
def is_expression(tpn_field):
"""Return True IFF .tpn value `tpn_field` defines a header expression.
Initially `tpn_field` is either a value from the TpnInfo "values" field or
it is the value of the "presence" field. In both cases, an expression
is signified by bracketing the value in parens.
"""
return tpn_field.startswith("(") and tpn_field.endswith(")")
|
def _convert_paths_to_flask(transmute_paths):
"""flask has it's own route syntax, so we convert it."""
paths = []
for p in transmute_paths:
paths.append(p.replace("{", "<").replace("}", ">"))
return paths
|
def from_submission_status_annotations(annotations):
"""
Convert back from submission status annotation format to a normal dictionary.
Example::
submission_status.annotations = from_submission_status_annotations(submission_status.annotations)
"""
dictionary = {}
for key, value in annotations.items():
if key in ['stringAnnos', 'longAnnos']:
dictionary.update({kvp['key']: kvp['value'] for kvp in value})
elif key == 'doubleAnnos':
dictionary.update({kvp['key']: float(kvp['value']) for kvp in value})
else:
dictionary[key] = value
return dictionary
|
def is_decimal_amount(string: str) -> bool:
"""Checks if string is a decimal amount (e.g. 1/2, 1/4, etc..)
Args:
string: string to be checked.
Returns:
True if string is a decimal amount, False otherwise.
"""
if "/" not in string or len(string.split("/")) != 2:
return False
string_split = string.split("/")
return string_split[0].isnumeric() and string_split[1].isnumeric()
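# Illustrative usage (added example, not part of the original snippet):
assert is_decimal_amount("1/2") is True
assert is_decimal_amount("0.5") is False    # no slash
assert is_decimal_amount("1/2/3") is False  # more than one slash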
|
def fibonacci(length=10):
"""Get fibonacci sequence given it length.
Parameters
----------
length : int
The length of the desired sequence.
Returns
-------
sequence : list of int
The desired Fibonacci sequence
"""
if length < 1:
raise ValueError("Sequence length must be > 0")
sequence = [0] * (length + 2)
sequence[0] = 0
sequence[1] = 1
for i in range(2, len(sequence)):
sequence[i] = sequence[i - 1] + sequence[i - 2]
return sequence[: -2]
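# Illustrative usage (added example, not part of the original snippet):
assert fibonacci(5) == [0, 1, 1, 2, 3]
assert fibonacci(1) == [0]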
|
def remove_directories(list_of_paths):
"""
    Removes non-leaf directories from a list of paths
"""
found_dirs = set('/')
for path in list_of_paths:
dirs = path.strip().split('/')
for i in range(2,len(dirs)):
found_dirs.add( '/'.join(dirs[:i]) )
paths = [ path for path in list_of_paths if
(path.strip() not in found_dirs) and path.strip()[-1]!='/' ]
return paths
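# Illustrative usage (added example, not part of the original snippet): only leaf paths survive.
assert remove_directories(['/a', '/a/b', '/a/b/c.txt', '/d.txt']) == ['/a/b/c.txt', '/d.txt']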
|
def shape_to_string(shape):
"""
On Windows, shape tuples use long ints which results in formatted shapes
such as (2L, 3L). This function ensures that the shape is always formatted
without the Ls.
"""
return "({0})".format(", ".join(str(int(item)) for item in shape))
|
def wilight_to_opp_position(value):
"""Convert wilight position 1..255 to opp.format 0..100."""
return min(100, round((value * 100) / 255))
|
def format_value(value, fmt):
"""
Convert numerical value to string with a specific format
Parameters
----------
value : int or float
Numerical variable to convert.
fmt : str
String format used to apply the conversion
Returns
-------
string_value : str
String containing a formatted version of the value
Examples
--------
>>> format_value(30.5, ".3f")
'30.500'
>>> format_value(30.5, "5g")
'30.5'
>>> format_value(123, "d")
'123'
>>> format_value(123, ".2f")
'123.00'
"""
return "{value:>{fmt}}".format(value=value, fmt=fmt).strip()
|
def identity(*args):
""" Always returns the same value that was used as its argument.
Example:
>>> identity(1)
1
>>> identity(1, 2)
(1, 2)
"""
if len(args) == 1:
return args[0]
return args
|
def _object_get(entry, attr):
"""
simple getter over object entry
Example::
class A(object):
def __init__(self, attribute):
self.attribute = attribute
        def foo(self):
return self.attribute
a = A(10)
#: calls foo as it's callable
_object_get(a, 'foo')
#: returns attribute
_object_get(a, 'attribute')
:param object entry: an object to operate with
:param str attr: object's attribute to get or to invoke (if it's callable,
i.e. method)
.. note::
        You cannot use methods with one or more mandatory parameters
:rtype: object
:return: entry's attribute
"""
return (
getattr(entry, attr)() if callable(getattr(entry, attr))
else getattr(entry, attr)
) if hasattr(entry, attr) else None
|
def without_duplicates(duplicates, similar_songs):
"""Creates a list that only includes those strings which occur once in the
similar_songs list.
Args:
duplicates: Usually the list that is the return list of get_duplicates.
similar_songs: Usually the list that is the return list of
find_similar_songs.
Returns:
A list of all items that are found in the similar_songs list but not in
the duplicates list.
"""
result = []
for song in similar_songs:
if song not in duplicates:
result.append(song)
return result
|
def point2str(point, precision=1):
"""
Format a point for printing, based on specified precision with trailing zeros. Uniform printing for vector-like data
(tuple, numpy array, list).
Args:
point (vector-like): nD point with floating point coordinates.
precision (int): Number of digits after the decimal point.
Return:
        String representation of the given point "xx.xxx yy.yyy zz.zzz...".
"""
return ' '.join(format(c, '.{0}f'.format(precision)) for c in point)
|
def normalize_optical(optical_flow, min, max):
"""
Min-max normalizes optical flow given min and max values
"""
return ((optical_flow - min)/(max - min))
|
def parse_pair(src):
"""
parse '(field, "value")' definition to tuple (field, value)
"""
token = next(src)
if token != '(':
return None, None
token = next(src)
field = token
token = next(src)
if token == ')':
return field, None
elif token != ',':
return None, None
token = next(src)
value = token
token = next(src)
if token != ')':
return None, None
return field, value
|
def redshiftFromScale(scale):
"""
Converts a scale factor to redshift.
:param scale: scale factor
:type scale: float or ndarray
:return: redshift
:rtype: float or ndarray
"""
return 1. / scale - 1.
|
def has_subpattern(string: str) -> bool:
"""
String subpattern recognition I
In this kata you need to build a function to return
either true/True or false/False if a string can be
seen as the repetition of a simpler/shorter subpattern or not.
Strings will never be empty and can be composed of any character
(just consider upper- and lowercase letters as different entities)
and can be pretty long (keep an eye on performances!).
:param string:
:return:
"""
length = len(string)
n = 2
    while n <= length:
if length % n != 0:
n += 1
continue
if string[0:length // n] * n == string:
return True
n += 1
return False
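# Illustrative usage (added example, not part of the original snippet):
assert has_subpattern("ababab") is True
assert has_subpattern("aaa") is True
assert has_subpattern("abc") is False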
|
def next_id(arr: list) -> int:
""" This function returns the smallest unused ID. """
    sorted_arr = sorted(arr)
    if len(sorted_arr) == 0 or sorted_arr[0] != 0:
        return 0
    for i in range(len(sorted_arr) - 1):
        # a gap larger than 1 means the ID right after the current one is free
        if sorted_arr[i + 1] - sorted_arr[i] > 1:
            return sorted_arr[i] + 1
    return sorted_arr[-1] + 1
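# Illustrative usage (added example, not part of the original snippet):
assert next_id([0, 1, 2, 5]) == 3
assert next_id([]) == 0
assert next_id([3]) == 0   # 0 itself is unused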
|
def compare_locations_fc(fields, fcrow, id_vals, loc_fields):
"""Compares the values of each of a list of fields with
the corresponding values in a dictionary.
    Compares values across field types.
Returns True if the values are different"""
status = False
for loc_field in loc_fields:
if loc_field in fields:
loc_index = fields.index(loc_field)
try:
if id_vals[loc_field].is_integer():
id_vals[loc_field] = int(id_vals[loc_field])
except AttributeError:
pass
try:
if fcrow[loc_index].is_integer():
fcrow[loc_index] = int(fcrow[loc_index])
except AttributeError:
pass
if not str(id_vals[loc_field]).upper() == str(fcrow[loc_index]).upper():
status = True
break
return status
|
def contains_any(seq, aset):
""" Check whether sequence seq contains ANY of the items in aset. """
for c in seq:
if c in aset:
return True
return False
|
def ascii(str):
"""Return a string with all non-ascii characters hex-encoded"""
if type(str) != type(''):
return map(ascii, str)
rv = ''
for c in str:
if c in ('\t', '\n', '\r') or ' ' <= c < chr(0x7f):
rv = rv + c
else:
rv = rv + '\\' + 'x%02.2x' % ord(c)
return rv
|
def unique(lst):
"""
Returns a list made up of the unique values found in lst. i.e., it
removes the redundant values in lst.
"""
lst = lst[:]
unique_lst = []
# Cycle through the list and add each value to the unique list only once.
for item in lst:
if unique_lst.count(item) <= 0:
unique_lst.append(item)
# Return the list with all redundant values removed.
return unique_lst
|
def terminalise_image(output):
"""
    Join a 2D list of characters into a newline-separated string.
    :return: the joined string"""
return "\n".join(["".join(row) for row in output])
|
def fl2int(x, deci=3):
"""
Convert floating point number to integer.
"""
return int(round(x * pow(10, deci)))
|
def get_language_file_path(language):
"""
:param language: string
:return: string: path to where the language file lies
"""
return "{lang}/localization_{lang}.json".format(lang=language)
|
def edge_loops_from_edges(mesh, edges=None):
"""
    Edge loops defined by edges.
    Takes mesh.edges or a list of edges and returns the edge loops
    as a list of vertex-index lists, e.g. [[1, 6, 7, 2], ...].
    Closed loops have matching start and end values.
"""
line_polys = []
# Get edges not used by a face
if edges is None:
edges = mesh.edges
if not hasattr(edges, "pop"):
edges = edges[:]
while edges:
current_edge = edges.pop()
vert_end, vert_start = current_edge.vertices[:]
line_poly = [vert_start, vert_end]
ok = True
while ok:
ok = False
# for i, ed in enumerate(edges):
i = len(edges)
while i:
i -= 1
ed = edges[i]
v1, v2 = ed.vertices
if v1 == vert_end:
line_poly.append(v2)
vert_end = line_poly[-1]
ok = 1
del edges[i]
# break
elif v2 == vert_end:
line_poly.append(v1)
vert_end = line_poly[-1]
ok = 1
del edges[i]
# break
elif v1 == vert_start:
line_poly.insert(0, v2)
vert_start = line_poly[0]
ok = 1
del edges[i]
# break
elif v2 == vert_start:
line_poly.insert(0, v1)
vert_start = line_poly[0]
ok = 1
del edges[i]
# break
line_polys.append(line_poly)
return line_polys
|
def get_bbox(points2d_px: list, width: int, height: int, clip=True):
"""Get 2d bounding box in pixel for a normalized 2d point list.
Args:
points2d_px (list): List of normalized 2d points.
width (int): Image width in pixels.
height (int): Image heigh in pixels.
clip (bool, optional): Clip values outside of picture. Defaults to True.
Returns:
tuple: x_min, y_min, x_max, y_max in pixels.
"""
    x_min = float("inf")
    x_max = float("-inf")
    y_min = float("inf")
    y_max = float("-inf")
for point2d_px in points2d_px:
x,y = point2d_px
#x*=width
#y*=height
if x < x_min:
x_min=x
if x > x_max:
x_max=x
if y < y_min:
y_min=y
if y > y_max:
y_max=y
if clip:
x_min=max(x_min,0)
y_min=max(y_min,0)
x_max=min(x_max,width)
y_max=min(y_max,height)
return x_min, y_min, x_max, y_max
|
def graph_labeling_to_list(t, keys):
"""Creates a list that contains the tree's labels (according to the pre-order traversal).
Args:
t: a dictionary that contains the nodes of a labeled tree
keys: sorted keys of the given t
Returns:
        tuple: the tree's labels in key order
"""
label_list = []
for i in keys:
label_list.append(t[i].label)
return tuple(label_list)
|
def guess_fused_orths(word, ud_forms):
"""The UD data 'fused tokens' don't necessarily expand to keys that match
the form. We need orths that exact match the string. Here we make a best
effort to divide up the word."""
if word == "".join(ud_forms):
# Happy case: we get a perfect split, with each letter accounted for.
return ud_forms
elif len(word) == sum(len(subtoken) for subtoken in ud_forms):
# Unideal, but at least lengths match.
output = []
remain = word
for subtoken in ud_forms:
assert len(subtoken) >= 1
output.append(remain[: len(subtoken)])
remain = remain[len(subtoken) :]
assert len(remain) == 0, (word, ud_forms, remain)
return output
else:
# Let's say word is 6 long, and there are three subtokens. The orths
# *must* equal the original string. Arbitrarily, split [4, 1, 1]
first = word[: len(word) - (len(ud_forms) - 1)]
output = [first]
remain = word[len(first) :]
for i in range(1, len(ud_forms)):
assert remain
output.append(remain[:1])
remain = remain[1:]
assert len(remain) == 0, (word, output, remain)
return output
|
def sort_decls(sexp):
"""Sort all toplevel variable declarations in sexp.
This is used to work around the fact that
ir_reader::read_instructions reorders declarations.
"""
assert isinstance(sexp, list)
decls = []
other_code = []
for s in sexp:
if isinstance(s, list) and len(s) >= 4 and s[0] == 'declare':
decls.append(s)
else:
other_code.append(s)
return sorted(decls) + other_code
|
def flatten_dict(dic, merge_symbol=':'):
"""
Flattens a tree like dictionary, merging root keys as the
concatenation of all branch keys with the specified merge_symbol (str).
"""
flattened_dict = {}
for key, val in dic.items():
if isinstance(val, dict):
            inner_dict = flatten_dict(val, merge_symbol)
flattened_dict.update({key + merge_symbol + k_inner : val_inner \
for k_inner, val_inner in inner_dict.items()})
else:
flattened_dict.update({key : val})
return flattened_dict
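# Illustrative usage (added example, not part of the original snippet):
assert flatten_dict({'a': {'b': 1}, 'c': 2}) == {'a:b': 1, 'c': 2}
assert flatten_dict({'a': {'b': 1}}, merge_symbol='.') == {'a.b': 1}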
|
def get_operand_string(mean, std_dev):
"""
Generate the Operand String to be used in workflow nodes to supply
mean and std deviation to alff workflow nodes
Parameters
----------
mean: string
mean value in string format
std_dev : string
std deviation value in string format
Returns
-------
op_string : string
"""
str1 = "-sub %f -div %f" % (float(mean), float(std_dev))
op_string = str1 + " -mas %s"
return op_string
|
def gen_mock_key(func_name, key):
# type: (str, str) -> str
"""
    Concatenate the function name and the key to store in the mock DB
"""
return '{0}-{1}'.format(func_name, key)
|
def to_list(obj):
"""
Checks if obj is a list and returns it. If not, returns an empty list.
:param obj:
:return:
"""
if isinstance(obj, list):
return obj
else:
return []
|
def toList(given):
"""
This will take what is given and wrap it in a list if it is not already
a list, otherwise it will simply return what it has been given.
:return: list()
"""
if not isinstance(given, (tuple, list)):
given = [given]
return given
|
def s_round(value, places=2):
    """
    Correctly round float to n decimal places.
    >>> s_round(4.055, 2)
    4.06
    >>> s_round(4.054, 2)
    4.05
    """
    # nudge the value by a tiny epsilon so borderline floats such as 4.055
    # (stored as 4.0549999...) round up as expected
    return round(value + 10**(-2*6), places)
|
def is_int(value):
"""Check if given value is integer
Parameters
----------
value : variable
Returns
-------
bool
"""
if value is not None:
try:
int(value)
return True
        except (ValueError, TypeError):
return False
else:
return False
|
def checkBucketName(bucketName):
""" Checks to make sure bucket names input are valid according to S3 naming conventions
:param bucketName: Name of bucket to check
:return: Boolean - whether or not the name is valid
"""
if (len(bucketName) < 3) or (len(bucketName) > 63): # Bucket names can be 3-63 (inclusively) characters long.
return False
for char in bucketName: # Bucket names can contain letters, numbers, periods, and hyphens
if char.lower() not in "abcdefghijklmnopqrstuvwxyz0123456789.-":
return False
return True
|
def sum_geometric_progression(
ist_pertama: int, rasio_umum: int, ist_bilangan: int
) -> float:
"""
    Return the sum of the first n terms
    of a geometric progression.
>>> sum_geometric_progression(1, 2, 10)
1023.0
>>> sum_geometric_progression(1, 10, 5)
11111.0
>>> sum_geometric_progression(0, 2, 10)
0.0
"""
if rasio_umum == 1:
        # formula for the sum when the common ratio is 1
return ist_bilangan * ist_pertama
    # formula for the sum of the first n terms of a
    # geometric progression
return (ist_pertama / (1 - rasio_umum)) * (1 - rasio_umum ** ist_bilangan)
|
def flatten(x):
"""Flattens nested list"""
if isinstance(x, list):
return [a for i in x for a in flatten(i)]
else:
return [x]
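# Illustrative usage (added example, not part of the original snippet):
assert flatten([1, [2, [3, 4]], 5]) == [1, 2, 3, 4, 5]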
|
def collect_nodes(kind, collection, gen):
"""
Collects nodes with a given kind from a generator,
and puts them in the provided list.
Returns the first node from the generator that does not
match the provided kind.
"""
tail = None
for node in gen:
if node.kind is not kind:
tail = node
break
else:
collection.append(node)
return tail
|
def get_field_val(data, gid, content_id, field_id, default = 'Undefined'):
"""
try to get a field value from the hash generated in field_hash
"""
if gid in data:
if content_id in data[gid]:
if field_id in data[gid][content_id]:
return data[gid][content_id][field_id]
return default
|
def get_distinct_by(key, source):
"""Finds distinct items for the key and returns the result in a list.
:param key: Function computing a key value for each item
:param source: Iterable collection of items
:return: The list of distinct by the key value items
"""
seen_keys = set()
return [item for item in source
if key(item) not in seen_keys and not seen_keys.add(key(item))]
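# Illustrative usage (added example, not part of the original snippet): keep the first item seen for each key.
assert get_distinct_by(lambda x: x % 3, [1, 2, 4, 5, 7]) == [1, 2]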
|
def hex_to_rgb(hex_color):
"""
Converts a 6 digit hex number to RGB.
@param: hex_color - A 6 digit string with values in the range [a-fA-F0-9].
@return: a tuple containing 3 integers.
"""
if not isinstance(hex_color, str):
raise TypeError("'hex_color' must be of type 'str'.")
if len(hex_color) != 6:
raise ValueError("'hex_color' must 6 characters in length "
"(excluding '#') e.g. FF1919.")
r = int(hex_color[0:2], base=16)
g = int(hex_color[2:4], base=16)
b = int(hex_color[4:6], base=16)
return r, g, b
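# Illustrative usage (added example, not part of the original snippet):
assert hex_to_rgb("FF1919") == (255, 25, 25)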
|
def subsequence(X):
"""Returns the Longest Increasing Subsequence in the Given List/Array"""
N = len(X)
P = [0] * N
M = [0] * (N+1)
L = 0
for i in range(N):
lo = 1
hi = L
while lo <= hi:
mid = (lo+hi)//2
if (X[M[mid]] < X[i]):
lo = mid+1
else:
hi = mid-1
newL = lo
P[i] = M[newL-1]
M[newL] = i
if (newL > L):
L = newL
S = []
k = M[L]
steps = [k]
for i in range(L-1, -1, -1):
S.append(X[k])
k = P[k]
steps = [k] + steps
return S[::-1],steps
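# Illustrative usage (added example, not part of the original snippet):
# the first return value is the longest increasing subsequence itself.
lis, _ = subsequence([3, 1, 4, 1, 5, 9, 2, 6])
assert lis == [1, 4, 5, 6]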
|
def str_cannonize(str):
""" Input string with spaces and/or mixed upper and lower
    characters will be converted to a canonical form,
    i.e. all to lowercase and spaces replaced by a '_'.
    Return new canonical string
"""
if not str:
return None
    # collapse any run of whitespace into a single '_' and lowercase the result
    return "_".join(str.lower().split())
|
def generator_next(g):
"""
Return next elements in generator.
    :param g: a generator
    :return: the next element, or None if the generator is exhausted.
"""
try:
e = g.__next__()
return e
except StopIteration:
return None
|
def checkcols(sudoku):
"""
Checks if each column contains each value only once
"""
size = len(sudoku)
for col_num in range(size):
numbercontained = [False for x in range(size)]
for row_num in range(size):
value = sudoku[row_num][col_num]
# if placeholder, ignore it
if value in range(size):
if numbercontained[value]:
return False
else:
numbercontained[value] = True
return True
|
def is_hpp(file):
"""
    Returns True if the file looks like a C++ header (.hpp or .h)
"""
return file.split(".")[-1] in ["hpp", "h"]
|
def fix_date_v2(date_str: str) -> str:
"""Fix v2 dates so they're ISO"""
if date_str.endswith("+1000"):
date_str = date_str[:-2]
return date_str
|
def get_gender_from_label(gender):
"""
Method to get the passed gender in XNAT format
:param gender: gender selected
:return: value accepted by xnat: 'female' or 'male'
"""
if gender.lower() in ['female', 'f']:
return 'female'
elif gender.lower() in ['male', 'm']:
return 'male'
else:
return 'unknown'
|
def convert_dict_into_tuple(py_dict):
"""
Convert dictionary object to tuple.
"""
if not isinstance(py_dict, dict):
return
py_tuple = tuple([x for x in py_dict.values()])
return py_tuple
|
def _get(d, keys):
"""
helper function to get a value from a nested dict with dotted string notation.
    Should not be used from the api.
:param d:
:param keys:
:return:
"""
if not isinstance(d, dict):
return
if "." in keys:
key, rest = keys.split(".", 1)
return _get(d.get(key), rest)
else:
return d.get(keys)
|
def lookup(value, key):
"""
Return a dictionary lookup of key in value
"""
try:
return value[key]
except KeyError:
return ""
|
def encode(codes, sentence):
"""
join the codes together into one string
"""
try:
if not codes:
msg = "\nNo data available for encoding.\nDid you send a valid string?"
raise ValueError(msg)
except ValueError as ve:
print(ve)
return False
output = "".join([codes[letter] for letter in sentence])
return output
|
def clean_up_instances(instances):
"""
Cleans the instance raw data up to send back to client
"""
cleansed_instances = {}
for instance in instances:
cleansed_instances.update({
instance["tags"]["DesktopId"]: {
"instanceid": instance["instanceid"],
"dns": instance["dns"],
"launchtime": instance["launchtime"],
"state": instance["state"],
"screengeometry": instance["tags"]["ScreenGeometry"],
"machine_def_id": instance["tags"]["MachineDef"]
}
})
return cleansed_instances
|
def sales_force_efficiency(number_of_orders_from_visits, number_of_visits):
"""Returns the percentage of visits by the sales force that resulted in orders from customers.
Args:
number_of_orders_from_visits (int): Number of orders generated by sales force visits during the period.
number_of_visits (int): Number of sales force visits during the period.
Returns:
Percentage of visits by the sales force that led to orders.
"""
return (number_of_orders_from_visits / number_of_visits) * 100
|
def getRealFactory(factory):
"""Get the real factory.
Sometimes the original factory is masked by functions. If the function
keeps track of the original factory, use it.
"""
# Remove all wrappers until none are found anymore.
while hasattr(factory, 'factory'):
factory = factory.factory
# If we have an instance, return its class
if not hasattr(factory, '__name__'):
return factory.__class__
return factory
|
def split_arbitrary_thickness_section(key, value):
"""
>>> key = 'T(11)'
>>> value = '[1.2,PT=(123,204)]'
>>> index, out = split_arbitrary_thickness_section(key, value)
>>> index
11
>>> out
[1.2, [123, 204]]
"""
assert key.endswith(')'), 'key=%r' % key
# T(3), CORE(3)
key_id = key[:-1].split('(', 1)[1]
key_id = int(key_id)
if isinstance(value, (int, float)):
return key_id, value
value = value.replace(' ', '')
if 'PT' in value:
bracketed_values = value.strip('[]')
sline = bracketed_values.split(',', 1)
thicknessi = float(sline[0])
pt_value = sline[1].split('=')
assert pt_value[0] == 'PT', pt_value
points = pt_value[1].strip('()').split(',')
assert len(points) == 2, pt_value
int_points = [int(pointi) for pointi in points]
out = [thicknessi, int_points]
else:
out = float(value)
return key_id, out
|
def normalize_dihedral(d):
""" Normalize any number to the range (-180, 180], including 180 """
return d + (180-d)//360*360
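# Illustrative usage (added example, not part of the original snippet):
assert normalize_dihedral(190) == -170
assert normalize_dihedral(-180) == 180
assert normalize_dihedral(180) == 180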
|
def families_pulsed_magnets():
"""Return pulsed magnet families."""
return ['InjSept']
|
def xy_to_bit(x: int, y: int) -> int:
"""Transform x/y coordinates into a bitboard bit number."""
return y * 8 + x
|
def get_path(node):
"""
Return a fake path if necessary.
As an example Aliases use this as their target name in Ninja.
"""
if hasattr(node, "get_path"):
return node.get_path()
return str(node)
|
def import_stmt(imp_from, imp, imp_as):
"""Generate an import statement."""
return (
("from " + imp_from + " " if imp_from is not None else "")
+ "import " + imp
+ (" as " + imp_as if imp_as is not None else "")
)
|
def GetFeedItemIdsForCampaign(campaign_feed):
"""Gets the Feed Item Ids used by a campaign through a given Campaign Feed.
Args:
campaign_feed: the Campaign Feed we are retrieving Feed Item Ids from.
Returns:
        A set of Feed Item IDs.
"""
feed_item_ids = set()
try:
lhs_operand = campaign_feed['matchingFunction']['lhsOperand']
except KeyError:
lhs_operand = None
if (lhs_operand and lhs_operand[0]['FunctionArgumentOperand.Type'] ==
'RequestContextOperand'):
request_context_operand = lhs_operand[0]
if (request_context_operand['contextType'] == 'FEED_ITEM_ID' and
campaign_feed['matchingFunction']['operator'] == 'IN'):
for argument in campaign_feed['matchingFunction']['rhsOperand']:
if argument['xsi_type'] == 'ConstantOperand':
feed_item_ids.add(argument['longValue'])
return feed_item_ids
|
def polyline(points,closed=False):
"""Path instructions for a polyline
Args:
points (list of 2-tuples): The vertices of the polyline
closed (bool, optional): Close the polyline to a polygon. Defaults to False.
Returns:
string: Ipe path instructions
"""
instructions = [ str(points[0][0]), str(points[0][1]), 'm' ] + [ f(p) for p in points[1:] for f in [ lambda p: str(p[0]), lambda p: str(p[1]), lambda _: 'l ' ] ]
if closed: instructions = instructions + ['h ']
return ' '.join(instructions)
|
def dict2list(dct, keylist):
"""
task 0.6.3
procedure taking a dictionary and a keylist
output should be a list of the dict keylist values
e.g. input dct={'a':'A', 'b':'B', 'c':'C'} keylist=['b', 'c', 'a'] output=['B', 'C', 'A']
"""
return [dct[key] for key in keylist]
|
def vararg1(x, y):
""" vararg1 """
z = x + y
return z
|
def RPL_ENDOFEXCEPTLIST(sender, receipient, message):
""" Reply Code 349 """
return "<" + sender + ">: " + message
|
def parse_element_string(elements_str, stoich=False):
""" Parse element query string with macros. Has to parse braces
too, and throw an error if brackets are unmatched.
e.g.
Parameters: '[VII][Fe,Ru,Os][I]'
Returns: ['[VII]', '[Fe,Ru,Os]', '[I]']
e.g.2
Parameters: '[VII]2[Fe,Ru,Os][I]'
Returns: ['[VII]2', '[Fe,Ru,Os]', '[I]']
Parameters:
elements_str: str, chemical formula, including macros.
Keyword arguments:
stoich: bool, parse as a stoichiometry, i.e. check for numbers
Raises:
RuntimeError: if the composition contains unmatched brackets.
Returns:
list: split list of elements contained in input
"""
import re
valid = False
for char in elements_str:
if char not in ['[', ']', '{', '}', ',', ':'] and not char.isalnum():
raise RuntimeError('Illegal character {} detected in query.'.format(char))
valid = False
for char in elements_str:
if char.isupper():
valid = True
break
if not valid:
raise RuntimeError('Composition must contain at least one upper case character.')
elements = [elem for elem in re.split(r'([A-Z][a-z]*)', elements_str) if elem]
if stoich:
tmp_stoich = elements
for ind, strng in enumerate(elements):
if not any(char.isdigit() for char in strng):
tmp_stoich[ind] = [strng]
else:
tmp_stoich[ind] = [elem for elem in re.split(r'([0-9]+)', strng) if elem]
elements = [item for sublist in tmp_stoich for item in sublist]
# split macros
while '[' in elements or '{' in elements or '][' in elements or '}{' in elements or ']{' in elements or '}[' in elements:
tmp_stoich = list(elements)
cleaned = True
while cleaned:
for ind, tmp in enumerate(tmp_stoich):
if tmp == '][':
del tmp_stoich[ind]
tmp_stoich.insert(ind, '[')
tmp_stoich.insert(ind, ']')
cleaned = True
elif tmp == '}{':
del tmp_stoich[ind]
tmp_stoich.insert(ind, '{')
tmp_stoich.insert(ind, '}')
cleaned = True
elif tmp == ']{':
del tmp_stoich[ind]
tmp_stoich.insert(ind, '{')
tmp_stoich.insert(ind, ']')
cleaned = True
elif tmp == '}[':
del tmp_stoich[ind]
tmp_stoich.insert(ind, '[')
tmp_stoich.insert(ind, '}')
cleaned = True
elif ind == len(tmp_stoich) - 1:
cleaned = False
for ind, tmp in enumerate(tmp_stoich):
if tmp == '[':
end_bracket = False
while not end_bracket:
if tmp_stoich[ind + 1] == ']':
end_bracket = True
tmp_stoich[ind] += tmp_stoich[ind + 1]
del tmp_stoich[ind + 1]
for ind, tmp in enumerate(tmp_stoich):
if tmp == '{':
end_bracket = False
while not end_bracket:
if tmp_stoich[ind + 1] == '}':
end_bracket = True
tmp_stoich[ind] += tmp_stoich[ind + 1]
del tmp_stoich[ind + 1]
if ']' in tmp_stoich:
tmp_stoich.remove(']')
if '}' in tmp_stoich:
tmp_stoich.remove('}')
if '' in tmp_stoich:
tmp_stoich.remove('')
elements = tmp_stoich
for elem in elements:
if '}[' in elem or ']{' in elem:
raise RuntimeError('Unmatched brackets in query string')
return elements
|
def matchLines(theLineS):
"""Given a list of lines return a list of objects:
If the line is all digits then divide it by 2.
If the line is all non-digits then make it lower case.
Other lines are represented by None."""
    result = []
    for l in theLineS:
        if l.isdigit():
            # all digits: divide by 2
            result.append(int(l) / 2)
        elif not any(c.isdigit() for c in l):
            # no digits at all: lower case
            result.append(l.lower())
        else:
            # mixed content
            result.append(None)
    return result
|
def cons_function(kwargs):
"""Consumer function gets integers from the queue and
stores them in a list passed throughout the calls.
At the last call, the list plus the kwargs['add'] is
returned."""
kwargs['_name']
is_last_call = kwargs['_last_call']
add = kwargs['add']
value = kwargs['_value']
result = kwargs['_result']
# If our first call was our last call.
if result is None and is_last_call:
return [add]
# Otherwise, if it's not our last call.
if not is_last_call:
if result is None:
int_list = [value]
else:
int_list = result + [value]
# And if it is.
else:
int_list = result + [int(add)]
return int_list
|
def extract(line):
"""Return a tuple (uin, nick) from 'O11111111 nickname'"""
line = line.replace("\n", "")
uin = line[1:line.find("\t")]
# fix uin
uin2 = ""
for c in uin:
if c.isdigit():
uin2 += c
uin = uin2
nick = line[1+line.find("\t"):]
nick = nick.replace("/", "_")
nick = nick.replace(":", "_")
return (uin, nick)
|
def _split_storage_url(storage_object_url):
""" Returns a list containing the bucket id and the object id. """
return storage_object_url.split("/")[2:]
|
def summary(txt):
"""
Returns the first line of a string.
"""
lines = txt.split('\n')
return lines[0]
|
def is_response_blank(response):
"""
Function: [is_response_blank]
:param response: The [response] to check whether the response is blank.
:return: [boolean] `true` if [response] is blank, `false` otherwise.
"""
    return not (response and response.strip())
|
def classify_bmi(bmi):
"""Function to classify BMI into categories"""
if bmi <= 18.5:
return "underweight"
elif bmi <= 25: # Using elif after return is flagged by pylint. Check it out.
return "normal"
elif bmi <= 30:
return "overweight"
else:
return "obese"
|
def peters_f(e):
"""
Compute f(e) from Peters and Mathews (1963) Eq.17
### Parameters:
e (`float/array`): Eccentricity
### Returns:
f (`float/array`): Enhancement factor of gravitational radiation
"""
numerator = 1 + (73/24)*e**2 + (37/96)*e**4
denominator = (1 - e**2)**(7/2)
f = numerator / denominator
return f
|
def calculate_age(seconds):
"""
Convert seconds to time period string
e.g. 6000 -> 1 h 40 m
"""
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return '%d h %02d m' % (h, m)
|
def check_imports(to_check):
"""
    Function to check if a specific package is currently imported. Does not detect module components or shortened module names, e.g.:
    import pandas as pd
    import matplotlib.pyplot as plt
Parameters
----------
    to_check : str | list
String of a single module name, or a list of strings to check
Returns
-------
imported bool | lst
True or False if module is currently imported, returns list in same order as input values if multiple given
"""
    import sys
    modules = sys.modules
if isinstance(to_check, str):
if to_check in modules:
return True
else:
return False
if isinstance(to_check,list):
answer = []
for i in to_check:
if i in modules:
answer.append(True)
else:
answer.append(False)
return answer
|
def get_run_status(results_list):
"""
Returns overall run status either Pass or Fail.
"""
for tc in results_list:
if tc["status"] == "Failed":
return "FAILED"
if tc["status"] == "Not Executed":
return "SETUP-FAILURE"
return "PASSED"
|
def readable_number(num, single_thing='', plura_thing='',):
"""
Convert numbers into human readable format e.g 1000000 to 1M
:param num: the number
:param single_thing: 1 apple
    :param plura_thing: 2 apples
"""
if num is None:
return ''
# Add a space in front of the thing
if single_thing:
single_thing = ' ' + single_thing
if plura_thing:
plura_thing = ' ' + plura_thing
# 0/1 thing
if num <= 1:
return '%i%s' % (num, single_thing)
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
if magnitude == 0:
return '%i%s' % (num, plura_thing)
else:
pattern = '%i%s%s' if str(num).endswith('0') else '%.1f%s%s'
return pattern % (num, ['', 'K', 'M', 'B'][magnitude], plura_thing)
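# Illustrative usage (added example, not part of the original snippet):
assert readable_number(1500000) == '1.5M'
assert readable_number(1, 'apple', 'apples') == '1 apple'
assert readable_number(2500, 'order', 'orders') == '2.5K orders'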
|
def mod_exp(base: int, exp: int, modulus: int) -> int:
"""Calling Python built-in method to implement fast modular exponentiation.
Args:
        base: the base of the exponentiation
        exp: the exponent
        modulus: the modulus
    Returns:
        (base ** exp) % modulus
"""
return pow(base, exp, modulus)
|
def funded_by_grant(paper, grant_id):
"""Return True if the paper was funded by the specified grant.
"""
try:
if grant_id in paper['funding']:
return True
except Exception as e:
pass
return False
|
def get_attr(obj, attrs):
""" Recursively get a value from a nested set of objects
Parameters
----------
obj : any
The root object
attrs : str or list
Either a list of attribute names or a string
of attribute names joined by dots.
Returns
-------
any or None
The value from getattr(getattr(obj, attr[0]), attr[1]) etc. or None if an attribute is not found.
"""
if isinstance(attrs, str):
attrs = attrs.split(".")
key = attrs[0]
if hasattr(obj, key):
if len(attrs) > 1:
value = getattr(obj, key)
if isinstance(value, list):
return [get_attr(item, attrs[1:]) for item in value]
else:
return get_attr(value, attrs[1:])
else:
return getattr(obj, key)
else:
return None
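# Illustrative usage (added example, not part of the original snippet):
from types import SimpleNamespace
obj = SimpleNamespace(a=SimpleNamespace(b=3))
assert get_attr(obj, "a.b") == 3
assert get_attr(obj, "a.missing") is None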
|
def get_median_change_tweets(trend_days):
"""
Function for calculating trend differences between days, based on tweets.
@param trend_days: the data to calculate trend graph from.
    @return: list of points for a graph.
"""
keys = sorted(trend_days.keys())
#print keys
results = []
for i in range(1, len(keys)):
# difference from yesterday til today.
# positive tweets
positive_tweets_today = trend_days[keys[i]]['pos']
positive_tweets_yesterday = trend_days[keys[i - 1]]['pos']
# negative tweets
negative_tweets_today = trend_days[keys[i]]['neg']
negative_tweets_yesterday = trend_days[keys[i - 1]]['neg']
# total amount of tweets
total_amount_of_tweets = trend_days[keys[i]]['tot'] + \
trend_days[keys[i - 1]]['tot']
# change in positive tweets between this and the previous day.
pos_diff = (positive_tweets_today - positive_tweets_yesterday) / (
total_amount_of_tweets * 1.0)
# change in negative tweets between this and the previous day.
neg_diff = (negative_tweets_today - negative_tweets_yesterday) / (
total_amount_of_tweets * 1.0)
# median = the mid point between the positive and negative change points.
# change in sentiment volume between this and the previous day.
median = min([neg_diff, pos_diff]) + abs(pos_diff - neg_diff) / 2
results.append(median)
#print keys[i], "%.4f" % median, trend_days[keys[i]]['tot']
#print "pos", keys[i], trend_days[keys[i]]['neg'] - trend_days[keys[i-1]]['neg']
#print len(results)
return results
|
def errors_to_msg(errors):
"""Convert a dictionary of errors in a well formatted message.
"""
err = '\n'.join(('- %s : %s' % (k, v) for k, v in errors.items()))
    return 'The following errors occurred:\n' + err
|
def setbit(byte, offset, value):
"""
Set a bit in a byte to 1 if value is truthy, 0 if not.
"""
if value:
return byte | (1 << offset)
else:
return byte & ~(1 << offset)
|
def parse_patch_output(output_line):
"""parses the output produced by patch and returns the filename"""
pf = output_line[14:]
if pf[0] == '`':
pf = pf[1:-1] # Remove the quotes
return pf
|
def get_delta(k, c=1.0874, s=1.0187):
"""
Estimate the approximate expected transit depth as a function
of radius ratio. There might be a typo here. In the paper it
uses c + s*k but in the public code, it is c - s*k:
https://github.com/christopherburke/KeplerPORTs
:param k: the dimensionless radius ratio between the planet and
the star
"""
delta_max = k*k * (c + s*k)
return 0.84 * delta_max
|
def read_xlsx(filename):
"""Read information from a XLSX file and return a list.
Parameters
----------
filename : str
Full path and name for the xlsx file.
Returns
-------
    pandas.DataFrame, or an empty list if the file cannot be read.
    """
    try:
        import pandas as pd
        return pd.read_excel(filename)
    except Exception:
        return []
|
def _new_units(first_unit, last_unit):
"""
Units for newly added device
"""
units= {}
units['new_first_unit'] = first_unit
units['new_last_unit'] = last_unit
if units['new_first_unit'] > units['new_last_unit']:
units['new_last_unit'] = first_unit
units['new_first_unit'] = last_unit
return units
|
def safeint(num):
"""converts to int, regardless
"""
    try:
        v = int(num)
    except (ValueError, TypeError):
        v = 0
return v
|
def tranform2bool(input_):
"""
transform the input parameter to boolean
"""
assert input_[0].lower() in ['y', 'n'], 'The input of Yes/No question should start with "y" or "n", please contact with the developer'
if input_[0].lower() == 'y':
return True
elif input_[0].lower() == 'n':
return False
|
def get_default_region_type(current_type):
"""If all shapes in current_type are of identical shape type,
    return this shape type, else the default type "vertical" as the
    lowest common denominator.
Parameters
----------
current_type : list of str
list of current shape types
Returns
----------
default_type : str
default shape type
"""
default = "vertical"
if not current_type:
return default
first_type = current_type[0]
if all(shape_type == first_type for shape_type in current_type):
return first_type
return default
|
def bubblesort(a):
"""Bubble Sort algorithm for sorting a sequence container of values.
"""
l = len(a)
swaps = 1
while swaps > 0:
swaps = 0
for i,j in zip(range(0,l-1),range(1,l)):
if a[i] > a[j]:
t = a[i]
a[i] = a[j]
a[j] = t
swaps += 1
return a
|