def fizzbuzz(n):
"""
    Return a list containing the numbers from 1 to N, where N is the parameter value. N will never be less than 1.
    :param n: an integer value.
    :return: a list of integers and the strings "Fizz", "Buzz" and "FizzBuzz".
"""
final = []
for i in range(1, n + 1):
if i % 3 == 0 and i % 5 == 0:
final.append("FizzBuzz")
elif i % 3 == 0:
final.append("Fizz")
elif i % 5 == 0:
final.append("Buzz")
else:
final.append(i)
return final
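# Example usage (illustrative):
assert fizzbuzz(5) == [1, 2, "Fizz", 4, "Buzz"]
assert fizzbuzz(15)[-1] == "FizzBuzz"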
|
def indicator_object(passed_keywords: dict) -> dict: # pylint: disable=R0912 # noqa: C901
"""Create a properly formatted single indicator payload.
{
"action": "string",
"applied_globally": true,
"description": "string",
"expiration": "2021-10-22T10:40:39.372Z",
"host_groups": [
"string"
],
"metadata": {
"filename": "string"
},
"mobile_action": "string",
"platforms": [
"string"
],
"severity": "string",
"source": "string",
"tags": [
"string"
],
"type": "string",
"value": "string"
}
"""
# flake8 / pylint both complain about complexity due to the number of if statements.
# Ignoring the complaint as this is just running through the potential passed keywords.
returned_payload = {}
if passed_keywords.get("action", None):
returned_payload["action"] = passed_keywords.get("action", None)
if passed_keywords.get("applied_globally", None):
returned_payload["applied_globally"] = passed_keywords.get("applied_globally", None)
if passed_keywords.get("description", None):
returned_payload["description"] = passed_keywords.get("description", None)
if passed_keywords.get("expiration", None):
returned_payload["expiration"] = passed_keywords.get("expiration", None)
if passed_keywords.get("host_groups", None):
returned_payload["host_groups"] = passed_keywords.get("host_groups", None)
if passed_keywords.get("metadata", None):
returned_payload["metadata"] = passed_keywords.get("metadata", None)
if passed_keywords.get("filename", None):
returned_payload["metadata"] = {
"filename": passed_keywords.get("filename", None)
}
if passed_keywords.get("mobile_action", None):
returned_payload["mobile_action"] = passed_keywords.get("mobile_action", None)
if passed_keywords.get("platforms", None):
returned_payload["platforms"] = passed_keywords.get("platforms", None)
if passed_keywords.get("severity", None):
returned_payload["severity"] = passed_keywords.get("severity", None)
if passed_keywords.get("source", None):
returned_payload["source"] = passed_keywords.get("source", None)
if passed_keywords.get("tags", None):
returned_payload["tags"] = passed_keywords.get("tags", None)
if passed_keywords.get("type", None):
returned_payload["type"] = passed_keywords.get("type", None)
if passed_keywords.get("value", None):
returned_payload["value"] = passed_keywords.get("value", None)
if passed_keywords.get("id", None):
returned_payload["id"] = passed_keywords.get("id", None)
return returned_payload
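# Illustrative usage sketch; the field values below are made up, only the
# keyword names come from the docstring above:
example_payload = indicator_object(passed_keywords={
    "action": "detect",
    "severity": "high",
    "type": "sha256",
    "value": "0123456789abcdef",
    "filename": "sample.exe",  # wrapped into the "metadata" sub-object
})
# -> {'action': 'detect', 'metadata': {'filename': 'sample.exe'},
#     'severity': 'high', 'type': 'sha256', 'value': '0123456789abcdef'}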
|
def create_psql_connection_string(user: str, db_name: str,
host: str, password: str):
"""
    Create the psql connection string.
    user: str = username for the server
    password: str = password for the server
    host: str = hostname of the server
    db_name: str = name of the database to scan
"""
connection_string = (f"host={host} "
f"user={user} dbname={db_name} "
f"password={password} sslmode=require")
return connection_string
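# Illustrative usage; the host/user/database values are made up:
assert create_psql_connection_string(user="scanner", db_name="inventory",
                                     host="db.example.com", password="s3cret") == \
    "host=db.example.com user=scanner dbname=inventory password=s3cret sslmode=require"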
|
def year_month_sorter(x: str):
"""
    :param x: 'year-month' string split by '-'
"""
y, m = x.split('-')
return [int(y), int(m)]
|
def isSubKey(potentialParent:str, potentialChild:str) -> bool:
"""
## Example
`isSubKey("Rocket", "Rocket.name")` -> True
`isSubKey("SimControl", "Rocket.name")` -> False
"""
if potentialParent == "":
# All keys are children of an empty key
return True
pLength = len(potentialParent)
cLength = len(potentialChild)
if cLength <= pLength:
        # Child key must be longer than the parent key
return False
elif potentialChild[:pLength] == potentialParent and potentialChild[pLength] == ".":
# Child key must contain parent key
return True
else:
return False
|
def to_sorted_subfloats(float_string):
"""Convert string of floats into sorted list of floats.
    This function first removes the "TSH" token, converts the remaining
    comma-separated values to floats, and sorts them in ascending order.
    Args:
        float_string (str): A string beginning with "TSH" followed by
            a sequence of comma-separated floats.
Returns:
list: a list of pure sorted floats.
"""
float_list = float_string.split(",")
float_list.remove("TSH")
float_list = [float(i) for i in float_list]
float_list.sort()
return float_list
|
def _residue_key_func(node):
"""
Creates a residue "key" for a node. Keys should be identical only for nodes
that are part of the same residue.
"""
attrs = 'mol_idx', 'chain', 'resid', 'resname'
return tuple(node.get(attr) for attr in attrs)
|
def strip_off_fasta_suffix(s):
"""
e.g. "bin_abc.fasta" --> "bin_abc", or "bin_def.fna" --> "bin_def"
:param s: string to strip fasta suffix off of
:return: string without fasta suffix
"""
print('strip off fasta suffix for {}'.format(s))
    # str.rstrip() strips a set of characters rather than a literal suffix,
    # so test for the suffix explicitly and slice it off instead.
    if s.endswith(".fasta"):
        return s[:-len(".fasta")]
    elif s.endswith(".fna"):
        return s[:-len(".fna")]
    print("Couldn't strip fasta suffix off of {}".format(s))
|
def add_arg_to_cmd(cmd_list, param_name, param_value, is_bool=False):
"""
@cmd_list - List of cmd args.
@param_name - Param name / flag.
@param_value - Value of the parameter
@is_bool - Flag is a boolean and has no value.
"""
if is_bool is False and param_value is not None:
cmd_list.append(param_name)
if param_name == "--eval":
cmd_list.append("\"{0}\"".format(param_value))
else:
cmd_list.append(param_value)
elif is_bool is True:
cmd_list.append(param_name)
return cmd_list
|
def proctime(d):
"""
    Converts D to an integer number of seconds.
    Args:
        d (str): Duration string ending in one of s, m, h, d, or w
Returns:
int: Time in seconds of duration
"""
t = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
suffix = d[-1]
d = int(d[:-1])
d = d * t[suffix]
return d
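# Example usage (illustrative):
assert proctime("90s") == 90
assert proctime("2h") == 7200
assert proctime("1w") == 604800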
|
def fibab(n, a, b):
"""
    Generalized Fibonacci: f(n) = a*f(n-1) + b*f(n-2), with f(n) = 1 for n < 2.
"""
if n<2:
return 1
else:
return a*fibab(n-1,a,b) + b*fibab(n-2,a,b)
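# Illustrative check: with a = b = 1 this reduces to the ordinary Fibonacci
# recurrence with f(0) = f(1) = 1.
assert fibab(5, 1, 1) == 8
assert fibab(4, 2, 1) == 17   # f(n) = 2*f(n-1) + f(n-2)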
|
def sum_of_square_difference(v1, v2):
"""Calculates the sum of the square differences of the components of
v1 and v2 vectors.
"""
sum = 0.0
for c1, c2 in zip(v1, v2):
t = c1 - c2
sum += t * t
return sum
|
def fix_number_value(query_toks, query_toks_no_value):
"""
There is something weird in the dataset files - the `query_toks_no_value` field anonymizes all values,
which is good since the evaluator doesn't check for the values. But it also anonymizes numbers that
should not be anonymized: e.g. LIMIT 3 becomes LIMIT 'value', while the evaluator fails if it is not a number.
"""
def split_and_keep(s, sep):
if not s:
return [""] # consistent with string.split()
# Find replacement character that is not used in string
# i.e. just use the highest available character plus one
# Note: This fails if ord(max(s)) = 0x10FFFF (ValueError)
p = chr(ord(max(s)) + 1)
return s.replace(sep, p + sep + p).split(p)
# input is tokenized in different ways... so first try to make splits equal
temp = []
for q in query_toks:
temp += split_and_keep(q, ".")
query_toks = temp
i_val, i_no_val = 0, 0
while i_val < len(query_toks) and i_no_val < len(query_toks_no_value):
if query_toks_no_value[i_no_val] != "value":
i_val += 1
i_no_val += 1
continue
i_val_end = i_val
while (
i_val + 1 < len(query_toks)
and i_no_val + 1 < len(query_toks_no_value)
and query_toks[i_val_end + 1].lower()
!= query_toks_no_value[i_no_val + 1].lower()
):
i_val_end += 1
if (
i_val == i_val_end
and query_toks[i_val] in ["1", "2", "3", "4", "5"]
and query_toks[i_val - 1].lower() == "limit"
):
query_toks_no_value[i_no_val] = query_toks[i_val]
i_val = i_val_end
i_val += 1
i_no_val += 1
return query_toks_no_value
|
def argmax(l: list) -> int:
"""
    Pure-Python equivalent of numpy.argmax(): return the index of the largest element in l.
"""
return l.index(max(l))
|
def _ArgsException(key):
"""
    Build the JSON error response for a missing required attribute.
:param key: the missed required attribute
:return: response string
"""
return r"""{"return": "Missed required attribute: %s", "code": "Failed"}""" % key
|
def make_ewma_metric(metric, alpha):
""" Format the name of an EWMA metric. """
return f"{metric}-ewma-alpha{alpha}"
|
def trim_method_name(full_name):
"""
Extract method/function name from its full name,
e.g., RpcResponseResolver.resolveResponseObject -> resolveResponseObject
Args:
full_name (str): Full name
Returns:
str: Method/Function name
"""
point_pos = full_name.rfind('.')
if point_pos != -1:
return full_name[point_pos + 1:]
else:
return full_name
|
def shift_scale_theta(theta):
"""
input: between -1 (-180-deg) and +1 (+180 deg)
output: between 0 (-180-deg) and +1 (+180 deg)
"""
return (theta+1)/2
|
def iou(box1, box2):
"""
Implements the intersection over union (IoU) between box1 and box2
Arguments:
box1 -- first box, list object with coordinates (box1_x1, box1_y1, box1_x2, box_1_y2)
box2 -- second box, list object with coordinates (box2_x1, box2_y1, box2_x2, box2_y2)
"""
# Assigns variable names to coordinates for clarity.
(box1_x1, box1_y1, box1_x2, box1_y2) = box1
(box2_x1, box2_y1, box2_x2, box2_y2) = box2
# Calculates the (yi1, xi1, yi2, xi2) coordinates of the intersection of box1 and box2.
xi1 = max(box1_x1,box2_x1)
yi1 = max(box1_y1,box2_y1)
xi2 = min(box1_x2,box2_x2)
yi2 = min(box1_y2,box2_y2)
inter_width = xi2-xi1
inter_height = yi2-yi1
inter_area = max(inter_width,0)*max(inter_height,0)
# Calculates the Union area by using Formula: Union(A,B) = A + B - Inter(A,B)
box1_area = abs((box1_x2-box1_x1)*(box1_y2-box1_y1))
box2_area = abs((box2_x2-box2_x1)*(box2_y2-box2_y1))
union_area = (box1_area+box2_area)-inter_area
# Computes the IoU.
iou = inter_area/union_area
return iou
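# Illustrative check: two 2x2 boxes overlapping in a 1x1 square share area 1
# out of a union of 7; disjoint boxes give 0.
assert abs(iou((0, 0, 2, 2), (1, 1, 3, 3)) - 1 / 7) < 1e-9
assert iou((0, 0, 1, 1), (2, 2, 3, 3)) == 0.0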
|
def sorted_list_difference(expected, actual):
"""Finds elements in only one or the other of two, sorted input lists.
Returns a two-element tuple of lists. The first list contains those
elements in the "expected" list but not in the "actual" list, and the
second contains those elements in the "actual" list but not in the
"expected" list. Duplicate elements in either input list are ignored.
"""
i = j = 0
missing = []
unexpected = []
while True:
try:
e = expected[i]
a = actual[j]
if e < a:
missing.append(e)
i += 1
while expected[i] == e:
i += 1
elif e > a:
unexpected.append(a)
j += 1
while actual[j] == a:
j += 1
else:
i += 1
try:
while expected[i] == e:
i += 1
finally:
j += 1
while actual[j] == a:
j += 1
except IndexError:
missing.extend(expected[i:])
unexpected.extend(actual[j:])
break
return missing, unexpected
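# Illustrative check (inputs must already be sorted):
assert sorted_list_difference([1, 2, 3], [2, 3, 4]) == ([1], [4])
assert sorted_list_difference([1, 1, 2], [1, 2, 2]) == ([], [])   # duplicates ignored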
|
def ensure_str(string):
"""Convert string into str (bytes) object."""
if not isinstance(string, str):
return string.encode('utf-8')
return string
|
def _get_errors(exc):
""" Partial method to extract exception messages to list
"""
if hasattr(exc, 'message'):
errors = exc.messages
else:
errors = [str(exc)]
return errors
|
def greet(name):
"""
    Dynamic route: the variable part of the URL is passed in as `name`.
:param name:
:return:
"""
# redirect('/hello')
return f'<h1>Hello, {name}!</h1>'
|
def _triplet(seq: str, i: int):
"""Return the ith triplet in a sequence.
Args:
seq (str): Sequence
i (int): 0-based index of the target triplet.
Returns:
str: The target triplet.
"""
start = i * 3
return seq[start:start+3]
|
def href_to_basename(href, ow=None):
"""
From the bookcontainer API. There's a typo until Sigil 0.9.5.
"""
if href is not None:
return href.split('/')[-1]
return ow
|
def pythonic(name):
""" Tries and creates pythonic name from c names """
name = str(name)
to_lower = lambda x: x if x == x.lower() else "_" + x.lower()
return name[0].lower() + ''.join([to_lower(u) for u in name[1:]])
|
def _FindRuleForRegister(cfi_row, reg):
"""Returns the postfix expression as string for a given register.
Breakpad CFI row format specifies rules for unwinding each register in postfix
expression form separated by space. Each rule starts with register name and a
colon. Eg: "CFI R1: <rule> R2: <rule>".
"""
out = []
found_register = False
for part in cfi_row:
if found_register:
if part[-1] == ':':
break
out.append(part)
elif part == reg + ':':
found_register = True
return ' '.join(out)
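# Illustrative usage with a made-up CFI row, pre-split into parts:
cfi_row = "CFI R1: .cfa -8 + ^ R2: x".split()
assert _FindRuleForRegister(cfi_row, "R1") == ".cfa -8 + ^"
assert _FindRuleForRegister(cfi_row, "R2") == "x"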
|
def str_quote(str_, quote='"'):
"""Utility to add quotes to strings, but just stringify non-strings."""
if isinstance(str_, str):
return str_.join((quote, quote))
else:
return str(str_)
|
def backtracer(frame):
"""
Gets the full stack backtrace
"""
backtrace = []
curr_frame = frame
while curr_frame is not None:
backtrace.append(
{
'address': '0x%8x' % curr_frame.pc(),
'function': '%s' % curr_frame.name()
}
)
curr_frame = curr_frame.older()
return backtrace
|
def elp(x):
"""These mathematical expressions use infix notation,
here the operator (e.g., +, -, *, or /)
appears in between the operands (numbers).
Python includes many ways to form compound expressions.
    """
    return 3 + 5
|
def get_sleep_time(humidity):
"""
Determines how long to humidify based on some arbitrary thresholds.
:param humidity:
:return:
"""
if humidity > 65:
return None
elif humidity > 55:
return 10
else:
return 20
|
def fft_phase(numbers, offset=0):
"""Perform a phase of flawed frequency transmission."""
output = [0 for __ in numbers]
if offset > len(numbers) // 2:
num_sum = sum(numbers[offset:])
for n in range(offset, len(numbers)):
output[n] = num_sum % 10
num_sum -= numbers[n]
else:
for i, __ in enumerate(numbers, offset):
repeat = i + 1
pattern_value = 1
for n in range(repeat - 1, len(numbers), repeat * 2):
output[i] += sum(numbers[n: n + repeat]) * pattern_value
if pattern_value == 1:
pattern_value = -1
else:
pattern_value = 1
output[i] = abs(output[i]) % 10
return output
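# Illustrative check against the well-known FFT example where one phase turns
# 12345678 into 48226158:
assert fft_phase([1, 2, 3, 4, 5, 6, 7, 8]) == [4, 8, 2, 2, 6, 1, 5, 8]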
|
def compute_dl_target(location, lpp_source, nim_lpp_sources):
"""
Compute suma DL target based on lpp source name.
When the location is empty, set the location path to
/usr/sys/inst.images
    Check whether an lpp_source NIM resource already exists and, if so,
    that its path is the same.
    When the location is not a path, check that a NIM lpp_source
    corresponding to the location value exists, and return the
    location path of that NIM resource.
    return:
        return code : 0 - OK
                      1 - error
        dl_target value, or an error message on failure
"""
if not location or not location.strip():
loc = "/usr/sys/inst.images"
else:
loc = location.rstrip('/')
if loc[0] == '/':
dl_target = "{}/{}".format(loc, lpp_source)
if lpp_source in nim_lpp_sources \
and nim_lpp_sources[lpp_source] != dl_target:
return 1, "SUMA Error: lpp source location mismatch. It already " \
"exists a lpp source '{}' with a location different as '{}'" \
.format(lpp_source, dl_target)
else:
if loc not in nim_lpp_sources:
return 1, "SUMA Error: lpp_source: '{}' does not exist" \
.format(loc)
dl_target = nim_lpp_sources[loc]
return 0, dl_target
|
def sort(arr):
"""
    Simple O(n^2) exchange sort; sorts arr in place in ascending order.
    :param arr: list to sort
    :return: the sorted list
"""
l = len(arr)
for i in range(0, l):
for j in range(i+1, l):
if arr[i] >= arr[j]:
tmp = arr[i]
arr[i] = arr[j]
arr[j] = tmp
return arr
|
def find_scenario_string_from_number(scenario):
"""
    Find a string to represent a scenario from its number (or None in the case of baseline).
Args:
scenario: The scenario value or None for baseline
Returns:
The string representing the scenario
"""
if scenario == 0:
return 'baseline'
elif scenario == 16:
return 'no transmission'
else:
return 'scenario_' + str(scenario)
|
def return_geojson(lat,lng,increment):
"""returns geojson box around lat and lng"""
geojson_geometry = { # (lng,lat)
"type": "Polygon",
"coordinates": [
[
[
lng+increment,
lat+increment
],
[
lng+increment,
lat-increment
],
[
lng-increment,
lat-increment
],
[
lng-increment,
lat+increment
],
[
lng+increment,
lat+increment
]
]
]
}
return geojson_geometry
|
def linesin(line, piece, space):
""" given a line size, piece and space size
return number of pieces that will fit it and
0-based starting position as a tuple"""
# [ ] test edge case - no pieces are fit
pieces, rem = divmod(line+space, piece+space)
if pieces == 0:
return (0, 0)
if rem == 0:
        # pieces match line exactly without left and
# right borders, such as 10s10s10 == 32
# linesin(32, 10, 1) == (3, 0)
return pieces, rem
else:
return pieces, rem//2
|
def explode(pkt, *args):
"""This function takes a dict object and explodes it into the tuple requested.
It returns None for any value it doesn't find.
The only error it throws is if args is not defined.
Example:
pkt = {'a':0, 'b':1}
    pdcomm.explode(pkt, 'a', 'b', 'c') -> (0, 1, None)
"""
if not args:
raise Exception("Required arguments not provided")
# If there is an error make sure to return a tuple of the proper length
if(not isinstance(pkt, dict)):
return tuple([None] * len(args))
# Now just step through the args and pop off everything from the packet
# If a key is missing, the pkt.get(a, None) returns None rather than raising an Exception
return tuple([pkt.get(a, None) for a in args])
|
def net_specs(name, input_shape, num_classes, dtype='float'):
"""Returns object with net specs."""
return {
'name': name,
'input_shape': input_shape,
'num_classes': num_classes,
'dtype': dtype
}
|
def strip_password(password):
"""Strip the trailing and leading whitespace.
Returns:
String
"""
return password.strip()
|
def ir(some_value):
""" Because opencv wants integer pixel values"""
return int(round(some_value))
|
def readUntilNull(s):
"""
Read a string until a null is encountered
returns (string up to null , remainder after null)
"""
item = s.split(b'\0' , 1)
if len(item) == 1:
return (item[0] , None)
else:
return (item[0] , item[1])
|
def square_root_2param_t0_fit(t, a, t0):
"""t^1/2 fit w/ 2 params: slope a and horizontal shift t0."""
return a*(t-t0)**(0.5)
|
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
"""
print "Valid IP address" If IP is valid.
or
print "Invalid IP address" If IP is invalid.
>>> is_ip_v4_address_valid("192.168.0.23")
True
>>> is_ip_v4_address_valid("192.255.15.8")
False
>>> is_ip_v4_address_valid("172.100.0.8")
True
>>> is_ip_v4_address_valid("254.255.0.255")
False
>>> is_ip_v4_address_valid("1.2.33333333.4")
False
>>> is_ip_v4_address_valid("1.2.-3.4")
False
>>> is_ip_v4_address_valid("1.2.3")
False
>>> is_ip_v4_address_valid("1.2.3.4.5")
False
>>> is_ip_v4_address_valid("1.2.A.4")
False
>>> is_ip_v4_address_valid("0.0.0.0")
True
>>> is_ip_v4_address_valid("1.2.3.")
False
"""
octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets)
|
def pre_process_request_data(request_data):
"""
    Pre-process request data: default to an empty dict when None.
"""
if request_data is None:
request_data = dict()
return request_data
|
def is_stdin(fn):
"""Return true if file is stdin"""
return fn in ['-', 'stdin']
|
def isAESround(rnd, aes_rounds):
"""
Return True if rnd is an AES round.
"""
return rnd == 0 or (((rnd + 1) % (aes_rounds + 1)) != 0)
|
def round_income(x):
"""
    Round income down to the nearest 10,000
    Input:
    - income
    Output:
    - income rounded down to the nearest 10,000. Returns 0 if the income
      is less than 30,000 or is 130,000 or more
"""
for y in range(30, 130, 10):
if x >= y*1000 and x < (y+10)*1000:
return y*1000
return 0
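# Illustrative checks:
assert round_income(87654) == 80000
assert round_income(25000) == 0     # below 30,000
assert round_income(135000) == 0    # 130,000 or more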
|
def remove_prefix(_input, prefix):
"""Remove all prefixes in the input.
:param: _input: the input
:param: prefix: the prefix
"""
if not prefix.endswith(":"):
prefix += ':'
if not _input:
return _input
if isinstance(_input, str):
if _input.startswith(prefix):
return _input[len(prefix):]
return _input
if isinstance(_input, dict):
new_result = {}
for k, v in _input.items():
if k.startswith(prefix):
new_result[k[len(prefix):]] = remove_prefix(v, prefix)
else:
new_result[k] = remove_prefix(v, prefix)
return new_result
if isinstance(_input, list):
return [remove_prefix(item, prefix) for item in _input]
return _input
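# Illustrative checks; the prefix is removed recursively from strings,
# dict keys and nested values:
assert remove_prefix("ns:value", "ns") == "value"
assert remove_prefix({"ns:a": 1, "b": {"ns:c": 2}}, "ns") == {"a": 1, "b": {"c": 2}}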
|
def complete_check(input_board: list) -> bool:
"""Checks if someone wins"""
    # Horizontal (rows)
for board_line in input_board:
if board_line.count("X") == 5:
return True
# Vertical
for num in range(5):
column = []
for board_line in input_board:
column.append(board_line[num])
if column.count("X") == 5:
return True
return False
|
def climb_stairs(n: int) -> int:
"""
Args:
n: number of steps of staircase
Returns:
Distinct ways to climb a n step staircase
Raises:
AssertionError: n not positive integer
"""
fmt = "n needs to be positive integer, your input {}"
assert isinstance(n, int) and n > 0, fmt.format(n)
if n == 1:
return 1
dp = [0] * (n + 1)
dp[0], dp[1] = (1, 1)
for i in range(2, n + 1):
dp[i] = dp[i - 1] + dp[i - 2]
return dp[n]
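# Illustrative checks; the counts follow the Fibonacci sequence:
assert climb_stairs(1) == 1
assert climb_stairs(5) == 8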
|
def has_required(*argv):
"""For list of object/attrib pairs confirm that the objects have the attrib."""
npairs = len(argv) // 2
for i in range(npairs):
if not argv[2 * i + 1] in argv[2 * i]:
return False
return True
|
def avg_Q(q_S, q_D):
"""
Return total transmission capacity of station, assuming transmission evenly distributed over hexagonal cell
Parameters
----------
q_S : float
channel capacity in Mbps
q_D : float
data demand rate in Mbps
Returns
-------
q : float
average demand speed, in Mbps, based on Shannon-Hartley Theorem
"""
q = q_S - q_D
return q
|
def parsePersonName(value):
"""
"""
if not value:return None
return value.lower()
|
def _dict_fix_dates(my_dict):
"""Convert values of keys 'start_date' and 'end_date' to proper ISO
date format."""
for key, val in my_dict.items():
if key in ['start_date', 'end_date']:
my_dict[key] = val.isoformat()
return my_dict
|
def get_strand(start, end):
"""
Checks the start and end coordinates of a sequence and returns -1 if the
sequence comes from the 3' strand and 1 if it comes from the 5' strand
start: An integer indicating sequence start location
end: An integer indicating sequence end location
"""
# -
if start > end:
return -1
# +
elif start <= end:
return 1
return 0
|
def efp_directory_size(directory_name):
""""Decode the directory name in the format NNNk to a numeric size, where NNN is a number string"""
try:
if directory_name[-1] == 'k':
return int(directory_name[:-1])
except ValueError:
pass
return 0
|
def which(program):
"""Returns the path to a program to make sure it exists"""
import os
def is_exe(fp):
return os.path.isfile(fp) and os.access(fp, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
|
def format_name(name):
"""
    Format parameter names for output by replacing underscores with spaces.
    :param name: name to clean for output
    :return: the cleaned name
"""
name = name.replace("_", " ")
return name
|
def lose(power_pellet_active, touching_ghost):
"""
:param power_pellet_active: bool - does the player have an active power pellet?
:param touching_ghost: bool - is the player touching a ghost?
:return: bool
"""
    return not power_pellet_active and touching_ghost
|
def prime (num):
"""prime: calclates whether or not a number is prime
Args:
num (int): Number to be evaluated as prime
Returns:
bool: True if the number is prime, False otherwise
    Note: prime() is mostly for internal use; for a more user-friendly version,
see prime_out()
"""
count = num - 1
def remain (div):
ans = num % div
if ans == 0:
            # Divides evenly, so not prime
return True
else:
# Try again
return False
while count > 1:
if remain (count):
return False
else:
count = count - 1
# We're out of the loop, so it must be prime
return True
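# Illustrative checks:
assert prime(2) is True
assert prime(7) is True
assert prime(9) is False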
|
def find_data_list(data):
"""Take an object and find the first list
"""
if isinstance(data, list):
# List, cool
return data
if not isinstance(data, dict):
raise Exception("Loaded data type that we don't know what to do with")
print("Loaded dict with keys: %s" % data.keys())
for key, value in data.items():
# Look for first list value
if isinstance(value, list):
print("Choosing key: `%s`" % key)
return value
raise Exception("Loaded dict with no list values!")
|
def make_batches(size, batch_size):
"""Function extracted from Keras - check keras.engine.training_utils
for the original version.
Returns a list of batch indices (tuples of indices).
# Arguments
size: Integer, total size of the data to slice into batches.
batch_size: Integer, batch size.
# Returns
A list of tuples of array indices.
"""
num_batches = (size + batch_size - 1) // batch_size # round up
return [(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(num_batches)]
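# Illustrative check; the last batch may be shorter than batch_size:
assert make_batches(10, 4) == [(0, 4), (4, 8), (8, 10)]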
|
def _get_angle(range_index, range_len, angle_increment):
"""
Return the angle of the range index.
"""
lidar_angle = (range_index - (range_len / 2)) * angle_increment
steering_angle = lidar_angle / 2
return steering_angle
|
def load_secrets_file(path: str, path_startswith: bool) -> str:
"""Read the file from the given path, which should be a Docker-secrets-like file.
If path_startswith=True, only load the file if the path given starts with "/" or "./".
"""
if not path_startswith or (path.startswith("/") or path.startswith("./")):
with open(path, "r") as secrets_file:
return secrets_file.read().strip()
else:
return path
|
def image_output_size(input_shape, size, stride, padding):
"""Calculate the resulting output shape for an image layer with the specified options."""
if len(size) > 2 and input_shape[3] != size[2]:
print("Matrix size incompatible!")
height = size[0]
width = size[1]
out_depth = size[3] if len(size) > 2 else int(input_shape[3])
input_height = input_shape[1]
input_width = input_shape[2]
if padding == "VALID":
input_height -= height - 1
input_width -= width - 1
return (
int(input_shape[0]),
(input_height + stride[0] - 1) // stride[0],
(input_width + stride[1] - 1) // stride[1],
out_depth
)
|
def binary_search(data, target, low, high):
"""Return True if target is found in indicated portion of a Python list.
The search only considers the portion from data[low] to data[high] inclusive.
"""
if low > high:
return False # interval is empty; no match
else:
mid = (low + high) // 2
if target == data[mid]: # found a match
return True
elif target < data[mid]:
# recur on the portion left of the middle
return binary_search(data, target, low, mid - 1)
else:
# recur on the portion right of the middle
return binary_search(data, target, mid+1, high)
|
def _find_top_snp(sign_snp_data, ld_block_size, is_get_biggest=True):
"""
:param sign_snp_data: A 2D array: [[xpos1, yvalue1, text1], [xpos2, yvalue2, text2], ...]
"""
top_snp = []
tmp_cube = []
for i, (_x, _y, text) in enumerate(sign_snp_data):
if i == 0:
tmp_cube.append([_x, _y, text])
continue
if _x > tmp_cube[-1][0] + ld_block_size:
            # Sort by y value (descending when is_get_biggest is True) and keep
            # only the first element, which is the top SNP of the block.
            top_snp.append(sorted(tmp_cube, key=(lambda x: x[1]), reverse=is_get_biggest)[0])
            tmp_cube = []
        tmp_cube.append([_x, _y, text])
    if tmp_cube:  # handle the last block
        top_snp.append(sorted(tmp_cube, key=(lambda x: x[1]), reverse=is_get_biggest)[0])
return top_snp
|
def interpret_box(box):
"""Expand given tuple for box specification to 4-tuple form.
The returned values are normalized to be of type float, even if
corresponding values of the input weren't originally.
Examples:
>>> interpret_box( (1,2,3,4) )
(1.0, 2.0, 3.0, 4.0)
>>> interpret_box( (1+1j, 2, 3) )
(-1.0, 3.0, -2.0, 4.0)
>>> interpret_box( (1+1j, 2+4j) )
(1.0, 2.0, 1.0, 4.0)
>>> interpret_box( (0+1j, 1) )
(-1.0, 1.0, 0.0, 2.0)
>>> interpret_box( (1-1j) )
(0.0, 2.0, -2.0, 0.0)
"""
if (isinstance(box, float) or isinstance(box, complex) or
isinstance(box, int)):
# Turn scalar into 1-tuple
box = box,
if len(box) == 4:
# Two intervals specified, nothing to do
outbox = box
elif len(box) == 3:
# We have a midpoint and radii for real and imag part
(mid, r_re, r_im) = box
        # Allow real input, interpreting the imaginary part as 0
mid = complex(mid)
outbox = (
mid.real - r_re, mid.real + r_re,
mid.imag - r_im, mid.imag + r_im
)
elif len(box) == 2 and type(box[1]) == complex:
        # We have lower-left and upper-right point in the complex plane
(ll, ur) = box
ll = complex(ll)
outbox = (ll.real, ur.real, ll.imag, ur.imag)
elif len(box) == 2:
(mid, r) = box
mid = complex(mid)
outbox = (mid.real - r, mid.real + r, mid.imag - r, mid.imag + r)
elif len(box) == 1:
mid = complex(box[0])
outbox = (mid.real - 1, mid.real + 1, mid.imag - 1, mid.imag + 1)
else: assert(False)
return tuple([float(v) for v in outbox])
|
def handle_entity_status(input_li, dv_li):
"""Handles DV360 entity status field by applying changes from the input_li to the dv_li
Args:
input_li: object representing the input line item from the input data
dv_li: object representing the DV360 line item
Returns: List of strings representing the fields to patch
"""
if 'Active' in input_li:
if input_li['Active'] != '':
dv_li['entityStatus'] = 'ENTITY_STATUS_ACTIVE' if input_li[
'Active'] == 'TRUE' else 'ENTITY_STATUS_PAUSED'
return ['entityStatus']
return []
|
def find_hotkey(text, attrs, hotkey='hotkey'):
"""
Given an urwid (text, attr) tuple (as returned by, e.g.
:meth:`Text.get_text`), returns the first character of the text matching
the *hotkey* attribute, or ``None`` if no character matches.
"""
ix = 0
for attr, rl in attrs:
if attr == hotkey:
return text[ix]
ix += rl
return None
|
def _order_from_regexp(items, order_regexps):
"""Re-order list given regular expression listed by priorities
Example:
--------
>>> _order_from_regexp(["aA", "aZ", "bb", "bY", "cZ", "aY"], [".*Y", ".*Z"])
['bY', 'aY', 'aZ', 'cZ', 'aA', 'bb']
"""
import re
ordered = list()
for order_regexp in order_regexps:
matched = [item for item in items if re.search(order_regexp, item)]
for item in matched:
ordered.append(item)
items.remove(item)
ordered += items
return ordered
|
def get_gml(geo_json):
"""
Get polygon in GML format given GeoJSON
:param geo_json:
:return:
"""
gml = "<gml:Polygon srsName=\"http://www.opengis.net/gml/srs/epsg.xml#4326\" " \
"xmlns:gml=\"http://www.opengis.net/gml\">\n " \
" <gml:outerBoundaryIs>\n <gml:LinearRing>\n <gml:coordinates>"
coordinates = geo_json["coordinates"][0]
for point in coordinates:
gml = gml + str(point[1]) + "," + str(point[0]) + " "
gml = gml[:-1] + "</gml:coordinates>\n </gml:LinearRing>\n </gml:outerBoundaryIs>\n</gml:Polygon>"
return gml
|
def safe_repr(obj, short=False):
"""
    Helper function to provide backport support for `assertIn` and `assertIsInstance`
    on Python 2.6.
"""
MAX_LENGTH = 80
try:
result = repr(obj)
except Exception:
result = object.__repr__(obj)
if not short or len(result) < MAX_LENGTH:
return result
return result[:MAX_LENGTH] + ' [truncated]...'
|
def merge(dict1, dict2):
"""
    Recursively merge dictionaries.
:param dict1: Base dictionary to merge.
:param dict2: Dictionary to merge on top of base dictionary.
:return: Merged dictionary
"""
for key, val in dict1.items():
if isinstance(val, dict):
dict2_node = dict2.setdefault(key, {})
merge(val, dict2_node)
else:
if key not in dict2:
dict2[key] = val
return dict2
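# Illustrative check; values from dict1 are added to dict2 without overwriting
# keys dict2 already defines:
assert merge({"a": {"x": 1}, "b": 2}, {"a": {"y": 3}}) == {"a": {"x": 1, "y": 3}, "b": 2}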
|
def _get_proxymodules_to_import(connection):
"""
used in _add_functions, _get_generated_proxies, and _remove_functions to get
modules to import proxies from.
"""
if connection and connection.ProxiesNS:
modules = connection.ProxiesNS
return [modules.filters, modules.sources, modules.writers, modules.animation]
else:
return []
|
def load_files(filenames, func_load):
"""Load a list of score files and return a list of tuples of (neg, pos)
Parameters
----------
filenames : :any:`list`
list of file paths
func_load :
function that can read files in the list
Returns
-------
    :any:`list`: [(neg,pos)] A list of tuples, where each tuple contains the
        ``negative`` and ``positive`` scores for each system/probe.
"""
if filenames is None:
return None
res = []
for filepath in filenames:
try:
res.append(func_load(filepath))
except:
raise
return res
|
def reindent(string):
"""
Add the same tab size for each line in a block of lines
:param string: String, Line to apply the format. Ie,
"
This is a block
of lines with
different tabs
"
:return: String, Line with format. Ie,
"
This is a block
of lines with
different tabs
"
"""
return "\n".join(l.strip() for l in string.strip().split("\n"))
|
def human_tidy(agents, self_state, self_name, cube):
"""
@param agents:
@param self_state:
@param self_name:
@param cube:
@return:
@ontology_type cube: Cube
"""
return [("human_pick_cube", cube), ("human_drop_cube",)]
|
def _add_dicts(*dicts):
"""
Creates a new dict with a union of the elements of the arguments
"""
result = {}
for d in dicts:
result.update(d)
return result
|
def toLower(s):
"""
toLower :: str -> str
Convert a letter to the corresponding lower-case letter, if any. Any other
character is returned unchanged.
"""
return s.lower()
|
def formatannotation(annotation, base_module=None):
"""
This is taken from Python 3.7's inspect.py; the only change is to
add documentation.
INPUT:
- ``annotation`` -- annotation for a function
- ``base_module`` (optional, default ``None``)
This is only relevant with Python 3, so the doctests are marked
accordingly.
EXAMPLES::
sage: from sage.misc.sageinspect import formatannotation
sage: import inspect
sage: def foo(a, *, b:int, **kwargs): # py3
....: pass
sage: s = inspect.signature(foo) # py3
sage: a = s.parameters['a'].annotation # py3
sage: a # py3
<class 'inspect._empty'>
sage: formatannotation(a) # py3
'inspect._empty'
sage: b = s.parameters['b'].annotation # py3
sage: b # py3
<class 'int'>
sage: formatannotation(b) # py3
'int'
"""
if getattr(annotation, '__module__', None) == 'typing':
return repr(annotation).replace('typing.', '')
if isinstance(annotation, type):
if annotation.__module__ in ('builtins', base_module):
return annotation.__qualname__
return annotation.__module__+'.'+annotation.__qualname__
return repr(annotation)
|
def extract_datetime_str(line):
"""
Construct string containing recon-all stage date and time from a log string,
for easier parsing with datetime functions.
(Example: "#@# STAGE_NAME Sun Nov 14 12:31:34 UTC 2021" --> "Nov 14 2021 12:31:34")
:param str line: line in recon-surf.log containing recon-all stage info.
This must be of the form:
#@# STAGE_NAME Fri Nov 26 15:51:40 UTC 2021
:return: str datetime_str: extracted datetime string
"""
datetime_str = line.split(' ')[-5] + ' ' + \
line.split(' ')[-4] + ' ' + \
line.split(' ')[-1] + ' ' + \
line.split(' ')[-3]
return datetime_str
|
def lint_py_check_per_line_filter(_repo, cf):
"""
Run multiple line-by-line checks
"""
if not cf or cf.binary or cf.deleted:
return False
with open(cf.name, 'r') as f:
firstline = f.readline()
    if not cf.name.endswith(".py") and 'python' not in firstline:
_repo.log.info("%s: skipping, not a Python file", cf.name)
return False
return True
|
def orientation(p, q, r):
"""
Finds the orientation of an ordered set of vertices(p, q, r).
p: First vertex represented as a tuple.
q: Second vertex represented as a tuple.
r: Third vertex represented as a tuple.
returns:
0 : Collinear points
1 : Clockwise points
2 : Counterclockwise
"""
val = ((q[1] - p[1]) *(r[0] - q[0])) - ((q[0] - p[0]) * (r[1] - q[1]))
if val == 0:
#Collinear
return 0
if val > 0:
# Clock
return 1
else:
# Counterclock
return 2
|
def to_color(category):
"""Map each category color a good distance away from each other on the HSV color space."""
import colorsys
v = (category - 1) * (137.5 / 360)
return colorsys.hsv_to_rgb(v, 1, 1)
|
def is_string_matched_in_regular_expression_objects(string, regex_objects):
""" param regex_objects contains regular expression objects compiled from patterns
    searches string for any occurrence of each regex_object
"""
for regex_object in regex_objects:
if regex_object.search(string):
return True
return False
|
def get_qual(fsize):
"""Returns file size qualifier"""
if (fsize > 2**30):
fsize = fsize/(2**30)
qual = 'Giga'
elif (fsize > 2**20):
fsize = fsize/(2**20)
qual = 'Mega'
elif (fsize > 2**10):
fsize = fsize/(2**10)
qual = 'Kilo'
else: qual = ''
fsize = round(fsize, 2)
return fsize, qual
|
def _process(proc_data):
"""
Final processing to conform to the schema.
Parameters:
proc_data: (List of Dictionaries) raw structured data to process
Returns:
List of Dictionaries. Structured data to conform to the schema.
"""
# nothing more to process
return proc_data
|
def _make_plural(s):
"""Poor man 'plural' version for now"""
if s.endswith('repository'):
return s.replace('repository', 'repositories')
else:
return s + 's'
|
def timeout_cmd(cmd, timeout):
"""Return a command line prefaced with a timeout wrappers and stdout/err unbuffered."""
return 'timeout -sKILL %us stdbuf -o0 -e0 %s' % (timeout, cmd)
|
def canon_decimal(msg):
"""
Format is:
139,99,22
    Handler for messages that come in as decimal, as opposed to hex
"""
m = msg.strip().split(",")
newmsg = [int(x) for x in m]
return newmsg
|
def find_offensive_plays(possible_plays):
"""
Helper function used to find offensive plays in possible plays.
:param possible_plays: list of cards possible to be played
:return: list of cards which can be played as attack
"""
offensive_plays = []
for card in possible_plays:
if card == ('pikes', 'K') or card == ('hearts', 'K'):
offensive_plays.append(card)
elif card[1] in '2 3 4 J'.split():
offensive_plays.append(card)
return offensive_plays
|
def _remove_suffix_apple(path):
"""
Strip off .so or .dylib.
>>> _remove_suffix_apple("libpython.so")
'libpython'
>>> _remove_suffix_apple("libpython.dylib")
'libpython'
>>> _remove_suffix_apple("libpython3.7")
'libpython3.7'
"""
if path.endswith(".dylib"):
return path[: -len(".dylib")]
if path.endswith(".so"):
return path[: -len(".so")]
return path
|
def max_consecutive_ones(x):
    # e.g. x = 111 (0b1101111)
"""
Steps
1. x & x<<1 --> 1101111 & 1011110 == 1001110
2. x & x<<1 --> 1001110 & 0011100 == 0001100
3. x & x<<1 --> 0001100 & 0011000 == 0001000
4. x & x<<1 --> 0001000 & 0010000 == 0000000
:param x:
:return:
"""
count = 0
while x > 0:
x = x & (x << 1)
count += 1
return count
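# Illustrative checks; each AND-with-shift pass shortens every run of 1-bits
# by one, so the pass count equals the longest run:
assert max_consecutive_ones(0b1101111) == 4
assert max_consecutive_ones(0) == 0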
|
def _divide_and_round(a, b):
"""divide a by b and round result to the nearest integer
When the ratio is exactly half-way between two integers,
the even integer is returned.
"""
q, r = divmod(a, b)
r *= 2
greater_than_half = r > b if b > 0 else r < b
if greater_than_half or r == b and q % 2 == 1:
q += 1
return q
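# Illustrative checks; exact halves round to the nearest even integer:
assert _divide_and_round(7, 2) == 4   # 3.5 -> 4
assert _divide_and_round(5, 2) == 2   # 2.5 -> 2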
|
def splglob_simple(pattern):
""" Return a splglob that either matches a full path or match a simple file """
if "/" not in pattern:
# Assume we've been given a simple file name: app.conf, *.tgz
pattern = "^.../{}$".format(pattern)
else:
pattern = "^{}$".format(pattern)
return pattern
|
def is_ascii(str_data: str) -> bool:
"""Checks if string contains only ascii chars.
Necessary because python 3.6 does not have a str.isascii() method.
Parameters
----------
str_data : str
string to check if it contains only ascii characters
Returns
-------
bool
True if only ascii characters in the string, else False.
"""
try:
str_data.encode('ascii')
except (UnicodeEncodeError, AttributeError):
return False
return True
|
def get_imdb_string(number):
"""Convert number to IMDB_ID String.
Args:
number: number entered
Returns:
imdb_id string
"""
return 'tt{arg}'.format(arg=str(int(number)).zfill(7))
|
def get_list_of_tuples(list_given: list) -> list:
"""
:param list_given: List containing List
:return: List containing tuple
"""
list_result = []
for list_item in list_given:
list_result.append(tuple(list_item))
return list_result
|