def smallestRangeI(A, K):
"""
:type A: List[int]
:type K: int
:rtype: int
"""
    A.sort()
    # The spread after shrinking both ends by K, floored at zero.
    return max(0, A[-1] - A[0] - 2 * K)
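# Usage sketch (illustrative values, not from the original source):
print(smallestRangeI([0, 10], 2))    # 6, since 10 - 0 - 2*2 = 6
print(smallestRangeI([1, 3, 6], 3))  # 0, the +/-K adjustment covers the whole spread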
|
def padded_num_to_str(num, characters):
"""Return a string of the number, left-padded with zeros up to characters
num: number to convert
characters: number of total characters in resultant string, zero-padded
"""
word = str(num)
padded_word = (characters-len(word))*'0' + word
return padded_word
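# Usage sketch (illustrative):
print(padded_num_to_str(42, 5))  # '00042'
print(padded_num_to_str(7, 3))   # '007'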
|
def nd_cross_variogram(x1, y2, x2, y1):
"""
Inner most calculation step of cross-variogram
This function is used in the inner most loop of `neighbour_diff_squared`.
Parameters
----------
    x1, x2, y1, y2 : np.array
Returns
-------
np.array
"""
res = (x1 - x2) * (y1 - y2)
return res
|
def _get_used_ports(vms):
"""
Return a set of ports in use by the ``vms``.
:param vms: list of virtual machines
:type vms: list(:class:`~.vm.VM`)
:return: set of ports
:rtype: set(int)
"""
used_ports = set()
for vm in vms:
ip, port = vm.get_ssh_info()
if port:
used_ports.add(port)
return used_ports
|
def create_indexed_tag(key, value):
"""
Creates a tag for the api.
"""
key = key.upper()
key = key.replace("INDEXED_", "")
key = key.replace("UNINDEXED_", "")
return "->".join([key, value]).upper()
|
def sxs_id_from_alt_names(alt_names):
"""Takes an array of alternative names from an SXS metadata.json file
and returns the SXS ID of the simulation."""
pattern = 'SXS'
if not isinstance(alt_names, (list, tuple)):
alt_names = [alt_names]
sxs_id = str(next((ss for ss in alt_names if pattern in ss), None))
return sxs_id
|
def construct_publish_commands(additional_steps=None):
"""Get the shell commands we'll use to actually build and publish a package to PyPI.
Returns:
List[str]: List of shell commands needed to publish a module.
"""
return (additional_steps or []) + [
"python setup.py sdist bdist_wheel",
"twine upload --verbose dist/*",
]
|
def test_for_symbolic_cell(raw_python_source: str) -> bool:
"""
Returns True if the text "# Long" is in the first line of
`raw_python_source`. False otherwise.
"""
first_element = raw_python_source.split("\n")[0]
if "#" in first_element and "symbolic" in first_element.lower():
return True
return False
|
def check_ends_with_period(docstring, context, is_script):
"""First line should end with a period.
The [first line of a] docstring is a phrase ending in a period.
"""
    # `docstring` holds the raw quoted source literal, so eval() unwraps it first.
    if docstring and not eval(docstring).split('\n')[0].strip().endswith('.'):
return True
|
def line1(lines):
""" convert a string with newlines to a single line with ; separating lines
"""
return lines.replace('\n',';')
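# Usage sketch (illustrative):
print(line1("first\nsecond\nthird"))  # 'first;second;third'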
|
def separate_units_by_type(all_units):
"""Separate all_units to their respective unit type group."""
immune = {}
infection = {}
unit_type_to_group = {
'immune': immune,
'infection': infection,
}
for unit_no, unit in all_units.items():
group = unit_type_to_group[unit['type']]
group[unit_no] = unit
return immune, infection
|
def _get_target_column_list(market: str) -> list:
"""Generate a column list according to the ``market``.
Each market has its own column list. This function generates the corresponding list
of column qualifiers in terms of the given ``market``.
Args:
market (str): The given market such as `1x2`, `ah`, `ou` etc.
Returns:
list: A list of column qualifiers.
"""
odds_1x2_cols = ["h", "a", "d"]
odds_ah_cols = ["k", "h", "a"]
odds_ou_cols = ["k", "ovr", "und"]
target_cols = []
if market.startswith("1x2"):
target_cols = odds_1x2_cols
elif market.startswith("ah"):
target_cols = odds_ah_cols
else:
target_cols = odds_ou_cols
return target_cols
|
def remove_duplicates(aList : list) -> list:
"""
Returns a list without duplicates
Parameters:
aList (list): A list.
Returns:
result (list): The given list without duplicates.
"""
result = list(dict.fromkeys(aList))
return result
|
def getSqlServerNameFromJDBCUrl(jdbcURL):
"""Returns the Azure SQL Server name from given Hive, Oozie or Ranger JDBC URL"""
    # Skip the 17-character scheme prefix (e.g. "jdbc:sqlserver://") and drop any ";key=value" properties.
    return jdbcURL[17:].split(";")[0]
|
def get_dict_from_list(smu_info_list):
"""
Given a SMUInfo array, returns a dictionary keyed by the SMU name with SMUInfo as the value.
"""
smu_info_dict = {}
for smu_info in smu_info_list:
smu_info_dict[smu_info.name] = smu_info
return smu_info_dict
|
def get_polygon(annotation):
"""
Extracts a polygon from an annotation
Parameters
----------
- annotation : Kili annotation
"""
try:
return annotation['boundingPoly'][0]['normalizedVertices']
except KeyError:
return None
|
def check_variable_list_are_valid(variable_type_dict):
"""
Checks that the provided variable_type_dict is valid, by:
- Confirming there is no overlap between all variable lists
:param variable_type_dict: A dictionary, with keys describing variables types, and values listing particular
variables
:type variable_type_dict: {str:[str]}
:return: True, if there is no overlap
:rtype: bool
"""
for outer_key, outer_value in variable_type_dict.items():
for inner_key, inner_value in variable_type_dict.items():
# Do not compare variable types to themselves
if inner_key == outer_key:
continue
else:
intersection = set(outer_value).intersection(set(inner_value))
if len(intersection) > 0:
raise ValueError('Variable lists {} and {} overlap, and share key(s): {}'.
format(inner_key, outer_key, intersection))
return True
|
def timeout(func, args=(), timeout_duration=2, default=None, **kwargs):
"""This will spwan a thread and run the given function using the args, kwargs and
return the given default value if the timeout_duration is exceeded
"""
import threading
class InterruptableThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.result = default
def run(self):
try:
self.result = func(*args, **kwargs)
            except:
                # Swallow any exception so the caller just gets the default value.
                pass
it = InterruptableThread()
it.start()
it.join(timeout_duration)
return it.result
|
def hhmmss_to_seconds(hhmmss: str) -> int:
    """Converts a 24-hour clock time to seconds elapsed since 00:00.
    This function assumes 24-hour input, and accepts time in the format
    hh, hh:mm or hh:mm:ss.
    """
power = 2
seconds = 0
for num in hhmmss.split(':'):
seconds += int(num)*(60**power)
power -= 1
return seconds
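# Usage sketch (illustrative):
print(hhmmss_to_seconds("01:30"))     # 5400 (3600 + 1800)
print(hhmmss_to_seconds("01:02:03"))  # 3723 (3600 + 120 + 3)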
|
def get_block_string_indentation(value: str) -> int:
"""Get the amount of indentation for the given block string.
For internal use only.
"""
is_first_line = is_empty_line = True
indent = 0
common_indent = None
for c in value:
if c in "\r\n":
is_first_line = False
is_empty_line = True
indent = 0
elif c in "\t ":
indent += 1
else:
if (
is_empty_line
and not is_first_line
and (common_indent is None or indent < common_indent)
):
common_indent = indent
is_empty_line = False
return common_indent or 0
|
def specificrulevalue(ruleset, summary, default=None):
"""
Determine the most specific policy for a system with the given multiattractor summary.
The ruleset is a dict of rules, where each key is an aspect of varying specificity:
- 2-tuples are the most specific and match systems with that summary.
- Integers are less specific and match systems with that number of distinct attractors.
- The None key indicates the default rule.
"""
specific = None
attractors, monotonic = summary
if summary in ruleset:
specific = summary
elif attractors in ruleset:
specific = attractors
return ruleset[specific] if specific in ruleset else default
|
def get_prob_current(psi, psi_diff):
"""
Calculates the probability current Im{psi d/dx psi^*}.
"""
print("Calculating probability current")
curr = psi*psi_diff
return -curr.imag
|
def textbar(maximum, value, fill="-", length=20):
"""print a text bar scaled to length with value relative to maximum"""
    percent = value / maximum
    num = int(round(length * percent))
    return fill * num
|
def get_diff_between_groups(group1, group2):
"""
Get difference between groups as a set.
"""
return set(group1).difference(set(group2))
|
def splitActionOwnerName(action) :
"""Takes the name of an action that may include both owner and action
name, and splits it.
Keyword arguments:
action - Name of action, possibly including both owner and name.
"""
s = action.split("/")
return (s[0], s[1]) if len(s) > 1 else ("", s[0])
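# Usage sketch (illustrative owner/action names):
print(splitActionOwnerName("octocat/checkout"))  # ('octocat', 'checkout')
print(splitActionOwnerName("checkout"))          # ('', 'checkout')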
|
def _find_from_file(full_doc, from_file_keyword):
"""
Finds a line in <full_doc> like
<from_file_keyword> <colon> <path>
and return path
"""
path = None
for line in full_doc.splitlines():
if from_file_keyword in line:
parts = line.strip().split(':')
if len(parts) == 2 and parts[0].strip() == from_file_keyword:
path = parts[1].strip()
break
return path
|
def render_docstr(func, indent_str='', closing_str=''):
""" Render a docstring as a string of lines.
The argument is either a docstring or an object.
Note that we don't use a sequence, since we want
the docstring to line up left, regardless of
indentation. The shorter triple quotes are
choosen automatically.
The result is returned as a 1-tuple."""
if not isinstance(func, str):
doc = func.__doc__
else:
doc = func
if doc is None:
return None
doc = doc.replace('\\', r'\\')
compare = []
for q in '"""', "'''":
txt = indent_str + q + doc.replace(q[0], "\\"+q[0]) + q + closing_str
compare.append(txt)
doc, doc2 = compare
doc = (doc, doc2)[len(doc2) < len(doc)]
return doc
|
def error_match(actual, expected):
"""Check that an actual error message matches a template."""
return actual.split(":")[0] == expected.split(":")[0]
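# Usage sketch (illustrative): only the part before the first colon is compared.
print(error_match("ValueError: bad input", "ValueError: expected int"))  # True
print(error_match("TypeError: bad input", "ValueError: bad input"))      # False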
|
def pop_type(args, ptype):
""" Reads argument of type from args """
if len(args) > 0 and isinstance(args[0], ptype):
return args.pop(0)
return None
|
def unpack_bool(data):
"""Extract list of boolean from integer
Args:
data (int)
Return:
list: in bool
"""
return [bool(int(i)) for i in '{:016b}'.format(data)]
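# Usage sketch (illustrative): 5 is 0b101, zero-padded to 16 bits, MSB first.
print(unpack_bool(5)[-3:])  # [True, False, True]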
|
def replace_iterable_params(args, kwargs, iterable_params):
"""Returns (args, kwargs) with any iterable parameters converted to lists.
Args:
        args: Positional arguments to a function.
kwargs: Keyword arguments to a function.
iterable_params: A list of (name, index) tuples for iterable parameters.
Returns:
A tuple (args, kwargs), where any positional or keyword parameters in
`iterable_params` have their value converted to a `list`.
"""
args = list(args)
for name, index in iterable_params:
if index < len(args):
args[index] = list(args[index])
elif name in kwargs:
kwargs[name] = list(kwargs[name])
return tuple(args), kwargs
|
def test_wsgi_app(environ, start_response):
"""just a test app"""
status = '200 OK'
response_headers = [('Content-type','text/plain'),('Content-Length','13')]
start_response(status, response_headers)
    return [b'hello world!\n']  # WSGI response bodies must be bytes in Python 3
|
def close(conn):
"""This closes the database connection. Note that this does not
automatically call commit(). If you just close your database connection
without calling commit() first, your changes will be lost.
"""
try:
conn.close()
except:
pass
return True
|
def copy_send(src_feat, dst_feat, edge_feat):
"""doc"""
return src_feat["h"]
|
def get_current_step(t, t0, sequence):
""" returns i, (period, color) """
period = sum(s[0] for s in sequence)
tau = t - t0
while tau - period > 0:
tau -= period
i = 0
while True:
current = sequence[i][0]
if tau < current:
break
else:
i += 1
tau -= current
return i, sequence[i]
|
def simplify_postags(tagged_words):
"""
    Convert part-of-speech tags (Penn Treebank tagset) to the 4 tags {_N, _V, _J, _X} for
    nouns, verbs, adjectives/adverbs, and others.
Beware that this method takes a list of tuples and returns a list of strings.
:param tagged_words: [(str,str)] -- words and their associated POS-tags
:return: [str] -- words ending with {_N, _V, _J, _X}
"""
postags = {"N": ["NN", "NNS", "NNP", "NNPS"],
"V": ["VB", "VBD", "VBG", "VBN", "VBZ", "VBP"],
"J": ["JJ", "JJR", "JJS"]}
simplified = []
for w, t in tagged_words:
if t in postags["N"]:
simplified.append("_".join([w, "N"]))
elif t in postags["V"]:
simplified.append("_".join([w, "V"]))
elif t in postags["J"]:
simplified.append("_".join([w, "J"]))
else:
simplified.append("_".join([w, "X"]))
return simplified
|
def clap_convert(txt):
"""convert string of clap values on medium to actualy number
Args:
txt (str): claps values
Returns:
number on claps (int)
"""
# Medium annotation
if txt[-1] == "K":
output = int(float(txt[:-1]) * 1000)
return output
else:
return int(txt)
|
def divide(x, y):
"""Divide value x by value y and convert values to integer.
Args:
x (any): dividend
y (any): divisor
Raises:
ZeroDivisionError: divisor can't be 0
Returns:
[integer]: An integer holding the result
"""
if y != 0:
return int(x) / int(y)
else:
print("Error! Divisor can't be 0")
raise ZeroDivisionError
|
def code_to_omz(_code_points):
""" Returns a ZSH-compatible Unicode string from the code point(s) """
return r'\U' + r'\U'.join(_code_points.split(' '))
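# Usage sketch (illustrative code points):
print(code_to_omz('f101 f102'))  # \Uf101\Uf102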
|
def _get_input_tensor(input_tensors, input_details, i):
"""Gets input tensor in `input_tensors` that maps `input_detail[i]`."""
if isinstance(input_tensors, dict):
# Gets the mapped input tensor.
input_detail = input_details[i]
for input_tensor_name, input_tensor in input_tensors.items():
if input_tensor_name in input_detail['name']:
return input_tensor
        raise ValueError('Input tensors don\'t contain a tensor that maps to the '
                         'input detail %s' % str(input_detail))
else:
return input_tensors[i]
|
def interpolate(l, r, t0, dt, t):
"""
this is a poorly written function that linearly interpolates
l and r based on the position of t between tl and tr
"""
p = (t-t0)/dt
return [p*(j-i) + i for i, j in zip(l, r)]
|
def parse_location(d):
""" Used to parse name of location where match is played.
"""
return str(d.get("accNaam", ""))
|
def indentLevel(line):
"""Returns the indentation level of a line, defined in Piklisp as the number of leading tabs."""
for i in range(len(line)):
if line[i] != "\t":
return i # i characters were "\t" before lines[i]
return None
|
def _idivup(a, b):
"""
return the integer division, plus one if `a` is not a multiple of `b`
"""
return (a + (b - 1)) // b
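# Usage sketch (illustrative): ceiling division.
print(_idivup(7, 3))  # 3
print(_idivup(6, 3))  # 2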
|
def chk_chng(src_flist,dst_flist):
""" Returns unchanged file list and changed file list
Accepts source file list and dsetination file list"""
uc_flist = []
c_flist = []
for files in src_flist:
if files in dst_flist:
uc_flist.append(files)
else:
c_flist.append(files)
return uc_flist,c_flist
|
def breakSuffix(glyphname):
"""
Breaks the glyphname into a two item list
0: glyphname
1: suffix
if a suffix is not found it returns None
"""
    if glyphname.find('.') != -1:
        # Split on the first period only, so the result is always a two item list.
        split = glyphname.split('.', 1)
        return split
else:
return None
|
def JoinWords(Words, Delimiter, Quote = False):
"""Join words in a list using specified delimiter with optional quotes around words.
Arguments:
Words (list): List containing words to join.
Delimiter (string): Delimiter for joining words.
Quote (boolean): Put quotes around words.
Returns:
str : String containing joined words.
"""
if Quote:
JoinedWords = Delimiter.join('"{0}"'.format(Word) for Word in Words)
else:
JoinedWords = Delimiter.join(Words)
return JoinedWords
|
def decode(e):
"""
Decode a run-length encoded list, returning the original time series (opposite of 'encode' function).
Parameters
----------
e : list
Encoded list consisting of 2-tuples of (length, element)
Returns
-------
list
Decoded time series.
"""
return sum([length * [item] for length,item in e],[])
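# Usage sketch (illustrative):
print(decode([(2, 'a'), (3, 'b')]))  # ['a', 'a', 'b', 'b', 'b']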
|
def decode(bp_sp: str):
""" Return (row, column) tuple from string. The string is binary encoded, not by 0 and 1, but by F&B and L&R
Therefore we convert it to binary and use pythons int() function to convert it to decimal
"""
row = ''
col = ''
for x in range(0, 7):
if bp_sp[x] == 'F':
row = f"{row}0"
elif bp_sp[x] == 'B':
row = f"{row}1"
for x in range(7, 10):
if bp_sp[x] == 'L':
col = f"{col}0"
elif bp_sp[x] == 'R':
col = f"{col}1"
row = int(row, 2)
col = int(col, 2)
return row, col
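# Usage sketch (illustrative boarding pass string):
print(decode("FBFBBFFRLR"))  # (44, 5)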
|
def prepare_prefix(url_prefix):
"""Check if the url_prefix is set and is in correct form.
Output is None when prefix is empty or "/".
:param url_prefix: Registered prefix of registered callback.
:type url_prefix: str, list, None
:return: Url prefix of registered callback
:rtype: str, None
"""
if url_prefix is None or url_prefix.strip() == "/":
return None
elif isinstance(url_prefix, (list, tuple)):
url_prefix = "/".join(url_prefix)
else:
items = [part for part in url_prefix.split("/") if part]
url_prefix = "/".join(items)
if not url_prefix:
return None
while url_prefix.endswith("/"):
url_prefix = url_prefix[:-1]
if not url_prefix.startswith("/"):
url_prefix = "/{}".format(url_prefix)
return url_prefix
|
def clean_metadata(metadata):
"""
Transform metadata to cleaner version.
"""
metadata = metadata.copy()
# Fix missing metadata fields
metadata['description'] = metadata['description'] or metadata['summary']
metadata['summary'] = metadata['summary'] or metadata['description']
return metadata
|
def optimal_merge_pattern(files: list) -> int:
"""Function to merge all the files with optimum cost
Args:
files [list]: A list of sizes of different files to be merged
Returns:
optimal_merge_cost [int]: Optimal cost to merge all those files
Examples:
>>> optimal_merge_pattern([2, 3, 4])
14
>>> optimal_merge_pattern([5, 10, 20, 30, 30])
205
>>> optimal_merge_pattern([8, 8, 8, 8, 8])
96
"""
optimal_merge_cost = 0
while len(files) > 1:
temp = 0
# Consider two files with minimum cost to be merged
for i in range(2):
min_index = files.index(min(files))
temp += files[min_index]
files.pop(min_index)
files.append(temp)
optimal_merge_cost += temp
return optimal_merge_cost
|
def inlineKBRow(*argv) -> list:
"""
Row of inline Keyboard buttons
- *`argv` (`list`): Pass InlineKeyboardButton objects to generate a list (This is unuseful, you an use [button1,button2] with the same result)
"""
result = list()
for arg in argv:
result.append(arg)
return result
|
def add_alternatives(status, unsure, names, name):
"""
Add alternative dependencies to either the list of dependencies
for the current package, or to a list of unused dependencies.
Parameters
status : dict, main package contents
unsure : dict, alternative dependencies
names : list, package names
name : str, name of current package
"""
# dependencies without links
leftovers = []
# try block since some packages lack alternative dependencies
try:
for d in unsure[name]:
d = d.split()[0]
if d in names:
status[name]["Dependencies"].append(d)
else:
leftovers.append(d)
status[name]["Alternatives"] = leftovers
except:
pass
return status
|
def _init_data(data, key, init_data):
"""Initialize the data at specified key, if needed"""
if isinstance(key, int):
try:
data[key]
except IndexError:
data.append(init_data)
else:
data[key] = data.get(key, init_data)
return data
|
def prod(F, E):
"""Check that the factorization of P-1 is correct. F is the list of
factors of P-1, E lists the number of occurrences of each factor."""
x = 1
for y, z in zip(F, E):
x *= y**z
return x
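# Usage sketch (illustrative): 2**3 * 3**1 == 24.
print(prod([2, 3], [3, 1]))  # 24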
|
def singleton_bound_asymp(delta,q):
"""
Computes the asymptotic Singleton bound for the information rate.
EXAMPLES::
sage: singleton_bound_asymp(1/4,2)
3/4
sage: f = lambda x: singleton_bound_asymp(x,2)
sage: plot(f,0,1)
"""
return (1-delta)
|
def rivers_with_station(stations):
"""returns a list, from a given list of station objects, of river names with a monitoring station"""
rivers = set() # build an empty set
for station in stations:
river = station.river
rivers.add(river)
return sorted(rivers)
|
def remove_parenthesis(dictionary):
""" remove () from the value"""
for key in dictionary:
value = dictionary[key]
if type(value) == str:
new_value = value.replace('(', '')
new_value = new_value.replace(')', '')
dictionary[key] = new_value
else:
continue
return dictionary
|
def select_transect(shore_pts, i_start, j_start, i_end, j_end):
"""Select transect position among shore points, by avoiding positions
on the bufferred parts of the transect. Buffers overlap between up to
4 tiles, and cause the same positions to be selected multiple times.
Inputs:
-shore_pts: shore point coordinates (absolute rows & cols)
-i_start, j_start, i_end, j_end: tile extents without the
buffers. i/j refer to row/col, and start/end refer to
smallest/largest values.
Returns a list of [i, j] point coordinates that are not within
the buffer region"""
if not len(shore_pts):
return None
# Return the transect with the smallest i first, and j second
sorted_points = sorted(shore_pts, key = lambda p: p[1])
sorted_points = sorted(sorted_points, key = lambda p: p[0])
# Remove points in the buffer to avoid 2 transects appear side-by-side
valid_points = []
for p in sorted_points:
# print 'trying point', p,
if p[0] < i_start:
continue
elif p[1] < j_start:
continue
elif p[0] > i_end:
continue
elif p[1] > j_end:
continue
# print('valid')
valid_points.append(p)
# if not len(valid_points):
# print('No valid points in', sorted_points)
# print('point limits', (i_start, j_start), (i_end, j_end))
return valid_points
|
def _bin(x):
"""
>>> _bin(0)
[]
>>> _bin(1)
[1]
>>> _bin(5)
[1, 0, 1]
"""
def f(x):
while x>0:
yield x % 2
x = x // 2
return list(f(x))
|
def api_upload_text(body): # noqa: E501
"""upload text data on Bitcoin SV.
upload text data on Bitcoin SV. # noqa: E501
:param body: upload text data on Bitcoin SV.
:type body: dict | bytes
:rtype: ResponseUploadTextModel
"""
# if connexion.request.is_json:
# body = RequestUploadTextModel.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
|
def _parse_cisco_mac_address(cisco_hardware_addr):
"""
Parse a Cisco formatted HW address to normal MAC.
e.g. convert
001d.ec02.07ab
to:
00:1D:EC:02:07:AB
Takes in cisco_hwaddr: HWAddr String from Cisco ARP table
Returns a regular standard MAC address
"""
cisco_hardware_addr = cisco_hardware_addr.replace(".", "")
blocks = [
cisco_hardware_addr[x : x + 2] for x in range(0, len(cisco_hardware_addr), 2)
]
return ":".join(blocks).upper()
|
def find_destination_containers(
container_wrapper, target_container, exclude_containers=None
):
"""Search through a collection of containers and return all containers
other than the given container
:param container_wrapper: A collection of containers in the Google Tag Manager
List Response format
:type container_wrapper: dict
:param target_container: The target container, which will be excluded from the
returned list
:type target_container: dict
:param exclude_containers: A list of containers to exclude from being cloned to
:type exclude_containers: list
:return: A list of Google Tag Manager container objects
:rtype: list
"""
if not exclude_containers:
exclude_containers = []
destination_containers = []
for container in container_wrapper["container"]:
if (
not container["containerId"] == target_container["containerId"]
and container["name"] not in exclude_containers
):
destination_containers.append(container)
return destination_containers
|
def adjacent_sents(a, b, th):
"""
DESCRIPTION: Define if two sentences are adjacent measured in sentences
INPUT: a <int> - Sentence a index,
b <int> - Sentence b index
th <int> - maximum gap between indexes
OUTPUT: True if the two sentences are adjacents, False otherwise
"""
    return abs(a - b) - 1 <= th
|
def pad_data(value, width, fillbyte):
"""Append the fill byte to the end of the value until it has the
requested width.
"""
return value.ljust(width, fillbyte)
|
def get_intron_exon_border_coords(ex_s, ex_e,
us_intron_len=False,
ds_intron_len=False,
eib_width=20):
"""
Get intron-exon border region coordinates surrounding one exon.
ex_s:
Genomic exon start (0-based)
ex_e:
Genomic exon end (1-based)
us_intron_len:
Upstream intron length (for single exon transcripts = False)
ds_intron_len:
Downstream intron length (for single exon transcripts = False)
eib_width:
Width of intron/exon border region.
>>> get_intron_exon_border_coords(1000, 2000)
(980, 1000, 1000, 1020, 1980, 2000, 2000, 2020)
>>> get_intron_exon_border_coords(1000, 1020, eib_width=50)
(950, 1000, 1000, 1020, 1000, 1020, 1020, 1070)
>>> get_intron_exon_border_coords(1000, 1020, eib_width=50, us_intron_len=30, ds_intron_len=40)
(970, 1000, 1000, 1020, 1000, 1020, 1020, 1060)
"""
# Exon length.
ex_len = ex_e - ex_s
# Upstream intron border region.
i1s = ex_s - eib_width
if us_intron_len:
if us_intron_len < eib_width:
i1s = ex_s - us_intron_len
i1e = ex_s
# Upstream exon border region.
e1s = ex_s
e1e = ex_s + eib_width
if ex_len < eib_width:
e1e = ex_s + ex_len
# Downstream exon border region.
e2s = ex_e - eib_width
if ex_len < eib_width:
e2s = ex_e - ex_len
e2e = ex_e
# Downstream intron border region.
i2s = ex_e
i2e = ex_e + eib_width
if ds_intron_len:
if ds_intron_len < eib_width:
i2e = ex_e + ds_intron_len
return i1s, i1e, e1s, e1e, e2s, e2e, i2s, i2e
|
def filter_orders_violating_max_xrate(xrate, b_orders, s_orders, fee):
"""Remove orders that violate the maximum exchange rate (considering the fee)."""
# For b_orders: xrate <= max_xrate * (1 - fee)
b_orders = [
order for order in b_orders
if xrate <= order.max_xrate * (1 - fee.value)
]
# For s_orders: 1 / xrate <= max_xrate * (1 - fee)
s_orders = [
order for order in s_orders
if 1 / xrate <= order.max_xrate * (1 - fee.value)
]
return b_orders, s_orders
|
def get_files_preview(thepath=".", preview_size=40):
"""Returns a tuple containing the file name and the first 40 characters of the file for each file in a given directory. Includes gzip files. Excludes .tar files.
Examples:
>>> from pprint import pprint\n
>>> pprint( previewgzipfiles() )\n
[('misc_funcs.py', '#######################################\\n'),\n
('repl_tricks.py', '# %%\\n###################################'),\n
('test_text.txt.gz', "ok\\nso here is some\\n'blah blah'\\nWe are pl"),\n
('all_funcs.py', '#######################################\\n'),\n
('system_funcs.py', '#######################################\\n'),\n
('two_dimension_funcs.py', '#######################################\\n'),\n
('test_text.txt', "ok\\nso here is some\\n'blah blah'\\nWe are pl"),\n
('string_funcs.py', '#######################################\\n'),\n
('regex_funcs.py', '#######################################\\n'),\n
('lambda_and_map_examples.py', ''),\n
('array_funcs.py', '#######################################\\n'),\n
('specific_use_case_funcs.py', '#######################################\\n'),\n
('dict_funcs.py', '#######################################\\n'),\n
('file_folder_funcs.py', '#######################################\\n'),\n
('test.txt', "ok\\nso here is some\\n'blah blah'\\nWe are pl"),\n
('notes.py', '# %%\\n###################################'),\n
('conversion_encoding_bytes_chr_funcs.py',\n
'#######################################\\n'),\n
('test_copy.txt', "ok\\nso here is some\\n'blah blah'\\nWe are pl")]
Args:
thepath (str, optional): Specify the directory. Defaults to ".".
preview_size (int, optional): Specify the number of characters to preview. Defaults to 40.
Returns:
list: Returns a list of tuples where the 1st element is the file name, and the 2nd element is the first 40 characters of the file
"""
import pathlib
import gzip
path_obj = pathlib.Path(thepath).resolve()
results_preview = []
for file in path_obj.glob("*"):
if file.is_file() and ".tar" not in file.name:
if file.name.endswith(".gz"):
with gzip.open(file, "rt") as f:
preview = f.read()[:preview_size]
results_preview.append((file.name, preview))
else:
results_preview.append((file.name, file.read_text()[:preview_size]))
return results_preview
|
def sentence_to_allen_format(sentence, sign_to_id, usingRealSigns):
"""
Transform the sentence to AllenNLP format
:param sentence: the sentence to transform
:param sign_to_id: dictionary of sign to id
:param usingRealSigns: whether using the signs as is
:return: the AllenNLP format for BiLSTM
"""
signs = ""
if usingRealSigns:
for sign in sentence:
if sign == " " or sign == "\t" or sign == "\n":
continue
try:
signs += str(sign_to_id[sign]) + " "
except:
signs += "0 " # default index
else:
for sign in sentence.split(","):
signs += sign + " "
return signs
|
def removeSlash(p):
"""
If string p does not end with character '/', it is added.
"""
if p.endswith('/'):
return p[:-1]
else:
return p
|
def transform(seq):
"""
Transforms "U" to "T" for the processing is done on DNA alphabet
"""
S = ""
for s in seq:
if s == "T":
S += "U"
else:
S += s
return S
|
def bounds_to_space(bounds):
"""
Takes as input a list of tuples with bounds, and create a dictionary to be processed by the class Design_space. This function
us used to keep the compatibility with previous versions of GPyOpt in which only bounded continuous optimization was possible
(and the optimization domain passed as a list of tuples).
"""
space = []
for k in range(len(bounds)):
space += [{'name': 'var_'+str(k+1), 'type': 'continuous', 'domain':bounds[k], 'dimensionality':1}]
return space
|
def local_coord_to_global(in_coord, center_coord, max_x, max_y):
""" Converts a coordinate from a 3x3 grid into coordinate from large grid
Args:
:in_coord: tuple of local coordinates to convert to global
:center:coord: center coordinate of 3x3 grid cell in global coordinate system
:max_x/y: maxium x / y value the global coordinates can be
Returns:
Tuple of coordinates in global system
Raises:
"""
new_coord_0 = center_coord[0] + in_coord[0]-1
new_coord_1 = center_coord[1] + in_coord[1]-1
# only return valid coordinates, do nothing if coordinates would be negative
if new_coord_0 >= 0 and new_coord_1 >= 0 and new_coord_0 <= max_x and new_coord_1 <= max_y:
return (new_coord_0, new_coord_1)
|
def parse_result_format(result_format):
"""This is a simple helper utility that can be used to parse a string result_format into the dict format used
internally by great_expectations. It is not necessary but allows shorthand for result_format in cases where
there is no need to specify a custom partial_unexpected_count."""
if isinstance(result_format, str):
result_format = {"result_format": result_format, "partial_unexpected_count": 20}
else:
if "partial_unexpected_count" not in result_format:
result_format["partial_unexpected_count"] = 20
return result_format
|
def dayOfWeek(dayInt):
"""
Takes the day of the week as integar and returns the
name of the day as a string
"""
if dayInt == 0:
return "Monday"
elif dayInt == 1:
return "Tuesday"
elif dayInt == 2:
return "Wednesday"
elif dayInt == 3:
return "Thursday"
elif dayInt == 4:
return "Friday"
elif dayInt == 5:
return "Saturday"
elif dayInt == 6:
return "Sunday"
|
def complementary_filter(one_gyro_angle, one_accel_angle, alpha=0.96):
    """Blend a gyroscope angle with an accelerometer angle using a complementary filter."""
#http://www.geekmomprojects.com/gyroscopes-and-accelerometers-on-a-chip/
return alpha*one_gyro_angle + (1.0 - alpha)*one_accel_angle
|
def first_lower(string):
"""
Return a string with the first character uncapitalized.
Empty strings are supported. The original string is not changed.
"""
return string[:1].lower() + string[1:]
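# Usage sketch (illustrative):
print(first_lower("Hello World"))  # 'hello World'
print(first_lower(""))             # ''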
|
def azure_translation_format(sentence):
"""
Puts an individual sentence into the required format for the Azure Translation API.
azure_translation_format(sentence)
sentence: str
        The sentence to be formatted
Returns: dict; The dict-format accepted by the Azure Translation API.
"""
return {'Text': sentence}
|
def merge_intervals(indexes, length):
"""Merges overlapping intervals of matches for given indexes and generic
lzngth. This function assume the indexes are allready sorted in ascending
order.
:param indexes: the list of indexes acs sorted to merge
:param length: the length of the generic
:type indexes: list of int
:type length: int
:return: a list of couples of begining of the interval and the end
:rtype: list of list
"""
out = []
L = list(map(lambda v: [v, v + length], indexes))
tmp = L[0]
    for (s, e) in L:  # already sorted
if s <= tmp[1]:
tmp[1] = max(tmp[1], e)
else:
out.append(tuple(tmp))
tmp = [s, e]
out.append(tuple(tmp))
return out
|
def reverse_comp(DNA):
"""returns a string with reverse complement of the string argument DNA
sequence"""
rev_dna = DNA[::-1]
new_dna = []
for i, element in enumerate(rev_dna):
if element in 'Gg':
new_dna.extend('c')
elif element in 'Cc':
new_dna.extend('g')
elif element in 'Uu':
new_dna.extend('a')
elif element in 'Aa':
new_dna.extend('u')
else:
print('this is not a basepair',element, i)
return ''.join(new_dna)
|
def units_decoder(units):
"""
https://darksky.net/dev/docs has lists out what each
unit is. The method below is just a codified version
of what is on that page.
"""
si_dict = {
'nearestStormDistance': 'Kilometers',
'precipIntensity': 'Millimeters per hour',
'precipIntensityMax': 'Millimeters per hour',
'precipAccumulation': 'Centimeters',
'temperature': 'Degrees Celsius',
'temperatureMin': 'Degrees Celsius',
'temperatureMax': 'Degrees Celsius',
'apparentTemperature': 'Degrees Celsius',
'dewPoint': 'Degrees Celsius',
'windSpeed': 'Meters per second',
'windGust': 'Meters per second',
'pressure': 'Hectopascals',
'visibility': 'Kilometers',
}
ca_dict = si_dict.copy()
ca_dict['windSpeed'] = 'Kilometers per hour'
ca_dict['windGust'] = 'Kilometers per hour'
uk2_dict = si_dict.copy()
uk2_dict['nearestStormDistance'] = 'Miles'
uk2_dict['visibility'] = 'Miles'
uk2_dict['windSpeed'] = 'Miles per hour'
uk2_dict['windGust'] = 'Miles per hour'
us_dict = {
'nearestStormDistance': 'Miles',
'precipIntensity': 'Inches per hour',
'precipIntensityMax': 'Inches per hour',
'precipAccumulation': 'Inches',
'temperature': 'Degrees Fahrenheit',
'temperatureMin': 'Degrees Fahrenheit',
'temperatureMax': 'Degrees Fahrenheit',
'apparentTemperature': 'Degrees Fahrenheit',
'dewPoint': 'Degrees Fahrenheit',
'windSpeed': 'Miles per hour',
'windGust': 'Miles per hour',
'pressure': 'Millibars',
'visibility': 'Miles',
}
switcher = {
'ca': ca_dict,
'uk2': uk2_dict,
'us': us_dict,
'si': si_dict,
}
return switcher.get(units, "Invalid unit name")
|
def _leaky_relu_not_vect(x, derivative=False):
"""Non vectorized LeakyReLU activation function.
Args:
        x (float): Input value.
derivative (bool, optional): Whether the derivative should be returned instead. Defaults to False.
"""
a = 0.02
if x > 0:
if derivative:
return 1
else: return x
else:
if derivative:
return a
else: return a*x
|
def format_exc(exc):
""" format the exception for printing """
try:
return f"<{exc.__class__.__name__}>: {exc}"
except:
return "<Non-recognized Exception>"
|
def pad(tokens, length, pad_value=1):
"""add padding 1s to a sequence to that it has the desired length"""
return tokens + [pad_value] * (length - len(tokens))
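# Usage sketch (illustrative):
print(pad([3, 4, 5], 6))  # [3, 4, 5, 1, 1, 1]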
|
def hide_tokens(request):
""" Hide tokens in a request """
    if 'context' in request:
        tokens = request['context'].get('tokens', {})
        for key in tokens:
            tokens[key] = '*****'
return request
|
def cross(v1, v2):
"""
Returns the cross product of the given two vectors
using the formulaic definition
"""
i = v1[1] * v2[2] - v2[1] * v1[2]
j = v1[0] * v2[2] - v2[0] * v1[2]
k = v1[0] * v2[1] - v2[0] * v1[1]
return [i, -j, k]
|
def encode_ip_addr(address_octets):
"""encodes IP octets into a 32-bit int (for DCC use)"""
ip_int = 0
octets = address_octets.split('.')
if(len(octets) == 4):
ip_int += int(octets[0]) << 24
ip_int += int(octets[1]) << 16
ip_int += int(octets[2]) << 8
ip_int += int(octets[3])
return str(ip_int)
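# Usage sketch (illustrative address):
print(encode_ip_addr('192.168.1.1'))  # '3232235777'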
|
def natural_size(num: float, unit: str = "B", sep: bool = True) -> str:
"""
Convert number to a human readable string with decimal prefix.
:param float num: Value in given unit.
:param unit: Unit suffix.
:param sep: Whether to separate unit and value with a space.
:returns: Human readable string with decimal prefixes.
"""
sep_char = " " if sep else ""
for prefix in ("", "K", "M", "G"):
if abs(num) < 1000.0:
return f"{num:3.1f}{sep_char}{prefix}{unit}"
num /= 1000.0
prefix = "T"
return f"{num:.1f}{sep_char}{prefix}{unit}"
|
def render_vextab(vextab: str, show_source: bool = False) -> str:
"""Create Javascript code for rendering VExflow music notation
:param vextab: The Vexflow source code to render into music notation
:param show_source: ``True`` to include the original Vexflow music notation
source code in the cell output
:return: The Javascript code as a single string
"""
import time
vextab_js = vextab.replace('\n', r'\n')
cid='vextab-{}'.format(int(round(time.time() * 1000)))
output = [
'require(["vextabjs"], function(VEXTABJS) {',
#This works if we reload the notebook page
'element.prepend("<div class=\'vex-tabdiv\'>{}</div>");'.format(vextab_js),
#This doesn't seem to work?
#'element.prepend("<div id=\'{}\', class=\'vex-tabdiv\'></div>");'.format(cid),
#'VexTab = VEXTABJS.VexTab;',
#'Artist = VEXTABJS.Artist;',
#'Renderer = VEXTABJS.Vex.Flow.Renderer;',
# '// Create VexFlow Renderer from canvas element with id #boo and a random component.',
#'renderer = new Renderer($(\'#{}\')[0], Renderer.Backends.CANVAS);'.format(cid),
# '// For SVG, you can use Renderer.Backends.SVG',
#'// Initialize VexTab artist and parser.',
#'artist = new Artist(10, 10, 600, {scale: 0.8});',
#'vextab = new VexTab(artist);',
#'// Parse VexTab music notation passed in as a string.',
#'vextab.parse("{}")'.format(vextab_js),
#'vextab.parse("tabstave notation=true\n notes :q 4/4\n");'.replace('\n', r'\n'),
#'// Render notation onto canvas.',
#'artist.render(renderer);',
'});']
if show_source:
output.insert(3,
'element.prepend("<pre>{}</pre>");'
.format(vextab).replace('\n', '<br />'))
return ''.join(output)
|
def total_chars(transcript: str) -> int:
"""
Sums the total amount of characters in the transcript.
:param transcript: A string containing the contents of the transcribed audio file.
:return: Returns the number of characters in the file.
"""
    # Counting characters one by one is just len().
    return len(transcript)
|
def traffic_data_maxmin_unnormalize(data, max_value=100, min_value=0):
"""
maxmin_unnormalize data.
:param data: ndarray, data.
:return: ndarray
"""
if max_value > 0:
scaler = max_value - min_value
data = data * scaler + min_value
return data
|
def rho_beta(levels , ps, check=False):
"""
This function returns the Boer beta field, given the levels in G and the ps (hPa)
uses xarray
returns:
xr.DataArray in the same dimension as ps levels
"""
aa= (levels < ps)
if check is True:
print('ratio ' + str( aa.sum()/float(aa.size)) )
return aa*1 * ps
|
def get_patched_apps(themes, plugins, apps):
"""
Patches in the active themes and plugins to the installed apps list provided by settings.py
"""
return plugins['active'] + [
# Theme first so it has template priority!
themes['active']
] + apps
|
def to_string(obj, last_comma=False):
"""Convert to string in one line.
Args:
obj(list, tuple or dict): a list, tuple or dict to convert.
last_comma(bool): add a comma at last.
Returns:
(str) string.
Example:
>>> to_string([1, 2, 3, 4], last_comma=True)
>>> # 1, 2, 3, 4,
>>> to_string({'a': 2,'b': 4})
>>> # a=2, b=4
"""
s = ''
if type(obj) == list or type(obj) == tuple:
for i, data in enumerate(obj):
s += str(data)
if last_comma or i != len(obj)-1:
s += ', '
elif type(obj) == dict:
for i, data in enumerate(obj.items()):
k, v = data
s += '%s=%s' % (str(k), str(v))
if last_comma or i != len(obj)-1:
s += ', '
else:
s = str(obj)
return s
|
def split_namespace(tag):
"""returns a tuple of (namespace,name) removing any fragment id
from namespace"""
if tag[0] == "{":
namespace, name = tag[1:].split("}", 1)
return namespace.split("#")[0], name
else:
return (None, tag)
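# Usage sketch (illustrative tag):
print(split_namespace('{http://example.com/ns#frag}item'))  # ('http://example.com/ns', 'item')
print(split_namespace('plain'))                             # (None, 'plain')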
|
def fitsum(fitnesses):
    """Sum of fitnesses in the list, needed to build
    the wheel of fortune."""
    total = 0.0
    for fitness in fitnesses:
        total += fitness
    return total
|
def _border_color_from_bin(bin, n_bins):
""" Return border color for bin."""
if n_bins <= 0:
return 'grey'
    ratio = bin / float(n_bins)
    if ratio <= 0.3:
        return 'grey'
    # Both remaining branches returned 'black', so they collapse into one.
    return 'black'
|
def split_on_uppercase(s, keep_contiguous=False):
"""
>>> split_on_uppercase('theLongWindingRoad')
['the', 'Long', 'Winding', 'Road']
>>> split_on_uppercase('TheLongWindingRoad')
['The', 'Long', 'Winding', 'Road']
>>> split_on_uppercase('TheLongWINDINGRoadT', True)
['The', 'Long', 'WINDING', 'Road', 'T']
>>> split_on_uppercase('ABC')
['A', 'B', 'C']
>>> split_on_uppercase('ABCD', True)
['ABCD']
>>> split_on_uppercase('')
['']
Args:
s: string
keep_contiguous bool: flag to indicate we want to keep contiguous uppercase chars together
    Returns:
        list: the parts of the string split on uppercase characters
    """
string_length = len(s)
is_lower_around = (lambda: s[i-1].islower() or
string_length > (i + 1) and s[i + 1].islower())
start = 0
parts = []
for i in range(1, string_length):
if s[i].isupper() and (not keep_contiguous or is_lower_around()):
parts.append(s[start: i])
start = i
parts.append(s[start:])
return parts
|
def find_res(line: str):
"""Find video resolution"""
lines = [
le.rstrip()
for le in line.split()
if not le.startswith("(") and not le.endswith(")")
]
lines = [le for le in lines if not le.startswith("[") and not le.endswith("]")]
res = None
for le in lines:
if le.endswith("p"):
res = le
break
return res
|