def validate_mask(mask):
"""Check if the netmask is valid
return mask as string in the range [0, 32] or False if not valid
"""
if not mask:
return False
mask = mask.strip()
    if mask.isdigit() and 0 <= int(mask) <= 32:
return mask
return False
|
def Sphere(individual):
"""Sphere test objective function.
F(x) = sum_{i=1}^d xi^2
d=1,2,3,...
Range: [-100,100]
Minima: 0
"""
    y = sum(x**2 for x in individual)
    return y
|
def get_config_list(ranking, ckpt_path2is_3class):
"""Assemble a model list for a specific task based on the ranking.
In addition to bundling information about the ckpt_path and whether to
model_uncertainty, the config_list also lists the value of the metric to
aid debugging.
Args:
ranking (list): list containing (Path, float), corresponding to
checkpoint-metric pairs ranked from best to worst
by metric value
ckpt_path2is_3class (dict): mapping from ckpt_path to is_3class
(whether to model_uncertainty)
Returns:
config_list (list): list bundling information about ckpt_path,
model_uncertainty, and metric value
"""
config_list = []
for ckpt_path, value in ranking:
        is_3class = ckpt_path2is_3class[ckpt_path]
        ckpt_info = {'ckpt_path': str(ckpt_path),
                     'is_3class': is_3class,
                     'value': value}
config_list.append(ckpt_info)
return config_list
|
def calculate_version_code(values_dict, shift):
"""
    Version Code is calculated from the four version integers.
    Major is for crosswalk's large updates, and minor is for the chromium it is
    based on. Major and minor will always be increasing, so using their sum is
    enough.
    On each major or minor refresh, build resets to 0. After that, build keeps
    increasing for 6 weeks (12 weeks if we skip one upstream beta rebasing),
    so 100 numbers for build are enough.
    After we branch from trunk, patch keeps increasing for the rest of the
    branch's life; 100 numbers are also enough since a branch lasts at most
    24 weeks, and version bumps are much less frequent after the branch point.
    Shift is the last digit, used for the different configurations we want to
    upload to the Play Store.
"""
try:
major = values_dict['MAJOR']
minor = values_dict['MINOR']
build = values_dict['BUILD']
patch = values_dict['PATCH']
return (major + minor) * 100000 +\
build * 1000 +\
patch * 10 +\
shift
except KeyError:
return 0
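A quick sanity check of the encoding with hypothetical version values:

# (23 + 53) * 100000 + 58 * 1000 + 9 * 10 + 1 == 7658091
assert calculate_version_code({'MAJOR': 23, 'MINOR': 53, 'BUILD': 58, 'PATCH': 9}, 1) == 7658091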
|
def generate_baseline(_iterable, window_length):
"""Generate a sliding baseline of an iterable
Creates a list of sliding baselines for the iterable. e.g. if you pass in
a list of len==5 with a baseline length of 2, we will generate:
[
[elem0 (first element), elem1, elem2],
[elem1, elem2, elem3],
[elem2, elem3, elem4(last element)]
]
The first element in each list is the element that the baseline is created for, followed by
window_length number of elements as the baseline.
Args:
_iterable: iterable to create baselines for
window_length: Number of elements to form a baseline
Returns:
list like [[window_1], [window_2], [window_3] ...]
"""
if len(_iterable) <= window_length:
return [_iterable]
num_windows = len(_iterable) - window_length
return [_iterable[window:window + window_length + 1] for window in range(num_windows)]
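A minimal usage sketch with made-up data:

generate_baseline([10, 20, 30, 40, 50], 2)
# -> [[10, 20, 30], [20, 30, 40], [30, 40, 50]]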
|
def _tryint_(s):
"""
Tries to turn input into an integer.
If that fails, returns input without conversion.
Source: http://nedbatchelder.com/blog/200712/human_sorting.html
"""
try:
return int(s)
except ValueError:
return s
|
def compare_bases_and_generate_key(tx_bases, rx_bases, measure):
"""Compares TX and RX bases and return the selected bits."""
if not (len(tx_bases) == len(rx_bases) == len(measure)):
return None, "tx_bases(%d), rx_bases(%d) and measure(%d) must have the same length." % (len(tx_bases), len(rx_bases), len(measure))
ret = ''
for bit, tx_base, rx_base in zip(measure, tx_bases, rx_bases):
if tx_base == rx_base:
ret += bit
return ret, None
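A small sketch of the sifting step with made-up data, assuming bases and measurements are passed as equal-length strings:

key, err = compare_bases_and_generate_key('XZXZ', 'XXXZ', '0110')
# bases agree at positions 0, 2 and 3 -> key == '010', err is None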
|
def num_jobs(num_workers):
"""How many jobs to run in stress tests."""
return min(100, num_workers * 10)
|
def format_email_pass_id(submit_id):
"""Return tuple of the formatted email and password IDs based on the base submit_id value.
Args:
submit_id: id used to create unique element IDs
Returns:
tuple: formatted IDs: `(email_id, pass_id)`
"""
    return tuple(f'{submit_id}-{key}' for key in ['email', 'password'])
|
def octal_to_int(oct_str):
"""Convert an octal string to an int, respecting None"""
return int(oct_str, 8) if oct_str else None
|
def sorted_dict(d, key=None, reverse=False):
"""
Return dictionary sorted using key. If no key provided sorted by dict keys.
"""
if key is None:
return dict(sorted(d.items(), key=lambda e: e[0], reverse=reverse))
return dict(sorted(d.items(), key=key, reverse=reverse))
|
def _runtime_maybe(ctx, variants):
"""Check predicates at runtime and return matched value or None."""
for value, predicate in variants:
if callable(predicate):
if predicate(ctx):
return value
elif predicate:
return value
|
def find_element_by_key(obj, key):
"""
Recursively finds element or elements in dict.
"""
path = key.split(".", 1)
if len(path) == 1:
if isinstance(obj, list):
return [i.get(path[0]) for i in obj]
elif isinstance(obj, dict):
return obj.get(path[0])
else:
return obj
else:
if isinstance(obj, list):
return [find_element_by_key(i.get(path[0]), path[1]) for i in obj]
elif isinstance(obj, dict):
return find_element_by_key(obj.get(path[0]), path[1])
else:
return obj
|
def unary_superposition_mop_r_3(ar_1: tuple, mop: tuple):
"""substitution for unary multioperations.
:param tuple f: multioperation which.
:param tuple g: multioperation for
:rtype: tuple
"""
dic_mop = {
1: (0,),
2: (1,),
3: (0, 1),
4: (2,),
5: (0, 2),
6: (1, 2),
7: (0, 1, 2)
}
a, b, c = 0, 0, 0
if ar_1[0] != 0:
for x in dic_mop[ar_1[0]]:
a = a | mop[x]
if ar_1[1] != 0:
for x in dic_mop[ar_1[1]]:
b = b | mop[x]
if ar_1[2] != 0:
for x in dic_mop[ar_1[2]]:
c = c | mop[x]
return a, b, c
|
def test_rar(archive, compression, cmd, verbosity, interactive):
"""Test a RAR archive."""
cmdlist = [cmd, 't']
if not interactive:
cmdlist.extend(['-p-', '-y'])
cmdlist.extend(['--', archive])
return cmdlist
|
def count_ed_mgs(db):
"""
Just return how many times Ed answered.
"""
try:
count = db.get('ed_info')['qty_answed_message']
    except Exception:
count = 1
return count
|
def swap(heights_list, index01, index02):
"""swap two positions in a list at given indexes
Args:
heights_list (list): iterable in which swapping occurs
index01 (int): index of first element
index02 (int): index of second element
Returns:
list: list with element positions swapped
"""
heights_list[index01], heights_list[index02] = heights_list[index02], heights_list[index01]
return heights_list
|
def gather_types(input_step, varname):
"""
Given and input step, return a SPARQL fragment to gather the types for the step
:param input_step:
:return: SPARQL fragment as string
"""
if not input_step['object']['literal']:
return ' ?' + input_step['object']['name'] + ' a ?' + varname + ' . '
else:
return ''
|
def get_recurrence_rule(doc):
"""Recurring Event not implemeted."""
# until = datetime.strptime(doc.repeat_till, '%Y-%m-%d').strftime("%Y%m%dT%H%M%SZ")
# if doc.repeat_on == "Every Day": return ["RRULE:FREQ=DAILY;UNTIL=%s;BYDAY=%s"%(until,get_by_day_string(doc))]
# elif doc.repeat_on == "Every Week": return ["RRULE:FREQ=WEEKLY;UNTIL=%s"%(until)]
# elif doc.repeat_on == "Every Month": return ["RRULE:FREQ=MONTHLY;UNTIL=%s"%(until)]
# else: return ["RRULE:FREQ=YEARLY;UNTIL=%s"%(until)]
return []
|
def dict_max(*ds):
"""Take the maximum of dictionaries.
Args:
*ds (dict): Dictionaries.
Returns:
dict: `max(*ds)`.
"""
if not all([set(d.keys()) == set(ds[0].keys()) for d in ds[1:]]):
raise ValueError("Dictionaries have different keys.")
return {k: max([d[k] for d in ds]) for k in ds[0].keys()}
|
def add_arrays(digits1, digits2):
"""Adds numbers represented by arrays of digits."""
if len(digits1) > len(digits2):
longer = digits1[::-1]
shorter = digits2[::-1]
else:
longer = digits2[::-1]
shorter = digits1[::-1]
sum_digits = []
carry = 0
for i, d in enumerate(shorter):
sum_digits.append(d + longer[i] + carry)
carry = 0
if sum_digits[i] > 9:
carry = sum_digits[i] // 10
sum_digits[i] = sum_digits[i] % 10
    if len(longer) > len(shorter):
        for d in longer[len(shorter):]:
            total = d + carry
            carry = total // 10  # propagate the carry instead of discarding it
            sum_digits.append(total % 10)
if carry != 0:
sum_digits.append(carry)
return sum_digits[::-1]
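A couple of hand-verified checks, including a carry that ripples through the longer array:

assert add_arrays([1, 2, 3], [4, 5]) == [1, 6, 8]
assert add_arrays([9, 9], [1]) == [1, 0, 0]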
|
def is_multiclass(classes):
"""
Returns True if values in classes are anything but 1, 0, True, or False,
otherwise returns False.
"""
try:
return len(set(int(i) for i in classes) - {0, 1}) != 0
except ValueError:
return True
|
def dedupe(duped_data, encoding: str = 'utf-8'):
"""
Removes duplicates from a given data structure
"""
# Import libraries
from hashlib import md5
codes = set()
deduped_data = []
for item in duped_data:
hash_digest = md5(str(item).encode(encoding)).hexdigest()
if hash_digest not in codes:
codes.add(hash_digest)
deduped_data.append(item)
return deduped_data
|
def indexed_tags(tags):
"""Returns a list of tags that must be indexed.
The order of returned tags is from more selective to less selective.
"""
if not tags:
return []
return sorted(
set(t for t in tags if t.startswith(('buildset:', 'build_address:')))
)
|
def fibonacci_recursive(number):
"""Returns the nth Fibonacci number in the Fibonacci sequence.
The Fibonacci numbers are the numbers in the Fibonacci sequence, defined by
the equation and seed values:
F(n) = F(n - 1) + F(n - 2)
F(0) = 0
F(1) = 1
* O(2^n) time complexity
* O(n) space complexity
Args:
number: An integer.
"""
if number == 0 or number == 1:
return number
return fibonacci_recursive(number - 1) + fibonacci_recursive(number - 2)
|
def longuest_sub_array(arr):
    """
    This function calculates the longest sub-array in a list.
    A sub-array is a contiguous run which doesn't contain any 0.
    It returns the index of the last element which composes the sub-array
    and the length of the sub-array, or None if there is no non-zero run.
    """
    sub_arrays = []
    last_index = 0
    length = 0
    for i, l in enumerate(arr):
        if l != 0:
            length += 1
            last_index = i
        else:
            if length > 0:
                sub_arrays.append((last_index, length))
                length = 0
    # close a run that reaches the end of the list
    if length > 0:
        sub_arrays.append((last_index, length))
    if not sub_arrays:
        print('The image cannot be cropped vertically')
        return None
    return max(sub_arrays, key=lambda p: p[1])
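A quick check with a trailing run, which the closing step after the loop now captures:

longuest_sub_array([1, 2, 0, 3, 4, 5])  # -> (5, 3): run ends at index 5 with length 3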
|
def _faster_comp(candidates, pairs):
"""
helper function for run(), evaluates winner of pairs, but faster (by
about two orders of magnitude) than _graph() (now deprecated)
"""
# This computation doesn't create the whole graph, but relies on the idea
# that the winner will never have an edge pointing to it
edges = set()
children = set()
for (i, j) in pairs:
if i in candidates and j in candidates and \
i not in children and (j, i) not in edges:
children.add(j)
edges.add((i, j))
winners = set()
for c in candidates:
if c not in children:
winners.add(c)
return winners
|
def getColPermutations(possible_columns, max_num_of_perms = 100):
"""
    Get the possible combinations of column names, of size 1 up to len(possible_columns)-1, capped at max_num_of_perms
:return: Given Input = [a,b,c]
Then, Output= [ [a], [b], [c], [a,b], [a,c], [b,c] ]
"""
permutations = {col: 1 for col in possible_columns}
for perm_size in range(len(possible_columns)-1):
for permutation in list(permutations.keys()):
tokens_in_perm = permutation.split(':')
if len(tokens_in_perm) == perm_size:
tokens_in_perm.sort()
for col in possible_columns:
if col in tokens_in_perm:
continue
new_perm = tokens_in_perm + [col]
new_perm.sort()
new_perm_string = ':'.join(new_perm)
permutations[new_perm_string] = 1
if len(permutations) > max_num_of_perms:
break
if len(permutations) > max_num_of_perms:
break
ret = [perm.split(':') for perm in list(permutations.keys())]
return ret
|
def get_hover_string(n_commits, dt):
"""
"""
if n_commits == 0:
n_txt = 'No commits on '
elif n_commits == 1:
n_txt = '1 commit on '
else:
n_txt = f'{n_commits} commits on '
if dt == '':
return ''
else:
return n_txt + dt.strftime('%b %d, %Y')
|
def isStructureCompatible(lp1, lp2, bp):
"""
    Checks if the region between lp1 and lp2 is structurally balanced
"""
x = lp1 + 1
while x < lp2:
if bp[x] <= lp1 or bp[x] > lp2:
return False
if x == bp[x]:
x += 1
else:
x = bp[x] + 1
return x == lp2
|
def dic_partiel(n):
"""retourne un dictionnaire partiel de mot de taille n correctement parenthese"""
if n <= 0:
return [""]
elif n == 1:
return ["()"]
sub_list = dic_partiel(n-1)
new_list = []
for word in sub_list:
new_list.append(word + "()")
new_list.append("(" + word + ")")
return new_list
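A quick usage sketch; note the result is partial, as the name says (for n=3 it yields 4 of the 5 valid words):

dic_partiel(2)  # -> ['()()', '(())']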
|
def shorten_file_by_removing_comments(content: str) -> str:
"""
Removes all lines with line comments or block comments from the file content, so that the ast parser does not get confused.
"""
lines = content.splitlines(keepends=True)
    delete = False
    for line_num, line in enumerate(lines):
        stripped = line.strip()
        # if the line starts with a comment, remove the line
        if stripped.startswith("#"):
            lines[line_num] = ""
        # if the line opens or closes a block comment, remove the line;
        # a one-line docstring opens and closes on the same line, so it must not toggle
        if stripped.startswith('"""') or stripped.startswith("'''"):
            quote = stripped[:3]
            if not (len(stripped) >= 6 and stripped.endswith(quote)):
                delete = not delete
            lines[line_num] = ""
        if delete:
            lines[line_num] = ""
lines = [line for line in lines if line.strip() != ""]
return "".join(lines)
|
def gtex_location_to_gwas_location_(gtex_location: str) -> str:
    """Converts variant locations in GTEx format to GWAS catalog format.
    i.e. given 'chr1_64764_C_T_b38', returns '1:64764'.
    """
    parts = gtex_location.split("_")
    chrom = parts[0][3:]  # strip the 'chr' prefix, keeping multi-digit chromosomes intact
    return f"{chrom}:{parts[1]}"
|
def error(endpoint, reason, advice=None):
"""Generate error packet.
`endpoint`
Optional endpoint name
`reason`
Error reason
`advice`
Error advice
"""
return u'7::%s:%s+%s' % (endpoint or '',
(reason or ''),
(advice or ''))
|
def get_process_identifier(args):
"""by looking at arguments we try to generate a proper identifier
>>> get_process_identifier(['python', 'echo.py', '1'])
'python_echo.py_1'
"""
return '_'.join(args)
|
def changes_existing_nodes(nodes_exist, alpha, dict_proj, dict_proj_changed_exist):
"""
    For nodes that are in both sequential snapshots, update their embedding as a weighted combination of
    their previous embedding and the newly calculated one. Do it only for nodes whose neighbours have changed.
:param nodes_exist: Nodes that exist in both sequential snapshots and their neighbours are changed
:param alpha: Parameter representing the importance that is given to the old embedding
:param dict_proj: Dictionary of embeddings of the nodes
:param dict_proj_changed_exist: Dict of previous embedding of the changed existing nodes.
:return: An updated embedding dictionary
"""
for node in nodes_exist:
if dict_proj.get(node) is not None:
new_proj = dict_proj[node]
old_proj = dict_proj_changed_exist[node]
final_proj = alpha * old_proj + (1 - alpha) * new_proj
dict_proj[node] = final_proj
return dict_proj
|
def verify_expected_list(list_expected_strs, list_actual_strs):
"""
Return True if each str in list_expected_strs is in list_actual_strs
"""
return all(elem in list_actual_strs for elem in list_expected_strs)
|
def xpath_class_check(cls: str) -> str:
"""
Parameters
----------
cls : str
The CSS class name to check
Returns
-------
xpath: str
An xpath expression for finding an element with class `cls`
"""
return f"contains(concat(' ',normalize-space(@class),' '),' {cls} ')"
|
def DL_ignore(answers):
"""Return False if any dictionary-learning method was selected.
Arguments
---------
answers: dict
Previous questions answers.
Returns
-------
bool
True if DL verbosity question should be ignored.
"""
expected = ['ITKrMM', 'wKSVD', 'BPFA']
flag = [meth in answers['method'] for meth in expected]
return True not in flag
|
def in_any_list(list1, list2):
"""
Check if any items in list1 are in list2
:param list1: list
:param list2: list
:return:
"""
list1 = list1.split(" ") if isinstance(list1, str) else list1
list2 = list2.split(" ") if isinstance(list2, str) else list2
return any(i in list2 for i in list1)
|
def path_to_startdate(path):
""" Given a path of the form fall2018/coursename,
returns corresponding start date.
>>> path_to_startdate('fall2018/coursename')
'2018-09-01'
>>> path_to_startdate(u'fall2018')
'2018-09-01'
>>> path_to_startdate('spring2018/foo/bar/baz')
'2018-01-01'
>>> path_to_startdate('summer2018/any')
'2018-06-01'
>>> path_to_startdate('anything/else')
''
"""
# The string before the first / should be fall|summer|spring + YYYY,
# else this returns the empty string.
s = str(path.split('/')[0])
if s.startswith('fall'):
return s[-4:] + '-09-01'
elif s.startswith('spring'):
return s[-4:] + '-01-01'
elif s.startswith('summer'):
return s[-4:] + '-06-01'
else:
return ''
|
def complement(sequence, reverse=False):
""" Return complement of sequence """
complements = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
sequence = reversed(sequence) if reverse else sequence
return ''.join(complements.get(base, 'N') for base in sequence)
|
def jsonapi_errors(jsonapi_errors):
"""Construct api error according to jsonapi 1.0
:param iterable jsonapi_errors: an iterable of jsonapi error
:return dict: a dict of errors according to jsonapi 1.0
"""
    return {'errors': list(jsonapi_errors),
            'jsonapi': {'version': '1.0'}}
|
def map_quads(coord):
"""
Map a quadrant number to Moran's I designation
HH=1, LH=2, LL=3, HL=4
Args:
coord (int): quadrant of a specific measurement
Returns:
classification (one of 'HH', 'LH', 'LL', or 'HL')
"""
if coord == 1:
return 'HH'
elif coord == 2:
return 'LH'
elif coord == 3:
return 'LL'
elif coord == 4:
return 'HL'
return None
|
def group_sequential(a):
"""
Purpose: Group the numerical list or 1D array by sequential blocks.
    Return: a list of lists.
    Example:
    >>> group_sequential([2,3,4,67,78,79,102])
    [[2, 3, 4], [67], [78, 79], [102]]
    >>> group_sequential([2,3,4,67,78,79,102,103])
    [[2, 3, 4], [67], [78, 79], [102, 103]]
    >>> group_sequential([0,3,4,67,78,79,102,103])
    [[0], [3, 4], [67], [78, 79], [102, 103]]
    >>> group_sequential([0,3,4,67,78,79,102,103,120])
    [[0], [3, 4], [67], [78, 79], [102, 103], [120]]
"""
if isinstance(a,int):
raise TypeError('Only list or 1D numpy array is accepted')
else:
if len(a)==1:
return [[a[0]]] #to make sure for np.array(5) or [5], [[5]] is returned.
else:
ind_begin=0
d=[]
for i in range(len(a)-1):
if a[i+1]==a[i]+1:
                #handle the case if the last several members of a are connected; such as [23,24,25,56,57,89,103,104]
if i==len(a)-2:
d.append(list(a[ind_begin:len(a)]))
else:
ind_end=i
d.append(list(a[ind_begin:ind_end+1]))
ind_begin=i+1
                    #handle the case when the last element is not sequential with its previous one; such as [23,24,25,56,57,89,103]
if i==len(a)-2:
d.append([a[i+1]])
return d
|
def publish_file_in_ipfs(ipfs_client, filepath, wrap_with_directory=True):
"""
push a file to ipfs given its path
"""
try:
with open(filepath, 'r+b') as file:
result = ipfs_client.add(
file, pin=True, wrap_with_directory=wrap_with_directory)
if wrap_with_directory:
return result[1]['Hash']+'/'+result[0]['Name']
return result['Hash']
except Exception as err:
print("File error ", err)
|
def solution(A, B, M, X, Y):
# write your code in Python 2.7
"""
floors = [0 to M ]
elevator:
max capacity of X people
weight limit of Y lbs
N people gathered in floor[0] standing in a queue()
k: a kth numbered person
A[k] = weight, B[k] = target floor or dst ex)A[0] and B[0] => 0th person
    count the total number of times that the elevator stops
ex1)
    floors = [] * 6 (0 to M=5)
    elevator:
X = 2, Y = 200
People:
A = [60,80,40]
B = [2,3,5]
queue = [(a1,b1)...(an,bn)]
queue = [(60,2), (80,3),(40,5)]
my approach#1:
* stops = 0
* while queue is not empty
* people = X ex) X = 2
* elevator = [], weight = 0
* while queue and people > 0 and weight <= Y
values = queue.pop()
elevator.push(values)
weight += values[0]
people -= 1
    * while elevator is not empty
    * values = elevator.dequeue()
    * while elevator and elevator.first[1] == values[1]
    * elevator.dequeue()
stops += 1
* stops += 1
Time: O(N * M)?
Space: O(M + N)
"""
# assumed that all the inputs are valid and exist
stops = 0
queue = []
N,i = len(A),0
# add to queue (A[i],B[i])...
while i < N:
queue.append((A[i],B[i]))
i += 1
# while queue is not empty
while queue:
        # initializations
# max capacity of people & weight
people,weight = X,Y
elevator = []
while queue and people and weight > 0:
values = queue[0]
weight -= values[0]
if weight < 0: break
elevator.append(values)
queue.pop(0)
people -= 1
# making stops for people inside the elevator
        # keep checking whether the next passenger is headed to the same target floor as the current one
# if true, let them exit from elevator
while elevator:
values = elevator.pop(0)
while elevator and elevator[0][1] == values[1]:
elevator.pop(0)
stops += 1
stops += 1 # ending at ground level
return stops
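A hand-traced run of the docstring's example: trip one carries (60,2) and (80,3), stopping at floors 2, 3 and ground; trip two carries (40,5), stopping at floor 5 and ground.

assert solution([60, 80, 40], [2, 3, 5], 5, 2, 200) == 5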
|
def climb_stairs(k):
"""
Return the number of ways that `k` stairs can be climbed
given that you can climb either 1 or 2 stairs at a time
"""
num_ways = [1]
for idx in range(1, k+1):
one_step = num_ways[idx-1]
two_step = num_ways[idx-2] if idx > 1 else 0
num_ways.append(one_step + two_step)
return num_ways[-1]
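A quick check: four stairs can be climbed five ways (1+1+1+1, 1+1+2, 1+2+1, 2+1+1, 2+2).

assert climb_stairs(4) == 5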
|
def get_location_list(state_alert_list):
"""Get lat, lon list from state alert list"""
locations = []
for item in state_alert_list:
locations.append([item["lat"], item["lon"]])
return locations
|
def calc_tot_orbits(n_orbit_dict):
"""Calculate the total number of direct and indirect orbits in
n_orbit_dict.
Parameters
----------
n_orbit_dict : dictionary
A dictionary where each key is an object in the system and each value
is a tuple of 2 values. The first represents the number of direct
orbits and the second the number of indirect orbits.
Returns
-------
n_direct_tot : int
The number of direct orbits
n_indirect_tot : int
The number of indirect orbits
"""
n_direct_tot = sum([n_orbits[0] for n_orbits in n_orbit_dict.values()])
n_indirect_tot = sum([n_orbits[1] for n_orbits in n_orbit_dict.values()])
return n_direct_tot, n_indirect_tot
|
def ee_bands_rgb(collection):
"""
Earth Engine band names
"""
dic = {
'Sentinel2': ['B4','B3','B2'],
'Landsat7': ['B3','B2','B1'],
'CroplandDataLayers': ['cropland']
}
return dic[collection]
|
def get_atom_neighbours(atom_idx: int, molecule_covalent_nbrs: list):
"""Get all neighbours for atom_idx.
    Extract all covalent bonding partners (alpha), all nearest neighbours
(beta), and all nearest-nearest neighbours (gamma) for atom with index
'atom_idx'.
Args:
atom_idx: index of atom to extract neighbours for
        molecule_covalent_nbrs: list of covalent partners of each atom
Returns:
alpha: list of all covalent bonding atom indices of atom_idx
beta: list of nearest neighbour atom indices of atom_idx
gamma: list of nearest-nearest neighbour atom indices of atom_idx
"""
# extract alpha neighbours
alpha = molecule_covalent_nbrs[atom_idx]
# extract beta neighbours
beta = list()
for _, a in enumerate(alpha):
b = molecule_covalent_nbrs[a]
diff = list(set([atom_idx]) ^ set(b))
if len(diff) > 0:
beta.extend(diff)
# extract gamma neighbours
gamma = list()
for _, b in enumerate(beta):
c = molecule_covalent_nbrs[b]
inter = list(set(alpha).intersection(set(c)))
diff = list(set(inter) ^ set(c))
gamma.extend(diff)
gamma = list(dict.fromkeys(gamma))
return alpha, beta, gamma
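A minimal sketch on a hypothetical 4-atom chain 0-1-2-3, where each entry lists an atom's covalent partners:

nbrs = [[1], [0, 2], [1, 3], [2]]
get_atom_neighbours(0, nbrs)  # -> ([1], [2], [3])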
|
def get_bcl2fastq_read_type_map(read_info, sample_index_read=None, dual_indexed=False, ignore_dual_index=False):
"""
Get a mapping between ReadInfo read name (R1,I1,I2,R2) and bcl2fastq
output file naming (R1/R2/R3/R4/I1)
The guarantee here is that the 10X sample index will always be on I1,
if generated. If dual-indexing is specified, the secondary index will
be on I2. Upstream pipestances can expect read1 to be in R1, sample
indexes to be on I1, and read2 to be on R2.
    :param read_info: The ReadInfo block from RunInfo.xml
    :param sample_index_read: The ReadInfo read (I1, I2) to use as the sample index
    :param dual_indexed: Whether the run is dual-indexed
    :param ignore_dual_index: Whether the dual index was ignored (and thus not sequenced)
"""
#read_names = [r["read_name"] for r in read_info]
read_map = {}
reads_counted = 0
for idx, r in enumerate(read_info):
read_name = r["read_name"]
if read_name == sample_index_read:
read_map[read_name] = 'I1'
elif ignore_dual_index and r["index_read"]:
# we didn't read this index -- see make_bases_mask_val
continue
elif dual_indexed and r["index_read"]:
read_map[read_name] = 'I2'
else:
reads_counted += 1
read_map[read_name] = 'R%d' % reads_counted
return read_map
|
def circular_shift_key_one_left(key):
"""The function does a cyclic shift left of the whole key.
To be able to shift the whole 40 bytes left in a cyclic manner, the function
shifts the bits between two adjacent bytes each time"""
l = len(key)
return [ ((key[i] << 1) & 0xff) | ((key[(i + 1) % l] & 0x80) >> 7) for i in range(0, l) ]
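A small check that each byte's high bit moves into the byte to its left, cyclically:

circular_shift_key_one_left([0b10000000, 0b00000001])  # -> [0b00000000, 0b00000011]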
|
def get_include_role(validation):
"""Returns Included Role"""
try:
if 'tasks' in validation:
return validation['tasks'][0]['include_role']['name']
return validation['roles'][0]
    except (KeyError, IndexError):
return list()
|
def meta_name(file_name):
"""Generate the name of the meta file"""
return "{}.json".format(file_name)
|
def to_list(tup: tuple):
"""Convert from tuple to packed list
Allows us to call function with arguments in a loop
Parameters
----------
tup: tuple
tuple of objects to convert to packed list
Raises
------
ValueError
If passed uneven number of arguments without a list. Please wrap your args in a list.
Examples
--------
>>> to_list(([x1, x2, x3], [y1, y2, y3]))
[[x1, y1], [x2, y2], [x3, y3]]
>>> to_list(([x1], [y1]))
[[x1, y1]]
>>> to_list(([x1, x2, x3], ))
[[x1], [x2], [x3]]
>>> to_list((x1, ))
[[x1]]
>>> to_list((x1, y1))
[[x1, y1]]
>>> to_list((x1, x2, x3, y1, y2, y3))
[[x1, y1], [x2, y2], [x3, y3]]
"""
n_tup = len(tup)
if n_tup == 0:
return []
if not isinstance(tup[0], list):
# the first element is data
if n_tup == 1:
return [list(tup)]
if n_tup % 2 != 0:
raise ValueError('Don\'t know how to handle uneven number of args '
'without a list. Please wrap your args in a list.')
# assume first half of args is input and second half is outcome
return [list(el) for el in zip(tup[:(n_tup // 2)], tup[(n_tup // 2):])]
if n_tup == 1:
return [[x] for x in tup[0]]
n_mods = len(tup[0])
lists_packed = [[] for _ in range(n_mods)]
for i in range(n_mods):
for j in range(n_tup):
lists_packed[i].append(tup[j][i])
return lists_packed
|
def mock_glob_method(path):
"""
Given a path input, returns a list of candidates
"""
if ".x86" in path:
return ["linux"]
if ".app" in path:
return ["darwin"]
if ".exe" in path:
return ["win32"]
if "*" in path:
return "Any"
return []
|
def GetFunctionToGLVersionsMap(gl_versions):
"""Construct map from a function names to GL versions which define the
function.
Args:
        gl_versions: Map of gl versions => functions.
Returns:
Map of function name => gl versions.
"""
function_to_gl_versions = {}
for gl_version, functions in gl_versions.items():
for function in functions:
if not function in function_to_gl_versions:
function_to_gl_versions[function] = set([])
function_to_gl_versions[function].add(gl_version)
return function_to_gl_versions
|
def capitalized_comp_name(context):
"""return the component name in capitalized format
Args:
context (dict): complete package and component transformation
Returns:
str: component name in capitalized format, underscore being removed.
Examples:
        >>> context = {'componentName': 'another_comp', 'frecuency': '100'}
        >>> capitalized_comp_name(context)
        'AnotherComp'
"""
return context['componentName'].title().replace("_", "")
|
def combineRulesWithOperator(listOfRules, operator):
"""
Takes a list of rules and makes an overall rule that ties them together
with the AND or the OR operator
Parameters
----------
listOfRules: list
A list of string representation of rules
operator: str
Should be either AND or OR
Returns
-------
total: str
String representation of the rules combined using the given operator
"""
assert type(listOfRules) == list
assert all(map(lambda r: type(r) == str, listOfRules))
assert operator.lower() in ['and', 'or']
if len(listOfRules) == 1:
return listOfRules[0]
operator = operator.lower()
total = listOfRules[0]
for i in range(1, len(listOfRules)):
total = operator + "(" + total + ", " + listOfRules[i] + ")"
return total
|
def _sign(number):
"""
Get sign of the given number
:param number:
:type number: int
:return: -1, 0, 1
:rtype: int
"""
if number > 0:
return 1
if number < 0:
return -1
return 0
|
def _failsafe_values_atom(schema, values, errors, source, kw):
"""Map all erroneous inputs to a single value."""
for key in errors:
kw[key] = values
return kw
|
def _is_complex_type(item_type: str) -> bool:
""" Checks to see if the type is a complex type (as primitives are decoded differently)
Args:
item_type: A string that has the type of the object
Returns:
True if complex, False if primitive
"""
    return item_type not in ('Boolean', 'BitString', 'Integer')
|
def jossann_reclassifier(original_classifier, probability):
"""
Reclassify: note that if probabilities are (0, 1) or (1, 0) then we override
the original classifier.
"""
if sum(probability) > 1:
probability = tuple([i / sum(probability) for i in probability])
if probability in [(1, 0), (0, 1)]:
original_classifier["stochastic"] = False
elif sum(probability) != 0:
original_classifier["stochastic"] = True
return original_classifier
|
def map_gen(size):
"""Generates map"""
map_ = []
for _ in range(size):
map_.append([" ~"] * size)
return map_
|
def asbool(obj):
"""
Interprets an object as a boolean value.
:rtype: bool
"""
if isinstance(obj, str):
obj = obj.strip().lower()
if obj in ('true', 'yes', 'on', 'y', 't', '1'):
return True
if obj in ('false', 'no', 'off', 'n', 'f', '0'):
return False
raise ValueError('Unable to interpret value "%s" as boolean' % obj)
return bool(obj)
|
def head_number_poly(cappa):
"""Calculate head number for a given pump's typical number.
    The polynomial has been calculated applying the curve fitting at nodes
cappa .2 .3 .4 .5 .6 .7 .8 .9 1.0 1.1 1.2
psi .583 .575 .560 .535 .515 .489 .465 .441 .415 .395 .380
weights ones(cappa)
n 5
:param cappa (float): typical number
:return psi (float): head number
"""
coef = [0.531, 0.613, -2.339, 3.255, -2.284, 0.641]
psi = sum([val * cappa**idx for idx, val in enumerate(coef)])
return psi
|
def get_service_id(path):
"""Get the service ID (e.g. octue.services.<uuid>) from a topic or subscription path (e.g.
projects/<project-name>/topics/octue.services.<uuid>)
:param str path:
:return str:
"""
return path.split("/")[-1]
|
def calculate(x: int, y: int = 1, *, subtract: bool = False) -> int:
"""Calculates the sum (or difference) of two numbers.
Parameters:
`x` : int, required
The first number
`y` : int, optional
The second number (default is 1)
    `subtract` : bool, optional
Whether to perform subtraction. (Default is `False`.)
Returns:
int
"""
return x - y if subtract else x + y
|
def exp_opts(expansion):
"""Return the options of an expansion. If options are not defined, return {}"""
if isinstance(expansion, str):
return {}
return expansion[1]
|
def avg_brightness(image_list):
"""
A list of grey scale images
"""
brightness_per_block=[]
for image in image_list:
img_shape = image.shape
img_Size = image.size
total=0
for i in range(0,img_shape[0]):
for j in range(0,img_shape[1]):
total+=image[i][j]
total/=img_Size
brightness_per_block.append(total)
return brightness_per_block
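A minimal check, assuming the images are numpy arrays (the code relies on .shape and .size):

import numpy as np
avg_brightness([np.array([[0, 255], [255, 0]])])  # -> [127.5]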
|
def volCuboid(length: float, breadth: float, height: float) -> float:
"""Finds volume of a cuboid"""
volume: float = length * breadth * height
return volume
|
def toString(obj):
""" Convert the diffmask object to string. If it has a `toString'
    method, use it. Otherwise, hope it'll be converted implicitly. This
is a cheap wrapper to avoid Python2/3 string incompatibility.
"""
if hasattr(obj, 'toString'):
return obj.toString()
else:
return obj
|
def shellsplit(text):
"""Very simple shell-like line splitting.
:param text: Text to split.
:return: List with parts of the line as strings.
"""
ret = list()
inquotes = False
current = ""
for c in text:
if c == "\"":
inquotes = not inquotes
elif c in ("\t", "\n", " ") and not inquotes:
if current != "":
ret.append(current)
current = ""
else:
current += c
if current != "":
ret.append(current)
return ret
|
def _has_sectors(tax_name: str, ignore_sectors: bool) -> bool:
"""Determine whether we are doing a sector-based forecast."""
return tax_name in ["birt", "sales", "wage", "rtt"] and not ignore_sectors
|
def fast_exp(a, b):
"""Returns a^b in O(lg n) time complexity, where a and b are both
integers."""
    if not (isinstance(a, int) and isinstance(b, int)):
        raise TypeError('a and b should both be integers.')
if b < 0:
return 0
result = 1
while (b != 0):
if b & 1 == 1:
result *= a
b >>= 1
a *= a
return result
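A quick square-and-multiply check:

assert fast_exp(3, 5) == 243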
|
def redis_stream_id_add_one(message_id):
"""Add one to the message ID
This is useful when we need to xrange() events exclusive of the given ID,
rather than inclusive of the given ID (which is the sensible default).
There is no chance of missing events with this method.
"""
milliseconds, n = map(int, message_id.split("-"))
return f"{milliseconds:013d}-{n + 1}"
|
def tokenize(s):
"""
    Tokenize a string by splitting on single spaces.
"""
#s = re.sub('\d+', NUM, s).lower()
# tokens = nltk.RegexpTokenizer(r'\w+|<sil>|[^\w\s]+').tokenize(s)
tokens = s.split(' ')
return tokens
|
def escape_ansi(line):
"""It removes the ansi codes from a string."""
import re
    ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]')
    return ansi_escape.sub('', line)
|
def _split_member(member):
"""Splits an IAM member into type and optional value.
Args:
member (str): The IAM member to split.
Returns:
tuple: The member type and optionally member value.
"""
# 'allUsers' and 'allAuthenticatedUsers' member do not contain ':' so they
    # need to be handled separately.
if ':' in member:
return member.split(':', 1)
return (member, None)
|
def _is_float(value):
"""Checks if the value is float. Which means it contains two non empty integer parts separated by .
Args:
value (str): The value.
Returns:
bool: True if the value represents a float.
"""
if "." not in value:
return False
a, b = value.split(".", 1)
return a.isdigit() and b.isdigit()
|
def dna_to_rna(seq):
"""
Convert a DNA sequence to RNA.
"""
# Determine if original sequence was uppercase
seq_upper = seq.isupper()
# Convert to lowercase
seq = seq.lower()
# Swap out 't' for 'u'
seq = seq.replace('t', 'u')
# Return upper or lower case RNA sequence
if seq_upper:
return seq.upper()
else:
return seq
|
def is_list_of_dicts(li: list) -> bool:
"""Return true if input is a list of dicts."""
return all(isinstance(elem, dict) for elem in li)
|
def tolist(item):
"""
    tolist wraps a single value in a list; lists are returned unchanged
    """
    return item if isinstance(item, list) else [item]
|
def yesno_to_bool(s):
"""Convert yes/no answer to True/False"""
if s == "y" or s == "yes":
return True
return False
|
def generic_type_template(tipo: str, name: str, behaviour: str, result0: str, result1: str) -> str:
"""Template for feature behaviour reason generated from DICE
Returns:
str: generic behaviour based on the type
"""
    dict_type = {
        "category": f"{name} {behaviour} from {result0} to {result1}.",
        "continuous": f"{name} {behaviour} from {result0} to {result1}.",
    }
    # the f-strings are already interpolated, so no further .format() call is needed
    return dict_type[tipo]
|
def from_c_string(addr):
"""utf-8 decode a C string into a python string"""
if addr is None:
return None
if type(addr) == str:
return addr
return addr.decode('utf-8')
|
def rotate_right(arr):
"""
Rotate a copy of given 2D list
clockwise by 90 degrees
and return a new list.
:param arr: A 2D-list of arbitrary
dimensions.
:return: A list that is "arr" rotated
90 degree clockwise by its center.
"""
n = len(arr)
m = len(arr[0])
res = [[None for _ in range(n)] for _ in range(m)]
for i in range(m):
for j in range(n):
res[i][j] = arr[n - j - 1][i]
return res
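A quick orientation check on a 2x2 grid:

rotate_right([[1, 2], [3, 4]])  # -> [[3, 1], [4, 2]]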
|
def _dict_to_ebuild(dictionary):
"""Helper to format a dictionary into an ebuild file."""
output = []
for key in sorted(dictionary.keys()):
output.append('%s="%s"' % (key, dictionary[key]))
output.append('\n')
return '\n'.join(output)
|
def present_to_future_value(ir, t):
"""
Returns a coefficient to convert money from one point in time to
t years in the future, with an annual interest rate of ir. This is
the inverse of future_to_present_value if calculated with the same
rate and number of years.
Example:
>>> round(present_to_future_value(.07,10),7)
1.9671514
>>> round(present_to_future_value(.07,10)*\
future_to_present_value(.07,10),7) == 1
True
"""
return (1+ir)**t
|
def basic_backquote_string_evaluator(val):
"""The basic evaluator for backquoted strings"""
return eval(val,globals(),locals())
|
def calculate_center(shape: tuple):
"""
Calculate and return the center point of ``shape``.
:param shape: A tuple (width, height) of odd numbers
:return: A ``tuple`` (x, y) containing the center points coordinates
"""
if any(d%2 == 0 for d in shape):
raise ValueError("width and height of shape must be odd numbers")
x, y = [int((d-1)/2) for d in shape[-2:]]
return (x, y)
|
def flatten(x):
"""
Flatten lists of lists of any depth. Preserves tuples.
"""
result = []
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, str) and type(el)!=tuple and not issubclass(type(el), tuple):
result.extend(flatten(el))
else:
result.append(el)
return result
|
def cleanup_double_endline(text: str) -> str:
"""Removes unnecessary empty lines in text"""
    while '\n\n\n' in text:
        text = text.replace('\n\n\n', '\n')
    return text
|
def _ContinuationAlignStyleStringConverter(s):
"""Option value converter for a continuation align style string."""
accepted_styles = ('SPACE', 'FIXED', 'VALIGN-RIGHT')
if s:
r = s.strip('"\'').replace('_', '-').upper()
if r not in accepted_styles:
raise ValueError('unknown continuation align style: %r' % (s,))
else:
r = accepted_styles[0]
return r
|
def dispatch_strategy(declaration):
"""How are we going to call the underlying implementation of a
declaration? There are two strategies:
- use_derived: we want to call the implementation on CPUDoubleType
(or a similar, derived Type instance). Because these derived
instances deal in Tensors, not Variables (it's a completely different
object, so it doesn't dispatch back to VariableType), code on
this dispatch path needs to wrap/unwrap tensors. If the
derived implementation takes and returns tensors, the
implementation is usually differentiable (although we also use
the derived dispatch path for non-differentiable functions
that we still want to dispatch on the derived Type instance;
e.g., size())
- use_type: we want to call the implementation on Type, because
it is implemented concretely, and the functions it invokes will
get dispatched back to VariableType (which will ensure that they
are differentiable.)
"""
if declaration['abstract'] or declaration['derivative'] is not None:
# If the function is abstract (not implemented on at::Type), we must
# call the implementation on the derived type with unpacked tensors.
# If the function has a derivative specified and is concrete, we could
        # call either implementation. We prefer calling the derived
# type's implementation with unpacked tensors because it is more
# performant in some cases: any internal calls to other ATen functions
# won't have the history tracked.
# If the function has a type dispatched argument (i.e. is a factory),
# we prefer calling the derived type's implementation both because it is
# more performant and to ensure factory functions return tensors with _version
        # of 0 (probably not strictly necessary, but nice to have to keep versioning simple
        # to understand).
return 'use_derived'
else:
# If the function is concrete (we don't have to override it) and we
# didn't declare it in derivatives.yaml, we'll assume that it is
# actually implemented out of differentiable functions. (This
# assumption might not hold, but then you'll see gradcheck fail.)
return 'use_type'
|
def sanitize_code(code):
""" Make a canonical text version of a code - a 5 digit, 0-padded string
"""
try:
code = "%05i"%code
except TypeError:
code=str(code)
return code
|
def cmpversion(a, b):
    """Compare versions the way chrome does. Returns True if a > b."""
    def split_version(v):
        """Get major/minor of version."""
        if '.' in v:
            return v.split('.', 1)
        if '_' in v:
            return v.split('_', 1)
        return (v, '0')
    if a == b:
        # without this base case, equal versions recurse forever on ('0', '0')
        return False
    a_maj, a_min = split_version(a)
    b_maj, b_min = split_version(b)
    if a_maj == b_maj:
        return cmpversion(a_min, b_min)
    return int(a_maj) > int(b_maj)
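A couple of quick checks, including the equal-version case handled above:

assert cmpversion('10.2', '9.9') is True
assert cmpversion('1.0', '1.0') is False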
|
def sizeof_fmt(num):
"""
Convert file size to human readable format.
"""
    for x in ['b', 'K', 'M', 'G', 'T']:
        if num < 1024.0:
            return "{0:.2f}{1}".format(num, x)
        num /= 1024.0
    return "{0:.2f}P".format(num)
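A quick check of the formatting:

sizeof_fmt(123456789)  # -> '117.74M'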
|