content
stringlengths 42
6.51k
|
---|
def strip_quotes(arg):
    """Strip one pair of matching outer quotes from a string.

    Works for both single and double quotes; the quotes are removed only
    when the first and last characters are the same quote character.

    :param arg: str - string to strip outer quotes from
    :return: str - the string with its outer quotes removed, if any
    """
    if len(arg) > 1 and arg[0] == arg[-1] and arg[0] in "\"'":
        return arg[1:-1]
    return arg
|
def _finish_plot(ax, names, legend_loc=None, no_info_message="No Information"):
    """Finalize a matplotlib axes after plotting.

    Shows a centered message on the axes if there is no data (``names`` is
    empty), and optionally adds a legend.

    :param ax: matplotlib axes to finish
    :param names: sequence of series names; empty means "no data plotted"
    :param legend_loc: legend location string/code; no legend when None
    :param no_info_message: text shown in the axes when names is empty
    :return: False if names is empty, True otherwise
    """
    if( not names ):
        # Axes-relative coordinates (0.5, 0.5) center the message.
        ax.text(0.5,0.5, no_info_message,
                fontweight='bold', va='center', ha='center',
                transform=ax.transAxes)
        return False
    if( legend_loc is not None ):
        ax.legend(names, loc=legend_loc)
    return True
|
def _priority_to_int(priority: str) -> int:
    """Map a priority label to its numeric weight; unknown labels map to 0."""
    return {
        "priority_critical": 90,
        "priority_major": 70,
        "priority_medium": 50,
        "priority_low": 30,
    }.get(priority, 0)
|
def get_type(object):
    """Return the name of *object*'s type as a string (e.g. 'int', 'list').

    NOTE(review): the original docstring described a timedelta conversion,
    which this function does not do; it simply reports the type name.
    """
    return type(object).__name__
|
def mDistCompute(a, b):
    """Compute the sum of Manhattan (absolute) distances between two 1D arrays.

    Both arrays are expected to have the same length; this is an inner
    helper of mDist().
    """
    # Index over a; b must be at least as long.
    return sum(abs(a[i] - b[i]) for i in range(len(a)))
|
def partial_product(start, stop):
    """Product of integers in range(start, stop, 2), computed recursively.

    start and stop should both be odd, with start <= stop. The range is
    halved at an odd midpoint so the recursion depth stays logarithmic.
    """
    count = (stop - start) >> 1
    if count == 0:
        return 1
    if count == 1:
        return start
    # `| 1` forces the midpoint to be odd so both halves stay on odd numbers.
    mid = (start + count) | 1
    return partial_product(start, mid) * partial_product(mid, stop)
|
def get_process_data(indexes, processes):
    """Extract and label the process data from the ps command output.

    Args:
        indexes: dict with the column positions of the PID, PPID, and
            COMMAND headings ('pid', 'ppid', 'command' keys)
        processes: iterable of raw process lines
    Returns:
        list of dicts, each holding 'pid', 'ppid', and 'command' strings
    """
    results = []
    # Splitting at most `command` times keeps the full command (with its
    # arguments) intact in the final field.
    max_splits = indexes['command']
    for raw_line in processes:
        fields = raw_line.rstrip().split(None, max_splits)
        # Skip malformed/short lines.
        if len(fields) <= 2:
            continue
        results.append({
            'pid': fields[indexes['pid']],
            'ppid': fields[indexes['ppid']],
            'command': fields[indexes['command']],
        })
    return results
|
def parse_logistic_flag(kwargs):
    """Return True when kwargs declares y_dist == "binomial", else False."""
    return kwargs.get("y_dist") == "binomial"
|
def diff_between_angles(angle_a: float, angle_b: float) -> float:
    """Calculate the signed difference between two angles.

    Args:
        angle_a (float): angle in degrees
        angle_b (float): angle in degrees
    Returns:
        float: difference angle_b - angle_a, wrapped into (-180, 180].
    """
    delta = (angle_b - angle_a) % 360
    return delta - 360 if delta > 180 else delta
|
def calculate_nps(scores):
    """Compute the Net Promoter Score from a list of scores.

    Args:
        scores -- list of numeric scores (0-10 scale)
    Returns:
        float -- Net Promoter Score in [-100.0, +100.0]
    """
    # Detractors score 6 or below; promoters score 9 or above.
    detractors = sum(1 for score in scores if score <= 6)
    promoters = sum(1 for score in scores if score >= 9)
    total = len(scores)
    # NPS = % promoters - % detractors, expressed on a -100..100 scale.
    return (promoters / total - detractors / total) * 100
|
def str2dict(string):
    """Parse a str representation of a dict into its dict form.

    This is the inverse of dict2str().

    :param string: comma-separated "key=value" pairs to parse.
    :returns: A dict constructed from the str representation in string.
    """
    # Split each comma-separated item on its first '=' only, so values may
    # themselves contain '='.
    return dict(item.split('=', 1) for item in string.split(','))
|
def get_crop_shape(dataset_name):
    """Return the (height, width) cropping shape for *dataset_name*."""
    if dataset_name != 'Pascal':
        raise ValueError('Unknown dataset name {}'.format(dataset_name))
    return (200, 272)
|
def xds_detector_name(xia2_name):
    """Translate a xia2 detector-library name to an XDS detector name."""
    # Substring checks in the original priority order.
    translations = (
        ("pilatus", "PILATUS"),
        ("rayonix", "CCDCHESS"),
        ("adsc", "ADSC"),
        ("saturn", "SATURN"),
        ("raxis", "RAXIS"),
    )
    for needle, xds_name in translations:
        if needle in xia2_name:
            return xds_name
    raise RuntimeError("detector %s unknown" % xia2_name)
|
def removeWords(answer):
    """Remove specific filler words from an answer for more lenient matching."""
    answer = answer.lower()
    for filler in (' town', ' city', ' island', ' badge', 'professor ', 'team '):
        answer = answer.replace(filler, '')
    return answer
|
def hello(name = "person", lastname = "exposito"):
    """Return a greeting of the form "Hello {name} {lastname}!".

    :param name: first name (default "person")
    :param lastname: last name (default "exposito")
    :return: the greeting string
    """
    # Fixed: the original had a malformed docstring claiming it prints
    # "hello world!" plus a stray dead string-literal statement; the
    # function actually returns a formatted greeting.
    return f"Hello {name} {lastname}!"
|
def get_resinum_to_resi_map(resiname_file, offset = 0, indexing = 1, aa_code = 3):
    """
    Return a dict mapping residue number to residue name for a system.

    A PDB or prmtop file is required; the values are the amino acid code
    (three-letter by default, one-letter when aa_code == 1) followed by the
    residue number (e.g., Thr72 or T72).

    :param resiname_file: path to a .pdb or prmtop file, or None
    :param offset: added to the residue number used in the name string
    :param indexing: 1 (default) for one-based keys, 0 for zero-based
    :param aa_code: 3 (default) or 1; 1 converts names via ThrLett_to_OneLett
    :return: dict of residue number -> residue name; falls back to an
        identity map of 0..9999 (as strings) when no usable file is given.
    """
    resi_map = {}
    # Fixed: `== None` -> `is None` (identity check for the None singleton).
    if resiname_file is None:
        print('Warning: No prmtop or PDB file given.\n' + \
              ' No residue number information will be presented.')
        for i in range(10000):
            resi_map[i] = str(i)
        return resi_map
    try:
        # Fixed: Python 2's `file()` builtin no longer exists; use open().
        f = open(resiname_file)
    except IOError:
        print('Warning: Could not open ' + resiname_file + '.\n' + \
              ' No residue number information will be presented.')
        for i in range(10000):
            resi_map[i] = str(i)
        return resi_map
    # If the file is a prmtop file...
    if not resiname_file.endswith('.pdb'):
        resi_num = 1
        residue_section = False
        for line in f:
            # Residue labels end where the RESIDUE_POINTER flag begins.
            if line.startswith('%FLAG RESIDUE_POINTER'):
                break
            if line.startswith('%FLAG RESIDUE_LABEL'):
                residue_section = True
            # Skip everything before the label section and the %F headers.
            if not residue_section or line.startswith('%F'):
                continue
            else:
                residue_names = line.split()
                for resi_name in residue_names:
                    if aa_code == 1:
                        resi_name = ThrLett_to_OneLett(resi_name)
                    resi_name = resi_name.capitalize() + str(resi_num + offset)
                    resi_map[resi_num + indexing - 1] = resi_name
                    resi_num += 1
    # If the file is a PDB file...
    else:
        for line in f:
            if not (line.startswith('ATOM') or line.startswith('HETATM')):
                continue
            # Fixed-width PDB columns: residue name and residue number.
            resi_name = line[17:21].strip()
            resi_num = int(line[22:26].strip())
            if aa_code == 1:
                resi_name = ThrLett_to_OneLett(resi_name)
            resi_name = resi_name.capitalize() + str(resi_num + offset)
            resi_map[resi_num + indexing - 1] = resi_name
    f.close()
    if not resi_map:
        print("Warning: Could not extract residue information from prmtop or PDB file.\n")
        print(" No residue number information will be presented.")
        for i in range(10000):
            resi_map[i] = str(i)
        return resi_map
    return resi_map
|
def strip_newline(block):
    """
    Strip blank lines from the beginning and end of a block of lines.

    :param block: list of lines to trim.
    :return: the block without leading/trailing blank lines (possibly empty).
    """
    first = 0
    last = len(block)
    # Advance past leading whitespace-only lines.
    while first < last and not block[first].strip():
        first += 1
    # Retreat past trailing whitespace-only lines.
    while last > first and not block[last - 1].strip():
        last -= 1
    return block[first:last]
|
def is_valid_integer_response(response):
    """
    Return True if *response* parses as an integer, False otherwise.
    str -> bool
    """
    # Fixed: the original returned the parsed int itself, so a valid "0"
    # came back falsy. Parse to validate, but always return a bool.
    try:
        int(response)
    except ValueError:
        return False
    return True
|
def region(lat, lon):
    """Return the SRTM1 region number of a given lat, lon.

    Map of regions:
    http://dds.cr.usgs.gov/srtm/version2_1/SRTM1/Region_definition.jpg
    """
    # Each entry: (lat_min, lat_max, lon_min, lon_max, region). Checked in
    # order, so overlapping boxes keep the original precedence.
    boxes = (
        (38, 50, -125, -111, 1),
        (38, 50, -111, -97, 2),
        (38, 50, -97, -83, 3),
        (28, 38, -123, -100, 4),
        (25, 38, -100, -83, 5),
        (17, 48, -83, -64, 6),
    )
    for lat_lo, lat_hi, lon_lo, lon_hi, number in boxes:
        if lat_lo <= lat < lat_hi and lon_lo <= lon < lon_hi:
            return number
    # Region 7 wraps around the antimeridian.
    if -15 <= lat < 60 and (172 <= lon < 180 or -180 <= lon < -129):
        return 7
    raise ValueError('Unknown location: %s, %s' % (lat, lon))
|
def divisor(baudrate):
    """Return the clock divisor that produces *baudrate* from the 50 MHz clock."""
    CLOCK_HZ = 50e6  # system clock frequency
    ratio = CLOCK_HZ / baudrate
    return round(ratio)
|
def asset_france(gee_dir):
    """Return the path of the "france" asset available in our test account."""
    return "/".join((gee_dir, "france"))
|
def indent(string, prefix):
    """Backward-compatible textwrap.indent replacement: prefix every line.

    Returns "" for empty input. Note the result carries no trailing newline.
    """
    if not string:
        return ""
    prefixed = [prefix + line for line in string.splitlines()]
    return "\n".join(prefixed)
|
def AFL(tlv_objects_list):
    """Return the value stored under TLV tag '94', or None when absent.

    NOTE(review): tag '94' is presumably the EMV Application File Locator —
    confirm against the callers.
    """
    return tlv_objects_list.get('94')
|
def comment(score, test):
    """Return a (verdict, grade) comment on text complexity from FRES or LIX.

    FRES: higher score = easier; LIX: lower score = easier. Tiers are
    checked from easiest to hardest; anything past the last tier is
    "Very difficult to read".
    """
    # (FRES lower bound, LIX upper bound, verdict)
    tiers = (
        (90, 25, ("Very easy to read", "5th grade")),
        (80, 30, ("Easy to read", "6th grade")),
        (70, 35, ("Fairly easy to read", "7th grade")),
        (60, 45, ("Plain English", "8th to 9th grade")),
        (50, 55, ("Fairly difficult to read", "10th to 12th grade")),
        (30, 60, ("Difficult to read", "College")),
    )
    for fres_floor, lix_ceiling, verdict in tiers:
        if (test == 'FRES' and score > fres_floor) or \
           (test == 'LIX' and score < lix_ceiling):
            return verdict
    return ("Very difficult to read", "College graduate")
|
def get_command_line(pid):
    """
    Given a PID, use the /proc interface to get the full command line for
    the process. Return an empty string if the PID doesn't have an entry in
    /proc.

    :param pid: process id (int)
    :return: command line as a single space-separated string, or ''
    """
    cmd = ''
    try:
        # /proc/<pid>/cmdline separates arguments with NUL bytes.
        with open('/proc/%i/cmdline' % pid, 'r') as fh:
            cmd = fh.read()
        cmd = cmd.replace('\0', ' ')
    except IOError:
        # Process vanished or no /proc entry: report empty command line.
        pass
    return cmd
    # Fixed: removed the redundant fh.close() inside the `with` block —
    # the context manager already closes the file.
|
def pretty_hex_str(byte_seq, separator=","):
    """Convert a sequence of bytes to a string of hexadecimal numbers.

    For instance, the input tuple ``(255, 0, 10)`` yields ``"ff,00,0a"``.

    :param bytes byte_seq: a sequence of bytes; must be compatible with the
        "bytes" type (a lone int is wrapped as a single byte).
    :param str separator: string placed between bytes (default ",").
    """
    # The bytes() constructor validates that every item is in range(0, 256),
    # raising TypeError/ValueError for us.
    if isinstance(byte_seq, int):
        data = bytes((byte_seq, ))
    else:
        data = bytes(byte_seq)
    return separator.join(format(b, "02x") for b in data)
|
def _value_is_type_text(val):
    """
    Return True when the dict *val* has a 'type' entry equal to "text"
    (case-insensitive), False otherwise.

    :param val: dictionary to inspect
    :return: True/False
    """
    return u'type' in val and val['type'].lower() == u"text"
|
def word_score(word: str) -> int:
    """
    Score a word by its length: 1 point for 4-letter words, otherwise the
    word's length, plus a 7-point bonus when the word uses exactly 7
    distinct letters.
    """
    length = len(word)
    if length == 4:
        return 1
    bonus = 7 if len(set(word)) == 7 else 0
    return length + bonus
|
def _remove_localizers_by_imagetype(dicoms):
    """
    Return *dicoms* with localizer series filtered out, based on ImageType.
    """
    def _is_localizer(ds):
        # Explicitly tagged localizer.
        if 'ImageType' in ds and 'LOCALIZER' in ds.ImageType:
            return True
        # 'Projection Image' are Localizers for CT only see MSMET-234
        return ('CT' in ds.Modality and 'ImageType' in ds
                and 'PROJECTION IMAGE' in ds.ImageType)
    return [ds for ds in dicoms if not _is_localizer(ds)]
|
def exists(path):
    """
    Return True when *path* can be opened for reading, False otherwise.

    NOTE(review): the original docstring referenced smart_open, but the
    code uses the builtin open(); kept as-is.
    """
    try:
        handle = open(path)
    except IOError:
        return False
    handle.close()
    return True
|
def organize_reads(design: list):
    """
    Build one CSV-ish line per sample: "<name>,<cond>_rep<rep>,<reads...>".

    Each design row is [name, condition, replicate, read1, read2, ...];
    columns 3+ are joined with commas as the read files.
    """
    lines = [
        f"{row[0]},{row[1]}_rep{row[2]},{','.join(row[3:])}"
        for row in design
    ]
    return '\n'.join(lines)
|
def fmeasure(precision, recall):
    """Compute the F-measure (harmonic mean) of precision and recall.

    Returns 0.0 when precision + recall is not positive (avoids division
    by zero for the all-zero case).
    """
    total = precision + recall
    if total <= 0:
        return 0.0
    return 2 * precision * recall / total
|
def evaluate_metrics(conf_matrix, label_filter=None):
    """
    Evaluate Precision, Recall and F1 based on a confusion matrix as produced by `create_confusion_matrix`.
    Args:
        conf_matrix: a confusion matrix in form of a dictionary from `(gold_label,guess_label)` pairs to counts.
        label_filter: a set of gold labels to consider. If set to `None` all labels are considered.
    Returns:
        Precision, Recall, F1, F0.5 quadruple (note: four values, not three).
    """
    # The string 'None' (not the None object) marks the "no label" class.
    tp = 0
    tn = 0
    fp = 0
    fn = 0
    for (gold, guess), count in conf_matrix.items():
        # A pair is considered when either side is in the filter (or no filter).
        if label_filter is None or gold in label_filter or guess in label_filter:
            if gold == 'None' and guess != gold:
                fp += count
            elif gold == 'None' and guess == gold:
                tn += count
            elif gold != 'None' and guess == gold:
                tp += count
            elif gold != 'None' and guess == 'None':
                fn += count
            else: # both gold and guess are not-None, but different
                # A cross-label confusion counts as FP for the guessed label
                # and FN for the gold label — but only for filtered labels.
                fp += count if label_filter is None or guess in label_filter else 0
                fn += count if label_filter is None or gold in label_filter else 0
    # Guard against zero denominators; prec*recall > 0 also guards F-scores.
    prec = tp / (tp + fp) if (tp + fp) > 0 else 0.0
    recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
    f1 = 2 * prec * recall / (prec + recall) if prec * recall > 0 else 0.0
    f05 = ((1 + (0.5 ** 2)) * prec * recall) / (recall + (0.5 ** 2) * prec) if prec * recall > 0 else 0.0
    return prec, recall, f1, f05
|
def chomp(text: str) -> str:
    """Remove any trailing spaces from each line of *text*.

    Note: line endings are normalized to '\\n' and a final trailing newline
    is dropped (splitlines/join behavior).
    """
    return "\n".join([x.rstrip() for x in text.splitlines()])
|
def min(raw_metric):
    """
    Build a Druid-style "min" aggregator spec for *raw_metric*.

    .. note:: Deprecated — use `longMin` / `doubleMin` instead.
    """
    return dict(type="min", fieldName=raw_metric)
|
def number_of_neighbours_alive(board_array, cell_neighbours):
    """Count the live neighbours of a cell.

    Parameters:
        board_array (list(list(int))): 2D grid of cells (1 = alive)
        cell_neighbours (list of (row, col)): indexes of the cells
            surrounding the cell of interest.
    Returns:
        int: the number of those neighbours that are alive.
    """
    alive = 0
    for row, col in cell_neighbours:
        if board_array[row][col] == 1:
            alive += 1
    return alive
|
def set_xpath(blockette, identifier):
    """
    Return an X-Path string to a blockette with the correct identifier.

    :param blockette: one of 30, 31, 33, 34 or the string 'dictionary'
    :param identifier: value for the code/key element; must be int-coercible
    :raises TypeError: when identifier is not an integer
    :raises NotImplementedError: for unsupported blockette numbers
    """
    try:
        identifier = int(identifier)
    except Exception:
        msg = 'X-Path identifier needs to be an integer.'
        raise TypeError(msg)
    abbr_path = '/xseed/abbreviation_dictionary_control_header/'
    end_of_path = '[text()="%s"]/parent::*'
    # Middle path segment per supported blockette type.
    segments = {
        30: 'data_format_dictionary/data_format_identifier_code',
        31: 'comment_description/comment_code_key',
        33: 'generic_abbreviation/abbreviation_lookup_code',
        34: 'units_abbreviations/unit_lookup_code',
        # All dictionary blockettes.
        'dictionary': '*/response_lookup_key',
    }
    if blockette in segments:
        return abbr_path + segments[blockette] + end_of_path % identifier
    msg = 'XPath for blockette %d not implemented yet.' % blockette
    raise NotImplementedError(msg)
|
def indicator(cond):
    """Compute the indicator function: 1 when cond is truthy, else 0."""
    return int(bool(cond))
|
def ReadFileAsLines(file_path):
    """Read a file and return its contents as a list of lines (newlines kept)."""
    with open(file_path) as handle:
        return list(handle)
|
def _fuzzy_match(match, search_string):
    """
    Fuzzily matches match against search_string
    e.g.: a_long_variable_name can be matched by:
        alvn, aloname
    but not:
        a_bob, namebob, a_long_variable_name2
    Args:
        match |str| = The string to match against a list of strings
        search_string |str| = The string to match
    Returns:
        |bln| = Whether or not the string matches
    Raises:
        |None|
    """
    next_search_start = 0
    for match_char in match:
        found_at = search_string.find(match_char, next_search_start)
        # Bail out if there are no more matches
        if found_at < 0:
            return False
        # Fixed: advance past the matched position. The original searched
        # from the same index again, so repeated characters in `match`
        # re-matched one character (e.g. 'aab' wrongly matched 'abc').
        next_search_start = found_at + 1
    return True
|
def mnormalize(matrix):
    """Scale a matrix in place so its center element becomes 1.0.

    Returns the (mutated) matrix; a no-op when the center is already 1.0.
    """
    rows = len(matrix)
    cols = len(matrix[0])
    scale = 1.0 / matrix[rows // 2][cols // 2]
    if scale == 1.0:
        return matrix
    for row in matrix:
        for col in range(cols):
            row[col] *= scale
    return matrix
|
def have_dollar_symbol(l):
    """Return 1 if the string form of *l* contains a dollar sign, else 0."""
    return 1 if "$" in str(l) else 0
|
def filter_by_frequency(query_dict, reference_dict, score_modifier = 1):
    """
    Compare the frequency of terms (keys) in query_dict against
    reference_dict, removing (in place) any term whose query frequency does
    not exceed score_modifier * reference frequency — terms kept are the
    ones specific to our corpus.

    score_modifier widens the reference range, e.g. 5% -> 1.05.
    Returns the (mutated) query_dict.
    """
    # Collect first, then remove, to avoid mutating while iterating.
    to_remove = [
        term for term in query_dict
        if term in reference_dict
        and query_dict[term] <= score_modifier * reference_dict[term]
    ]
    for term in to_remove:
        query_dict.pop(term, None)
    return query_dict
|
def point_is_on_frontier(point, cell_points):
    """Decide whether a point lies on the edge of a cell.

    A point (x, y) is on the frontier when any of its four orthogonal
    neighbours is not part of the cell. The cell is given as a collection
    of (x, y) tuples.
    """
    x, y = point
    neighbours = ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1))
    return any(n not in cell_points for n in neighbours)
|
def factorial(n):
    """Return n! (the factorial of n) for a non-negative integer n.

    :param n: non-negative integer
    :raises ValueError: if n is negative (the original recursion never
        terminated for negative input)
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    # Iterative form avoids recursion-depth limits for large n.
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
|
def append_missing_features(features, n_features):
    """Append the missing features to a list of features.

    Note: the missing feature indices are appended as a single nested list
    (one new element), mirroring the original behavior.
    """
    missing = [f for f in range(n_features) if f not in features]
    if missing:
        features.append(missing)
    return features
|
def temp_pressure(p_temp, h_temp):
    """
    Use this function as :attr:`~SenseEnviron.temp_source` if you want
    to read temperature from the pressure sensor only. This is the default.

    :param p_temp: temperature reading from the pressure sensor (returned)
    :param h_temp: temperature reading from the humidity sensor (ignored)
    """
    # pylint: disable=unused-argument
    return p_temp
|
def render_dummy(title: str) -> str:
    """Render dummy markdown.

    Returns a pandoc-style document with a YAML title block and a hidden
    div containing a math span and an empty C code block — presumably to
    force the converter to emit its math/highlighting machinery; confirm
    against the caller.
    """
    return fr"""---
title: {title}
...
::: {{style="display:none"}}
$\,$
```c
```
:::
"""
|
def is_end_offset_none(end_offsets: dict, start_offsets: dict) -> bool:
    """
    Check whether any partition that has a start offset lacks an end offset.

    :param end_offsets: topic partition -> end offset
    :param start_offsets: topic partition -> start offset
    :return: True when end_offsets is empty, or when some partition has a
        non-None start offset but a None end offset; False otherwise.
    """
    if not end_offsets:
        return True
    return any(
        end is None and start_offsets[tp] is not None
        for tp, end in end_offsets.items()
    )
|
def mditem(indexlst, array):
    """
    MDITEM indexlist array
    Output the member of the multidimensional ``array`` selected by the
    list of numbers ``indexlist``, indexing one level per number.
    """
    for index in indexlst:
        array = array[index]
    return array
|
def is_valid_sender(sender):
    """Test if the sender config option is valid.

    Valid senders are either '+' followed by digits, or a purely alphabetic
    string of 2..11 characters.
    """
    if len(sender) <= 1:
        return False
    if sender.startswith('+'):
        return sender[1:].isdigit()
    if len(sender) <= 11:
        return sender.isalpha()
    return False
|
def sum_to(options, goal, picks, force_take_all=False):
    """Returns numbers that sum as close to a goal value as possible.
    options -- the numbers to select from (may be reused; non-empty)
    goal -- the value to attempt to sum to
    picks -- the length of the list to be returned
    force_take_all -- when True, pad the final pick so the total hits goal
        exactly (assumes that is desired even if goal exceeds all options)
    """
    selected = []
    if not picks:
        # Zero picks: represent "nothing chosen" as [0].
        selected = [0]
    elif force_take_all and goal < min(options):
        # Goal too small for any option: take the goal itself.
        selected = [goal]
    else:
        # Greedily pick the option closest to the remaining goal spread
        # evenly over the remaining picks (the 0.1 nudges ties downward —
        # presumably intentional; confirm against callers).
        while goal >= min(options) and picks > 0:
            selected.append(min((x for x in options if x <= goal),
                                key=lambda x: abs(x - goal/picks - 0.1)))
            goal -= selected[-1]
            picks -= 1
        # Absorb any leftover goal into the last pick when allowed.
        if force_take_all and max(options) - selected[-1] >= goal:
            selected[-1] += goal
    return selected
|
def division(a, b):
    """
    Divide a by b.

    Parameters:
        a (int): numerator
        b (int): denominator
    Returns:
        float: division result
    Raises:
        ValueError: when b is zero
    """
    if b != 0:
        return a / b
    raise ValueError('Can not divide by zero')
|
def is_perfect_match(dict_a, dict_b):
    """Return True when both dicts carry equal 'no-barnehage:nsrid' values.

    A missing key in either dict means no match (False). Removed the large
    block of commented-out alternative matching code that followed.
    """
    try:
        return dict_a['no-barnehage:nsrid'] == dict_b['no-barnehage:nsrid']
    except KeyError:
        return False
|
def get_matches(lf, candidate_set, match_values=(1, -1)):
    """
    A simple helper function to see how many matches (non-zero by default) an LF gets.
    Returns the matched set, which can then be directly put into the Viewer.

    :param lf: labeling function, called once per candidate
    :param candidate_set: iterable of candidates
    :param match_values: labels that count as a match (default (1, -1));
        changed from a mutable list default to a tuple (best practice —
        mutable defaults are shared across calls).
    """
    matches = []
    for c in candidate_set:
        label = lf(c)
        if label in match_values:
            matches.append(c)
    print("%s matches" % len(matches))
    return matches
|
def smart_city_event_data(event_dict):
    """
    Assemble smart_city event data for transmission to the smart_city
    cloud endpoint.

    params:
        event_dict: contains EVENT_TYPE, SPEED, GPS and TIME entries
    returns:
        dict with the fields the endpoint expects
    """
    # Map output field -> source key in the incoming event dict.
    field_sources = (
        ("event_type", "EVENT_TYPE"),
        ("speed", "SPEED"),
        ("event_timestamp", "TIME"),
        ("gps_coord", "GPS"),
    )
    return {out_key: event_dict[src_key] for out_key, src_key in field_sources}
|
def serialize_uint32(n: int) -> bytes:
    """
    Serialize an unsigned integer ``n`` as 4 bytes (32 bits), big-endian.

    Corresponds directly to the "ser_32(i)" function in BIP32
    (https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki#conventions).

    :param n: The integer to be serialized.
    :return: A 4-byte big-endian serialization of ``n``.
    """
    return n.to_bytes(length=4, byteorder='big')
|
def create_in_term_for_db_queries(values, as_string=False):
    """
    Convert a collection of values (iterable) into a parenthesized,
    comma-separated term usable in a SQL IN statement.

    :param as_string: If *True* each value is surrounded by single quotes.
    :type as_string: :class:`bool`
    :default as_string: *False*
    """
    if as_string:
        rendered = ["'%s'" % (value,) for value in values]
    else:
        rendered = [str(value) for value in values]
    return '(%s)' % ', '.join(rendered)
|
def get_method(cls, name):
    """Return the underlying function for attribute *name* of *cls*.

    Python 3 plain methods are already functions; bound objects such as
    classmethods expose the function via ``__func__``, which we unwrap.
    """
    attr = getattr(cls, name)
    return getattr(attr, "__func__", attr)
|
def lineSegmentsIntersect(p1, p2, q1, q2):
    """Checks if two line segments intersect.
    Keyword arguments:
    p1 -- The start vertex of the first line segment.
    p2 -- The end vertex of the first line segment.
    q1 -- The start vertex of the second line segment.
    q2 -- The end vertex of the second line segment.
    Returns:
    True if the two line segments intersect.
    Note: parallel segments always return False, including collinear
    overlapping ones (the cross-product denominator is zero).
    """
    # Direction vectors of the two segments.
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    da = q2[0] - q1[0]
    db = q2[1] - q1[1]
    # segments are parallel
    if (da*dy - db*dx) == 0:
        return False
    # Solve for the parametric positions s (on q) and t (on p) of the
    # infinite-line intersection point.
    s = (dx * (q1[1] - p1[1]) + dy * (p1[0] - q1[0])) / (da * dy - db * dx)
    t = (da * (p1[1] - q1[1]) + db * (q1[0] - p1[0])) / (db * dx - da * dy)
    # Intersection lies on both segments only when both parameters are in [0, 1].
    return s >= 0 and s <= 1 and t >= 0 and t <= 1
|
def validate_chunk_width(chunk_width):
    """Validate a chunk-width string, returns boolean.

    Expected to be a string representing either a positive integer, like
    '20', or a comma-separated list of positive integers like '20,30,16'.
    """
    if not isinstance(chunk_width, str):
        return False
    # str.split always yields at least one element, so no emptiness check
    # is needed (the original asserted this, which -O would strip).
    for elem in chunk_width.split(","):
        try:
            # Fixed: narrowed the bare `except` to ValueError — the only
            # exception int() raises for a non-numeric string (incl. '').
            if int(elem) < 1:
                return False
        except ValueError:
            return False
    return True
|
def cquote(x):
    """Enquote a string using C rules: escape backslash and double quote."""
    escape_table = str.maketrans({'\\': '\\\\', '"': '\\"'})
    return str(x).translate(escape_table)
|
def two_col_list(m) -> str:
    """Create a two-column css list from a dict, used in reports.

    Parameters
    ----------
    m : dict
        Dict to convert\n
    """
    # Build with join instead of repeated string concatenation (the
    # original += loop is quadratic in the number of items).
    items = ''.join(
        f'<li><span>{name}:</span><span>{val}</span></li>'
        for name, val in m.items()
    )
    return f'<ul class="two_col_list_narrow">{items}</ul>'
|
def reverse_list(lst):
    """
    Return the reversed form of a given list.

    Parameters
    ----------
    lst : list
        Input list.

    Returns
    -------
    reversed_list : list
        A new list with the elements of *lst* in reverse order.

    Examples
    --------
    >>> reverse_list([5, 4, 7, 2])
    [2, 7, 4, 5]
    """
    return list(reversed(lst))
|
def subdict(d, keep=None, drop=None):
    """Compute the "subdictionary" of a dict, *d*.

    A subdict is to a dict what a subset is to a set: all keys of the
    result are present in *d*. Returns a new mapping of the same type as
    *d*, with any keys in *drop* removed and only keys in *keep* retained
    (when they existed in the original). *keep* defaults to all keys and
    *drop* to none, so with no arguments this is equivalent to ``dict(d)``.

    >>> subdict({'a': 1, 'b': 2, 'c': 3}, drop=['b', 'c'])
    {'a': 1}
    >>> subdict({'a': 1, 'b': 2, 'c': 3}, keep=['a', 'c'])
    {'a': 1, 'c': 3}
    """
    retained = set(d.keys() if keep is None else keep)
    retained -= set([] if drop is None else drop)
    return type(d)([(k, v) for k, v in d.items() if k in retained])
|
def update(value, iterable=None, **kwargs):
    """
    Insert ``key: value`` pairs into a copy of the report.

    :param value: data chunk to operate on (not mutated)
    :type value: :py:class:`dict` or :py:class:`~collections.Mapping`
    :param iterable: iterable of ``(key, value)`` pairs
    :type iterable: iterable[(str, T)]
    :param kwargs: explicit ``key=value`` parameters
    :return: a shallow copy of *value* with the new pairs applied
    """
    result = value.copy()
    if iterable:
        result.update(iterable, **kwargs)
    else:
        result.update(**kwargs)
    return result
|
def set_dict_keys(d: dict, key_list: list):
    """Return a new dictionary with the items of *d* in *key_list* order.

    :param d: source dictionary
    :param key_list: desired key order; must contain exactly the keys of d
    :raises ValueError: when key_list does not match d's keys (the original
        used a bare assert, which is stripped under ``python -O``)
    """
    if set(d) != set(key_list):
        raise ValueError("key_list must contain exactly the keys of d")
    return {k: d[k] for k in key_list}
|
def use_node_def_or_num(given_value, default_func):
    """Transform a value of type (None, int, float, Callable) into a node
    annotation function.

    - None: fall back to *default_func*.
    - int/float: wrap as a constant function returning float(given_value).
    - anything else: assumed callable and passed through unchanged.
    """
    if given_value is None:
        return default_func
    if isinstance(given_value, (int, float)):
        constant = float(given_value)
        return lambda atom: constant
    return given_value
|
def sort_array_with_id(arr):
    """
    Sort an array ascending while keeping track of original positions.

    :param arr: array of values
    :return: list of (index, value) tuples sorted by value
    """
    indexed = list(enumerate(arr))
    indexed.sort(key=lambda pair: pair[1])
    return indexed
|
def bio_tagging_fix(pred_tags):
    """
    Fix BIO tagging mistakes in place.

    Two repairs are applied:
    1. A sequence cannot open with I- tags: any leading run of I- tags is
       rewritten to 'B-other'.
    2. A lone tag sandwiched between two identical I- tags of a different
       entity type is overwritten with the surrounding tag.

    :param pred_tags: list of predicted tags (mutated in place)
    :return: the same list, with fixed tags
    """
    start = 0
    # Repair 1: rewrite the leading run of I- tags.
    while start < len(pred_tags) and pred_tags[start][0] == 'I':
        pred_tags[start] = 'B-other'
        start += 1
    # Repair 2: smooth single-tag gaps. tag[2:] is the entity type after
    # the 'B-'/'I-' prefix.
    for index in range(start, len(pred_tags) - 1):
        if pred_tags[index - 1][0] == 'I' \
                and (pred_tags[index - 1] == pred_tags[index + 1]) \
                and (pred_tags[index - 1][2:] != pred_tags[index][2:]):
            pred_tags[index] = pred_tags[index - 1]
    return pred_tags
|
def factor_idx2compared_f_values(idx,k,nr_of_sim_parameters,proposed_worstcase):
    """Return the pair of experiment indices (l, u) a factor compares, plus
    a flag saying whether the factor checks that an experiment is the
    arg-max for a simulation parameter.

    NOTE(review): the original docstring was commented-out debug prints;
    this description is reconstructed from the code — confirm against the
    callers. `proposed_worstcase` appears to be an indicator vector (one
    entry per idx, >0 marking the claimed worst-case experiment), and
    indices are laid out row-major with nr_of_sim_parameters columns.
    Returns (None, None, flag) when the two indices coincide.
    """
    factor_checks_if_exp_is_the_arg_max_for_a_sim_par=False
    sim_idx=idx%nr_of_sim_parameters#idx pairs is order in row-major order
    if proposed_worstcase[idx]>0:
        #the experiment is the proposed maximum,
        #the factor has to check if k is the minimum
        l=k
        u=idx
    else:
        #the factor has to check if idx is the smaller the the max for this simulation parameter
        l=idx
        #We search the corresponding claimed worst case experiment
        # Walk down the column (stride nr_of_sim_parameters) until the
        # marked worst-case entry for this simulation parameter is found.
        u=sim_idx
        while proposed_worstcase[u]==0:
            u=u+nr_of_sim_parameters
        factor_checks_if_exp_is_the_arg_max_for_a_sim_par=True
    if l==u:
        l=None
        u=None
    return l,u, factor_checks_if_exp_is_the_arg_max_for_a_sim_par
|
def __zero_forward_closed(x, y, c, l):
    """Convert coordinates to zero-based, both strand, open/closed coordinates.

    Parameters are from, to, is_positive_strand, length of contig. On the
    negative strand the interval is mirrored across the contig.
    """
    y = y + 1
    if not c:
        x, y = l - y, l - x
    return x, y
|
def almost_equal(a, b, places=3):
    """
    Return True when numbers a and b agree to within *places* decimal
    places (their rounded absolute difference is zero), else False.

    :param a: a real number
    :type a: int/float
    :param b: a real number
    :type b: int/float
    :param places: number of decimal places
    :type places: int

    >>> almost_equal(1.0001, 1.0005)
    True
    >>> almost_equal(1.1, 1.0005)
    False
    >>> almost_equal(2, 1)
    False
    """
    difference = abs(a - b)
    return round(difference, places) == 0
|
def split_time_value(sec):
    """Split a time value in seconds into hours, minutes, and seconds.

    Parameters
    ----------
    sec : float
        Time value, in seconds.

    Returns
    -------
    hours, minutes, seconds : float
        The same duration decomposed into hours, minutes, and seconds.
    """
    whole_minutes, seconds = divmod(sec, 60)
    hours, minutes = divmod(whole_minutes, 60)
    return hours, minutes, seconds
|
def union_list(*lists) -> list:
    """
    Return a sorted list of the union of the elements in all given lists.
    """
    return sorted(set.union(*map(set, lists)))
|
def del_start_stop(x, start, stop):
    """Delete x[start:stop] in place and return the mutated list.

    Out-of-range or inverted slices are harmless no-ops, following normal
    slice semantics.

    >>> l = [1,2,3,4]
    >>> del_start_stop(l, 0, 2)
    [3, 4]
    >>> l
    [3, 4]
    >>> l = [1,2,3,4,5,6,7]
    >>> del_start_stop(l, -1, -20)
    [1, 2, 3, 4, 5, 6, 7]
    >>> del_start_stop(l, -6, -4)
    [1, 4, 5, 6, 7]
    >>> del_start_stop(l, -20, -2)
    [6, 7]
    >>> del_start_stop(l, -2, 3)
    []
    >>> del_start_stop([1,2,3,4], 20, -20)
    [1, 2, 3, 4]
    >>> del_start_stop([1,2,3,4], 0, 0)
    [1, 2, 3, 4]
    """
    # Slice-assignment of an empty list is equivalent to `del x[start:stop]`.
    x[start:stop] = []
    return x
|
def value(fn_or_value, *args, **kwargs):
    """
    Evaluate the argument if it is a function, or return it otherwise.

    Args:
        fn_or_value:
            Callable or some other value. If input is a callable, call it
            with the provided arguments and return the result. Otherwise,
            simply return it.

    Examples:
        >>> value(42)
        42
        >>> value(lambda: 42)
        42
    """
    if not callable(fn_or_value):
        return fn_or_value
    return fn_or_value(*args, **kwargs)
|
def is_subsequence(needle, haystack):
    """Return True when all elements of needle appear in haystack in order.

    Other elements may be interspersed throughout. Relies on the iterator
    idiom: each `in` test consumes the iterator up to (and including) the
    found element, so matches must advance monotonically.
    """
    remaining = iter(haystack)
    return all(element in remaining for element in needle)
|
def escape_filename_sh_ansic(name):
    """Return an ansi-c shell-escaped version of a filename.

    Control characters (< 32) become \\xNN escapes, backslashes are
    doubled, and everything else passes through; the result is wrapped in
    an ansi-c quote $'...'.
    """
    def _escape(ch):
        code = ord(ch)
        if code < 32:
            return "\\x%02x" % code
        return "\\\\" if ch == "\\" else ch
    return "$'" + "".join(map(_escape, name)) + "'"
|
def _file_name_builder(test_name):
    """Build an output file name from a test name.

    Spaces become underscores; a non-string name falls back to
    "NoName.dat".
    """
    try:
        stem = test_name + ".dat"
    except TypeError:
        # test_name was not a string (e.g. None).
        return "NoName" + ".dat"
    return stem.replace(' ', '_')
|
def latin1_to_ascii(unicrap):
    """Best-effort transliteration of Latin-1 text into 7-bit ASCII.

    Characters in the 7-bit ASCII range pass through unchanged.  Latin-1
    accented letters map to unaccented equivalents, most symbol
    characters map to a readable token (e.g. 0xbd -> '{1/2}'), and any
    remaining character at or above 0x80 with no mapping is dropped.
    """
    xlate = {0xc0: 'A', 0xc1: 'A', 0xc2: 'A', 0xc3: 'A', 0xc4: 'A', 0xc5: 'A',
             0xc6: 'Ae', 0xc7: 'C',
             0xc8: 'E', 0xc9: 'E', 0xca: 'E', 0xcb: 'E',
             0xcc: 'I', 0xcd: 'I', 0xce: 'I', 0xcf: 'I',
             0xd0: 'Th', 0xd1: 'N',
             0xd2: 'O', 0xd3: 'O', 0xd4: 'O', 0xd5: 'O', 0xd6: 'O', 0xd8: 'O', 0x152: 'Oe',
             0xd9: 'U', 0xda: 'U', 0xdb: 'U', 0xdc: 'U',
             0xdd: 'Y', 0xde: 'th', 0xdf: 'ss',
             0xe0: 'a', 0xe1: 'a', 0xe2: 'a', 0xe3: 'a', 0xe4: 'a', 0xe5: 'a',
             0xe6: 'ae', 0xe7: 'c',
             0xe8: 'e', 0xe9: 'e', 0xea: 'e', 0xeb: 'e',
             0xec: 'i', 0xed: 'i', 0xee: 'i', 0xef: 'i',
             0xf0: 'th', 0xf1: 'n',
             0xf2: 'o', 0xf3: 'o', 0xf4: 'o', 0xf5: 'o', 0xf6: 'o', 0xf8: 'o', 0x153: 'oe',
             0xf9: 'u', 0xfa: 'u', 0xfb: 'u', 0xfc: 'u',
             0xfd: 'y', 0xfe: 'th', 0xff: 'y',
             0xa1: '!', 0xa2: '{cent}', 0xa3: '{pound}', 0xa4: '{currency}',
             0xa5: '{yen}', 0xa6: '|', 0xa7: '{section}', 0xa8: '{umlaut}',
             0xa9: '{C}', 0xaa: '{^a}', 0xab: '<<', 0xac: '{not}',
             0xad: '-', 0xae: '{R}', 0xaf: '_', 0xb0: '{degrees}',
             0xb1: '{+/-}', 0xb2: '{^2}', 0xb3: '{^3}', 0xb4: "'",
             0xb5: '{micro}', 0xb6: '{paragraph}', 0xb7: '*', 0xb8: '{cedilla}',
             0xb9: '{^1}', 0xba: '{^o}', 0xbb: '>>',
             0xbc: '{1/4}', 0xbd: '{1/2}', 0xbe: '{3/4}', 0xbf: '?',
             0xd7: '*', 0xf7: '/'
             }
    pieces = []
    for ch in unicrap:
        code = ord(ch)
        if code in xlate:
            pieces.append(xlate[code])
        elif code < 0x80:
            pieces.append(str(ch))
        # else: unmapped character >= 0x80 -- silently dropped
    return "".join(pieces)
|
def url2id(url):
    """Extract the numeric ID from a URL.

    Assumes the URL ends with ".../<id>/", i.e. the ID is the
    second-to-last slash-separated component.
    """
    components = url.split("/")
    return int(components[-2])
|
def command_string(command_dict, command_number=None, numbers=True,
                   time=True):
    """Create the string representation for the command entry.
    inputs:
        command_dict: A command dictionary of the form {"cmd": "<command>",
            "ts": <timestamp>}. The "ts" key is optional.
        command_number: Line number to prefix the command with, or None.
        numbers: If true and command_number is not None, then include the
            line number. If false, do not include the line number.
        time: If true, then include the timestamp (or a placeholder when
            the "ts" key is missing). If false, do not include it.
    returns:
        A string representation of the command.
    """
    string_rep = ""
    # Bug fix: previously a None command_number was still rendered,
    # producing a literal "None: " prefix.
    if numbers and command_number is not None:
        string_rep += str(command_number) + ": "
    string_rep += command_dict["cmd"] + "\n"
    if time:
        string_rep += command_dict.get("ts", "<timestamp> missing")
        string_rep += "\n"
    return string_rep
|
def is_number(s):
    """
    Checks if the parameter s can be interpreted as a number.
    :param s: anything
    :return: True if float(s) succeeds, False otherwise
    """
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        # TypeError covers non-string/non-numeric inputs such as None or
        # lists, which previously escaped the handler and crashed callers.
        return False
|
def test(predictions, labels, k=1):
"""
Return precision and recall modeled after fasttext's test
"""
precision = 0.0
nexamples = 0
nlabels = 0
for prediction, labels in zip(predictions, labels):
for p in prediction:
if p in labels:
precision += 1
nexamples += 1
nlabels += len(labels)
return (precision / (k * nexamples), precision / nlabels)
|
def bool_to_str(bool_val):
    """Convert a boolean to the lowercase string form used in YAML configs.

    Raises TypeError for any non-bool input.
    """
    if isinstance(bool_val, bool):
        return "true" if bool_val else "false"
    raise TypeError("The bool_val is required to be boolean type")
|
def beam_curv(z, z0):
    """Evaluate z + z0**2 / z.

    NOTE(review): presumably the wavefront radius of curvature of a
    Gaussian beam, with z the axial distance and z0 the Rayleigh
    range -- TODO confirm against callers.
    """
    return z + (z0 * z0) / z
|
def _get_wanted_channels(wanted_sig_names, record_sig_names, pad=False):
"""
Given some wanted signal names, and the signal names contained in a
record, return the indices of the record channels that intersect.
Parameters
----------
wanted_sig_names : list
List of desired signal name strings
record_sig_names : list
List of signal names for a single record
pad : bool, optional
Whether the output channels is to always have the same number
of elements and the wanted channels. If True, pads missing
signals with None.
Returns
-------
list
The indices of the wanted record channel names.
"""
if pad:
return [
record_sig_names.index(s) if s in record_sig_names else None
for s in wanted_sig_names
]
else:
return [
record_sig_names.index(s)
for s in wanted_sig_names
if s in record_sig_names
]
|
def splitword(word, start):
    """Split word into [start, remainder].

    The remainder is the part of word after its first len(start)
    characters.  Returns a two-element list (note: the original
    docstring said "tuple-form", but a list has always been returned).
    """
    prefix_len = len(start)
    return [start, word[prefix_len:]]
|
def get_formatted_issue(repo, issue, title, url):
    """
    Single place to adjust the formatted markdown line for PR data.

    The trailing newline supports writelines() calls, which do not add
    newlines on their own.
    """
    return f"* {repo}/{issue}: [{title}]({url})\n"
|
def GetSettingNames():
    """Return the list of dictionary keys of selected settings and results."""
    names = ['active_channels']
    return names
|
def get_all_determinizations_comb(determinizations_of_all_effects):
    """
    Generate all combinations of the given probabilistic-effect
    determinizations, preserving the order in which effects are given.

    Each input element is a tuple (action_name, determinizations), where
    determinizations is the sequence returned by
    "get_all_determinizations_effect" for that effect.

    For example, for input:
        ((action_0, (0, det_0_0), ..., (k_0, det_0_2)),
         (action_1, (0, det_1_0), ..., (k_0, det_1_1)))
    the result is the list of all cross-product combinations:
        ( ((action_0, (0, det_0_0)), (action_1, (0, det_1_0))),
          ((action_0, (0, det_0_0)), (action_1, (0, det_1_1))),
          ...,
          ((action_0, (2, det_0_2)), (action_1, (0, det_1_1))) )
    """
    # Base case: a single effect yields one combination per determinization.
    if len(determinizations_of_all_effects) == 1:
        head_action = determinizations_of_all_effects[0][0]
        return [[(head_action, det)]
                for det in determinizations_of_all_effects[0][1]]
    # Recurse on the remaining effects, then prefix every resulting
    # combination with each determinization of the first effect.
    tail_combs = get_all_determinizations_comb(
        determinizations_of_all_effects[1:])
    head_action, head_dets = determinizations_of_all_effects[0]
    combined = []
    for det in head_dets:
        for tail in tail_combs:
            combined.append([(head_action, det)] + list(tail))
    return combined
|
def compute_average_precision(positive_ranks):
    """
    Compute average precision following the Revisited Oxford/Paris
    dataset convention.

    Extracted from: https://github.com/tensorflow/models/blob/master/research/delf/delf/python/detect_to_retrieve/dataset.py

    `positive_ranks` holds the ranks of every expected positive index
    image; an empty input yields 0.  Rather than the finite-sum method
    common in IR literature, this integrates the precision-recall curve
    by averaging adjacent precision points and multiplying by the
    recall step.

    Args:
        positive_ranks: Sorted 1D NumPy integer array, zero-indexed.
    Returns:
        average_precision: Float.
    """
    num_expected = len(positive_ranks)
    if not num_expected:
        return 0.0
    recall_step = 1.0 / num_expected
    average_precision = 0.0
    for i, rank in enumerate(positive_ranks):
        # Precision just before this hit; by convention 1.0 at rank 0.
        left_precision = 1.0 if not rank else i / rank
        right_precision = (i + 1) / (rank + 1)
        average_precision += (left_precision + right_precision) * recall_step / 2
    return average_precision
|
def real_space_pixel_scales_tag_from_real_space_pixel_scales(real_space_pixel_scales):
    """Generate a phase-name tag from the (y, x) real-space pixel scales.

    real_space_pixel_scales = None       -> ""
    real_space_pixel_scales = (1.0, 2.0) -> "__rs_pix_1.00x2.00"
    """
    if real_space_pixel_scales is None:
        return ""
    return "__rs_pix_{0:.2f}x{1:.2f}".format(
        real_space_pixel_scales[0], real_space_pixel_scales[1])
|
def generate_discovery_cache_key(name, ext):
    """ Generate cache key for office web app hosting discovery
    name: Operations that you can perform on an Office document
    ext: The file formats that are supported for the action
    """
    return "_".join(("wopi", name, ext))
|
def json_pointer(*args):
    """
    Template a mix of strings and ints as a "json path".
    Example:
        {% image_field "people" 0 'picture' as field %}
    will return
        people/0/picture
    :param args: A list of strings and ints
    :return: a "json pointer" string
    """
    return "/".join(map(str, args))
|
def verify_yolo_hyperparams(yolo_layers):
    """
    ----------
    Author: Damon Gwinn (gwinndr)
    ----------
    - Verifies hyperparams that should be the same across yolo layers (like nms_kind) are the same
    - Returns True if all is good, False if all is not good
    ----------
    """
    # Non-sequence input is trivially consistent.
    if type(yolo_layers) not in (list, tuple):
        return True
    if len(yolo_layers) <= 1:
        return True
    reference = yolo_layers[0]
    # Attributes expected to match the first layer's values exactly.
    shared_attrs = ("nms_kind", "jitter", "random")
    for layer in yolo_layers[1:]:
        mismatched = [attr for attr in shared_attrs
                      if getattr(layer, attr) != getattr(reference, attr)]
        for attr in mismatched:
            print("verify_yolo_hyperparams: Error: %s not consistent across all yolo layers" % attr)
        if mismatched:
            return False
    return True
|
def listtoslides(data):
    """Normalize raw slide data into [title, body, imgpath, duration] lists.

    For each entry, keeps only the first two fields, truncates the title
    to 25 and the body to 180 characters, then appends a placeholder
    image path and a zero duration.  Input entries are not mutated.
    """
    slides = []
    for entry in data:
        title = entry[0][:25]
        body = entry[1][:180]
        slides.append([title, body, "imgpath", 0])
    return slides
|
def average_gateset_infidelity(modelA, modelB):
    """ Average model infidelity """
    # B is target model usually but must be "modelB" b/c of decorator coding...
    #TEMPORARILY disabled b/c RB analysis is broken
    #from ..extras.rb import theory as _rbtheory
    # NOTE(review): while the RB-based computation above is disabled this
    # returns the sentinel -1.0; callers must not treat it as a real
    # infidelity value.
    return -1.0
|
def dash(*txts):
    """Join the non-empty entries of txts with " -- "."""
    non_empty = [t for t in txts if t != ""]
    return " -- ".join(non_empty)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.