def iou(box1, box2):
"""Implement the intersection over union (IoU) between box1 and box2
Arguments:
box1 -- first box, list object with coordinates (x1, y1, x2, y2)
box2 -- second box, list object with coordinates (x1, y1, x2, y2)
"""
xi1 = max(box1[0], box2[0])
yi1 = max(box1[1], box2[1])
xi2 = min(box1[2], box2[2])
yi2 = min(box1[3], box2[3])
inter_area = max(xi2 - xi1, 0) * max(yi2 - yi1, 0)  # clamp to 0 when the boxes do not overlap
# Calculate the Union area by using Formula: Union(A,B) = A + B - Inter(A,B)
box1_area = (box1[3] - box1[1])*(box1[2]- box1[0])
box2_area = (box2[3] - box2[1])*(box2[2]- box2[0])
union_area = (box1_area + box2_area) - inter_area
# compute the IoU
iou = inter_area / union_area
return iou
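# A quick usage sketch with illustrative boxes in (x1, y1, x2, y2) form:
box_a = [0, 0, 2, 2]
box_b = [1, 1, 3, 3]
# intersection is 1x1 = 1, union is 4 + 4 - 1 = 7, so the IoU is roughly 0.143
print(iou(box_a, box_b))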
|
def delete_stream(data_type, stream_id):
"""
Construct a 'deleteStream' message to delete a closed stream between the client and server, on the server.
:param data_type: int the RTMP datatype.
:param stream_id: int the stream which the message will be sent on.
"""
msg = {'msg': data_type,
'stream_id': stream_id,
'command': [u'deleteStream', 0, None, stream_id]}
return msg
|
def bytes_to_mb(B) -> float:
"""Return the given number of bytes as megabytes (float), rounded to 5 decimal places."""
B = float(B)
KB = float(1024)
MB = float(KB ** 2) # 1,048,576
return float("{0:.5f}".format(B / MB))
|
def flatten_edge_dict(edge, src_col: str = 'src', dst_col: str = 'dst'):
"""
Convert gremlin edge (in dict form) to flat dict appropriate for pandas
- Metadata names take priority over property names
- Remap: T.inV, T.outV -> inV, outV (Neptune)
- Drop field 'type'
- src_col and dst_col define output column name for metadata entries
"""
d = {}
props = {}
for k in edge.keys():
if k == 'type':
continue
if k == 'inV':
d[src_col] = edge[k]
continue
if k == 'outV':
d[dst_col] = edge[k]
continue
if k == 'IN' and isinstance(edge[k], dict):
d[src_col] = edge[k]['id']
continue
if k == 'OUT' and isinstance(edge[k], dict):
d[dst_col] = edge[k]['id']
continue
v = edge[k]
if isinstance(v, list):
d[str(k)] = v[0]
continue
if k == 'properties' and isinstance(v, dict):
for prop_k in v:
if prop_k == src_col or prop_k == dst_col:
continue
v2 = v[prop_k]
if isinstance(v2, list) and (len(v2) == 1) and isinstance(v2[0], dict) and 'id' in v2[0] and 'value' in v2[0]:
props[str(prop_k)] = v2[0]['value']
continue
props[str(prop_k)] = str(v2)
continue
d[str(k)] = edge[k]
if len(props.keys()) > 0:
d = {**props, **d}
return d
|
def bucks(integer):
"""Format an integer with '.' as the thousands separator."""
return '{:,}'.format(integer).replace(',', '.')
|
def tp(x0,U,lam=2,L=3):
"""
tp means (t)ime to escape right with (p)ositive initial vel.
"""
return ((2*(L - x0))/U + ((L - x0)*(L + x0)*lam)/U**2
+ L/(U + L*lam) - x0/(U + x0*lam))/3.
|
def makeFilesKeyword(
cmdr,
fileName,
host = "localhost",
commonRoot = "/tmp",
progSubDir = "/prog/date",
userSubDir = "/user/",
):
"""Return a files keyword with data
Fields are:
- cmdr (progID.username)
- host
- common root directory
- program and date subdirectory
- user subdirectory
- file name(s)
"""
return "agileFiles=%s, %s, %r, %r, %r, %r" % (cmdr, host, commonRoot, progSubDir, userSubDir, fileName)
|
def osm_count_sql(grid, osm, cat):
"""
Returns a sql fragment that counts OSM point features per grid cell by
category
Args:
grid (string): a PostGIS table name containing the grid
osm (string): a PostGIS table name containing the OSM point features
cat (string): a PostGIS table containing the OSM point features'
categories
Returns:
sql (string): an sql fragment
"""
sql = ("SELECT"
" SUM(CASE WHEN cat.cat = 1 THEN 1 ELSE 0 END) AS pointCult,"
" SUM(CASE WHEN cat.cat = 2 THEN 1 ELSE 0 END) AS pointIndus,"
" SUM(CASE WHEN cat.cat = 3 THEN 1 ELSE 0 END) AS pointNat,"
" SUM(CASE WHEN cat.cat = 4 THEN 1 ELSE 0 END) AS pointStruct,"
" SUM(CASE WHEN cat.cat = 0 THEN 1 ELSE 0 END) AS pointMisc,"
" grid.id AS id"
" FROM %s as grid,"
" %s AS osm,"
" %s AS cat"
" WHERE cat.osm_id = osm.osm_id AND ST_Intersects(grid.cell, osm.way)"
" GROUP BY"
" id")
return sql % (grid, osm, cat)
|
def powerlaw_sample(prob, alpha, minval, maxval):
"""
Draw a sample from a truncated power-law distribution p(x) ~ x**(-alpha)
on [minval, maxval] via inverse-transform sampling, where `prob` is expected
to be a uniform random number in [0, 1].
"""
# PDF = X_0 x^-alpha
# CDF = integral(X_0 x^-alpha,xmin,x) = [X_0 / (-alpha+1) x^(-alpha+1)]_xmin^xmax
# 1 = X_0/(1-alpha)*(xmax^(1-alpha)-xmin^(1-alpha))
X0 = (1.-alpha)/(maxval**(1.-alpha)-minval**(1.-alpha))
#cdf = X0/(1.-alpha) * (x**(1.-alpha)-minval**(1.-alpha))
#return cdf
x = ((1.-prob)*(1.-alpha)/X0 + minval**(1.-alpha))**(1./(1.-alpha))
return x
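# A minimal sanity-check sketch, assuming `prob` is a uniform random draw in [0, 1):
import random
samples = [powerlaw_sample(random.random(), alpha=2.5, minval=1.0, maxval=100.0)
           for _ in range(5)]
# every sample lies in [minval, maxval]; values near minval are most likely for alpha > 1
print(samples)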
|
def get_bit(x, n):
""" Get the n'th bit of integer x (from little end)."""
return (x&(0x01 << n)) >> n
|
def PI(improvements):
"""
Format a string of percent improvements.
"""
Z = sum(improvements)
if Z == 0:
return f"[{0.0:6.2f},{0.0:6.2f},{0.0:6.2f}]"
z = improvements/Z
return "[" + ",".join(f"{x*100.0:6.2f}" for x in z) + "]"
|
def remove_uptake_and_secretion_reactions(reactions):
"""
Remove all the uptake and secretion reactions added to a model, eg. when you are running multiple simulations.
:param reactions: The reactions dict
:type reactions: dict
:return: The enzymes, compounds, and reactions data structure
:rtype: dict
"""
toremove = set()
for r in reactions:
if r.startswith("UPTAKE_SECRETION_REACTION"):
toremove.add(r)
for r in toremove:
reactions.pop(r)
return reactions
|
def _tester(func, *args):
"""
Tests function on arguments and returns first positive.
:param func: function(arg)->boolean
:param args: other arguments
:return: something or none
"""
for arg in args:
if arg is not None and func(arg):
return arg
return None
|
def remainder(a,b):
"""Returns the remainder of a divided by b*
*Python uses different modulo calculation than C++ - this is the C++ one
a - (float), b - (float)
return - (float)
"""
return a - int(a/b) * b
|
def make_display_name(gene_name, allele_name):
""" Make a combined gene/allele name, e.g., TRAV1-1*01 """
if allele_name is None:
return gene_name
else:
return gene_name + "*" + allele_name
|
def set_dict(_dict, _key, _value):
"""
>>> _dict = {}
>>> _key = 'a.b.c' or ['a', 'b', 'c']
>>> _value = 1
{
'a': {
'b': {
'c': 1
}
}
}
"""
if not isinstance(_key, list):
_key = _key.split('.')
length = len(_key)
if length <= 0:
return _dict
if length == 1:
_dict[_key[0]] = _value
return _dict
i = 0
temp_dict = _dict
while i <= length - 1:
if _key[i] not in temp_dict:
if length - i > 1:
temp_dict[_key[i]] = {}
else:
temp_dict[_key[i]] = _value
temp_dict = temp_dict[_key[i]]
i += 1
return _dict
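# Usage sketch: both the dotted-string and the list form of the key work.
d = {}
set_dict(d, 'a.b.c', 1)          # -> {'a': {'b': {'c': 1}}}
set_dict(d, ['a', 'b', 'd'], 2)  # -> {'a': {'b': {'c': 1, 'd': 2}}}
print(d)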
|
def SumShares(shares):
"""Sums the shares in a stack of purchases."""
net = 0
for (date, units) in shares:
net += units
return net
|
def generate_running_scan_job_dict(input_dict):
"""
Generate the running scan job dictionary
"""
running_jobs = {}
if "running_jobs" not in input_dict:
return running_jobs
for species, jobs in input_dict["running_jobs"].items():
running_jobs[species] = {}
for job in jobs:
if job["job_type"] == "scan":
running_jobs[species][job["job_name"]]=job["pivots"]
if not running_jobs[species]:
running_jobs.pop(species, None)
return running_jobs
|
def CleanLine(line):
"""Converts a line from coolrunning results to a tuple of values."""
t = line.split()
if len(t) < 6:
return None
place, divtot, div, gun, net, pace = t[0:6]
if not '/' in divtot:
return None
for time in [gun, net, pace]:
if ':' not in time:
return None
return place, divtot, div, gun, net, pace
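# Usage sketch with a hypothetical results line in the expected column order
# (place, div/tot, division, gun time, net time, pace, then the runner's name):
line = "1 1/362 M2039 30:43 30:42 4:57 Jane Doe"
print(CleanLine(line))  # ('1', '1/362', 'M2039', '30:43', '30:42', '4:57')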
|
def get_max_id(corpus):
"""Get the highest feature id that appears in the corpus.
Parameters
----------
corpus : iterable of iterable of (int, numeric)
Collection of texts in BoW format.
Returns
------
int
Highest feature id.
Notes
-----
For empty `corpus` return -1.
"""
maxid = -1
for document in corpus:
if document:
maxid = max(maxid, max(fieldid for fieldid, _ in document))
return maxid
|
def get_query_string(request, key):
"""
Return calculated values for some query string keys, else return value.
"""
if not request:
return
if key == "paginated":
paginated = request.GET.get("paginated")
if paginated == u"false":
return False
else:
return True
elif key == "search" and request.method == "POST":
return request.POST.get("search", "")
elif key == "costs": # plot
costs = request.GET.get("costs")
if costs:
costs = costs.split(" ")
else:
costs = []
costs = [i.split(",") for i in costs]
return costs
elif key == "grosses": # plot
grosses = request.GET.get("grosses")
if grosses:
grosses = grosses.split(" ")
else:
grosses = []
grosses = [i.split(",") for i in grosses]
return grosses
elif key == "nets": # plot
nets = request.GET.get("nets")
if nets:
nets = nets.split(" ")
else:
nets = []
nets = [i.split(",") for i in nets]
return nets
elif key == "checkbox":
query_checkbox = {}
query_checkbox_active = request.POST.get("checkbox-active")
query_checkbox_subscribe = request.POST.get("checkbox-subscribe")
condition = ( # if any of these exist
query_checkbox_active == "on"
or query_checkbox_active == "off"
or query_checkbox_subscribe == "on"
or query_checkbox_subscribe == "off"
)
query_checkbox["active"] = query_checkbox_active
query_checkbox["subscribe"] = query_checkbox_subscribe
query_checkbox["condition"] = condition
return query_checkbox
elif key == "copy":
return request.POST.get("copy")
elif key == "delete":
return request.POST.get("delete")
elif key == "invoiced":
query_invoiced = {}
query_invoiced_state = request.POST.get("invoiced")
query_invoiced["state"] = query_invoiced_state
condition = ( # if either of these exist
query_invoiced_state == "true" or query_invoiced_state == "false"
)
query_invoiced["condition"] = condition
return query_invoiced
else:
return request.GET.get(key)
|
def connect_string(config):
"""return connect string including application_name"""
return "postgresql://" + config['username'] + ":" + config['password'] + "@" + \
config['db_url'] + "?application_name={}".format(config['ME'])
|
def range2peak(chrom, rang):
"""
Transform a peak from Python range object to string.
"""
return f'{chrom}:{rang.start}-{rang.stop-1}'
|
def get_left_strip(chonk):
"""
Compute the left vertical strip of a 2D list.
"""
return [chonk[_i][0] for _i in range(len(chonk))]
|
def calc_num_parameters(layers):
"""
Compute the total number of learnable parameters in the model
Arguments:
layers -- list of (n_uints, activations) pairs that define network structure, including input layer X
Returns:
ret -- total number of learnable parameters
"""
ret = 0
for l in range(1, len(layers)):
ret = ret + layers[l][0] * (layers[l - 1][0] + 2)
return ret
|
def extract_name_from_job_arn(arn):
"""Returns the name used in the API given a full ARN for a training job or
hyperparameter tuning job.
Args:
arn:
"""
slash_pos = arn.find("/")
if slash_pos == -1:
raise ValueError("Cannot parse invalid ARN: %s" % arn)
return arn[(slash_pos + 1) :]
|
def clean(str):
"""
Goes through the string and ensures that every char is a known character.
Keyword arguments:
str -- the string to be cleaned up.
Returns:
This function returns a clean String with bad characters removed
"""
outStr = ""
for i in range(len(str)):
if(str[i] in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!@$%^&*()_+-=`~{}[]\|;:\'\\,./<>?\"\#"):
outStr += str[i]
return(outStr)
|
def prefix(string, string_prefix):
"""
Check to see if a prefix is present in a string
If not, add it
:param string: string - string to check against
:param string_prefix: string - prefix to check
"""
if string.startswith(string_prefix):
return string
else:
return '%s%s' % (string_prefix, string)
|
def clean(s):
"""Clean up a string"""
if s is None:
return None
s = s.replace("\n", " ")
s = s.replace(" ", " ")
s = s.strip()
return s
|
def brix_to_sg(brix):
"""
Degrees Brix to Specific Gravity
:param float brix: Degrees Brix
:return: Specific Gravity
:rtype: float
Source:
* http://www.brewersfriend.com/brix-converter/
"""
return (brix / (258.6 - ((brix / 258.2) * 227.1))) + 1
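# Usage sketch: 12 degrees Brix corresponds to a specific gravity of about 1.048.
print(round(brix_to_sg(12.0), 3))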
|
def check_if_california(zip_code):
"""Checks whether the given zip code is in California.
The first three digits of a California zip code fall between 900 and 961.
Args:
zip_code (str): the zip code to check
Returns:
bool: True if the zip code is in California, False otherwise
"""
zip_three_digits = int(zip_code[0:3])
# True = in California
if 899 < zip_three_digits < 962:
return True
# False = not in California
else:
return False
|
def generic_strategy_wrapper(player, opponent, proposed_action, *args, **kwargs):
"""
Strategy wrapper functions should be of the following form.
Parameters
----------
player: Player object or subclass (self)
opponent: Player object or subclass
proposed_action: an axelrod.Action, C or D
The proposed action by the wrapped strategy
proposed_action = Player.strategy(...)
args, kwargs:
Any additional arguments that you need.
Returns
-------
action: an axelrod.Action, C or D
"""
# This example just passes through the proposed_action
return proposed_action
|
def IoU(boxA, boxB):
"""
Computes the Intersection over Union (IoU) metric, given
two bounding boxes.
Input:
"boxA": bounding box A
"boxB": bounding box B
Output:
"score": IoU score
"""
# Compute the intersection points of the two BBs
xLeft = max(boxA[0], boxB[0])
yLeft = max(boxA[1], boxB[1])
xRight = min(boxA[2], boxB[2])
yRight = min(boxA[3], boxB[3])
# Compute the area of the intersection rectangle
interArea = max(0, xRight - xLeft + 1) * max(0, yRight - yLeft + 1)
# Compute the area of both boxA and boxB rectangles
boxA_area = (boxA[2]-boxA[0] + 1)*(boxA[3]-boxA[1] + 1)
boxB_area = (boxB[2]-boxB[0] + 1)*(boxB[3]-boxB[1] + 1)
# Compute the intersection over union
score = interArea / float(boxA_area + boxB_area - interArea)
return score
|
def pd_convert_nested_name(col):
"""Convert nested name from kartverket geoJSON to simple"""
return col[0]["navn"]
|
def is_job_flow_done(job_flow):
"""Return True if the given job flow is done running."""
return hasattr(job_flow, 'enddatetime')
|
def parse_line(line: str) -> str:
"""Split the line on the colon followed by a space and return the right-hand part, stripped."""
return line.split(": ")[1].strip()
|
def preprocessing(s):
"""
Returns the string with every double backslash doubled (so LaTeX line breaks survive later string processing)
"""
return s.replace(r'\\', r'\\\\')
|
def get_arch_prefix(arch):
""" return archive table prefix """
if arch:
return "arch_"
else:
return ""
|
def rectangles_from_histogram(H):
"""Largest Rectangular Area in a Histogram
:param H: histogram table
:returns: area, left, height, right, rect. is [0, height] * [left, right)
:complexity: linear
"""
best = (float('-inf'), 0, 0, 0)
S = []
H2 = H + [float('-inf')] # extra element to empty the queue
for right in range(len(H2)):
x = H2[right]
left = right
while len(S) > 0 and S[-1][1] >= x:
left, height = S.pop()
# first element is area of candidate
rect = (height * (right - left), left, height, right)
if rect > best:
best = rect
S.append((left, x))
return best
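# Usage sketch: for heights [1, 3, 2] the best rectangle has height 2 over
# columns [1, 3), giving area 4.
print(rectangles_from_histogram([1, 3, 2]))  # (4, 1, 2, 3)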
|
def node_count(shape):
"""Total number of nodes.
The total number of nodes in a structured grid with dimensions given
by the tuple, *shape*. Where *shape* is the number of node rows and
node columns.
>>> from landlab.utils.structured_grid import node_count
>>> node_count((3, 4))
12
"""
assert len(shape) == 2
return shape[0] * shape[1]
|
def update_start(tournaments):
"""Update the start date for the tournaments query based on the most recent data"""
# This is fragile, but should only have to work like twice
return tournaments[0]['events'][0]['createdAt']
|
def clean(line):
""" Strip a string of non-alphanumerics (except underscores).
Can use to clean strings before using them in a database query.
Args:
line(unicode): String to clean.
Returns:
line(unicode): A string safe to use in a database query.
Examples:
>>> clean("Robert'); DROP TABLE Students;")
RobertDROPTABLEStudents
"""
return "".join(char for char in line if (char.isalnum() or "_" == char))
|
def ignore_null(k, v):
"""
Preprocessor for xmltodict.unparse that ignores keys with None value
"""
if v is None:
return None
return k, v
|
def __ngrams(s, n=3):
""" Raw n-grams from a sequence
If the sequence is a string, it will return char-level n-grams.
If the sequence is a list of words, it will return word-level n-grams.
Note: it treats space (' ') and punctuation like any other character.
>>> ngrams('This is not a test!')
[('T', 'h', 'i'), ('h', 'i', 's'), ('i', 's', ' '), ('s', ' ', 'i'),
(' ', 'i', 's'), ('i', 's', ' '), ('s', ' ', 'n'), (' ', 'n', 'o'),
('n', 'o', 't'), ('o', 't', ' '), ('t', ' ', 'a'), (' ', 'a', ' '),
('a', ' ', 't'), (' ', 't', 'e'), ('t', 'e', 's'), ('e', 's', 't'),
('s', 't', '!')]
>>> ngrams(["This", "is", "not", "a", "test!"])
[('This', 'is', 'not'), ('is', 'not', 'a'), ('not', 'a', 'test!')]
Args:
s: a string or a list of strings
n: an int for the n in n-gram
Returns:
list: tuples of char-level or word-level n-grams
"""
return list(zip(*[s[i:] for i in range(n)]))
|
def parse_line(line, names, types):
"""
parse line with expected names and types
Example:
parse_line(line, ['dt', 'ntstep', 'nbunch'], [float, int,int])
"""
x = line.split()
values = [types[i](x[i]) for i in range(len(x))]
return dict(zip(names, values))
|
def unescape(data):
"""Inverse of escape"""
escaping = False
record = ''
for byte in list(data):
if escaping:
record += byte
escaping = False
elif byte == '\x10':
escaping = True
else:
record += byte
return record
|
def sort_items(items: list, sort_by: str) -> list:
"""Sort list of dicts according to specified key."""
return sorted(items, key=lambda i: i[sort_by])
|
def is_url_callback(obj):
"""Check if ``obj`` can handle a URL callback.
:param obj: any :term:`function` to check
:return: ``True`` if ``obj`` can handle a URL callback
A URL callback handler is a callable that will be used by the bot to
handle a particular URL in an IRC message.
.. seealso::
Both :func:`sopel.plugin.url` :func:`sopel.plugin.url_lazy` make the
decorated function a URL callback handler.
"""
forbidden_attrs = (
'interval',
)
forbidden = any(hasattr(obj, attr) for attr in forbidden_attrs)
allowed_attrs = (
'url_regex',
'url_lazy_loaders',
)
allowed = any(hasattr(obj, attr) for attr in allowed_attrs)
return allowed and not forbidden
|
def check_requirement(line):
"""
Check this line for a requirement, which is indicated by a line beginning with "?? ".
If one is found, return that line with a newline at the end, as requirements are always
a complete line and formatted as a bulleted list. Replace the "??" with a "-" as well
so that the resulting list can later be Markdown formatted.
:param line: the line to check
:return: None or a formatted requirement
"""
if line[0:3] == "?? ":
return "- %s\n" % line[3:]
return None
|
def bool_formatter(bool_value):
"""
convert Python boolean to json/xml format of boolean
Parameters
----------
bool_value: bool
the boolean value that needs to be converted
Returns
-------
str
either "true" or "false"
"""
if isinstance(bool_value, bool):
if bool_value:
return("true")
else:
return("false")
else:
msg = "bool_value must be a boolean"
raise ValueError(msg)
|
def calculate_failures(trajectory):
""" Calculate number of failures
Args:
trajectory: list of bbox
Returns:
num_failures: number of failures
failures: failures point in trajectory, start with 0
"""
failures = [i for i, x in zip(range(len(trajectory)), trajectory)
if len(x) == 1 and x[0] == 2]
num_failures = len(failures)
return num_failures, failures
|
def get_lang(l):
"""Convert language abbr to full names"""
l = l.strip().lower()
h = dict(es='spanish', en='english', ar='arabic', it='italian', de='german')
return h.get(l, l)
|
def collided_with_level(player_state, previous_position):
"""
Called whenever the player bumps into a wall.
Usually, you just want to set player_state["position"] = previous_position
:param player_state: Our state
:param previous_position: Where were we before we bumped into the wall?
:return: the new PlayerState
"""
# Stop moving vertically whenever we bump into a wall
player_state["velocity"]["y"] = 0
player_state["position"] = previous_position
return player_state
|
def remove_repetidos(list):
"""[Removes duplicate elements from a list and returns a new list with those elements in ascending order]
Arguments:
list {[list]} -- [a list]
Returns:
[list] -- [elements in ascending order]
"""
lista2 = []
for elemento in list:
if elemento not in lista2:
lista2.append(elemento)
lista2.sort()
return lista2
|
def input_pad_to_len(words, padded_word_len, padded_char_len, word_padding=0, char_padding=0):
"""Pad words to 'padded_word_len' with padding if 'len(words) < padded_word_len'. If 'len(words) > padded_word_len', truncate words to `padded_word_len`. Then add padding so that each word has the same length 'padded_char_len'.
Example:
input_pad_to_len([[1, 2, 3],
[2, 3],
[3]], 5, 3, -1, 0)
==============> [[1, 2, 3],
[2, 3, 0],
[3, 0, 0],
[-1, 0, 0],
[-1, 0, 0]]
Args:
words (list): List of list of the word index.
padded_word_len (int): The length for padding a batch of sequences to the same length.
padded_char_len (int): The length for padding each word in the sentences to the same length.
word_padding (int): The index used to pad the input sequence.
char_padding (int): The index used to pad each word in the input sequence.
"""
if len(words) < padded_word_len:
words += [[word_padding]] * (padded_word_len - len(words))
elif len(words) > padded_word_len:
words = words[:padded_word_len]
else:
pass
words = [word + [char_padding] * (padded_char_len - len(word)) if len(word) < padded_char_len else word for word in words]
return words
|
def _correct_padding(val):
"""Correct padding for JWT
:param val: value to correct
"""
return val + '=' * (-len(val) % 4)  # adds no padding when len(val) is already a multiple of 4
|
def diff(what_should_be, what_is):
"""
Accept what_should_be (dict of name: ip)
and what_is (dict of name: ip)
Return dict of name: ip with ip from 'what_should_be' where 'what_is' does
not match 'what_should_be'.
"""
return {
name: ip
for name, ip in what_should_be.items()
if what_should_be[name] != what_is.get(name, None)
}
|
def jday(year, mon, day, hr, minute, sec):
"""Return two floats that, when added, produce the specified Julian date.
The first float returned gives the date, while the second float
provides an additional offset for the particular hour, minute, and
second of that date. Because the second float is much smaller in
magnitude it can, unlike the first float, be accurate down to very
small fractions of a second.
>>> jd, fr = jday(2020, 2, 11, 13, 57, 0)
>>> jd
2458890.5
>>> fr
0.58125
Note that the first float, which gives the moment of midnight that
commences the given calendar date, always carries the fraction
``.5`` because Julian dates begin and end at noon. This made
Julian dates more convenient for astronomers in Europe, by making
the whole night belong to a single Julian date.
This function is a simple translation to Python of the C++ routine
``jday()`` in Vallado's ``SGP4.cpp``.
"""
jd = (367.0 * year
- 7 * (year + ((mon + 9) // 12.0)) * 0.25 // 1.0
+ 275 * mon / 9.0 // 1.0
+ day
+ 1721013.5)
fr = (sec + minute * 60.0 + hr * 3600.0) / 86400.0;
return jd, fr
|
def convert_to_signed_int_32_bit(hex_str):
"""
Utility function to convert a hex string into a 32 bit signed hex integer value
:param hex_str: hex String
:return: signed 32 bit integer
"""
val = int(hex_str, 16)
if val > 0x7FFFFFFF:
val = ((val+0x80000000) & 0xFFFFFFFF) - 0x80000000
return val
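# Usage sketch:
print(convert_to_signed_int_32_bit("FFFFFFFF"))  # -1
print(convert_to_signed_int_32_bit("7FFFFFFF"))  # 2147483647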
|
def get_keys(tuple_of_tuples):
"""Processes a tuple of 2-element tuples and returns a tuple containing
the first component of each tuple.
"""
return tuple([t[0] for t in tuple_of_tuples])
|
def _get_settings_if_empty(settings):
"""Get settings if None are given."""
if settings is None:
return {}
return settings
|
def is_set_bit(byte, position):
"""Checks if n bit is set in byte.
:param byte: Single-byte integer
:type byte: int
:param position: Bit position
:type position: int
:return: True if the bit at position is set otherwise False
:rtype: bool
"""
return bool((byte >> position) & 0b1)
|
def quantize_sequences(sequences, alphabet):
"""Given the prescribed alphabet, quantize each character in each sequence
using its index in the alphabet.
input:
sequences: [str]
return:
[[int]]
"""
print("quantizing sequences...")
new_sequences = []
for sequence in sequences:
new_sequence = []
# add width to fit the conv2D of TF
for character in sequence.lower():
if character in alphabet:
new_sequence.append(alphabet.index(character))
else:
new_sequence.append(len(alphabet))
new_sequences.append(new_sequence)
return new_sequences
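# Usage sketch: characters outside the alphabet are mapped to len(alphabet).
alphabet = list("acgt")
print(quantize_sequences(["ACGx", "tag"], alphabet))  # [[0, 1, 2, 4], [3, 0, 2]]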
|
def CMakeStringEscape(a):
"""Escapes the string 'a' for use inside a CMake string.
This means escaping
'\' otherwise it may be seen as modifying the next character
'"' otherwise it will end the string
';' otherwise the string becomes a list
The following do not need to be escaped
'#' when the lexer is in string state, this does not start a comment
"""
return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
|
def obj_code(object_type, obj_id):
"""Generates default object code for `obj_type` and `obj_id`."""
return "{}-{}".format(object_type.upper(), obj_id)
|
def parse_header_fields(header):
""" Parse header fields into a python dict
Attributes:
header -- Header to be parsed
"""
fields = {}
for item in header.split(';'):
kv = item.split('=')
if len(kv) == 1:
fields[kv[0].strip()] = None
continue
kv = [x.strip() for x in kv]
fields[kv[0]] = kv[1].strip('"')
return fields
|
def uniq_by(predicate, list):
"""Returns a new list containing only one copy of each element in the original
list, based upon the value returned by applying the supplied function to
each list element. Prefers the first item if the supplied function produces
the same value on two items. R.equals is used for comparison"""
used = set()
return [x for x in list if predicate(x) not in used and not used.add(predicate(x))]
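# Usage sketch: the first item is kept for each distinct key value.
print(uniq_by(abs, [-1, 1, 2, -2, 3]))  # [-1, 2, 3]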
|
def merge(left, right):
"""Merge sort merging function."""
left_index, right_index = 0, 0
result = []
while left_index < len(left) and right_index < len(right):
if left[left_index] < right[right_index]:
result.append(left[left_index])
left_index += 1
else:
result.append(right[right_index])
right_index += 1
result += left[left_index:]
result += right[right_index:]
return result
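# Usage sketch: merging two already-sorted lists.
print(merge([1, 4, 7], [2, 3, 9]))  # [1, 2, 3, 4, 7, 9]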
|
def contrast_index(x1, x2, double=True):
"""
Calculate the contrast index between two arrays.
The contrast index is symmetrical with respect to
the choice of the baseline for comparison
Parameters
----------
x1 : ndarray of floats
An ndarray to compare. The contrast index will have positive values
where x1 > x2.
x2 : ndarray of floats
An ndarray to compare. The contrast index will have negative values
where x2 > x1.
double : bool, optional
Double the contrast index.
This makes it comparable to a percent difference.
Default: True
Returns
-------
contrast_index : ndarray of floats
Contrast index calculated by doing (x1 - x2) / (x1 + x2)
"""
ci = (x1 - x2) / (x1 + x2)
if double:
return ci * 2
else:
return ci
|
def get_args(add_help=True):
"""get_args
Parse all args using argparse lib
Args:
add_help: Whether to add -h option on args
Returns:
An object which contains many parameters used for inference.
"""
import argparse
parser = argparse.ArgumentParser(
description='PaddlePaddle Classification Training', add_help=add_help)
parser.add_argument('--model', default='alexnet', help='model')
parser.add_argument('--device', default='gpu', help='device')
parser.add_argument('--img-size', default=224, help='image size to export')
parser.add_argument(
'--save-inference-dir', default='.', help='path where to save')
parser.add_argument('--pretrained', default=None, help='pretrained model')
parser.add_argument('--num-classes', default=1000, help='num_classes')
args = parser.parse_args()
return args
|
def lamevercmp(x, y):
"""Compare LAME version strings.
alpha and beta versions are considered older.
Versions with sub minor parts or end with 'r' are considered newer.
:param x: The first version to compare.
:param y: The second version to compare.
:returns: Return negative if x<y, zero if x==y, positive if x>y.
"""
def cmp(a, b):
# This is Python2's built-in `cmp`, which was removed from Python3
# And depends on bool - bool yielding the integer -1, 0, 1
return (a > b) - (a < b)
x = x.ljust(5)
y = y.ljust(5)
if x[:5] == y[:5]:
return 0
ret = cmp(x[:4], y[:4])
if ret:
return ret
xmaj, xmin = x.split('.')[:2]
ymaj, ymin = y.split('.')[:2]
minparts = ['.']
# lame 3.96.1 added the use of r in the very short version for post releases
if (xmaj == '3' and xmin >= '96') or (ymaj == '3' and ymin >= '96'):
minparts.append('r')
if x[4] in minparts:
return 1
if y[4] in minparts:
return -1
if x[4] == ' ':
return 1
if y[4] == ' ':
return -1
return cmp(x[4], y[4])
|
def factor_trial_division(n):
"""
Factor any integer into a product of primes, 0, 1, and -1.
Returns a dictionary {<prime: exponent>}.
"""
if not n:
return {0:1}
factors = {}
if n < 0:
factors[-1] = 1
n = -n
if n==1:
factors[1] = 1
return factors
d = 2
while n % d == 0:
try:
factors[d] += 1
except KeyError:
factors[d] = 1
n //= d
d = 3
while n > 1 and d*d <= n:
if n % d:
d += 2
else:
try:
factors[d] += 1
except KeyError:
factors[d] = 1
n //= d
if n>1:
try:
factors[n] += 1
except KeyError:
factors[n] = 1
return factors
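# Usage sketch:
print(factor_trial_division(360))  # {2: 3, 3: 2, 5: 1}
print(factor_trial_division(-7))   # {-1: 1, 7: 1}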
|
def pretty(info):
"""Pretty print of exif data"""
string = """
Taken: {}
Source: {}
""".format(info["exif"].get(36867), info["exif"].get(42036))
return string
|
def power_data_energy_diff(measurement, net_string, f_val, direct_data):
"""Calculate differential energy."""
if "electricity" in measurement and "interval" not in net_string:
diff = 1
if "produced" in measurement:
diff = -1
if net_string not in direct_data:
direct_data[net_string] = 0
if isinstance(f_val, int):
direct_data[net_string] += f_val * diff
else:
direct_data[net_string] += float(f_val * diff)
direct_data[net_string] = float(f"{round(direct_data[net_string], 3):.3f}")
return direct_data
|
def offset_axis(axis):
"""Axis handling logic that is generic to all reductions."""
flatten = axis is None
if flatten:
axis = 0
else:
if axis < 0:
# The case of a negative input axis needs compensation, because we
# are adding a dimension by ourselves
axis -= 1
return flatten, axis
|
def get_count(symbols):
"""
Examples:
>>> get_count(['Li', 'H', 'Li'])
{'Li': 2, 'H': 1}
"""
return dict([(e, symbols.count(e)) for e in set(symbols)])
|
def sanitize_labels(labels: dict):
"""Given prometheus metric sample labels, returns labels dict suitable for graphite 2.0"""
new_labels = dict()
for label, value in labels.items():
if value.strip() == "":
continue
nv = value.replace(" ", "_")
new_labels[label] = nv
return new_labels
|
def get_label_src(label):
""" returns the flow source label """
return label.split(' - ')[0]
|
def dcf_to_swap(dcf):
"""
Helper function transforms sorted discount factors to swap rates.
:param dcf: discount factors
:return: par swap rates
"""
num_dcf = len(dcf)
swap_rates = num_dcf * [0]
for index, dcf_ in enumerate(dcf):
if index == 0:
swap_rates[index] = (1 / dcf_) - 1
else:
swap_rates[index] = (1 - dcf_) / sum(dcf[0:index + 1])
return swap_rates
|
def string2token(t,nl,nt):
"""
This function takes a string and returns a token. A token is a tuple
where the first element specifies the type of the data stored in the
second element.
In this case the data types are limited to numbers, either integer, real
or complex, and strings. The types are denoted as follows:
i - integer
f - float/real
c - complex
s - string
For navigational purposes two more elements are added to identify the line
number (nl) the token was on, and the token number (nt) within the line.
"""
try:
i_a = int(t)
#
# Toldiff should recognise that -0 and 0 are the same, however, in
# a text based comparison that is not automatic so we have to force this.
#
if i_a == 0:
i_a = 0
token = ("i",i_a,nl,nt)
except ValueError:
#
# In Fortran double precision constants are often printed with a
# "D" for the exponent rather than an "E", i.e. 1.0E+01 might be
# printed as 1.0D+01 in Fortran. Python is not aware of this convention
# so we need to replace any potential "D"-s to obtain valid floating
# values.
#
z = t.replace("d","e")
z = z.replace("D","e")
try:
i_f = float(z)
#
# Toldiff should recognise that -0.0 and 0.0 are the same, however,
# in a text based comparison that is not automatic so we have to
# force this.
#
if i_f == 0.0:
i_f = 0.0
token = ("f",i_f,nl,nt)
except ValueError:
#
# The handling of complex numbers is unlikely to work in practice
# as in most cases complex numbers are printed as (1.0,2.0)
# rather than 1.0+2.0j. Therefore it is impossible to reliably
# distinguish between a complex number and a list of 2 real numbers.
#
try:
i_c = complex(z)
#
# Toldiff should recognise that x-0.0j and x+0.0j and that
# -0.0+y*j and 0.0+y*j are the same, however, in a text based
# comparison that is not automatic so we have to force this.
#
if i_c.real == 0.0:
i_c = complex(0.0,i_c.imag)
if i_c.imag == 0.0:
i_c = complex(i_c.real,0.0)
token = ("c",i_c,nl,nt)
except ValueError:
token = ("s",t,nl,nt)
return token
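# Usage sketch: Fortran-style exponents are recognised as floats,
# and anything unparsable falls back to a string token.
print(string2token("1.0D+01", 3, 2))  # ('f', 10.0, 3, 2)
print(string2token("abc", 3, 3))      # ('s', 'abc', 3, 3)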
|
def read_enabled_tests(filename):
"""Read list of enabled tests from specified file."""
with open(filename) as fin:
content = fin.readlines()
return [line.strip() for line in content]
|
def nonrigid_tors(spc_mod_dct_i, rotors):
""" Determine if a nonrigid torsional model is specified and further
information is needed from the filesystem
"""
vib_model = spc_mod_dct_i['vib']['mod']
tors_model = spc_mod_dct_i['tors']['mod']
has_tors = bool(any(rotors))
tors_hr_model = bool(
tors_model in ('1dhr', '1dhrf', '1dhrfa', 'mdhr', 'mdhrv'))
tau_hr_model = bool(tors_model == 'tau' and vib_model != 'vib')
# diatomic model?
return has_tors and (tors_hr_model or tau_hr_model)
|
def output(tf_var):
"""
Generate command for 'terraform output <tfvar>'.
Args:
tf_var: name of terraform output variable to return the output of.
Returns:
tf_command: list of command-line arguments to run terraform with.
"""
tf_command = ['terraform', 'output', tf_var]
return tf_command
|
def leading_zeros(string, delimiter='_', n_digits=3, exclude=None):
"""
Returns a new string with added leading zeros.
Parse string and for every section (as is
indicated by delimiter) try to convert it to an
integer and if possible, add zeros (n_digits).
It is possible to exclude part of the string
(e.g. the file extension) by passing a string
"exclude". Only the part of the string up to the
first occurrence of that string is parsed.
"""
if exclude:
fname = string[:string.find(exclude)]
ext = exclude
else:
fname = string
ext = ''
fname_info = fname.split(delimiter)
for i, info in enumerate(fname_info):
try:
# try converting string to int
num = int(info)
# if succeeded convert int to string with n_digits
nnum = '%0' + str(n_digits) + 'd'
nnum %= num
# change string entry
fname_info[i] = nnum
except ValueError:
pass
return delimiter.join(fname_info) + ext
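# Usage sketch (hypothetical file name):
print(leading_zeros('run_7_scan_12.dat', exclude='.dat'))  # run_007_scan_012.dat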
|
def search_linear(xs, target):
""" Find and return the index of target in sequence xs """
for i in range(len(xs)):
if xs[i]== target:
return i
return -1
|
def device_from_tuple(job, taskid, devtype, devid):
""" Simply build the device string associated with the given tuple.
Args:
job Job name
taskid Task ID
devtype Device type
devid Device ID
Returns:
Associated device name
"""
return "/job:" + job + "/replica:0/task:" + taskid + "/device:" + devtype + ":" + devid
|
def GetFoldTypeFromAnnotation(line):#{{{
"""Get foldType from the annotation line of the topomsa file
In the *.topomsa files, the header line has now been tagged with
fold-type (5B,7B, 6R - 5 helical broken fold-type, 7 helical broken
fold-type , 6 helical broken fold-type) in column 3.
Example from Full_alignment
>SBF_like|A0A077PBM8|tmname:TM1-S1;TM2-S1;TM3-BC1;TM4-BC1;TM5-BC1;TM6-S2;TM7-S2;TM8-BC2;TM9-BC2;TM10-BC2|5B
Example from Repeat_alignment
>Glt_symporter-NR|P73275_SYNY3|tmname:TM1-S1;TM2-S1;TM3-S1;TM4-RC1;RH-RC1;TM6-RC1|6R
"""
foldType = ""
if line:
li = line.split('|')
if len(li) >= 4:
foldType = li[3]
return foldType
|
def get_min_key(d):
"""Return the key from the dict with the min value."""
return min(d, key=d.get)
|
def dimensionize(maybe_a_list, nd=2):
"""Convert integers to a list of integers to fit the number of dimensions if
the argument is not already a list.
For example:
`dimensionize(3, nd=2)`
will produce the following result:
`[3, 3]`.
`dimensionize([3, 1], nd=2)`
will produce the following result:
`[3, 1]`.
For more information, refer to:
- https://github.com/guillaume-chevalier/python-conv-lib/blob/master/conv/conv.py
- https://github.com/guillaume-chevalier/python-conv-lib
- MIT License, Copyright (c) 2018 Guillaume Chevalier
"""
if not hasattr(maybe_a_list, '__iter__'):
# Argument is probably an integer so we map it to a list of size `nd`.
now_a_list = [maybe_a_list] * nd
return now_a_list
else:
# Argument is probably an `nd`-sized list.
return maybe_a_list
|
def combine_channel_number(major: int, minor: int) -> str:
"""Create a combined channel number from its major and minor."""
if minor == 65535:
return str(major)
return "%d-%d" % (major, minor)
|
def truncate(s, amount) -> str:
"""Return a string that is no longer than the amount specified."""
if len(s) > amount:
return s[:amount - 3] + "..."
return s
|
def get_average_poscount(pos_c):
"""Return average of positional counts."""
avg = {}
for key, value in pos_c.items():
avg[key] = sum(value.values()) / len(value)
total_counts = sum(avg.values())
for key, value in avg.items():
try:
avg[key] = value / total_counts
except ZeroDivisionError:
avg[key] = value
return avg
|
def parse_options_from_settings(settings, settings_prefix):
"""Parse out options."""
def sget(name, default=None):
return settings.get(settings_prefix + name, default)
static_url_prefix = sget('static_url_prefix', '/static/crud')
if static_url_prefix == 'None':
static_url_prefix = None
return dict(
static_url_prefix=static_url_prefix,
)
|
def baby_names_collapsed_from_list(a_baby_names_list):
"""
a_baby_names_list is a list of lists, each element [name, rank]
Collapse list element to a string
"""
print('baby_names_collapsed_from_list')
baby_names_collapsed = []
for baby_element in a_baby_names_list:
baby_names_collapsed.append('{} {}'.format(baby_element[0], baby_element[1]))
#print(baby_names_collapsed)
return baby_names_collapsed
|
def getDP(directionPointer: int) -> str:
"""
Finds the correct direction pointer string
:param directionPointer: Input direction pointer
:return: direction pointer string
"""
if directionPointer == 0:
return 'r'
if directionPointer == 1:
return 'd'
if directionPointer == 2:
return 'l'
return 'u'
|
def eat_alpha(input_str, index):
""" Helper for explode_version """
first = index
while index < len(input_str):
if not input_str[index].isalpha():
break
index += 1
return input_str[first:index], index
|
def one(iterable, too_short=None, too_long=None):
"""Return the only item from iterable.
Return the first item from *iterable*, which is expected to contain only
that item. Raise an exception if *iterable* is empty or has more than one
item. This function was taken from `more_itertools.one`.
"""
it = iter(iterable)
try:
value = next(it)
except StopIteration:
raise too_short or ValueError('Too few items in iterable (expected 1)')
try:
next(it)
except StopIteration:
pass
else:
raise too_long or ValueError('Too many items in iterable (expected 1)')
return value
|
def chars_various_feats(clean_text):
"""
Args:
clean_text: (str) The cleaned text from urls and entities.
Returns:
dict: counts of uppercase, lowercase, digit and letter characters, plus the letters-to-digits ratio.
"""
n_uppercase = sum(1 for c in clean_text if c.isupper())
n_lowercase = sum(1 for c in clean_text if c.islower())
n_digits = sum(c.isdigit() for c in clean_text)
n_letters = sum(c.isalpha() for c in clean_text)
if n_digits == 0:
ratio_letters_digits = 1 # todo: check this value here if it is OK
else:
ratio_letters_digits = n_letters / n_digits
chars_feats_dict = {
"n_uppercase": n_uppercase,
"n_lowercase": n_lowercase,
"n_digits": n_digits,
"n_letters": n_letters,
"ratio_letters_digits": ratio_letters_digits
}
return chars_feats_dict
|
def _prod(sizes):
"""
Product of list of sizes. It is faster than numpy.prod and torch.prod.
Parameter
---------
sizes : list or tuple
Size of inputs, output, or weights, usually 2/3/4 dimensions.
Performance
-----------
profile : 20 it/s
torch.prod : 500 it/s
numpy.prod : 700 it/s
_prod : 1500 it/s
"""
ans = 1
for s in sizes:
ans *= s
return ans
|
def get_commands_remove_vrrp(group):
"""Gets commands to remove a vrrp group on an interface
Args:
group (str): vrrp group
Returns:
list: ordered list of commands to remove the vrrp group
Note:
Specific for Ansible module(s). Not to be called otherwise.
"""
commands = []
commands.append('no vrrp ' + group)
return commands
|