def generate_template(topic_entity, sentence):
"""
    Given the topic entities and the system response sentence, return the sketch response with each entity replaced by an '@topic_a' / '@topic_b' placeholder.
"""
sketch_response = []
for word in sentence.split():
if word not in topic_entity:
sketch_response.append(word)
else:
if word == topic_entity[0]:
ent_type = 'topic_a'
else:
ent_type = 'topic_b'
sketch_response.append('@' + ent_type)
sketch_response = " ".join(sketch_response)
return sketch_response
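
# Illustrative usage (added sketch, not from the original source; the entity values are
# made up). Entities are matched token-by-token, so multi-word entities are not handled.
assert generate_template(["paris", "france"],
                         "paris is the capital of france") == "@topic_a is the capital of @topic_b"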
|
def null_distance_results(string1: str, string2: str, max_distance: int) -> int:
"""Determines the proper return value of an edit distance function when one
or both strings are null.
Args:
        string1: Base string.
        string2: The string to compare.
max_distance: The maximum distance allowed.
Returns:
-1 if the distance is greater than the max_distance, 0 if the strings are
equivalent (both are None), otherwise a positive number whose
magnitude is the length of the string which is not None.
"""
if string1 is None:
if string2 is None:
return 0
return len(string2) if len(string2) <= max_distance else -1
return len(string1) if len(string1) <= max_distance else -1
|
def f(m, n):
"""
Parameters
----------
m : int
n : int
Returns
-------
int
"""
if m == 0:
return 2
else:
sum_ = 3*f(m-1, n)
i = 2
while m-i >= 0:
sum_ += (-1)**(i+1) * f(m-i, n)
i += 1
sum_ += ((-1)**(n % 2))*(m % 2)
return sum_
|
def is_numeric(text):
"""Migration artifact."""
try:
_ = float(text)
return True
except (TypeError, ValueError):
return False
|
def pseudocount(array):
""" add pseudo-counts to array """
array = [x+1 for x in array]
return array
|
def _convert_ifindex_to_ifname(ifindex):
"""_convert_ifindex_to_ifname. In case local_link_information is
obtained by inspector, VSG TOR will send snmp ifIndex in
port id TLV, which is not known to VSD, here we assume that numeric
value is snmp ifIndex and do conversion, otherwise it is a port
mnemonic.
High Port Count format:
32 bits unsigned integer, from most significant to least significant:
3 bits: 000 -> indicates physical port
4 bits: slot number
2 bits: High part of port number
2 bits: mda number
6 bits: Low part of port number
15 bits: channel number
High and low part of port number need to be combined to create 8 bit
unsigned int
"""
if not ifindex:
return None
if not ifindex.isdigit():
return ifindex
return "%s/%s/%s" % (
(int(ifindex) >> 25),
(int(ifindex) >> 21) & 0x3,
((int(ifindex) >> 15) & 0x3f) | ((int(ifindex) >> 17) & 0xc0))
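
# Illustrative usage (added sketch): pack slot=1, mda=2, port=5, channel=0 into the
# high-port-count layout described in the docstring, then convert it back. Non-numeric
# port mnemonics pass through unchanged.
_example_ifindex = (1 << 25) | (2 << 21) | (5 << 15)  # slot / mda / low part of port
assert _convert_ifindex_to_ifname(str(_example_ifindex)) == "1/2/5"
assert _convert_ifindex_to_ifname("1/1/7") == "1/1/7"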
|
def H(value):
"""Step function of H(x)=1 if x>=0 and zero otherwise. Used for the
temporal laplacian matrix."""
if value >= 0:
return 1
return 0
|
def make_protobuf_requirement(major: int, minor: int, patch: int) -> str:
""" Sometimes the versions of libprotoc and the python package `protobuf` are out of sync.
For example, while there was protoc version 3.12.3, the latest release of
`protobuf` was 3.12.2. So we'll just depend on `x.y.0` and hope that
    there are no breaking changes between patches.
"""
del patch
return f"protobuf~={major}.{minor}.{0}"
|
def convert_words_to_numbers(tokens):
"""
Converts numbers in word format into number format
>>> convert_words_to_numbers(['five', "o'clock"])
['5', "o'clock"]
>>> convert_words_to_numbers(['seven', "o'clock"])
['7', "o'clock"]
"""
number_words = ["zero", "one", "two", "three", "four", "five", "six",
"seven", "eight", "nine", "ten", "eleven", "twelve"]
for index, token in enumerate(tokens):
if token.lower() in number_words:
tokens[index] = str(number_words.index(token.lower()))
return tokens
|
def NOT(a):
""" if a eq 0 return 1
else return 0 """
if(a == 0):
return 1
else:
return 0
|
def project_str(project):
"""Format project as a string."""
return f"[{project['pk']}] {project['name']}"
|
def _option_boolean(arg):
# pylint: disable=no-else-return
"""Check boolean options."""
if not arg or not arg.strip(): # no argument given, assume used as a flag
return True
elif arg.strip().lower() in ("no", "0", "false"):
return False
elif arg.strip().lower() in ("yes", "1", "true"):
return True
else:
raise ValueError('"{}" unknown boolean'.format(arg))
|
def filter_interx_elements(element_key_list):
"""
Parameters
----------
element_key_list :
Returns
-------
"""
tmp_element_list = []
for element in element_key_list:
if element in ['taxonomy-prior','sequence-representative','sequence-aligned','sequence-accession','map-rep2tid','taxonomy-sheet','stat-reps','stat-reps']:
tmp_element_list.append(element)
return tmp_element_list
|
def generarValorA(valor):
"""
Genera el valor de L del string de color
param: valor: string de color
"""
L, a, b = valor.split("/")
return float(a)
|
def identical_item(list1, list2):
"""Returns the first common element.
Return values:
If a common element in list1 (<list>) and list2 (<list>) was
found, that element will be returned. Otherwise, <None> will be
returned.
"""
for item in list1:
for part in list2:
if item == part:
return item
return None
|
def szudzik_pairing_function(a, b):
"""
a, b >= 0
See Also
--------
http://stackoverflow.com/questions/919612/mapping-two-integers-to-one-in-a-unique-and-deterministic-way
    Parameters
    ----------
    a : int
        Non-negative integer.
    b : int
        Non-negative integer.
    Returns
    -------
    int
        A unique non-negative integer encoding the pair (a, b).
    """
if a >= b:
return a * a + a + b
return a + b * b
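
# Added sketch of the inverse mapping (assumes a, b >= 0, as noted above); the helper
# name szudzik_unpair is not from the original source.
import math

def szudzik_unpair(z):
    """Recover (a, b) from szudzik_pairing_function(a, b)."""
    w = math.isqrt(z)
    return (w, z - w * w - w) if z - w * w >= w else (z - w * w, w)

assert szudzik_pairing_function(1, 2) == 5
assert szudzik_pairing_function(2, 1) == 7
assert szudzik_unpair(szudzik_pairing_function(12, 7)) == (12, 7)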
|
def to_camel_case(s):
"""Given a sring in underscore form, returns a copy of it in camel case.
eg, camel_case('test_string') => 'TestString'. """
return ''.join(map(lambda x: x.title(), s.split('_')))
|
def schedule_properties(period, periodUnit, startTime=None, endTime=None):
"""
    :param period: period
    :type period: int
    :param periodUnit: periodUnit
    :type periodUnit: str
:param startTime: startTime
:param endTime: endTime
:return: schedule
"""
schedule = {
'period': period,
'periodUnit': periodUnit,
}
if startTime is not None:
schedule['startTime'] = startTime
if endTime is not None:
schedule['endTime'] = endTime
return schedule
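
# Illustrative usage (added sketch; the period unit and times below are hypothetical):
assert schedule_properties(5, "MINUTES", startTime="08:00") == {
    'period': 5, 'periodUnit': 'MINUTES', 'startTime': '08:00'}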
|
def hash_function(seed, tract, patch, band):
"""Generate a hash key given the base seed and metadata
"""
band_map = {"u": 1, "g": 2, "r": 3, "i": 4, "z": 5, "y": 6}
    # Add a linear combination of metadata weighted by large Mersenne-number constants
hash_key = seed + 131071*tract + 524287*patch + 8388607*band_map[band]
return hash_key
|
def construct_aggs(aggs, size=0):
"""
    Construct the Elasticsearch aggregations query according to a list of
    parameters that must be aggregated.
"""
data = {}
point = None
def join(field, point=None):
default = {
field: {
"terms": {
"field": field,
"size": size
},
"aggs": {
"access_total": {
"sum": {
"field": "access_total"
}
}
}
}
}
if point:
point['aggs'].setdefault(field, default[field])
return point['aggs'][field]
else:
data.setdefault('aggs', default)
return data['aggs'][field]
for item in aggs:
point = join(item, point=point)
return data
|
def validate_scope_OAuth2(required_scopes, token_scopes):
"""
Validate required scopes are included in token scope
    :param required_scopes: Required scopes to access the called API
    :type required_scopes: List[str]
    :param token_scopes: Scopes present in the token
    :type token_scopes: List[str]
:return: True if access to called API is allowed
:rtype: bool
"""
return set(required_scopes).issubset(set(token_scopes))
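
# Illustrative usage (added sketch with made-up scope names):
assert validate_scope_OAuth2(["read"], ["read", "write"]) is True
assert validate_scope_OAuth2(["admin"], ["read", "write"]) is False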
|
def set_offset_values(row,offset_dir,offset_amt,tbdata):
"""
Fill beam offset columns with values
This sets field values for the position offset based on what is
in the EAC macro log. It is a DSN extension of a GBT convention.
@param row : int
Table row to be filled
@param offset_dir : string
String as used in EAC logs: AZ, AZEL, etc.
@param offset_amt : float or list of two floats
Amount of offset in degrees. If a string is given, it is
converted to float.
@param tbdata : SDFITS table
@return: Boolean
True if the offset direction was recognized; otherwise False
"""
success = True
if offset_dir == "AZ":
tbdata.field('BEAMAOFF')[row] = float(offset_amt)
tbdata.field('BEAMXOFF')[row] = 0.
tbdata.field('BEAMEOFF')[row] = 0.
elif offset_dir == "XEL":
tbdata.field('BEAMAOFF')[row] = 0.
tbdata.field('BEAMXOFF')[row] = float(offset_amt)
tbdata.field('BEAMEOFF')[row] = 0.
elif offset_dir == "EL":
tbdata.field('BEAMAOFF')[row] = 0.
tbdata.field('BEAMXOFF')[row] = 0.
tbdata.field('BEAMEOFF')[row] = float(offset_amt)
elif offset_dir == "AZEL":
tbdata.field('BEAMAOFF')[row] = float(offset_amt[0])
tbdata.field('BEAMXOFF')[row] = 0.
tbdata.field('BEAMEOFF')[row] = float(offset_amt[1])
elif offset_dir == "XELEL":
tbdata.field('BEAMAOFF')[row] = 0.
tbdata.field('BEAMXOFF')[row] = float(offset_amt[0])
tbdata.field('BEAMEOFF')[row] = float(offset_amt[1])
elif offset_dir == None:
tbdata.field('BEAMAOFF')[row] = 0.
tbdata.field('BEAMXOFF')[row] = 0.
tbdata.field('BEAMEOFF')[row] = 0.
elif offset_dir == "HA":
tbdata.field('BEAMHOFF')[row] = float(offset_amt)
tbdata.field('BEAMCOFF')[row] = 0.
tbdata.field('BEAMDOFF')[row] = 0.
elif offset_dir == "XDEC":
tbdata.field('BEAMHOFF')[row] = 0.
tbdata.field('BEAMCOFF')[row] = float(offset_amt)
tbdata.field('BEAMDOFF')[row] = 0.
elif offset_dir == "DEC":
tbdata.field('BEAMHOFF')[row] = 0.
tbdata.field('BEAMCOFF')[row] = 0.
tbdata.field('BEAMDOFF')[row] = float(offset_amt)
elif offset_dir == "HADEC":
tbdata.field('BEAMHOFF')[row] = float(offset_amt[0])
tbdata.field('BEAMCOFF')[row] = 0.
tbdata.field('BEAMDOFF')[row] = float(offset_amt[1])
elif offset_dir == "XDECDEC":
tbdata.field('BEAMHOFF')[row] = 0.
tbdata.field('BEAMCOFF')[row] = float(offset_amt[0])
tbdata.field('BEAMDOFF')[row] = float(offset_amt[1])
    else:
        success = False
    return success
|
def get_package(version):
"""Get package name based on version.
Package changed to orion instead of orion.core at 0.1.6
"""
if version >= '0.1.6':
return 'orion'
return 'orion.core'
|
def da(basic):
""" da Is 80% Of Basic Salary """
da = basic*80/100
return da
|
def operators_identical(op1, op2):
"""Checks if two BNICE EC operators are eachother's reverse/identical."""
for e1, e2 in zip(op1.split('.'), op2.split('.')):
if e1.lstrip('-') != e2.lstrip('-'):
return False
return True
|
def format_as_command(text: str) -> str:
"""
    Uppercase the text and strip leading and trailing whitespace
"""
return text.upper().strip()
|
def ParseIssueRef(ref_str):
"""Parse an issue ref string: e.g., 123, or projectname:123 into a tuple.
Raises ValueError if the ref string exists but can't be parsed.
"""
if not ref_str.strip():
return None
if ':' in ref_str:
project_name, id_str = ref_str.split(':', 1)
project_name = project_name.strip().lstrip('-')
else:
project_name = None
id_str = ref_str
id_str = id_str.lstrip('-')
return project_name, int(id_str)
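
# Illustrative usage (added sketch; the project name and issue numbers are hypothetical):
assert ParseIssueRef("123") == (None, 123)
assert ParseIssueRef("chromium:456") == ("chromium", 456)
assert ParseIssueRef("   ") is None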
|
def get_as_dict(x):
"""Return an object as a dictionary of its attributes"""
if isinstance(x, dict):
return x
else:
try:
return x._asdict()
except AttributeError:
return x.__dict__
|
def get_gts(gtypes):
"""Convert cyvcf2 genotypes (0,1,3,2=missing) to ALT allele dosage (0,1,2,-1=missing)"""
if 2 in gtypes:
return [-1]*len(gtypes)
else:
return [ ([0,1,3]).index(i) for i in gtypes ]
|
def sum_signs(exprs):
"""Give the sign resulting from summing a list of expressions.
Args:
        exprs: A list of expressions with is_positive()/is_negative() methods.
Returns:
The sign (is pos, is neg) of the sum.
"""
is_pos = all([expr.is_positive() for expr in exprs])
is_neg = all([expr.is_negative() for expr in exprs])
return (is_pos, is_neg)
|
def get_column(matrix, column):
"""
Get the column of a matrix ( List ) composed
of list or tuple that represents each row of
a matrix.
    :param matrix: List containing [ row0, row1, ... rown] where row_i = [ ai0, ai1, ai2, ... ain]
:param column: Column number ( Example: k to get column k)
:return: Column k or [ a0k, a1k, a2k, ... aMk ]
Example:
x y z
5 43 83
52 99 70
78 27 86
26 84 49
Represented as:
[
(x0, y0, z0),
    (x1, y1, z1),
(x2, y2, z2),
...
(xn, yn, zn)
]
[(5, 43, 83),
(52, 99, 70),
(78, 27, 86),
(26, 84, 49),]
Each List
>>> M = [(5.0, 52.0, 78.0, 26.0), (43.0, 99.0, 27.0, 84.0), (83.0, 70.0, 86.0, 49.0)]
>>> get_column(M, 0)
[5.0, 43.0, 83.0]
>>> get_column(M, 1)
[52.0, 99.0, 70.0]
>>> get_column(M, 2)
[78.0, 27.0, 86.0]
>>> get_column(M, 3)
[26.0, 84.0, 49.0]
"""
return list(map(lambda e: e[column], matrix))
|
def ERR_NOADMININFO(sender, recipient, message):
""" Error Code 423 """
return "ERROR from <" + sender + ">: " + message
|
def is_subclass(cls, class_or_tuple):
"""
Return whether 'cls' is a derived from another class or is the same class.
Does not raise `TypeError` if the given `cls` is not a class.
"""
try:
return issubclass(cls, class_or_tuple)
except TypeError:
return False
|
def getFreePlaceValues(freePlaceMap):
"""
    Returns the number of free places for every area marked on the freePlaceMap
    by iterating over the map and counting the cells of each area.
    The returned list starts with area 1 at index 0.
:param freePlaceMap: The generated FreePlaceMap
:return: The Amounts of free Places for each Area on the FreePlaceMap
"""
if freePlaceMap is None or len(freePlaceMap) <= 1 or len(freePlaceMap[0]) <= 1:
return
values = []
# Iterate over the Map
for y in range(len(freePlaceMap)):
for x in range(len(freePlaceMap[0])):
pointValue = freePlaceMap[y][x]
if pointValue != -1:
# Add one to the Area-Counter
if len(values) >= pointValue:
values[pointValue - 1] += 1
else:
values.append(1)
return values
|
def get_allowed_actions(action_parameters):
"""
    Gets the name of each action and creates a list of them,
    which is later used as a whitelist of actions.
:param action_parameters:
:return:
"""
return [action_parameters[action_name]['action'] for action_name in action_parameters]
|
def split_sentence(sentence: str) -> list:
"""
Takes a sentence in IPA and parses it to individual words by breaking according to
the " # " IPA string pattern.
:sentence: sentence to parse
:returns: list of individual words
:rtype: list
"""
words = sentence.split(" # ")
return words
|
def rotate(seq, offset):
""" Rotates a seq to the left by offset places; the elements shifted off
are inserted back at the end. By setting offset to the negative number -N,
this function rotates the sequence N places.
Note: this algorithm is a modification of one originally provided by
Thorsten Kampe; this version handles zero sequences and right shifts. See:
http://bytes.com/topic/python/answers/36070-rotating-lists
"""
if (len(seq) == 0) or (offset == 0):
return seq;
else:
offsetmod = offset % len(seq);
return seq[offsetmod:] + seq[:offsetmod];
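
# Illustrative usage (added sketch):
assert rotate([1, 2, 3, 4, 5], 2) == [3, 4, 5, 1, 2]    # left rotation
assert rotate([1, 2, 3, 4, 5], -1) == [5, 1, 2, 3, 4]   # right rotation via a negative offset
assert rotate([], 3) == []                               # zero-length sequences pass through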
|
def isac(c):
"""
A simple function, which determine whether the
string element char c is belong to a decimal number
:param c: a string element type of char
:return: a bool type of determination
"""
try:
int(c)
return True
except:
if c == '.' or c == '-' or c == 'e':
return True
else:
return False
|
def show_transaction(transactions):
"""datetime, Rental(obj), status(str) -> None"""
trans_string = ""
for transaction in transactions:
trans_string += ("\nDatetime: " + str(transaction[0]) + "\nRental:" + str(transaction[1]) +
"\nstatus " + str(transaction[2]) + "\n")
return trans_string
|
def _violate_descending(values, descending):
"""
Check if values violate descending restrictions.
"""
for i in descending:
if values[i - 1] < values[i]:
return True
return False
|
def join_overlapping(s, e):
"""Join overlapping intervals.
Transforms a list of possible overlapping intervals into non-overlapping
intervals.
Parameters
----------
s : list
List with start of interval sorted in ascending order
e : list
List with end of interval.
Returns
-------
tuple
`tuple` (s, e) of non-overlapping intervals.
"""
rs = []
re = []
n = len(s)
if n == 0:
return (rs, re)
l = s[0]
r = e[0]
for i in range(1, n):
if s[i] > r:
rs.append(l)
re.append(r)
l = s[i]
r = e[i]
else:
r = max(r, e[i])
rs.append(l)
re.append(r)
return (rs, re)
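
# Illustrative usage (added sketch; starts must already be sorted ascending):
# [1, 5] and [2, 4] overlap and are merged, [8, 10] stays separate.
assert join_overlapping([1, 2, 8], [5, 4, 10]) == ([1, 8], [5, 10])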
|
def hasNumbers(inputString: str) -> bool:
"""
Check if the provision candidate contains a digit.
"""
return any(char.isdigit() for char in inputString)
|
def elasticsearch_type_family(mapping_type: str) -> str:
"""Get the family of type for an Elasticsearch mapping type."""
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-types.html
return {
# range types
"long_range": "range",
"double_range": "range",
"date_range": "range",
"ip_range": "range",
# text search types
"annotated-text": "text",
"completion": "text",
"match_only_text": "text",
"search-as_you_type": "text",
# keyword
"constant_keyword": "keyword",
"wildcard": "keyword",
# date
"date_nanos": "date",
# integer
"token_count": "integer",
"long": "integer",
"short": "integer",
"byte": "integer",
"unsigned_long": "integer",
# float
"double": "float",
"half_float": "float",
"scaled_float": "float",
}.get(mapping_type, mapping_type)
|
def _get_gammas(F_i, F_n, F_star):
""" Compute different gamma values """
gamma1, gamma2, gamma3 = (
F_i * (F_star - F_n),
F_star * (F_n - F_i),
F_n * (F_star - F_i),
)
return gamma1, gamma2, gamma3
|
def write_txt(w_path, val):
"""
Write a text file from the string input.
"""
with open(w_path, "w") as output:
output.write(val + '\n')
return None
|
def none(value, nonchar=''):
"""Converts ``None`` to ``''``.
Similar to ``|default('', true)`` in jinja2 but more explicit.
"""
if value is None:
return nonchar
return value
|
def get_spanner_instance_id(project_id, base_name):
""" Generate the instance URL """
return "projects/{}/instances/{}".format(project_id, base_name)
|
def make_dict_lowercase(dictionary):
"""Make dictionary lowercase"""
lowercase_dict = {
k.lower(): set(l.lower() for l in v) for k, v in dictionary.items()
}
return lowercase_dict
|
def intersect(lists):
"""
Return the intersection of all lists in "lists".
"""
if len(lists) == 0: return lists
if len(lists) == 1: return lists[0]
finalList = set(lists[0])
for aList in lists[1:]:
finalList = finalList & set(aList)
return list(finalList)
|
def prepare_timestamps(ts, session_key, event_type):
"""Prepares timestamps for insert with datajoint"""
ts_shutter_chan_start = ts[0::2]
ts_shutter_chan_stop = ts[1::2]
to_insert = [list(ts_shutter_chan_start), list(ts_shutter_chan_stop)]
to_insert = [[session_key, event_type, *i] for i in zip(*to_insert)] # transposes the list to get rows/cols right
if len(to_insert) != len(ts_shutter_chan_start):
to_insert.append([session_key, event_type, ts_shutter_chan_start[-1], ''])
return to_insert
|
def strip_pxr_namespace(s):
"""
Returns
-------
str
"""
if s.startswith('pxr::'):
return s[5:]
return s
|
def hms(seconds):
"""
Convert seconds to hours, minutes, seconds
    :param seconds: number of seconds
:type seconds: int
:return: hours, minutes, seconds
:rtype: Tuple[int, int, int]
"""
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return hours, minutes, seconds
|
def get_colors(exp_list):
"""Get bar colors."""
colors = {'historical': '0.5',
'historical-rcp85': '0.5',
'historicalGHG': 'red',
'historicalMisc': 'blue',
'1pctCO2': 'orange'}
color_list = []
for experiment in exp_list:
color_list.append(colors[experiment])
return color_list
|
def mean(lst):
"""Throws a div by zero exception if list is empty"""
return sum(lst) / float(len(lst))
|
def linear(x0: float, x1: float, p: float):
"""
    Interpolates linearly between two values such that when p=0
    the interpolated value is x0 and at p=1 it's x1
"""
return (1 - p) * x0 + p * x1
|
def convert_to_int(x):
"""
    Return the input unchanged if it is already an int, otherwise None.
    :param x: The input instance.
    :return: x if it is an int, else None.
"""
if isinstance(x, int):
return x
return None
|
def registryparse_reg_dword_datetime(datetime_int: int):
"""Takes the integer value from a REG_DWORD type datetime stamp from the Windows Registry and converts it to a datetime object.
Examples:
>>> from Registry.Registry import Registry\n
>>> reghive = Registry('SOFTWARE')\n
>>> key_path_raw = r'Microsoft\Windows NT\CurrentVersion'\n
>>> regkey = reghive.open(key_path_raw)\n
>>> install_date_entry = [x for x in regkey.values() if x.name() == 'InstallDate']\n
>>> install_date_entry\n
[RegistryValue(name="InstallDate", value="1458039846", type="RegDWord")]
>>> install_date_entry[0].value()\n
1458039846
>>> registryparse_reg_dword_datetime( 1458039846 )\n
datetime.datetime(2016, 3, 15, 4, 4, 6)\n
>>> str( registryparse_reg_dword_datetime( 1458039846 ) )\n
'2016-03-15 04:04:06'
Args:
datetime_int (int): Reference a valid datetime stamp from a REG_DWORD type.
Returns:
datetime.datetime: returns a datetime object.
"""
import datetime
datetime_obj = datetime.datetime.fromtimestamp(datetime_int)
return datetime_obj
|
def getRelative(points, pPrevious):
"""Calculates absolute coordinates by adding the previous point to the
relative coordinates."""
newPoints = []
for p in points:
newP = (p[0] + pPrevious[0], p[1] + pPrevious[1])
newPoints.append(newP)
return newPoints
|
def split_id(kg_id: str):
"""
Parameters:
- kg_id
Splitting the knowledge graph id into the schema and the id part.
Only the id part is needed as a path parameter.
"""
    parts = kg_id.split('/')
    return {
        'kg': {
            'kgSchema': '/'.join(parts[0:-1]),
            'kgId': parts[-1]
}
}
|
def move_child_to_line(point, change, old_line, orientation):
"""
A helper function that uses the triangle similarity
theorem which specifies that if one triangle
is just a sub triangle of another i.e. is just
one line drawn from any two sides it's sides
share a ratio.
:param point: coordinate to be moved
:type point: tuple
:param change: amount the original line was moved
as a length
:type change: float
:param old_line: original line which was moved
:type old_line: tuple
:param orientation: orientation of panel whose
line was moved therefore the line
:type orientation: str
:return: Amount the point needs to be adjusted
:rtype: float
"""
# If the line was moved vertically
if orientation == "h":
old_line_length = old_line[1][0] - old_line[0][0]
# change times the old point's x coordinate minus
# the old line's second point's x coordinate
# by the line's length
# In essence ratio of change/oldline times the difference
# between the point on the line's x coord i.e the length of
# the side of the inner triangle times the changed line's length
movement = (change*(point[0] - old_line[1][0]))/old_line_length
return movement
    # Same as above but if the line was moved horizontally
else:
old_line_length = (old_line[1][1] - old_line[0][1])
movement = (change*(point[1] - old_line[1][1]))/old_line_length
return movement
|
def lr_schedule(epoch):
"""Learning Rate Schedule
"""
l_r = 0.5e-2
if epoch > 180:
l_r *= 0.5e-3
elif epoch > 150:
l_r *= 1e-3
elif epoch > 60:
l_r *= 5e-2
elif epoch > 30:
l_r *= 5e-1
    print('Learning rate:', l_r)
return l_r
|
def guess_cloudwatch_search_terms(alarm_name):
"""Guess some search terms that might be useful in CloudWatch."""
if alarm_name == "loris-alb-target-500-errors":
return ['"HTTP/1.0 500"']
if alarm_name.startswith("lambda"):
return ["Traceback", "Task timed out after"]
if alarm_name.startswith("catalogue-api") and alarm_name.endswith("-5xx-alarm"):
return ['"HTTP 500"']
return []
|
def format_with_braces(*vars: str):
"""This is a demo exercise of using format() with positional {} in order to insert variables into the string.
Examples:
>>> format_with_braces('hi','there','fella')\n
hi there fella\n
['hi', 'there', 'fella']\n
'blah hi blah there blah fella'
>>> format_with_braces('hi')\n
hi\n
['hi']\n
'blah hi blah default2 blah default3'
>>> format_with_braces('hi','there','fella','David')\n
hi there fella David\n
['hi', 'there', 'fella', 'David']\n
'blah hi blah there blah fella'
"""
print(*vars)
list_of_vars = [*vars]
print([*vars])
if len(list_of_vars) < 3:
while len(list_of_vars) < 3:
number = len(list_of_vars)
list_of_vars.append("default" + str(number + 1))
return "blah {} blah {} blah {}".format(*list_of_vars)
|
def normalize_stats(stats):
"""Normalize frequency statistics."""
return dict((chr(k) if k < 256 else k, v if v else 1) for k, v in stats.items())
|
def table2dict(data):
""" lupa._lupa._LuaTable to dict """
if str(type(data)) != "<class 'lupa._lupa._LuaTable'>":
return data
for k, v in data.items():
if str(type(v)) != "<class 'lupa._lupa._LuaTable'>":
continue
if 1 in list(v.keys()):
if k == "ingredients":
data[k] = {y[1]: y[2] for x, y in v.items()}
else:
data[k] = [table2dict(y) for x, y in v.items()]
else:
data[k] = table2dict(v)
return dict(data)
|
def find_max(dictIn, keyIn):
"""Get max value here."""
maxValue = max(dictIn[keyIn].values())
#print("max value is -", maxValue, "- among values ", dictIn[keyIn].values())
    listMax = []
    strTemp = ""
    # Add to the list of max pairs here.
    for k, v in dictIn[keyIn].items():
        #print("v is ", v)
        if v == maxValue:
            # Format according to spec.
            strTemp = "{}: {}".format(k, v)
            listMax.append(strTemp)
    return listMax
|
def numbersonly(line):
"""returns the lines containing only numbers. bad lines reported to stderr.
if any bad line is detected, exits with exitcode 2.
"""
if not line.isnumeric():
raise ValueError('{} is not a number'.format(line))
return line
|
def to_lowercase(words):
"""convert all chars to lowercase from tokenized word list"""
new_words = []
for word in words:
new_word = word.lower()
new_words.append(new_word)
return new_words
|
def getImageIndex(exportSettings, uri):
"""
Return the image index in the glTF array.
"""
if exportSettings['uri_data'] is None:
return -1
if uri in exportSettings['uri_data']['uri']:
return exportSettings['uri_data']['uri'].index(uri)
return -1
|
def _cached_call(cache, estimator, method, *args, **kwargs):
"""Call estimator with method and args and kwargs.
This code is private in scikit-learn 0.24, so it is copied here.
"""
if cache is None:
return getattr(estimator, method)(*args, **kwargs)
try:
return cache[method]
except KeyError:
result = getattr(estimator, method)(*args, **kwargs)
cache[method] = result
return result
|
def filter_url(pkg_type, url):
"""
Returns URL of specified file type
'source', 'egg', or 'all'
"""
bad_stuff = ["?modtime", "#md5="]
for junk in bad_stuff:
if junk in url:
url = url.split(junk)[0]
break
#pkg_spec==dev (svn)
if url.endswith("-dev"):
url = url.split("#egg=")[0]
if pkg_type == "all":
return url
elif pkg_type == "source":
valid_source_types = [".tgz", ".tar.gz", ".zip", ".tbz2", ".tar.bz2"]
for extension in valid_source_types:
if url.lower().endswith(extension):
return url
elif pkg_type == "egg":
if url.lower().endswith(".egg"):
return url
|
def distance(setmap, p1, p2):
"""
Compute distance between two platforms
"""
total = 0
for (pset, count) in setmap.items():
if (p1 in pset) or (p2 in pset):
total += count
d = 0
for (pset, count) in setmap.items():
if (p1 in pset) ^ (p2 in pset):
d += count / float(total)
return d
|
def blockshape_dict_to_tuple(old_chunks, d):
"""
>>> blockshape_dict_to_tuple(((4, 4), (5, 5)), {1: 3})
((4, 4), (3, 3, 3, 1))
"""
shape = tuple(map(sum, old_chunks))
new_chunks = list(old_chunks)
for k, v in d.items():
div = shape[k] // v
mod = shape[k] % v
new_chunks[k] = (v,) * div + ((mod,) if mod else ())
return tuple(new_chunks)
|
def max_continuous(l:list):
"""Counts continuous in sorted list"""
    max_count = 1
    count = 1
    for i in range(1, len(l)):
        if l[i] == l[i-1] + 1:
            count += 1
        else:
            max_count = max(count, max_count)
            count = 1
    # account for a run that extends to the end of the list
    return max(count, max_count)
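
# Illustrative usage (added sketch; the input must already be sorted):
assert max_continuous([1, 2, 3]) == 3        # run reaching the end of the list
assert max_continuous([1, 2, 4, 5, 6]) == 3
assert max_continuous([5, 7, 9]) == 1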
|
def find_fired_conditions(conditions, guard=None, *args, **kwargs):
"""
For an iterable (e.g. list) of boolean functions, find a list of
functions returning ``True``.
If ``guard`` is given, it is applied to a function to get the
predicate - a function ``() -> bool``. If this predicate is
not ``None``, it is checked and the condition is then evaluated
if and only if the predicate returns ``True``. If ``guard`` is
not provided, or the predicate is ``None``, condition is tested
without additional check. Normally the predicate should be a
very short function allowing to test whether a complex condition
need to be evaluated.
Args:
conditions: an iterable of boolean functions
guard: a ``(condition) -> predicate`` function, where
``predicate`` is ``() -> bool``.
*args: positional arguments passed to each condition
**kwargs: keyword arguments passed to each condition
Returns: a list of conditions evaluated to ``True``
"""
fired = []
    if guard is not None:
        for condition in conditions:
            g = guard(condition)
            # per the docstring, a None predicate means "test without additional check"
            if g is None or g():
                if condition(*args, **kwargs):
                    fired.append(condition)
    else:
        for condition in conditions:
            if condition(*args, **kwargs):
                fired.append(condition)
return fired
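
# Illustrative usage (added sketch with made-up conditions; the guard returns a cheap
# predicate that lets only the first condition be evaluated):
conds = [lambda x: x > 0, lambda x: x % 2 == 0]

def cheap_guard(condition):
    return lambda: condition is conds[0]

assert find_fired_conditions(conds, None, 4) == conds
assert find_fired_conditions(conds, cheap_guard, 4) == [conds[0]]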
|
def hyphen_to_underscore_string(string):
"""
Convert hyphens to underscores in the given string
"""
return string.replace('-', '_')
|
def prepare_full_vrt_dicts(vrt_file_dict, vrt_dt_dict):
"""
takes vrt file and datetime file dicts and combines
into one final dict
"""
# imports
from collections import Counter
# notify
print('Combining vrt files and datetimes per band.')
# checks
# get list of band names in dict
file_bands = [band for band in vrt_file_dict]
dt_bands = [band for band in vrt_dt_dict]
# check if same bands lists identical
if Counter(file_bands) != Counter(dt_bands):
raise ValueError('VRT and datetime band names not identical.')
# iter vrt file dict and create as we go
vrt_dict = {}
for band in file_bands:
vrt_dict[band] = {
'vrt_datetimes': vrt_dt_dict[band],
'vrt_file': vrt_file_dict[band]}
# notify and return
print('Combined vrt files and datetimes per band successfully.')
return vrt_dict
|
def sequential_block(input, *layers):
"""helper function for sequential layers"""
for layer in layers:
layer_func, layer_conf = layer
input = layer_func(input, **layer_conf)
return input
|
def resources_vs_time(upgrade_cost_increment, num_upgrade):
"""
Build function that performs unit upgrades with specified cost increments
"""
current_time = 0
time_increment = 0
total_resources_generated = 0
current_generation_rate = 1
upgrade_cost = 1
list_pairs = []
for dummy_i in range(num_upgrade):
# Compute how long till the next upgrade is possible based on the current cost of an upgrade and the current resource generation rate
time_increment = upgrade_cost / current_generation_rate
current_time += time_increment
# add generated resources to total
total_resources_generated += time_increment* current_generation_rate
# Simulate purchasing the upgrade by increasing the resource generation rate appropriately and incrementing the current cost of an upgrade
current_generation_rate += 1
upgrade_cost += upgrade_cost_increment
list_pairs.append([current_time, total_resources_generated])
return list_pairs
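
# Illustrative usage (added sketch): with a constant upgrade cost of 1 resource
# (increment 0), each upgrade arrives faster as the generation rate grows.
assert resources_vs_time(0, 2) == [[1.0, 1.0], [1.5, 2.0]]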
|
def merge_sequentials_containing_ints(*args):
"""takes a bunch of sequential data (of the same type and structured as
<key,value>) and merges them together by adding (not concatenating) their
values. data should be structured homogeneously.
WARNING: duplicate items within an argument are also merged. strings not
allowed."""
if not len(args):
return ()
if isinstance(args[0], str):
raise ValueError("Strings are't allowed.")
res = {}
for arg in args:
for key, val in arg:
res[key] = val + res.get(key, 0)
return tuple(res.items())
|
def get_obj_attr(obj, attr):
"""
Allows us to access property by variable value inside our templates.
Example: `data={'monday': True}`, `day='monday'`, then we can do: `{{ data|get_obj_attr:day }}`
Parameters
----------
obj
attr
Returns
-------
Attribute of obj
"""
return getattr(obj, attr)
|
def condition_attribute_exists(attribute_name: str) -> dict:
"""You're probably better off with the more composable add_condition_attribute_exists"""
return dict(ConditionExpression=f"attribute_exists({attribute_name})")
|
def is_procedure(vba_object):
"""
Check if a VBA object is a procedure, e.g. a Sub or a Function.
This is implemented by checking if the object has a statements
attribute
:param vba_object: VBA_Object to be checked
:return: True if vba_object is a procedure, False otherwise
"""
if hasattr(vba_object, 'statements'):
return True
else:
return False
|
def preprocess_files_fast(infiles, tmpd) :
"""_Do not_ run the sound preprocessing stage.
    Returns: outfiles (== infiles)
"""
outfiles = infiles.copy()
return outfiles
|
def get_inst(cmd):
"""Function: get_inst
Description: Returns the module instance header.
Arguments:
(input) cmd -> Module library.
(output) -> Return module instance.
"""
sub = cmd
return sub
|
def _jq_format(code):
"""
DEPRECATED - Use re.escape() instead, which performs the intended action.
Use before throwing raw code such as 'div[tab="advanced"]' into jQuery.
Selectors with quotes inside of quotes would otherwise break jQuery.
This is similar to "json.dumps(value)", but with one less layer of quotes.
"""
code = code.replace('\\', '\\\\').replace('\t', '\\t').replace('\n', '\\n')
code = code.replace('\"', '\\\"').replace('\'', '\\\'')
code = code.replace('\v', '\\v').replace('\a', '\\a').replace('\f', '\\f')
code = code.replace('\b', '\\b').replace(r'\u', '\\u').replace('\r', '\\r')
return code
|
def get_knot_hash( input_str ):
"""
Calculates a knot hash based on arbitrary input string
"""
suffix = [ 17, 31, 73, 47, 23 ]
lengths = [ ord( x ) for x in input_str ] + suffix
data = [ x for x in range( 256 ) ]
pos = 0
skip = 0
for round_num in range( 64 ):
for length in lengths:
data2 = data + data
span = data2[ pos:pos+length ]
span.reverse( )
# replace this span in data
rpos = pos + 0
for val in span:
data[ rpos ] = val
rpos += 1
if rpos == len( data ):
rpos = 0
pos += length + skip
pos = pos % len( data )
skip += 1
dense_hash = [ ]
for i in range( 16 ):
start = i * 16
end = start + 16
xor = 0
for num in data[ start:end ]:
xor = xor ^ num
dense_hash.append( xor )
hex_hash = ''
for num in dense_hash:
hex_str = hex( num ).split( 'x' )[ 1 ]
hex_str = '0' + hex_str
hex_str = hex_str[ -2: ]
hex_hash += hex_str
return hex_hash
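
# Sanity check (added sketch): this routine implements the Advent of Code 2017
# day 10 knot hash, so the published puzzle examples should hold.
assert get_knot_hash("") == "a2582a3a0e66e6e86e3812dcb672a272"
assert get_knot_hash("AoC 2017") == "33efeb34ea91902bb2f59c9920caa6cd"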
|
def num_in_box(board, row, col, num):
"""
row and col should be the top-left coordinates of the box
True if num is already in the 3x3 box, False otherwise
"""
return num in [board[row+i][col+j] for i in range(3) for j in range(3)]
|
def getDocUID(exp_uid,alg_uid=None):
"""
Each instance of an app (with an (app_id,exp_uid) pair) and an algorithm (with an (app_id,exp_uid,alg_id,alg_uid) tuple)
gets its own namespace. This method defines that namespace given the exp_uid, or (exp_uid,alg_uid)
Usage::\n
print utils.getDocUID(exp_uid)
>>> 'eee9d58c61d580029113ba593446d23a'
print utils.getDocUID(exp_uid,alg_uid)
>>> 'eee9d58c61d580029113ba593446d23a-f081d374abac6c009f5a74877f8b9f3c'
"""
    if alg_uid is None:
return exp_uid
else:
return exp_uid + "-" + alg_uid
|
def filename(prefix, t, i):
""" Filename of a file with prefix and counter
"""
if (t == 0):
a = "-x509"
else:
a = "-precert"
return prefix + str(i) + a + ".der"
|
def gen_all_strings(word):
"""
Generate all strings that can be composed from the letters in word
in any order.
Returns a list of all strings that can be formed from the letters
in word.
This function should be recursive.
"""
if len(word) == 0:
return [""]
# step 1
first = word[0]
rest = word[1:]
# step 2
rest_strings = gen_all_strings(rest)
# step 3
strings = list(rest_strings)
for string in strings:
for position in range(len(string)):
new_string = string[:position] + first + string[position:]
rest_strings.append(new_string)
rest_strings.append(string + first)
return rest_strings
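
# Illustrative usage (added sketch): the result contains the empty string plus every
# ordering of every subset of the letters.
assert sorted(gen_all_strings("ab")) == ["", "a", "ab", "b", "ba"]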
|
def get_display_name(record):
"""Get the display name for a record.
Args:
record
A record returned by AWS.
Returns:
A display name for the bucket.
"""
return record["Key"]
|
def get_conv_scale(convs):
"""
Determines the downscaling performed by a sequence of convolutional and pooling layers
"""
scale = 1.
for c in convs:
stride = getattr(c, 'stride', 1.)
scale /= stride if isinstance(stride, (int, float)) else stride[0]
return scale
|
def plusnone(a, b):
"""
Add a and b, returning None if either of them is None
:param a: The first summand
:param b: The second summand
:return: The sum
"""
if (a is None) or (b is None):
return None
return a + b
|
def has_c19_scope (scopes):
""" Check if the COVID-19 GLIDE number or HRP code is present """
for scope in scopes:
if scope.type == "1" and scope.vocabulary == "1-2" and scope.code.upper() == "EP-2020-000012-001":
return True
elif scope.type == "2" and scope.vocabulary == "2-1" and scope.code.upper() == "HCOVD20":
return True
return False
|
def count(grid, c):
"""
Count the occurrences
of an object "c" in
the 2D list "grid".
"""
acc = 0
for row in grid:
for elem in row:
acc += c == elem
return acc
|
def create_supplemental_metadata(metadata_columns, supplemental_metadata):
"""Function to identify supplemental metadata store them"""
for metadata_column_list in metadata_columns:
for column in metadata_column_list:
supplemental_metadata.pop(column, None)
return supplemental_metadata
|
def parse_photo_link(photo_url):
"""
Extracts the base URL (URL without query parameters) and the photo name from a Onedrive photo URL
:param photo_url: photo URL
:return: base URL and photo name
"""
base_url = photo_url.split('?')[0]
name = base_url.split('/')[-1]
return base_url, name
|
def reverse_graph(graph):
"""
reverse directed graph
"""
    # initialize
graph_rev = {}
for u in graph:
graph_rev[u] = []
# add reversed edge v -> u
for u in graph:
for v in graph[u]:
graph_rev[v].append(u)
return graph_rev
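
# Illustrative usage (added sketch): every node must appear as a key, even pure sinks,
# otherwise the append on the reversed adjacency list would raise a KeyError.
example_graph = {"a": ["b", "c"], "b": ["c"], "c": []}
assert reverse_graph(example_graph) == {"a": [], "b": ["a"], "c": ["a", "b"]}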
|
def _check_old_barcode(barcode: str) -> str:
"""Check old barcode format."""
if len(barcode) > 11:
return "Barcode exceeds max length"
if len(barcode) < 10:
return "Barcode does not reach min length"
for char in barcode:
if not char.isalnum():
return f"Barcode contains invalid character: '{char}'"
if barcode[:2] not in ("MA", "MB", "ME"):
return f"Barcode contains invalid header: '{barcode[:2]}'"
if not barcode[2:4].isnumeric():
return f"Barcode contains invalid year: '{barcode[2:4]}'"
if not barcode[4:7].isnumeric() or int(barcode[4:7]) < 1 or int(barcode[4:7]) > 366:
return f"Barcode contains invalid Julian date: '{barcode[4:7]}'"
if not barcode[7:].isnumeric():
return f"Barcode contains nom-numeric string after Julian date: '{barcode[7:]}'"
return ""
|