content: stringlengths 42 to 6.51k
|
def remove_nullchars(block):
"""Strips NULL chars taking care of bytes alignment."""
data = block.lstrip(b'\00')
padding = b'\00' * ((len(block) - len(data)) % 8)
return padding + data
|
def isPalindrome(x):
"""
Take an integer and determine whether it is a palindrome.
:type x: int
:rtype: bool
"""
if x < 0:
return False
# convert to string
x = str(x)
if x[-1] == '0' and len(x) > 1:
return False
if x == x[::-1]:
return True
return False
|
def read_file(file_path):
""" There are problems with pd.read_csv() for the retail.csv that's why this implementation.
:param file_path: path to file
:return: List of transactions with items
"""
dataset = []
with open(file_path, 'r') as f:
transactions = f.readlines()
for transaction in transactions:
items = transaction.split('\n')[0]
items = items.split(' ')
dataset.append(items)
return dataset
|
def b2u(string):
""" bytes to unicode """
if isinstance(string, bytes):
return string.decode('utf-8')
return string
|
def flatten_dictionary(nested_dict, separator):
"""Flattens a nested dictionary.
New keys are concatenations of nested keys with the `separator` in between.
"""
flat_dict = {}
for key, val in nested_dict.items():
if isinstance(val, dict):
new_flat_dict = flatten_dictionary(val, separator)
for flat_key, inval in new_flat_dict.items():
new_key = key + separator + flat_key
flat_dict[new_key] = inval
else:
flat_dict[key] = val
return flat_dict
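# Illustrative usage sketch (not part of the original snippet): nested keys are
# joined with the separator.
assert flatten_dictionary({"a": {"b": 1, "c": 2}, "d": 3}, ".") == {"a.b": 1, "a.c": 2, "d": 3}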
|
def select_params(params):
""" Grabs just the necessary keys from params """
return {
'Bucket': params['Bucket'],
'NotificationConfiguration': params['NotificationConfiguration']
}
|
def curl_field_vector(x, y, z, px, py, pz):
"""calculate a 3D velocity field using vector curl noise"""
eps = 1.0e-4
offset = 100.0
def deriv(a1, a2):
return (a1 - a2) / (2.0 * eps)
# x_dx = deriv(px(x + eps, y, z), px(x - eps, y, z))
x_dy = deriv(px(x, y + eps, z), px(x, y - eps, z))
x_dz = deriv(px(x, y, z + eps), px(x, y, z - eps))
y_dx = deriv(py(offset + x + eps, y, z), py(offset + x - eps, y, z))
# y_dy = deriv(py(offset + x, y + eps, z), py(offset + x, y - eps, z))
y_dz = deriv(py(offset + x, y, z + eps), py(offset + x, y, z - eps))
z_dx = deriv(pz(x + eps, offset + y, z), pz(x - eps, offset + y, z))
z_dy = deriv(pz(x, offset + y + eps, z), pz(x, offset + y - eps, z))
# z_dz = deriv(pz(x, offset + y, z + eps), pz(x, offset + y, z - eps))
return z_dy - y_dz, x_dz - z_dx, y_dx - x_dy
|
def hashable_key(key):
"""
Convert the key from couch into something hashable.
Mostly, just need to make it a tuple and remove the special
{} value.
"""
return tuple('{}' if item == {} else item for item in key)
|
def MathMLExtraction(s):
"""
Takes a MathML expression as string, returns the last mn or mo value.
TODO: this is a rough implementation based on string operations; the MathML should really be parsed.
:param s: mathML string with mo or mn components
:return: the value of the last mn or mo element
"""
if s.find('</mn></math>') != -1:
return s.split('</mn></math>')[0].rsplit('<mn>', 1)[-1]
elif s.find('</mo></math>') != -1:
return s.split('</mo></math>')[0].rsplit('<mo>', 1)[-1]
|
def links(links_title, links=None, **kwargs):
"""
A section of the list. Please see 'link'.
:param links_title: A title of the section.
:param links: list of the links
:param kwargs: additional links defined in simplified form:
links(..., home="Go home", index="Main page")
"""
l = links or []
for url, title in kwargs.items():
l.append({"title": title, "url": url})
return {
"class": "links",
"title": links_title,
"links": l
}
|
def test_vertically(board: list) -> bool:
"""
Check that every column of the 9x9 board contains the digits 1 through 9 exactly once.
"""
i = 0
while i < 9:
col = []
for row in board:
col.append(row[i])
if sorted(col) != [1, 2, 3, 4, 5, 6, 7, 8, 9]:
return False
i += 1
return True
|
def remove_trailings(input_string):
"""
Collapse consecutive whitespace into single spaces and strip leading/trailing whitespace.
"""
output_string = " ".join(input_string.split())
return output_string
|
def generatePercentage(frequency, base_parameter):
"""Convert each frequency count in the input list into a percentage of
base_parameter, formatted as a string with two decimal places."""
# for each item of the input list (the incidence of a particular result), turn it
# into a percentage of the total
for n in range(len(frequency)):
frequency[n] = "{:.2f}".format(((frequency[n]) / base_parameter * 100))
return frequency
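# Illustrative usage sketch (not part of the original snippet): counts [1, 3] out of a
# base of 4 become percentage strings.
assert generatePercentage([1, 3], 4) == ['25.00', '75.00']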
|
def dict_from_list(keys, values):
"""
Generate a dictionary from a list of keys and values.
:param keys: A list of keys.
:param values: A list of values.
:returns: dict
Example::
>>> bpy.dict_from_list(['a', 'b', 'c'], [1, 2, 3])
{'a': 1, 'b': 2, 'c': 3}
"""
return dict(zip(keys, values))
|
def parse_requirements(filename):
""" load requirements from a pip requirements file """
lines = (line.strip() for line in open(filename))
return [line for line in lines if line and not line.startswith("#")]
|
def number_filter(value, fmt="{:,.0f}"):
"""
Format numbers, default format is integer with commas
"""
return fmt.format(float(value))
|
def song_has_info(db_session, song_id):
"""Check to see if a portion of the table is empty."""
try:
c = db_session.cursor()
c.execute("""SELECT 1 FROM %s WHERE song_id = ? LIMIT 1""" % 'songs', [song_id])
result = c.fetchone()
#db_session.commit()
c.close()
if result is None:
return False # no row found for this song_id
except Exception:
return False
return True
|
def portmerge(current, new):
"""Merge ports
Parameters
----------
current : str or List
Current set of ports
new : str or List
New set of ports to merge in
Returns
-------
str or List
If either input is a string it is returned as-is; otherwise a sorted, de-duplicated list of ports
"""
if isinstance(current, str):
return current
if isinstance(new, str):
return new
return sorted(list(set(current + new)))
|
def load_json_file(infile):
"""Read file. Strip out lines that begin with '//'."""
lines = []
for line in infile:
if not line.lstrip().startswith('//'):
lines.append(line)
content = ''.join(lines)
return content
|
def shorten(text, length, placeholder):
"""
Truncate *text* to a maximum *length* (if necessary), indicating truncation
with the given *placeholder*.
The maximum *length* must be longer than the length of the *placeholder*.
Behaviour is slightly different than :py:func:`textwrap.shorten` which is
intended for shortening sentences and works at the word, not character,
level.
>>> shorten("foobar", 6, "...")
'foobar'
>>> shorten("foobarbaz", 6, "...")
'foo...'
>>> shorten("foobar", 3, "...")
Traceback (most recent call last):
...
ValueError: maximum length (3) must be greater than length of placeholder (3)
"""
if length <= len(placeholder):
raise ValueError(f"maximum length ({length}) must be greater than length of placeholder ({len(placeholder)})")
if len(text) > length:
return text[0:length - len(placeholder)] + placeholder
else:
return text
|
def find_data_path(bone_name_list, data_path):
"""find bone_name from from data_path"""
for x in bone_name_list:
if data_path == 'pose.bones["' + x + '"].location':
return True, x
return False, ""
|
def flip_bit(bit):
"""
:param bit: '0' or '1'
:return: '1' or '0', respectively
"""
return str(1 - int(bit))
|
def try_decode(obj: bytes, encoding="utf-8"):
"""
Try decode given bytes with encoding (default utf-8)
:return: Decoded bytes to string if succeeded, else object itself
"""
try:
rc = obj.decode(encoding=encoding)
except AttributeError:
rc = obj
return rc.strip()
|
def get_is_head_of_word(naive_tokens, sequence_tokens):
"""
Return a list of flags whether the token is head(prefix) of naively split tokens
ex) naive_tokens: ["hello.", "how", "are", "you?"]
sequence_tokens: ["hello", ".", "how", "are", "you", "?"]
=> [1, 0, 1, 1, 1, 0]
* Args:
naive_tokens: a list of tokens, naively split by whitespace
sequence_tokens: a list of tokens, split by 'word_tokenizer'
* Returns:
is_head_of_word: a list with its length the same as that of 'sequence_tokens'.
has 1 if the tokenized word at the position is head(prefix) of a `naive_token`
and 0 if otherwise.
"""
is_head_of_word = []
for naive_token in naive_tokens:
consumed_chars = 0
consumed_words = 0
for sequence_token in sequence_tokens:
if naive_token[consumed_chars:].startswith(sequence_token):
is_head_of_word.append(0 if consumed_chars else 1)
consumed_chars += len(sequence_token)
consumed_words += 1
else:
break
sequence_tokens = sequence_tokens[consumed_words:]
return is_head_of_word
|
def castable_to_int(s):
"""
Return True if the string `s` can be interpreted as an integer
"""
try:
int(s)
except ValueError:
return False
else:
return True
|
def offset_for_stacking(items, offset):
""" Remove offset items from the end and copy out items from the start
of the list to offset to the original length. """
if offset < 1:
return items
return [items[0] for _ in range(offset)] + items[:-offset]
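# Illustrative usage sketch (not part of the original snippet): the first element is
# repeated `offset` times and the same number of items is dropped from the end.
assert offset_for_stacking([1, 2, 3, 4, 5], 2) == [1, 1, 1, 2, 3]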
|
def get_thresholds(col, act_threshold=0.995, attr_threshold=0.99):
"""
Defines a custom threshold function
"""
if col == 'activity':
return act_threshold
else:
return attr_threshold
|
def init_actions_(service, args):
"""
This needs to return a mapping of actions describing the dependencies between actions.
Look at ACTION_DEPS in this module for an example of what is expected.
"""
# some default logic for simple actions
return {
'test': ['install'],
'test2': ['install'],
'test3': ['test2']
}
|
def get_rate_units(units, time_units, deriv=1):
"""
Return a string for rate units given units for the variable and time units.
Parameters
----------
units : str
Units of a given variable.
time_units : str
Time units.
deriv : int
If 1, provide the units of the first derivative. If 2,
provide the units of the second derivative.
Returns
-------
str
Corresponding rate units for the given variable.
"""
if deriv not in (1, 2):
raise ValueError('deriv argument must be 1 or 2.')
tu = time_units if deriv == 1 else '{0}**2'.format(time_units)
if units is not None and time_units is not None:
rate_units = '{0}/{1}'.format(units, tu)
elif units is not None:
rate_units = units
elif time_units is not None:
rate_units = '1.0/{0}'.format(tu)
else:
rate_units = None
return rate_units
|
def add_column(content, col_index, col_info=None):
"""
From the position of the cursor add a column
Arguments:
- the table content, a list of list of strings:
- First dimension: the columns
- Second dimensions: the column's content
- cursor index for col
- optional information to append from command line
Returns:
- the table content, a list of list of strings:
- First dimension: the columns
- Second dimensions: the column's content
"""
col_size = len(content[0])
content.insert(col_index, [""] * col_size)
return content
|
def compare_rrs(expected, got):
""" Compare lists of RR sets, throw exception if different. """
for rr in expected:
if rr not in got:
raise Exception("expected record '%s'" % rr.to_text())
for rr in got:
if rr not in expected:
raise Exception("unexpected record '%s'" % rr.to_text())
if len(expected) != len(got):
raise Exception("expected %s records but got %s records "
"(a duplicate RR somewhere?)"
% (len(expected), len(got)))
return True
|
def tableau_string(text):
"""Transforms to a string representation in Tableau
"""
value = repr(text)
if isinstance(text, str):
return value[1:]
return value
|
def variance_covariance(data, population_variance=False):
"""
Returns the Variance-Covariance matrix for ``data``.
From: http://www.python-forum.org/pythonforum/viewtopic.php?f=3&t=17441
"""
N = len(data) # number of vectors
D = len(data[0]) # dimensions per vector
if population_variance:
denom = N
else:
denom = N-1.0
means = [0.0 for i in range(D)] # initialize 1xD mean vector
for i in range(N):
for j in range(D):
means[j] += data[i][j]
means = [i/N for i in means]
# print "Means:"," ".join(map(str,means)),"\n"
covar = [[0.0 for i in range(D)] for j in range(D)] # initialize DxD covariance matrix
for i in range(D):
for j in range(i+1): # covariance symmetric, only do lower triangle of matrix
sum = 0.0
for k in range(N):
sum += data[k][i]*data[k][j]
covar[i][j] = sum/denom - means[i]*means[j]*N/denom
for j in range(D):
for k in range(j+1):
covar[k][j] = covar[j][k]
# print "covariance:"
# for i in range(D):
# print " ".join(map(str,covar[i]))
# print ""
return covar
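# Illustrative usage sketch (not part of the original snippet): sample covariance of two
# 2-D vectors; each dimension has variance 2 and the dimensions co-vary perfectly.
assert variance_covariance([[1, 2], [3, 4]]) == [[2.0, 2.0], [2.0, 2.0]]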
|
def human_friendly_temp(my_temperature_f):
"""Rounds a decimal fahrenheit temperature to the nearest whole degree, adds degree symbol"""
degree_sign = u"\N{DEGREE SIGN}"
return f"{round(my_temperature_f)} {degree_sign}F"
|
def _transform_column(raw, type_overrides):
"""helper function to facilitate converting specs to SQL
"""
output = {}
output['name'] = raw['field']
output['type'] = type_overrides.get(raw['type'], raw['type'])
return output
|
def calc_spiral_size(number):
"""
Calculates the side length of the smallest square spiral that can hold `number` cells.
"""
size = 1
found = False
while not found:
if (size * size) < number:
size += 1
else:
found = True
return size
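# Illustrative usage sketch (not part of the original snippet): 10 values need a 4x4
# spiral, since 3*3 = 9 is too small and 4*4 = 16 is the first square >= 10.
assert calc_spiral_size(10) == 4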
|
def getQStr(dateStr) :
"""
Converts a date in YYYYMMDD format to YYYY/QTRn/
where n is the quarter number from 1 to 4.
"""
return dateStr[:4] + '/QTR' + str((int(dateStr[4:6])+2) // 3) + '/'
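# Illustrative usage sketch (not part of the original snippet): July 2021 falls in quarter 3.
assert getQStr('20210715') == '2021/QTR3/'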
|
def _DoRemapping(element, map):
"""If |element| then remap it through |map|. If |element| is iterable then
each item will be remapped. Any elements not found will be removed."""
if map is not None and element is not None:
if not callable(map):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if isinstance(element, list) or isinstance(element, tuple):
element = list(filter(None, [map(elem) for elem in element]))
else:
element = map(element)
return element
|
def GetLapicVersionFields(reg_val):
""" Helper function for DoLapicDump that prints the fields of the
version register.
Params:
reg_val: int - the value of the version register to print
Returns:
string showing the fields
"""
lvt_num = (reg_val >> 16) + 1
version = reg_val & 0xff
return "[VERSION={:d} MaxLVT={:d}]".format(lvt_num, version)
|
def get_head(phrase):
"""Retrieve a phrase head."""
src, tgt, rela = phrase
if type(tgt) == int:
return tgt
else:
return get_head(tgt)
|
def teorijska_hrapavost(posmak, radijus_alata):
"""
"""
return posmak**2./8./radijus_alata
|
def cmp(x, y): # from: https://portingguide.readthedocs.io/en/latest/comparisons.html
"""
Replacement for built-in function cmp that was removed in Python 3
Compare the two objects x and y and return an integer according to
the outcome. The return value is negative if x < y, zero if x == y
and strictly positive if x > y.
"""
return (x > y) - (x < y)
|
def round_blend_step(step):
"""
Truncate a blendshape weight value to three decimal places.
:param step: raw weight value
:return: the weight truncated to three decimal places
"""
return int(step * 1000) / 1000.0
|
def get_supported_os(scheduler):
"""
Return a tuple of the os supported by parallelcluster for the specific scheduler.
:param scheduler: the scheduler for which we want to know the supported os
:return: a tuple of strings of the supported os
"""
return "alinux" if scheduler == "awsbatch" else "alinux", "centos6", "centos7", "ubuntu1604", "ubuntu1804"
|
def getStatus(sig, det_status):
"""
Determine a signal's detection status based on the status
of its detectors
Parameters
----------
sig : dict | (required)
A signal record dict generated from a Knack.View instance
det_status : dict | (required)
A lookup dictionary generated from method groupBySignal()
Returns
-------
value : string
A detection status string of BROKEN, UNKNOWN, NO DETECTION, OK
"""
sig_id = "${}".format(sig["SIGNAL_ID"])
if sig_id in det_status:
# any broken detector, status is BROKEN
if "BROKEN" in det_status[sig_id]["statuses"]:
return "BROKEN"
# detection must be OK
return "OK"
else:
# no detectors at signal
return "NO DETECTION"
|
def xgcd(a, b):
"""
Extended Euclid GCD algorithm.
Return (x, y, g) : a * x + b * y = gcd(a, b) = g.
"""
if a == 0:
return 0, 1, b
if b == 0:
return 1, 0, a
px, ppx = 0, 1
py, ppy = 1, 0
while b:
q = a // b
a, b = b, a % b
x = ppx - q * px
y = ppy - q * py
ppx, px = px, x
ppy, py = py, y
return ppx, ppy, a
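# Illustrative usage sketch (not part of the original snippet): verify the Bezout identity.
x, y, g = xgcd(240, 46)
assert g == 2 and 240 * x + 46 * y == g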
|
def cumulativeMass(listOfPlanets):
"""
Calculates the total mass of all given planets
@param listOfPlanets: A list that contains all the planets
@return totalMass: the sum of the masses of all planets
>>> lop = list()
>>> lop.append(Planet(1, 500, 0, numpy.array([5, 7, 3]), 0))
>>> lop.append(Planet(2, 600, 0, numpy.array([1, 3, 5]), 0))
>>> lop.append(Planet(3, 900, 0, numpy.array([7, 8, 9]), numpy.array([1, 2, 3])))
>>> cumulativeMass(lop)
2000
"""
totalMass = 0
for planet in listOfPlanets:
totalMass += planet.mass
return totalMass
|
def exact_matches(data_1, data_2, match_fields):
""" Identifies exact and non-exact matches between data sets. """
nonexact_1 = {}
nonexact_2 = {}
exact_pairs = []
redundant = {}
for key, record in data_1.items():
record_hash = hash(tuple(record[f] for f in match_fields))
redundant[record_hash] = key
for key_2, record in data_2.items():
record_hash = hash(tuple(record[f] for f in match_fields))
if record_hash in redundant:
key_1 = redundant[record_hash]
exact_pairs.append(((key_1, key_2), 1.0))
del redundant[record_hash]
else:
nonexact_2[key_2] = record
for key_1 in redundant.values():
nonexact_1[key_1] = data_1[key_1]
return nonexact_1, nonexact_2, exact_pairs
|
def flatten(ls):
"""flatten a list one level
>>> flatten([[1,2],[3,4],[5,6]])
[1, 2, 3, 4, 5, 6]
"""
return [v for sub in ls for v in sub]
|
def normaliseFrequency(f, timespan):
"""Convert a frequency in the units of the data into normalised frequency.
Useful for computing input arguments in {high|low}Pass()
Inputs:
f (float) Frequency in, e.g, Hz
timespan (float) Range of data in, e.g., seconds. This is the time
interval between the last and first data points in your
set.
"""
return f/float(timespan)
|
def merge_two_lists(t1, t2):
"""
Merge two tuples of lists
Args:
t1: the tuple of the first lists
t2: the tuple of the second lists
Returns:
a new tuple of the merged lists
"""
a1, b1 = t1
a2, b2 = t2
return a1 + a2, b1 + b2
|
def convolution_math(in_features, filter_size, out_features):
"""
Convolution math: show how the parameter count scales with the number of feature
maps and the filter size for regular vs. depthwise separable convolution.
Args:
in_features: number of input features
filter_size: size of the filter
out_features: number of output features
"""
# calculate the number of parameters for regular convolution
conv_parameters = in_features * filter_size * filter_size * out_features
# calculate the number of parameters for depthwise separable convolution
depthwise_conv_parameters = in_features * filter_size * filter_size + in_features * out_features
print('Depthwise separable: {} parameters'.format(depthwise_conv_parameters))
print('Regular convolution: {} parameters'.format(conv_parameters))
return None
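# Illustrative usage sketch (not part of the original snippet): with 32 input maps, a 3x3
# filter and 64 output maps this prints 2336 parameters for the depthwise separable case
# (32*3*3 + 32*64) versus 18432 (32*3*3*64) for regular convolution.
convolution_math(32, 3, 64)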
|
def get_sec(time_str):
"""Get Seconds from time."""
h, m, s = time_str.split(':')
return int(h) * 3600 + int(m) * 60 + float(s)
|
def _normalize_activity_share_format(share):
"""
Convert the input share format to the internally format expected by FTS3
{"A": 1, "B": 2} => [{"A": 1}, {"B": 2}]
[{"A": 1}, {"B": 2}] => [{"A": 1}, {"B": 2}]
"""
if isinstance(share, list):
return share
new_share = list()
for key, value in share.items():
new_share.append({key: value})
return new_share
|
def is_empty_line(line: str) -> bool:
"""
Tell if a line is empty.
Arguments:
line: The line to check.
Returns:
True if the line is empty or composed of blanks only, False otherwise.
"""
return not line.strip()
|
def validate_input_command(input_val):
"""Validates that the Command line includes
numbers"""
val = [input_val[0]]
if input_val[0].upper() == 'B':
for i in input_val[1:-1]:
try:
val.append(int(i))
except ValueError:
print('Parameters must be numeric')
return []
val.append(input_val[-1])
else:
for i in input_val[1:]:
try:
val.append(int(i))
except ValueError:
print('Parameters must be numeric')
return []
return val
|
def get_job_def(env_filename, args, sig_owners):
"""Returns the job definition given the env_filename and the args."""
return {
'scenario': 'kubernetes_e2e',
'args': ['--env-file=%s' % env_filename] + args,
'sigOwners': sig_owners or ['UNKNOWN'],
# Indicates that this job definition is auto-generated.
'tags': ['generated'],
'_comment': 'AUTO-GENERATED - DO NOT EDIT.'
}
|
def create_tweet_history_msg(tweet_content: str) -> str:
"""
Create history message based on tweet content.
Arguments:
tweet_content (str): Content string of the tweet.
Returns:
str: Single line log message for the tweet. Log messages should be
single line for readability and extraction. Therefore, newline
characters are replaced with spaces.
"""
return tweet_content.replace("\n", " ")
|
def choose_index(keys, df):
"""
Chooses key from a list of keys. Order of priority:
1) shortest length
2) has "id" in some form in name of an attribute
3) has attribute furthest to the left in table
Arguments:
keys (list[set[str]]) : list of keys to choose from
df (pd.DataFrame) : pandas dataframe keys are for
Returns:
index (list[str]) : chosen key
"""
sort_key = sorted(keys, key=len)
m = len(sort_key[0])
options = [key for key in sort_key if len(key) == m]
for key in options:
for attr in key:
if "_id" in attr.lower() or " id" in attr.lower() or "id _" in attr.lower() or "id " in attr.lower():
return list(key)
if df is None:
return list(options[0])
for col in df.columns:
includes = [option for option in options if col in option]
if len(includes) == 1:
return list(includes[0])
if len(includes) > 1:
options = includes
return list(options[0])
|
def _get_pipeline_configs_for_project(project_id, data):
"""
Given a project id, return a list of associated pipeline configurations.
Based on the Shotgun cache data, filters the cached pipeline configurations
down to those whose associated project matches the given project id.
the return data structure is a list of dicts, each dict containing the
following fields:
- id
- code
- windows_path
- linux_path
- mac_path
- project
- project.Project.tank_name
:param project_id: Project id to look for
:param data: Cache data chunk, obtained using _get_pipeline_configs()
:returns: list of pipeline configurations matching the project id, [] if no match.
"""
matching_pipeline_configs = []
for pc in data["pipeline_configurations"]:
# note the null check - in the future, the site configs will
# have null values for project.
if pc["project"] and pc["project"]["id"] == project_id:
matching_pipeline_configs.append(pc)
return matching_pipeline_configs
|
def calc(x, y, serial):
"""
Find the fuel cell's rack ID, which is its X coordinate plus 10.
Begin with a power level of the rack ID times the Y coordinate.
Increase the power level by the value of the grid serial number (your puzzle
input).
Set the power level to itself multiplied by the rack ID.
Keep only the hundreds digit of the power level (so 12345 becomes 3; numbers
with no hundreds digit become 0).
Subtract 5 from the power level.
"""
rackId = x + 10
rackLvl = rackId * y
temp = (rackLvl + serial) * rackId
temp = temp // 100 - (temp // 1000 * 10)
return temp - 5
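# Illustrative usage sketch (not part of the original snippet), matching the worked example
# from the Advent of Code 2018 day 11 puzzle: cell (3, 5) with serial 8 has power level 4.
assert calc(3, 5, 8) == 4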
|
def get_detection_rate(stats):
"""Get detection rate as string."""
return f'{stats["malicious"]}/{sum(stats.values())}'
|
def trunc(x, y, w, h):
"""Truncates x and y coordinates to live in the (0, 0) to (w, h)
Args:
x: the x-coordinate of a point
y: the y-coordinate of a point
w: the width of the truncation box
h: the height of the truncation box.
"""
return min(max(x, 0), w - 1), min(max(y, 0), h - 1)
|
def triplets_with_sum(number):
"""
Get all Pythagorean triplets (a, b, c) with a + b + c == number
"""
result = []
for _a in range(1, number - 1):
if _a ** 2 % (number - _a) == 0:
if (number - _a + (_a ** 2) // (number - _a)) % 2 == 0:
_c = (number - _a + (_a ** 2) // (number - _a)) // 2
_b = number - _a - _c
if _b > 0 and _c > 0 and _a < _b < _c:
result.append([_a, _b, _c])
return result
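# Illustrative usage sketch (not part of the original snippet): the only Pythagorean
# triplet summing to 12 is (3, 4, 5).
assert triplets_with_sum(12) == [[3, 4, 5]]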
|
def adjusted_r2(ord_r2, sample_size, n_of_feat):
"""
This function calculates the adjusted R-squared value from the ordinary
R-squared value reported by sklearn
"""
adj_r2 = 1-(1-ord_r2)*(sample_size-1)/(sample_size-n_of_feat-1)
return adj_r2
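# Illustrative usage sketch (not part of the original snippet): with R^2 = 0.8, 100 samples
# and 5 features, the adjustment lowers the score to 1 - 0.2 * 99 / 94 ~= 0.789.
assert abs(adjusted_r2(0.8, 100, 5) - 0.78936) < 1e-4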
|
def sigmoid(x):
"""Numerically-stable sigmoid function."""
import math
if x >= 0:
z = math.exp(-x)
return 1 / (1 + z)
else:
z = math.exp(x)
return z / (1 + z)
|
def update_dict(old_data, new_data):
"""
Overwrites old usa_ids, descriptions, and abbreviation with data from
the USA contacts API
"""
old_data['usa_id'] = new_data.get('usa_id')
if new_data.get('description') and not old_data.get('description'):
old_data['description'] = new_data.get('description')
if new_data.get('abbreviation') and not old_data.get('abbreviation'):
old_data['abbreviation'] = new_data['abbreviation']
return old_data
|
def scramble_soft(bits, seq):
"""Convert between type 4 and type 5 soft bits"""
return bits ^ (seq * 0xFF)
|
def calc_elo(elo, true, expected):
"""
Calculates the increase/decrease in ELO,
with a K value of 32.
"""
return elo + 32 * (true - expected)
|
def summary_ranges(nums):
"""
:type nums: List[int]
:rtype: List[str]
"""
res = []
if len(nums) == 1:
return [str(nums[0])]
i = 0
while i < len(nums):
num = nums[i]
while i+1 < len(nums) and nums[i+1] - nums[i] == 1:
i += 1
if nums[i] != num:
res.append(str(num) + "->" + str(nums[i]))
else:
res.append(str(num))
i += 1
return res
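# Illustrative usage sketch (not part of the original snippet): consecutive runs collapse
# into "start->end" strings.
assert summary_ranges([0, 1, 2, 4, 5, 7]) == ["0->2", "4->5", "7"]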
|
def Lambda(zhub, IECedition=3):
"""
IEC length scale. Lambda = 0.7*min(Zhat,zhub)
Where: Zhat = 30,60 for IECedition = 2,3, respectively.
"""
if IECedition <= 2:
return 0.7 * min(30, zhub)
return 0.7 * min(60, zhub)
|
def get_num_species(dim, q):
"""Return number of mixture species."""
return len(q) - (dim + 2)
|
def point_avg(points):
"""
Accepts a list of points, each with the same number of dimensions.
NB. points can have more dimensions than 2
Returns a new point which is the center of all the points.
"""
dimensions = len(points[0])
new_center = []
for dimension in range(dimensions):
dim_sum = 0 # dimension sum
for p in points:
dim_sum += p[dimension]
# average of each dimension
new_center.append(dim_sum / float(len(points)))
return new_center
|
def parse_image_url(url):
"""
Parses a url containing the image id to return the image id.
"""
# Check if it was a url or actual image id
# eg: https://map.openaerialmap.org/#/.../59e62b9a3d6412ef7220a51f?_k=vy2p83
image_id = url.split("/")[-1].split("?")[0]
return image_id
|
def is_lambda_reduce(mod):
"""check for the presence of the LambdReduce block used by the
torch -> pytorch converter
"""
return 'LambdaReduce' in mod.__repr__().split('\n')[0]
|
def parse_vref_from_product(product):
"""
:param product: Product name (typically from satimg.parse_metadata_from_fn)
:type product: str
:return: vref_name: Vertical reference name
:rtype: vref_name: str
"""
# sources for defining vertical references:
# AW3D30: https://www.eorc.jaxa.jp/ALOS/en/aw3d30/aw3d30v11_format_e.pdf
# SRTMGL1: https://lpdaac.usgs.gov/documents/179/SRTM_User_Guide_V3.pdf
# SRTMv4.1: http://www.cgiar-csi.org/data/srtm-90m-digital-elevation-database-v4-1
# ASTGTM2/ASTGTM3: https://lpdaac.usgs.gov/documents/434/ASTGTM_User_Guide_V3.pdf
# NASADEM: https://lpdaac.usgs.gov/documents/592/NASADEM_User_Guide_V1.pdf !! HGTS is ellipsoid, HGT is EGM96 geoid !!
# ArcticDEM (mosaic and strips): https://www.pgc.umn.edu/data/arcticdem/
# REMA (mosaic and strips): https://www.pgc.umn.edu/data/rema/
# TanDEM-X 90m global: https://geoservice.dlr.de/web/dataguide/tdm90/
# COPERNICUS DEM: https://spacedata.copernicus.eu/web/cscda/dataset-details?articleId=394198
if product in ['ArcticDEM/REMA','TDM1','NASADEM-HGTS']:
vref_name = 'WGS84'
elif product in ['AW3D30','SRTMv4.1','SRTMGL1','ASTGTM2','NASADEM-HGT']:
vref_name = 'EGM96'
elif product in ['COPDEM']:
vref_name = 'EGM08'
else:
vref_name = None
return vref_name
|
def approx_second_derivative(f,x,h):
"""
Numerical differentiation by finite differences. Uses central point formula
to approximate second derivative of function.
Args:
f (function): function definition.
x (float): point where second derivative will be approximated
h (float): step size for central differences. Typically less than 1
Returns:
ddf (float): approximation to second_derivative.
"""
ddf =(f(x+h) - 2.0*f(x) + f(x-h))/h**2
return ddf
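# Illustrative usage sketch (not part of the original snippet): for f(x) = x**2 the
# central-difference formula recovers the exact second derivative, 2.
assert abs(approx_second_derivative(lambda x: x ** 2, 1.5, 0.1) - 2.0) < 1e-9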
|
def atexit_shutdown_grace_period(grace_period=-1.0):
"""Return and optionally set the default worker cache shutdown grace period.
This only affects the `atexit` behavior of the default context corresponding to
:func:`trio_parallel.run_sync`. Existing and future `WorkerContext` instances
are unaffected.
Args:
grace_period (float): The time in seconds to wait for workers to
exit before issuing SIGKILL/TerminateProcess and raising `BrokenWorkerError`.
Pass `math.inf` to wait forever. Pass a negative value or use the default
value to return the current value without modifying it.
Returns:
float: The current grace period in seconds.
.. note::
This function is subject to threading race conditions."""
global ATEXIT_SHUTDOWN_GRACE_PERIOD
if grace_period >= 0.0:
ATEXIT_SHUTDOWN_GRACE_PERIOD = grace_period
return ATEXIT_SHUTDOWN_GRACE_PERIOD
|
def _unescape_nl(text):
"""Convert escaped newline characters ``\\n`` back into newlines"""
return str(text).replace('\\n', '\n')
|
def get_filename(url):
"""
get_filename(string) -> string
extracts the filename from a given url
"""
pos = (url.rfind('/')+1)
return url[pos:]
|
def compute_f1_score(precision, recall):
"""
Computes the F1-score between the precision and recall
:param precision: float number
:param recall: float number
:return: The float corresponding to F1-score
"""
if precision == 0 and recall == 0:
return 0
else:
return 2 * precision * recall / (precision + recall)
|
def url(endpoint, path):
"""append the provided path to the endpoint to build an url"""
return f"{endpoint.rstrip('/')}/{path}"
|
def is_bool(s):
"""
Returns True if the given object is a bool or a string representation of a bool ('true'/'false'), False otherwise
:param s: object
:return: bool
"""
return isinstance(s, bool) or str(s).lower() in ['true', 'false']
|
def exc_msg_str(exception, default="") -> str:
"""
Extract the exception's message, or its str representation, or the default message, in order of
priority.
"""
try:
msg = exception.args[0]
except (AttributeError, IndexError):
msg = None
if not msg or not isinstance(msg, str):
msg = str(exception).strip()
return msg if msg else default
|
def batch_axis(input_shape):
"""Get the batch axis entry of an input shape.
Parameters
----------
input_shape : tuple
The data shape related to dataset.
Returns
-------
axis : int
The batch axis entry of an input shape.
"""
idx = [i for i, s in enumerate(input_shape) if s == -1]
assert len(idx) == 1
return idx[0]
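# Illustrative usage sketch (not part of the original snippet): the -1 entry marks the
# batch dimension.
assert batch_axis((-1, 3, 224, 224)) == 0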
|
def convert_atid_to_key(atid_name_list):
"""Removes last letter from all strings in list."""
key_name_list = [atid_name[:-1] for atid_name in atid_name_list]
return key_name_list
|
def except_or(f, exception, *default):
"""Catch Exception and Return Default."""
try:
return f()
except exception:
if default:
return default[0]
return
|
def vectorize(dictionary, words):
"""
Converts a list of words into a list of frequency position numbers.
Args:
dictionary(dict): Dictionary containing the words in the vocabulary together
with their frequency position.
words(list): List of words that are to be converted.
Returns:
A list of frequency position numbers in place of the actual words in the list.
"""
data = list()
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0
data.append(index)
return data
|
def files_have_same_point_format_id(las_files):
"""Returns true if all the files have the same points format id"""
point_format_found = {las.header.point_format_id for las in las_files}
return len(point_format_found) == 1
|
def single_number(integers):
"""
Naive version: Given a non-empty array of integers, every element appears
twice except for one. Find that single one.
Runtime: O(n), Space: O(n)
"""
seen = set()
for integer in integers:
if integer in seen:
seen.remove(integer)
else:
seen.add(integer)
return seen.pop()
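# Illustrative usage sketch (not part of the original snippet): every value appears twice
# except 4.
assert single_number([4, 1, 2, 1, 2]) == 4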
|
def read_whole_file(filename: str) -> str:
"""
reads the whole file into a string, for example
>>> read_whole_file("README.md").split("\\n")[2]
'# Neural Semigroups'
:param filename: a name of the file to read
:returns: whole contents of the file
"""
with open(filename, "r", encoding="utf-8") as input_file:
text = input_file.read()
return text
|
def flatten(list_, ltypes=(list, tuple)):
"""Flatten lists of list into a list."""
ltype = type(list_)
list_ = list(list_)
i = 0
while i < len(list_):
while isinstance(list_[i], ltypes):
if not list_[i]:
list_.pop(i)
i -= 1
break
else:
list_[i:i + 1] = list_[i]
i += 1
return ltype(list_)
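# Illustrative usage sketch (not part of the original snippet): arbitrarily nested lists
# collapse into a single flat list of the same outer type.
assert flatten([1, [2, [3, 4]], 5]) == [1, 2, 3, 4, 5]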
|
def _calculate_step_sizes(x_size, y_size, num_chunks):
""" Calculate the strides in x and y.
Notes
-----
Calculate the strides in x and y to achieve at least the
`num_chunks` pieces.
Direct copy from ccdproc:
https://github.com/astropy/ccdproc/blob/b9ec64dfb59aac1d9ca500ad172c4eb31ec305f8/ccdproc/combiner.py#L500
"""
# First we try to split only along fast x axis
xstep = max(1, int(x_size / num_chunks))
# More chunks are needed only if xstep gives us fewer chunks than
# requested.
x_chunks = int(x_size / xstep)
if x_chunks >= num_chunks:
ystep = y_size
else:
# The x and y loops are nested, so the number of chunks
# is multiplicative, not additive. Calculate the number
# of y chunks we need to get at num_chunks.
y_chunks = int(num_chunks / x_chunks) + 1
ystep = max(1, int(y_size / y_chunks))
return xstep, ystep
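# Illustrative usage sketch (not part of the original snippet): splitting a 100x100 array
# into at least 8 chunks only needs strides along the fast x axis.
assert _calculate_step_sizes(100, 100, 8) == (12, 100)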
|
def check_required_hash(md5_hash):
"""Check if the Md5 hash satisfies the required conditions"""
# Check if MD5 hash starts with 5 zeroes
if md5_hash.find('00000') == 0:
return True
return False
|
def get_hidden_state(cell_state):
""" Get the hidden state needed in cell state which is
possibly returned by LSTMCell, GRUCell, RNNCell or MultiRNNCell.
Args:
cell_state: a structure of cell state
Returns:
hidden_state: A Tensor
"""
if type(cell_state) is tuple:
cell_state = cell_state[-1]
if hasattr(cell_state, "h"):
hidden_state = cell_state.h
else:
hidden_state = cell_state
return hidden_state
|
def _get_sentences_with_word_count(sentences, words):
""" Given a list of sentences, returns a list of sentences with a
total word count similar to the word count provided.
"""
word_count = 0
selected_sentences = []
# Loops until the word count is reached.
for sentence in sentences:
words_in_sentence = len(sentence.text.split())
# Checks if the inclusion of the sentence gives a better approximation
# to the word parameter.
if abs(words - word_count - words_in_sentence) > abs(words - word_count):
return selected_sentences
selected_sentences.append(sentence)
word_count += words_in_sentence
return selected_sentences
|
def demo(x, y):
"""
Demo function which doubles the product of x and y when it is less than 10.
"""
product = x * y
if product < 10:
return product * 2
else:
return product
|
def kappa_no_prevalence_calc(overall_accuracy):
"""
Calculate Kappa no prevalence.
:param overall_accuracy: overall accuracy
:type overall_accuracy: float
:return: Kappa no prevalence as float
"""
try:
result = 2 * overall_accuracy - 1
return result
except Exception:
return "None"
|
def from_pairs(pairs):
"""
Implementation of ramda's from_pairs. Converts a list of pairs or tuples of pairs to a dict
:param pairs:
:return:
"""
return {k: v for k, v in pairs}
|
def factory_test_account(is_model=False):
"""JSON data to create TestAccount."""
test_accounts = {
'testAccounts': 'SS4BPS201,98901,ONE,SS4BPS Felecia,F,4732 Easy Street,,V9B 3V9,1998-04-30\nSS4BPS999,' +
('98989' if is_model else '') +
',TWO,SS4BPS Benjamin,M,308-2464 Crimson Vale,Penticton BC V2A 5N1,V2A 5N1,2000-11-18'
}
return test_accounts
|