def find_1d_data(hash_table, buf):
""" THIS HAS NOT BEEN TESTED DUE TO NO FILES WITH 1D DATA AVAILABLE."""
item = hash_table.pop("/MetaData/MeasurementSettings/SamplesToLog", None)
if item is None:
return None
else:
raise NotImplementedError
|
def point_in_rectangle(px, py, rectangle):
"""
    Check if a point lies inside an axis-aligned rectangle.
:param px: x value of the point
:param py: y value of the point
:param rectangle: the coordinates of the 4 corners of the rectangle
    :return: True or False depending on whether the point is inside the rectangle
"""
max_x_val = float("-inf")
max_y_val = float("-inf")
min_x_val = float("inf")
min_y_val = float("inf")
assert (len(rectangle) == 4)
for p in rectangle:
if p[0] >= max_x_val:
max_x_val = p[0]
if p[1] >= max_y_val:
max_y_val = p[1]
if p[0] <= min_x_val:
min_x_val = p[0]
if p[1] <= min_y_val:
min_y_val = p[1]
    return min_x_val <= px <= max_x_val and min_y_val <= py <= max_y_val
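# A minimal usage sketch (illustrative values, not from the original source):
example_rect = [(0, 0), (4, 0), (4, 2), (0, 2)]
print(point_in_rectangle(1, 1, example_rect))  # True, the point lies inside
print(point_in_rectangle(5, 1, example_rect))  # False, x exceeds the rectangle's max x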
|
def is_error(value):
"""
Checks if `value` is an ``Exception``.
Args:
value (mixed): Value to check.
Returns:
bool: Whether `value` is an exception.
Example:
>>> is_error(Exception())
True
>>> is_error(Exception)
False
>>> is_error(None)
False
.. versionadded:: 1.1.0
"""
return isinstance(value, Exception)
|
def fib(x):
"""
    assumes x is an int >= 0
returns Fibonacci of x
"""
if x == 0 or x == 1:
return 1
else:
return fib(x-1) + fib(x-2)
|
def array_to_grader(array, epsilon=1e-4):
"""Utility function to help preparing Coursera grading conditions descriptions.
Args:
array: iterable of numbers, the correct answers
        epsilon: the generated expression will accept answers within this absolute
            difference of the provided values
Returns:
String. A Coursera grader expression that checks whether the user submission is in
(array - epsilon, array + epsilon)"""
res = []
for element in array:
if isinstance(element, int):
res.append("[{0}, {0}]".format(element))
else:
res.append("({0}, {1})".format(element - epsilon, element + epsilon))
return " ".join(res)
|
def calcDivisor(factors):
"""
Return the number from its prime factors.
Args:
factors: Dictionary of prime factors (keys) and their exponents (values).
Returns:
An integer. The number made of the prime factors.
Usage:
>>> calcDivisor({5: 3, 7: 2})
6125
>>> calcDivisor({2: 3, 5: 2, 7: 1})
1400
"""
acc = 1
for f,e in factors.items():
acc *= f**e
return acc
|
def _is_dictionary_with_tuple_keys(candidate):
"""Infer whether the argument is a dictionary with tuple keys."""
return isinstance(candidate, dict) and all(
isinstance(key, tuple) for key in candidate
)
|
def getFileNameOnly(fileName):
"""
return the file name minus the trailing suffix
"""
return '.'.join(fileName.split('/')[-1].split('.')[:-1])
|
def strtolist(string):
"""
Converts a string to a list with more flexibility than ``string.split()``
by looking for both brackets of type ``(,),[,],{,}`` and commas.
**Parameters**
string: *string*
The string to be split.
**Returns**
split list: *list*
The split list
**Examples**
::
>>> strtolist('[(12.3,15,256.128)]')
[12.3, 15, 256.128]
"""
out = []
temp = ''
brackets = ['(', ')', '[', ']', '{', '}']
for char in list(string):
if char not in brackets and char != ',':
temp += char
if char == ',':
try:
out.append(int(temp))
except ValueError:
out.append(float(temp))
temp = ''
    if len(temp) != 0:  # handle the final token left after the last comma/bracket
try:
out.append(int(temp))
except ValueError:
out.append(float(temp))
return out
|
def _wlen(s):
"""Return number of leading whitespace characters."""
return len(s) - len(s.lstrip())
|
def sum_non_abundant(limit=28124):
"""
A perfect number is a number for which the sum of its proper divisors is exactly equal to the number.
For example, the sum of the proper divisors of 28 would be 1 + 2 + 4 + 7 + 14 = 28,
which means that 28 is a perfect number.
A number n is called deficient if the sum of its proper divisors is less than n and
it is called abundant if this sum exceeds n.
As 12 is the smallest abundant number,
1 + 2 + 3 + 4 + 6 = 16,
the smallest number that can be written as the sum of two abundant numbers is 24.
By mathematical analysis,
it can be shown that all integers greater than 28123 can be written as the sum of two abundant numbers.
However, this upper limit cannot be reduced any further by analysis
even though it is known that the greatest number
that cannot be expressed as the sum of two abundant numbers is less than this limit.
"""
divisor_sum = [0] * limit
for i in range(1, limit):
for j in range(i * 2, limit, i):
divisor_sum[j] += i
abundant_nums = [i for (i, x) in enumerate(divisor_sum) if x > i]
expressible = [False] * limit
for i in abundant_nums:
for j in abundant_nums:
if i + j < limit:
expressible[i + j] = True
else:
break
ans = sum(i for (i, x) in enumerate(expressible) if not x)
return ans
|
def _get_youtube_y(insta_y, fa_insta_height, font_size):
""" Get YouTube icon's y position given Instagram y
for centre-alignment.
"""
return insta_y + fa_insta_height + font_size//10
|
def diff1(array):
"""
:param array: input x
:return: processed data which is the 1-order difference
"""
return [j-i for i, j in zip(array[:-1], array[1:])]
|
def unformat_metric(metric):
"""Unformat a single metric.
Parameters:
metric (dict): The metric to unformat
Returns:
metric (dict): The new metric in
the format needed for the log_event
API.
"""
metric_keys = metric.keys()
metric["metric_name"] = metric.pop("MetricName")
if "Dimensions" in metric_keys:
for dimension in metric["Dimensions"]:
dimension["name"] = dimension.pop("Name")
dimension["value"] = dimension.pop("Value")
metric["dimensions"] = metric.pop("Dimensions")
if "Timestamp" in metric_keys:
metric["timestamp"] = metric.pop("Timestamp")
if "Value" in metric_keys:
metric["value"] = metric.pop("Value")
else:
metric["StatisticValues"]["sample_count"] =\
metric["StatisticValues"].pop("SampleCount")
metric["StatisticValues"]["sum"] =\
metric["StatisticValues"].pop("Sum")
metric["StatisticValues"]["minimum"] =\
metric["StatisticValues"].pop("Minimum")
metric["StatisticValues"]["maximum"] =\
metric["StatisticValues"].pop("Maximum")
metric["statistic_values"] = metric.pop("StatisticValues")
if "Unit" in metric_keys:
metric["unit"] = metric.pop("Unit")
if "StorageResolution" in metric_keys:
metric["storage_resolution"] = metric.pop("StorageResolution")
return metric
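# A minimal illustration with a hypothetical CloudWatch-style metric dict
# (not from the original source):
cloudwatch_metric = {"MetricName": "Latency", "Value": 12.5, "Unit": "Milliseconds"}
print(unformat_metric(cloudwatch_metric))
# -> {'metric_name': 'Latency', 'value': 12.5, 'unit': 'Milliseconds'}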
|
def _format_offset(off):
"""
Format a timedelta into "[+-]HH:MM" format or "" for None
"""
if off is None:
return ''
mins = off.days * 24 * 60 + off.seconds // 60
sign = '-' if mins < 0 else '+'
return sign + '%02d:%02d' % divmod(abs(mins), 60)
|
def make_identifier(classname):
"""
    Given a class name (e.g. CountryData) return an identifier
    for the data-table for that class.
"""
identifier = [ classname[0].lower() ]
for c in classname[1:]:
if c.isupper():
identifier.extend(["_", c.lower()])
else:
identifier.append(c)
return "".join(identifier)
|
def validate_color(color):
    """
    Returns whether or not the given color is a valid hexadecimal color string ("#RRGGBB").
    """
    return (isinstance(color, str) and len(color) == 7 and color[0] == "#"
            and all(c in "0123456789abcdefABCDEF" for c in color[1:]))
|
def xor(first: bool, second: bool) -> bool:
"""Return exclusive OR for boolean arguments"""
return (first and not second) or (not first and second)
|
def _camel2snake(s: str) -> str:
"""from [1]
[1]:https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case
"""
return "".join(["_" + c.lower() if c.isupper() else c for c in s]).lstrip("_")
|
def insert_fixed_parameters(parameters_dict, fixed_parameters):
"""Insert the fixed parameters into the parameters_dict."""
if fixed_parameters is not None:
for key, value in fixed_parameters.items():
parameters_dict[key] = value
return parameters_dict
|
def find_mismatches(listings, assessments, accuracy=.01):
"""Finds any listings that are listed as having different assessments.
:param listings: article listings as generated by parse_article
:param assessments: dict of assessments of all listed articles
    :param accuracy: assessment ratio below which a listing counts as a mismatch
:return: array of mismatches
"""
mismatches = []
for l in listings:
if l["title"] in assessments:
article_assessments = assessments[l["title"]]
processed_assessments = list(map(str.lower, filter(lambda c: c != '', article_assessments)))
# We don't have a good solution if there are no project ratings
if len(processed_assessments) != 0:
assessment_ratio = processed_assessments.count(l["assessment"].lower()) / len(processed_assessments)
if assessment_ratio < accuracy:
mismatches.append({
"title": l["title"],
"listed_as": l["assessment"],
"current": article_assessments
})
else:
mismatches.append({
"title": l["title"],
"listed_as": l["assessment"],
"current": None
})
return mismatches
|
def convert_to_string(value):
"""Convert File,Directory or [File,Directory] into string or a list of string"""
if type(value) is dict and value.get("class") in ["File", "Directory"]:
return value["path"]
elif type(value) is list or type(value) is tuple:
converted_list = []
for item in value:
if type(item) is dict and item.get("class") in ["File", "Directory"]:
converted_list.append(item["path"])
else:
converted_list.append(item)
return converted_list
else:
return value
|
def spl1n(string, sep):
"""Splits string once on the first occurance of sep
returns [head, tail] if succesful, and
returns (string, None) if not.
Intended for scenarios when using unpacking with an unknown string.
"""
r = string.split(sep, 1)
if len(r) > 1:
return r
else:
return string, None
|
def recursion_limit(cnt=14, msgobj=None):
"""
node01 $ robustness recursion_limit 15
exec_lm_core LM_robustness->recursion_limit: maximum recursion depth exceeded
:param cnt: 14
"""
if cnt > 0:
remain = recursion_limit(cnt-1)
if msgobj is not None:
msgobj("recalled {}".format(cnt))
else:
remain = 0
return remain
|
def compute_log_zT(log_rho, log_seebeck, log_kappa, log_temperature):
"""Returns the log of the thermoelectric figure of merit."""
return 2 * log_seebeck + log_temperature - log_rho - log_kappa
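# A worked sketch of the formula zT = S^2 * T / (rho * kappa) in log space
# (illustrative values, not from the original source):
import math
log_zT = compute_log_zT(math.log(1e-5), math.log(200e-6), math.log(1.5), math.log(300))
print(math.exp(log_zT))  # ~0.8, the same as (200e-6)**2 * 300 / (1e-5 * 1.5)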
|
def range_geometric_row(number, d, r=1.1):
"""Returns a list of numbers with a certain relation to each other.
The function divides one number into a list of d numbers [n0, n1, ...], such
that their sum is number and the relation between the numbers is defined
with n1 = n0 / r, n2 = n1 / r, n3 = n2 / r, ...
"""
    if r <= 0:
        raise ValueError("r must be > 0")
    if r == 1:
        # r == 1 would divide by zero in the formula below; all d numbers are then equal
        return [number / d] * d
    n0 = number / ((1 - (1 / r)**d) / (1 - 1 / r))
numbers = [n0]
for i in range(d - 1):
numbers.append(numbers[-1] / r)
return numbers
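# Quick illustrative check (not from the original source): three numbers, each half
# the previous one, summing back to the input.
row = range_geometric_row(10, 3, r=2)
print(row)       # approximately [5.714, 2.857, 1.429]
print(sum(row))  # -> 10.0 (up to floating-point rounding)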
|
def operation_dict(ts_epoch, request_dict):
"""An operation as a dictionary."""
return {
"model": request_dict,
"model_type": "Request",
"args": [request_dict["id"]],
"kwargs": {"extra": "kwargs"},
"target_garden_name": "child",
"source_garden_name": "parent",
"operation_type": "REQUEST_CREATE",
}
|
def sq_km(value):
"""value is a float representing a surface using square kilometers as unit
this tag simply format mille with space and leave only two digits in decimals"""
if not isinstance(value, str):
value = str(value)
try:
integer, decimal = value.split(".")
except ValueError:
integer = value
decimal = "00"
if not integer.isdigit() or not decimal.isdigit():
return value
chunks = []
while integer:
if len(integer) >= 3:
chunks.insert(0, integer[-3:])
integer = integer[:-3]
else:
chunks.insert(0, integer)
integer = None
integer = " ".join(chunks)
return f"{integer},{decimal[:2]}"
|
def _fill_common_details(host, port, message):
"""
    Fill host, port and error message for the connection.
"""
cert_details = {}
cert_details['ssl_src_port'] = str(port)
cert_details['error'] = message
cert_details['ssl_src_host'] = str(host)
return cert_details
|
def calculate_damage(program):
"""Calculate the damage done by program."""
strength, damage = 1, 0
for instruction in program:
if instruction == 'C':
strength *= 2
else:
damage += strength
return damage
|
def midpoint(a, b):
"""Calculate midpoint between two points."""
middle = []
for i in range(len(a)):
middle.append(round((a[i]+b[i])/2))
return tuple(middle)
|
def plist_data_to_rbx_dict(plist_data: dict) -> dict:
"""
Converts data from a Roblox macOS .plist file into a dictionary.
"""
rbx_dict: dict = {}
for raw_key, raw_value in plist_data.items():
split_key = raw_key.split(".")
key_piece = rbx_dict
for i, key_bit in enumerate(split_key):
key_bit = key_bit.replace("\u00b7", ".")
if i == len(split_key) - 1:
if isinstance(raw_value, bytes):
raw_value = str(raw_value)
key_piece[key_bit] = raw_value
continue
if key_piece.get(key_bit):
key_piece = key_piece[key_bit]
else:
new_piece = {}
key_piece[key_bit] = new_piece
key_piece = new_piece
return rbx_dict
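# A small illustration with hypothetical dotted keys (not from the original source):
plist_like = {"Settings.Volume": 0.5, "Settings.Graphics.Quality": 3}
print(plist_data_to_rbx_dict(plist_like))
# -> {'Settings': {'Volume': 0.5, 'Graphics': {'Quality': 3}}}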
|
def min_edit(s1, s2):
"""
Return the Levenshtein distance between two strings.
"""
if len(s1) > len(s2):
s1, s2 = s2, s1
distances = range(len(s1) + 1)
for index2, char2 in enumerate(s2):
newDistances = [index2 + 1]
for index1, char1 in enumerate(s1):
if char1 == char2:
newDistances.append(distances[index1])
else:
newDistances.append(
1
+ min((distances[index1], distances[index1 + 1], newDistances[-1]))
)
distances = newDistances
return distances[-1]
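# Quick sanity check (not from the original source):
print(min_edit("kitten", "sitting"))  # -> 3 (substitute k->s, e->i, append g)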
|
def _changes(path):
"""
>>> _changes([2, 3, 0, 5])
[2, 1, -3, 5]
>>> _changes([2])
[2]
"""
res = [path[0]]
res += [p - p_prev for p, p_prev in zip(path[1:], path)]
return res
|
def find_outer_most_last_parenthesis(line):
"""
Find outer-most last parenthesis of a line statement
Used to identify the argument list in a function call
:param line: a string representing a statement
:return: a string representing this parenthesis with its content
"""
# Find last closing parenthesis
# It might not be last position as in the following statement:
# invoke void @_Z12(%"class.std::basic_string"* nonnull sret %source_str, %"class.std::basic_string"* nonnull %agg)
# to label %invoke.cont.276 unwind label %lpad.275
end = 0
if line[-1] == ')':
# this corresponds to most cases
start = len(line) - 1
else:
# look for the last closing parenthesis
start = line.rfind(')')
end = start
    if start == -1:
        raise ValueError("Could not find right-most closing parenthesis in\n" + line)
# Stack opening and closing parenthesis to find the correct one
close_bracket_count = -1
    for pos in range(start - 1, -1, -1):  # include index 0 so an opening parenthesis there is found
char = line[pos]
if char == ')':
close_bracket_count -= 1
elif char == '(':
close_bracket_count += 1
else:
pass
if close_bracket_count == 0:
start = pos
break
if end == 0:
return line[start:]
else:
return line[start:end+1]
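# An illustrative call on a hypothetical LLVM-style statement (not from the original source):
stmt = "invoke void @foo(%a, bar(%b)) to label %cont"
print(find_outer_most_last_parenthesis(stmt))  # -> "(%a, bar(%b))"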
|
def filter_future_act(acts, future_frame):
"""Get future activity ids.
future activity from the data is all the future activity,
here we filter only the activity in pred_len,
also add the current activity that is still not finished
Args:
acts: a tuple of (current_actid_list, current_dist_list,
future_actid_list, future_dist_list)
        future_frame: how many frames into the future
Returns:
future activity ids
"""
current_actid_list, current_dist_list, \
future_actid_list, future_dist_list = acts
# leave the actid happens at future_frame
actids = []
for act_id, dist_to_finish in zip(current_actid_list, current_dist_list):
if act_id == 0:
continue
if future_frame <= dist_to_finish:
actids.append(act_id)
for act_id, dist_to_start in zip(future_actid_list, future_dist_list):
if act_id == 0:
continue
if future_frame >= dist_to_start:
actids.append(act_id)
if not actids:
actids.append(0) # BG class
return actids
|
def adjusted_rand_index_pair_counts(a, b, c, d):
"""
Compute the adjusted Rand index from pair counts; helper function.
Arguments:
a: number of pairs of elements that are clustered in both partitions
b: number of pairs of elements that are clustered in first but not second partition
c: number of pairs of elements that are clustered in second but not first partition
        d: number of pairs of elements that are clustered in neither partition
Example usage:
In [1]: a = 1
In [2]: b = 2
In [3]: c = 2
In [4]: d = 10
        In [5]: adjusted_rand_index_pair_counts(a, b, c, d)
Out[5]: 0.16666666666666666
"""
if a*(b+c+2*d)+b*(b+d)+c*(c+d)!=0:
return float(2*(a*d-b*c))/float(a*(b+c+2*d)+b*(b+d)+c*(c+d))
else:
return 1.0
|
def fdr(pvals, a=0.05, n=None):
"""
Implementation of the Benjamini-Hochberg procedure.
Takes a list of p-values and returns a list of the indices of those p-values that pass.
Does not adjust p-values.
See http://sas-and-r.blogspot.sg/2012/05/example-931-exploring-multiple-testing.html
for pseudocode.
Test data from : http://udel.edu/~mcdonald/statmultcomp.html
>>> import random
>>> pvals = [0.6, 0.07, 0.49, 0.2, 0.48, 0.74, 0.68, 0.01, 0.97, 0.38, 0.032, 0.07]
>>> random.shuffle(pvals)
>>> sorted([pvals[i] for i in fdr(pvals, a=0.20)])
[0.01, 0.032]
>>> fdr([])
[]
>>> fdr([1])
[]
"""
    if n is not None:
assert n>=len(pvals)
else:
n=len(pvals)
sorted_pvals_indices = sorted(range(len(pvals)), key=lambda k:pvals[k])
t = next((rank for rank, spi in zip(range(len(pvals), 0, -1),
reversed(sorted_pvals_indices))
if pvals[spi] < rank*a/n), None)
if t:
return sorted_pvals_indices[:t]
return []
|
def remove_all_linebreaks(comment):
"""Remove trailing linebreak."""
return comment.replace("\n", "")
|
def add_last_timestamp(posts):
"""
    return the timestamp of the most recently published liveblog post
"""
# Currently we are leaning towards grabbing
# the last published post timestamp
timestamp = None
if posts:
timestamp = posts[0]['timestamp']
return timestamp
|
def __pyexpander_helper(*args, **kwargs):
"""a helper function needed at runtime.
This evaluates named arguments.
"""
if len(args)==1:
fn= args[0]
elif len(args)>1:
raise ValueError("only one unnamed argument is allowed")
else:
fn= None
return(fn, kwargs)
|
def is_palindrome(n):
"""function helper for fourth Euler problem."""
rn = 0
on = n
while n != 0:
remainder = int(n % 10)
rn = (rn * 10) + remainder
n = int(n / 10)
return on == rn
|
def get_parameter_label(parameter):
"""
Get a number in a unified format as label for the hdf5 file.
"""
return "{:.5f}".format(parameter)
|
def integer_to_octet_string_primitive(length: int, integer: int) -> bytes:
"""Convert a nonnegative integer to an octet string."""
# https://tools.ietf.org/html/rfc8017#section-4.1
if integer >= 256 ** length:
raise ValueError(f"{integer} >= 256 ** {length}")
index = 0
digits = [0] * length
while integer:
digits[index] = integer % 256
integer //= 256
index += 1
return bytes(reversed(digits))
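# Quick check against RFC 8017 I2OSP semantics (illustrative, not from the original source):
print(integer_to_octet_string_primitive(4, 65536))  # -> b'\x00\x01\x00\x00'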
|
def clean_item(item, artist, languages):
"""
    Leave only the needed fields from the response
:param item: raw json item
:param artist: dict artist info
:param languages: str language
:return: dict
"""
return {
"id": item["id"],
"danceStyle": item["fields"]["Name"],
"dateTime": item["fields"]["DateTime(GMT)"],
"artists": artist,
"language": languages
}
|
def p2f(p):
"""Convert a path (including volume name) into a filename)
"""
i = p.find(":")
assert i!=(-1), \
"Unexpected path format: %s, expecting volume name" % p
return p[i+1:]
|
def canonical_base_url(base_path):
"""
Make given "basePath" a canonical base URL which can be prepended to paths starting with "/".
"""
return base_path.rstrip('/')
|
def parse_local_mounts(xs):
"""process block device info returned by device-scanner to produce
a legacy version of local mounts
"""
return [(d["source"], d["target"], d["fs_type"]) for d in xs]
|
def h(level: int, text: str) -> str:
"""Wrap text into an HTML `h` tag."""
return f"<h{str(level)}>{text}</h{level}>"
|
def clean_float(v):
"""Remove commas from a float"""
if v is None or not str(v).strip():
return None
return float(str(v).replace(',', '').replace(' ',''))
|
def replace_simple_tags(string, from_tag="italic", to_tag="i", to_open_tag=None):
"""
Replace tags such as <italic> to <i>
This does not validate markup
"""
if to_open_tag:
string = string.replace("<" + from_tag + ">", to_open_tag)
elif to_tag:
string = string.replace("<" + from_tag + ">", "<" + to_tag + ">")
string = string.replace("<" + from_tag + "/>", "<" + to_tag + "/>")
else:
string = string.replace("<" + from_tag + ">", "")
string = string.replace("<" + from_tag + "/>", "")
if to_tag:
string = string.replace("</" + from_tag + ">", "</" + to_tag + ">")
else:
string = string.replace("</" + from_tag + ">", "")
return string
|
def waveindex(value):
"""converts a wavenumber to index. Assuming there are 6000 cm^-1 and
8 points/cm^-1.
"""
index = int((6000-value)*8)
return index
|
def rotate_list(in_list, n):
"""
Rotates the list. Positive numbers rotate left. Negative numbers rotate right.
"""
return in_list[n:] + in_list[:n]
|
def impedance(vp,rho):
"""
    This function calculates acoustic impedance as the product of velocity (vp) and density (rho)
"""
ai = vp*rho
return ai
|
def is_list_of_dict(l):
"""
Checks if a list is entirely composed of dictionaries
l ([]): List of any type
returns (bool): True if l is entirely composed of dicts
"""
return all(type(x) == dict for x in l)
|
def sort_and_count_inversions(aList):
"""Return an inversion count and sorted list"""
inversionCount = 0
sortedList = []
n = len(aList)
# Check base case
if n <= 1:
# If the list has 1 or 0 elements, there are no inversions
# and nothing to sort
return 0, aList
# Recursively call for first half of list
firstCount, firstList = sort_and_count_inversions(aList[0:int(n/2)])
# Recursively call for second half of list
secondCount, secondList = sort_and_count_inversions(aList[int(n/2):])
# Merge the two lists together while looking for split inversions
firstLength = len(firstList)
secondLength = len(secondList)
i = 0
j = 0
for z in range(n):
# Make sure we won't try to access past the end of the array
        # If we've reached the end of the first array, then
# add the element from the second array.
if i == firstLength:
sortedList.append(secondList[j])
j += 1
# If we've reached the end of the second array, then add
# the element from the first array
elif j == secondLength:
sortedList.append(firstList[i])
i += 1
# The normal case (before we've reached the end of the arrays)
elif firstList[i] < secondList[j]:
sortedList.append(firstList[i])
i += 1
else:
sortedList.append(secondList[j])
j += 1
# Here are some split inversions!
# ...which is equal to the number of items remaining
# in the first list.
inversionCount += firstLength - i
# Add the non-split inversions for the final total of inversions
inversionCount += firstCount + secondCount
return inversionCount, sortedList
|
def group_normalized_count(arr):
"""
aggregation: inverse of array length
"""
return 1.0/float(len(arr))
|
def city_ids(request):
"""Return a list of OpenWeatherMap API city ID."""
return (6434841, 2992790,)
|
def gravity_to_deg_plato(sg):
"""Convert gravity to degrees Plato.
Parameters
----------
sg : float
Original gravity, like 1.053
Returns
-------
deg_plato : float
Degrees Plato, like 13.5
"""
return 250. * (sg - 1.)
|
def lookup_clutter_geotype(clutter_lookup, population_density):
"""
Return geotype based on population density
Parameters
----------
clutter_lookup : list
A list of tuples sorted by population_density_upper_bound ascending
(population_density_upper_bound, geotype).
population_density : float
The current population density requiring the lookup.
"""
highest_popd, highest_geotype = clutter_lookup[2]
middle_popd, middle_geotype = clutter_lookup[1]
lowest_popd, lowest_geotype = clutter_lookup[0]
if population_density < middle_popd:
return lowest_geotype
elif population_density > highest_popd:
return highest_geotype
else:
return middle_geotype
|
def add_description(citation, location):
"""Create metadata for additional descriptions,
currently one for local location and one for preferred citation
Parameters
----------
citation : str
The preferred citation
location : str
        Information on how to access the data locally
Returns
-------
descriptions : list(dict)
        List with the two additional description dictionaries
"""
descriptions = [
{ "description": citation,
"lang": { "id": "eng", "title": {"en": "English"} },
"type": { "id": "citation-access",
"title": {"en": "Citation and access information"} }
},
{ "description": location,
"lang": { "id": "eng", "title": {"en": "English"} },
"type": { "id": "location", "title": {"en": "Local host"} }
}]
return descriptions
|
def build_id(*elements, delimiter='.'):
"""
:param elements:
:param str delimiter:
:return: Strings joined by delimiter "."
:rtype: str
"""
return delimiter.join(map(str, elements))
|
def get_registration_response_key(question):
"""
For mapping schemas to schema blocks:
Answer ids will map to the user's response
"""
return question.get('qid', '') or question.get('id', '')
|
def combine(cr1, cr2):
""" Combine a Curation metadata and a Canvas metadata based Curation search
result.
"""
if cr1['curationHit']:
has_cur = cr1
has_can = cr2
else:
has_cur = cr2
has_can = cr1
has_cur['canvasHit'] = has_can['canvasHit']
return has_cur
|
def _format_items_to_db(item_list: list) -> str:
"""
Returns a database-ready string containing all the items
"""
db_items = ""
for item in item_list:
db_items += item.name + ";"
# remove the last ;
return db_items[: len(db_items) - 1]
|
def _handle_kind(raw):
"""Get kind."""
if 'eeg' in raw and ('ecog' in raw or 'seeg' in raw):
        raise ValueError('Both EEG and iEEG channels found in data. '
                         'There is currently no specification on how '
                         'to handle this data. Please proceed manually.')
elif 'meg' in raw:
kind = 'meg'
elif 'ecog' in raw or 'seeg' in raw:
kind = 'ieeg'
elif 'eeg' in raw:
kind = 'eeg'
else:
        raise ValueError('No MEG/EEG/iEEG channels found in data. '
                         'Please use raw.set_channel_types to set the '
                         'channel types in the data.')
return kind
|
def _GetPackageUri(package_name):
"""Returns the URI for the specified package name."""
return 'fuchsia-pkg://fuchsia.com/%s' % (package_name)
|
def title(snake_case: str):
"""Format snake case string as title."""
return snake_case.replace("_", " ").strip().title()
|
def create_envvar_file(env_file_path, envvars):
"""
Writes envvar file using env var dict
:param env_file_path: str, path to file to write to
:param envvars: dict, env vars
:return:
"""
with open(env_file_path, "w+") as f:
for key, value in envvars.items():
f.write("{}={}\n".format(key, value))
return True
|
def weighting_syntactic_role(entity_role):
"""
Return weight given an entity grammatical role.
+-----------+--------+
| EGrid Tag | Weight |
+===========+========+
| S | 3 |
+-----------+--------+
| O | 2 |
+-----------+--------+
| X | 1 |
+-----------+--------+
| dash | 0 |
+-----------+--------+
"""
if entity_role == u"S":
return 3
elif entity_role == u"O":
return 2
elif entity_role == u"X":
return 1
return 0
|
def format_process_state(process_state):
"""
Return a string formatted representation of the given process state
:param process_state: the process state
:return: string representation of process state
"""
return '{}'.format(process_state.capitalize() if process_state else None)
|
def absolute_path(dependency, current_path):
"""
Refactors `self::` and `super::` uses to absolute paths.
:param dependency:
:param current_path:
:return:
"""
path_elements = dependency.split('::')
if path_elements[0] == 'self':
path_elements[0] = current_path
elif path_elements[0] == 'super':
# count number of supers
count = 0
while path_elements[0] == 'super':
path_elements.pop(0)
count += 1
path_elements.insert(0, '::'.join(current_path.split('::')[:-count]))
else:
return dependency
return '::'.join(path_elements)
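# Illustrative resolution of relative Rust-style paths (hypothetical module names,
# not from the original source):
print(absolute_path("self::parser", "crate::frontend"))                 # -> "crate::frontend::parser"
print(absolute_path("super::lexer::Token", "crate::frontend::parser"))  # -> "crate::frontend::lexer::Token"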
|
def strescape(txt):
"""
Convert bytes or text to a c-style escaped string.
"""
if type(txt) == bytes:
txt = txt.decode("cp1251")
txt = txt.replace("\\", "\\\\")
txt = txt.replace("\n", "\\n")
txt = txt.replace("\r", "\\r")
txt = txt.replace("\t", "\\t")
txt = txt.replace('"', '\\"')
return txt
|
def _fix_basebox_url(url):
"""
Kinda fix a basebox URL
"""
if not url.startswith('http'):
url = 'http://%s' % url
if not url.endswith('/meta'):
url += '/meta'
return url
|
def build_insert(table, to_insert):
"""
Build an insert request.
Parameters
----------
table : str
Table where query will be directed.
to_insert: iterable
The list of columns where the values will be inserted.
Returns
-------
str
Built query string.
"""
return (
f"INSERT INTO \"{table}\" (" + ", ".join(f"{w}" for w in to_insert) +
") VALUES (" + ", ".join(f":{w}" for w in to_insert) + ")"
)
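# Illustrative output (hypothetical table and columns, not from the original source):
print(build_insert("users", ["name", "age"]))
# -> INSERT INTO "users" (name, age) VALUES (:name, :age)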
|
def check_mantarray_serial_number(serial_number: str) -> str:
"""Check that a Mantarray Serial Number is valid."""
if len(serial_number) > 9:
return "Serial Number exceeds max length"
if len(serial_number) < 9:
return "Serial Number does not reach min length"
if serial_number[:2] != "M0":
return f"Serial Number contains invalid header: '{serial_number[:2]}'"
for char in serial_number[2:]:
if not char.isnumeric():
return f"Serial Number contains invalid character: '{char}'"
if int(serial_number[2:4]) < 20:
return f"Serial Number contains invalid year: '{serial_number[2:4]}'"
if int(serial_number[4:7]) < 1 or int(serial_number[4:7]) > 366:
return f"Serial Number contains invalid Julian date: '{serial_number[4:7]}'"
return ""
|
def get_damptime(conf) :
"""Parse pressure damp from configuration file
Args:
conf: Configuration data.
Returns:
dampt1: Pressure damp in processing.
dampt2: Pressure damp in post-processing.
"""
    try:
        damptime = float(conf['Reactor']['damptime'])
    except (KeyError, TypeError, ValueError):
        damptime = 1.0
if damptime>1E-13 :
dampt1="*(1-exp(-"+"{:.6E}".format(damptime)+"*t))^2"
dampt2="*(1-exp(-"+"{:.6E}".format(damptime)+"*timei))^2"
else :
dampt1=""
dampt2=""
return dampt1, dampt2
|
def build_training_response(mongodb_result, hug_timer, remaining_count):
"""
For reducing the duplicate lines in the 'get_single_training_movie' function.
"""
return {'imdbID': mongodb_result['imdbID'],
'plot': mongodb_result['plot'],
'year': mongodb_result['year'],
'poster_url': mongodb_result['poster'],
'actors': mongodb_result['actors'],
'director': mongodb_result['director'],
'genres': mongodb_result['genre'],
'title': mongodb_result['title'],
'imdbRating': mongodb_result['imdbRating'],
'remaining': remaining_count,
'success': True,
'valid_key': True,
'took': float(hug_timer)}
|
def trapezoid_area(base_minor, base_major, height):
"""Returns the area of a trapezoid"""
return height*((base_minor + base_major)/2)
|
def reduceWith(reducer, seed, iterable):
"""
reduceWith takes reducer as first argument, computes a reduction over iterable.
Think foldl from Haskell.
reducer is (b -> a -> b)
Seed is b
iterable is [a]
reduceWith is (b -> a -> b) -> b -> [a] -> b
"""
accumulation = seed
for value in iterable:
accumulation = reducer(accumulation, value)
return accumulation
|
def calc_AIC_base10_checksum(code):
"""Check if a string checksum char in the base10 representation is correct.
Parameters
----------
    code : str
        The string containing the code used to generate the checksum
Returns
-------
str
The checksum value.
"""
xn = [2*int(code[i]) for i in (1,3,5,7)]
p = 0
for x in xn:
p = p + (x // 10) + (x % 10)
d = 0
for i in (0,2,4,6):
d = d + int(code[i])
return str((p + d)%10)
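# A worked example computed directly from the function above (illustrative code string,
# not a real AIC): digits at odd positions are doubled and digit-summed (p = 4+8+3+7 = 22),
# digits at even positions are summed (d = 1+3+5+7 = 16), checksum = (22 + 16) % 10.
print(calc_AIC_base10_checksum("12345678"))  # -> "8"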
|
def get_bad_query_summary(queries_above_threshold, total):
"""generates the bad query summary"""
total_suspect = len(queries_above_threshold)
percent = 0.0
if total_suspect > 0:
percent = (float(total_suspect) / float(total)) * float(100.0)
return "\nsuspect queries totals: %i/%i - %.2f%%\n" % (
total_suspect,
total,
percent,
)
|
def remove_key_from_numpy_docstring_section(numpy_docstring_section, key):
"""Function to remove a specific key from a section of a numpy docstring.
For example when combining docstrings with ``combine_split_mixin_docs``
both docstrings may have a particular attribute listed so it is
    necessary to remove one when merging the two.
"""
docstring_section_split = numpy_docstring_section.split("\n")
key_location = -1
# first find the location of the list element containing "key :"
for docstring_section_single_no, docstring_section_single in enumerate(
docstring_section_split
):
key_location_section_single = docstring_section_single.find(f"{key} :")
if key_location_section_single >= 0:
if key_location >= 0:
raise ValueError(
f"key (specifically '{key } :') found twice in numpy_docstring_section"
)
else:
key_location = docstring_section_single_no
if key_location < 0:
raise ValueError(
f"key (specifically '{key } :') is not present in numpy_docstring_section"
)
delete_keys = []
# next find the elements after the "key :" element which are the description
# for the key
# note, this can be multiple elements as the description can be split over
# multiple lines
# search from key_location until the next "" value or end of the list
for docstring_section_single_no in range(
key_location, len(docstring_section_split) - 1
):
delete_keys.append(docstring_section_single_no)
if docstring_section_split[docstring_section_single_no] == "":
break
# delete the key name and the key description lines
for delete_key in reversed(delete_keys):
del docstring_section_split[delete_key]
modified_docstring_section = "\n".join(docstring_section_split)
return modified_docstring_section
|
def persistence(n):
"""
Write a function, persistence, that takes in a positive parameter num and returns its multiplicative persistence,
which is the number of times you must multiply the digits in num until you reach a single digit.
:param n: an integer value.
:return: the numbers of times the digits in 'n' need to be multiplied to get to a single digit.
"""
if n < 10:
return 0
total = 1
for i in str(n):
total = total * int(i)
return 1 + persistence(total)
|
def find_all_available_seats(seats):
"""Find and return seat numbers that are unassigned.
:param seats: dict - seating chart.
    :return: list - list of seat numbers available for reserving.
"""
available = []
for seat_num, value in seats.items():
if value is None:
available.append(seat_num)
return available
|
def _SortSimilarPatterns(similar_patterns):
"""Sorts a list of SimilarPatterns, leaving the highest score first."""
return sorted(similar_patterns, key=lambda x: x.score, reverse=True)
|
def encode_shift(s: str):
"""
    returns encoded string by shifting every lowercase character forward by 5 in the alphabet.
"""
return "".join([chr(((ord(ch) + 5 - ord("a")) % 26) + ord("a")) for ch in s])
|
def averageScore(userScoreD):
"""
userScoreD is the dictionary that contains user scores.
Returns the average user's score on a single item
"""
    total = 0
    count = 0
    # go over scores in {userID:score,...} dictionary
    for score in userScoreD.values():
        total += score
        count += 1
    # return average users score for an item
    return total/count
|
def align_down(alignment, x):
"""Rounds x down to nearest multiple of the alignment."""
a = alignment
return (x // a) * a
|
def despace(line):
"""
Removes single spaces from column names in string.
e.g., "CONTAINER ID NAME" -> "CONTAINERID NAME"
This is done so that pandas can read space-delimited files without
separating headers with names containing spaces.
"""
parts = line.split(' ')
parts = [p if p else ' ' for p in parts]
return ''.join(parts)
|
def is_first_line(line):
"""
Returns true if line is the first line of a new file
"""
return line.split("|")[0].strip() == "1"
|
def card_matches(card, hand):
"""Checks if the colour/value matches the card in the players hand"""
wilds = ["wild", "wild+4"]
matches = False
for player_card in hand:
if matches:
break
elif player_card[0] in wilds:
matches = True
elif card[0] in wilds and player_card[0] == card[1]:
matches = True
elif player_card[0] == card[0] or player_card[1] == card[1]:
matches = True
return matches
|
def get_fem_word(masc_word, words):
"""
:param masc_word: masculine word to convert
    :param words: list of English-Feminine-Masculine word triples
:return: feminine inflection of masculine word
"""
for _, fem_word, word in words:
if masc_word == word:
return fem_word
raise ValueError(masc_word)
|
def split_at(string, sep, pos):
"""
    Return string split at the desired separator.
Args:
        string (str): The supplied string that will be split.
        sep (str): The desired separator to use for the split.
        pos (int): The desired occurrence of the defined separator
            within the supplied string and hence the point of the split
            operation.
    Returns:
        list: A list of the split string
Examples:
>>> isc_string = 'option domain-name "example.org";'
>>> shared.utils.split_at(isc_string, ' ', 2)
['option domain-name', '"example.org";']
"""
string = string.split(sep)
return [sep.join(string[:pos]), sep.join(string[pos:])]
|
def vue(value: str) -> str:
"""Encierra el valor en doble llaves 'value' -> '{{ value }}'."""
return "{{ %s }}" % value
|
def facilityidstringtolist(facilityidstring):
"""
    This method receives a comma-separated string of facility ids and converts it to a list with one facility id at each index.
:param facilityidstring: string with all the facility id's
:return: a list containing the facility id's as strings.
"""
facilityidlist = facilityidstring.split(",")
return facilityidlist
|
def correct_other_vs_misc(rna_type, _):
"""
Given 'misc_RNA' and 'other' we prefer 'other' as it is more specific. This
will only select 'other' if 'misc_RNA' and other are the only two current
rna_types.
"""
if rna_type == set(['other', 'misc_RNA']):
return set(['other'])
return rna_type
|
def num_splits(s, d):
"""Return the number of ways in which s can be partitioned into two
sublists that have sums within d of each other.
>>> num_splits([1, 5, 4], 0) # splits to [1, 4] and [5]
1
>>> num_splits([6, 1, 3], 1) # no split possible
0
>>> num_splits([-2, 1, 3], 2) # [-2, 3], [1] and [-2, 1, 3], []
2
>>> num_splits([1, 4, 6, 8, 2, 9, 5], 3)
12
"""
"*** YOUR CODE HERE ***"
def helper(s, diff):
if not s:
return 1 if abs(diff) <= d else 0
first_el = s[0]
s = s[1:]
return helper(s, diff + first_el) + helper(s, diff - first_el)
return helper(s, 0) // 2
|
def cond(predicate, consequence, alternative=None):
"""
Function replacement for if-else to use in expressions.
>>> x = 2
>>> cond(x % 2 == 0, "even", "odd")
'even'
>>> cond(x % 2 == 0, "even", "odd") + '_row'
'even_row'
"""
if predicate:
return consequence
else:
return alternative
|
def SelectColumn(lig_dict, colname):
"""
    Prune the dictionary; only attributes in colname will be kept.
    :param lig_dict: a tree like dictionary
    :param colname: which attributes you want to keep.
:return: a new dictionary
"""
lig_new = dict()
for k in lig_dict:
lig_new[k] = {sk:v for sk, v in lig_dict[k].items() if sk in colname}
return lig_new
|