content (stringlengths 39–9.28k) | sha1 (stringlengths 40) | id (int64 8–710k)
---|---|---|
def round_digits(
v: float, num_digits: int = 2, use_thousands_separator: bool = False
) -> str:
"""
    Round a float and return a string representing the formatted number.
    :param v: value to convert
    :param num_digits: number of digits to round v to; if None, v is not
        rounded (Default value = 2)
:param use_thousands_separator: use "," to separate thousands (Default value = False)
:returns: str with formatted value
"""
if (num_digits is not None) and isinstance(v, float):
fmt = "%0." + str(num_digits) + "f"
res = float(fmt % v)
else:
res = v
if use_thousands_separator:
res = "{0:,}".format(res) # type: ignore
res_as_str = str(res)
return res_as_str
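# Illustrative usage (added example, not part of the original snippet):
print(round_digits(12345.6789, use_thousands_separator=True))  # 12,345.68
print(round_digits(3.14159, num_digits=3))                     # 3.142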
|
38873d5dd6d1cee6cf48b9ff2f046e59007822fa
| 34,206 |
import click
def format_files(filenames):
"""Formats a list of file names as terminal output."""
return "\n".join([click.format_filename(fn) for fn in filenames])
|
3957fc5266ab9d355475cb3dd5c3171f928514e3
| 507,356 |
from typing import Tuple
from typing import List
def _parse_prefix(prefix: str) -> Tuple[str, List[int]]:
"""Parse a --muscle-prefix argument.
This is like a Reference, but not quite, because the
initial identifier may be omitted. That is, [1][2] is
also a valid prefix.
This parses an initial identifier, subsequent identifiers
separated by periods, then a list of square-bracketed integers.
Args:
prefix: The prefix to parse.
Returns:
The identifier sequence and the list of ints.
"""
def parse_identifier(prefix: str, i: int) -> Tuple[str, int]:
name = str()
while i < len(prefix) and prefix[i] not in '[.':
name += prefix[i]
i += 1
return name, i
def parse_number(prefix: str, i: int) -> Tuple[int, int]:
number = str()
while i < len(prefix) and prefix[i] in '0123456789':
number += prefix[i]
i += 1
return int(number), i
name = str()
index = list() # type: List[int]
i = 0
if i == len(prefix):
return name, index
idt, i = parse_identifier(prefix, i)
name += idt
while i < len(prefix) and prefix[i] == '.':
name += '.'
part, i = parse_identifier(prefix, i + 1)
name += part
while i < len(prefix) and prefix[i] == '[':
nmb, i = parse_number(prefix, i + 1)
index.append(nmb)
        if i == len(prefix) or prefix[i] != ']':
raise ValueError('Missing closing bracket in'
' --muscle-prefix.')
i += 1
if i < len(prefix):
raise ValueError(('Found invalid extra character {} in'
' --muscle-prefix.').format(prefix[i]))
return name, index
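# Illustrative usage (added example, not part of the original snippet):
print(_parse_prefix('model.submodel[1][2]'))  # ('model.submodel', [1, 2])
print(_parse_prefix('[1][2]'))                # ('', [1, 2])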
|
7647aa55244d7191edd189743dec15e4a61526db
| 380,933 |
def has_finished(directory):
"""Returns True if the experiments in this directory appear to have
finished.
"""
if (directory / "result.txt").exists():
return True
if (directory / "evaluation.json").exists():
        return True
    return False
|
771078580dfb054d28b44d307d0c57fbfbc0d40e
| 145,839 |
def moyenne_trois_nb(a : float, b : float, c : float) -> float:
"""Retourne la moyenne arithmétique des trois nombres a, b et c.
"""
return (a + b + c) / 3.0
|
e86f22f5ef9fbee2420cd11341272f6e8c54d895
| 657,311 |
def __pydonicli_declare_args__(var_dict):
"""
    Limit a dictionary, usually `locals()`, to exclude modules and functions so
    that it contains only key:value pairs of variables.
"""
vars_only = {}
for k, v in var_dict.items():
dtype = v.__class__.__name__
if dtype not in ['module', 'function']:
vars_only[k] = v
return vars_only
|
0b38f0df8eb72993ee895a9a85c8abdc92e426cb
| 665,982 |
def ceildiv(a, b):
"""Returns the ceiling of a / b, equivalent to math.ceil(a / b)"""
return (a + b - 1) // b
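# Illustrative usage (added example, not part of the original snippet):
print(ceildiv(7, 2))   # 4, since 7/2 = 3.5 rounds up to 4
print(ceildiv(10, 5))  # 2, exact division is unchanged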
|
dcd080ba8f3c1d813050672264b3da007f2d7189
| 356,303 |
import math
import random
def optimizar(dominio, temperatura = 10e32, tasa_enfriamiento = 0.95):
"""Algoritmo de optimización estocástica simulated annealing.
Entradas:
dominio (Dominio)
Un objeto que modela el dominio del problema que se quiere aproximar.
temperatura (float/int)
Temperatura inicial del algoritmo, se recomienda un número alto
tasa_enfriamiento (float)
Porcentaje de enfriamiento de la temperatura durante cada iteración, el valor
por defecto es 0.95, lo que indica una tasa de enfriamiento del 5%.
Salidas:
(estructura de datos) Estructura de datos según el dominio, que representa una
aproximación a la mejor solución al problema.
"""
sol = dominio.generar()
costo = dominio.fcosto(sol)
while temperatura > 0.01:
solTemp = dominio.vecino(sol)
costoTemp = dominio.fcosto(solTemp)
p = math.exp(-abs(costoTemp-costo)/temperatura)
pAzar = random.uniform(0, 1)
if (costoTemp < costo) or (pAzar <= p):
sol = solTemp
costo = costoTemp
temperatura *= tasa_enfriamiento
return sol
|
0ccb5bde943bc7c60127da0007e3adbd6af2c0a6
| 674,776 |
def _get_ent_parameters(ent):
"""Gathers float and boolean parameters for the given entity.
"""
param_ids = type(ent).float_ids + type(ent).bool_ids
return {param_id: getattr(ent, param_id) for param_id in param_ids}
|
c7fe76be022a4c523db528e319d4d1c740f36861
| 191,276 |
def clean_invalid(df, flag='flag'):
""" Remove rows where flag is True. Is `flag` is not in the columns, return
the same dataframe.
Parameters
----------
flag : str
Column used as flag. Default if `flag`.
"""
df_new = df.copy()
_meta = df._metadata
if flag in df.columns:
        filtr = (df[flag] == False)
df_new = df_new[filtr]
df_new._metadata = _meta
return df_new
|
17149d5f72cb8ca03bea52e429d0f15717a96b85
| 134,656 |
def run_result_key_to_result_summary_key(run_result_key):
"""Returns the TaskResultSummary ndb.Key for this TaskRunResult.key.
"""
assert run_result_key.kind() == 'TaskRunResult', run_result_key
return run_result_key.parent()
|
085acfb0b5530c7c5260e1905ef3171ccd1e7d53
| 274,332 |
import warnings
def compute_accuracy(true_positives, n_ref, n_est):
"""Compute accuracy metrics.
Parameters
----------
true_positives : np.ndarray
Array containing the number of true positives at each time point.
n_ref : np.ndarray
Array containing the number of reference frequencies at each time
point.
n_est : np.ndarray
Array containing the number of estimate frequencies at each time point.
Returns
-------
precision : float
``sum(true_positives)/sum(n_est)``
recall : float
``sum(true_positives)/sum(n_ref)``
acc : float
``sum(true_positives)/sum(n_est + n_ref - true_positives)``
"""
true_positive_sum = float(true_positives.sum())
n_est_sum = n_est.sum()
if n_est_sum > 0:
precision = true_positive_sum/n_est.sum()
else:
warnings.warn("Estimate frequencies are all empty.")
precision = 0.0
n_ref_sum = n_ref.sum()
if n_ref_sum > 0:
recall = true_positive_sum/n_ref.sum()
else:
warnings.warn("Reference frequencies are all empty.")
recall = 0.0
acc_denom = (n_est + n_ref - true_positives).sum()
if acc_denom > 0:
acc = true_positive_sum/acc_denom
else:
acc = 0.0
return precision, recall, acc
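# Illustrative usage (added example, not part of the original snippet):
import numpy as np
tp, n_ref, n_est = np.array([1, 2]), np.array([2, 2]), np.array([2, 3])
print(compute_accuracy(tp, n_ref, n_est))  # (0.6, 0.75, 0.5)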
|
8dbd7cfc38af89d823b6e7051d94667523350d28
| 475,084 |
def _find_closing_brace(string, start_pos):
"""Finds the corresponding closing brace after start_pos."""
bracks_open = 1
escaped = False
for idx, char in enumerate(string[start_pos:]):
if char == '(':
if not escaped:
bracks_open += 1
elif char == ')':
if not escaped:
bracks_open -= 1
if not bracks_open:
return start_pos + idx + 1
if char == '\\':
escaped = not escaped
else:
escaped = False
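# Illustrative usage (added example, not part of the original snippet); the
# returned index points just past the matching closing parenthesis:
print(_find_closing_brace("a(b(c))", 2))  # 7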
|
857d2cd17cca001866e1b8a6689d788057ab0440
| 215,338 |
def _to_list(x, n):
"""Converts x into list by repeating it n times.
If x is already a list and it has length n, returns x.
Else, if x is a list and has different length, raises ValueError."""
if isinstance(x, list):
if len(x) != n:
raise ValueError(
'''If list is passed, it should have len(x) == n.
Found: n = %d, len(x) = %d''' % (n, len(x))
)
return x
return [x] * n
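# Illustrative usage (added example, not part of the original snippet):
print(_to_list(3, 4))       # [3, 3, 3, 3]
print(_to_list([1, 2], 2))  # [1, 2]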
|
61b3720882e78347d6a20faa946274b02eab809d
| 518,615 |
def _get_lock_speed(postgame, de_data):
"""Get lock speed flag."""
if de_data is not None:
return de_data.lock_speed
if postgame is not None:
return postgame.lock_speed
return None
|
629cff33d180ef777d793bc2be2f9a3e7877481d
| 611,904 |
def build(name, builder):
"""Wrapper to turn (name, ctx) -> val method signatures into (ctx) -> val."""
return lambda ctx: builder(name, ctx)
|
2a09dc8685f8423b61c8ca70cde7746054af8384
| 111,264 |
def rectangle_intersects(recta, rectb):
"""
Return True if ``recta`` and ``rectb`` intersect.
>>> rectangle_intersects((5,5,20, 20), (10, 10, 1, 1))
True
>>> rectangle_intersects((40, 30, 10, 1), (1, 1, 1, 1))
False
"""
ax, ay, aw, ah = recta
bx, by, bw, bh = rectb
return ax <= bx + bw and ax + aw >= bx and ay <= by + bh and ay + ah >= by
|
e434f7e5fe88e3cd4192cf0a76fba2e54ca04481
| 489,726 |
def pop_target(df, target_col, to_numpy=False):
"""Extract target variable from dataframe and convert to nympy arrays if required
Parameters
----------
df : pd.DataFrame
Dataframe
target_col : str
Name of the target variable
to_numpy : bool
Flag stating to convert to numpy array or not
Returns
-------
pd.DataFrame/Numpy array
Subsetted Pandas dataframe containing all features
pd.DataFrame/Numpy array
Subsetted Pandas dataframe containing the target
"""
df_copy = df.copy()
target = df_copy.pop(target_col)
if to_numpy:
df_copy = df_copy.to_numpy()
target = target.to_numpy()
return df_copy, target
|
53607b39b338056a3265ff3dc1cfa1e4a2d9f397
| 553,159 |
import math
def haversine(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2
c = 2 * math.asin(math.sqrt(a))
    r = 6371000 # Radius of earth in meters. Use 6371 for kilometers or 3956 for miles
return c * r
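# Illustrative usage (added example, not part of the original snippet):
# great-circle distance Paris -> London, roughly 344 km.
print(haversine(2.3522, 48.8566, -0.1276, 51.5072))  # ~3.44e5 (meters)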
|
04d79cc70d33f8a1e220334adcc73cc6e84dc8bd
| 195,052 |
import torch
def qzCx_sample(params):
"""
Sample from posterior q(z|x).
Assume normal with diagonal covariance.
Params: [B, LD, 2]
Expect mu = params[..., 0]
logvar = params[..., 1]
"""
mu = params.select(-1, 0)
logvar = params.select(-1, 1)
sigma_sqrt = torch.exp(0.5 * logvar)
epsilon = torch.randn_like(sigma_sqrt)
return mu + sigma_sqrt * epsilon
|
6f82b6737a491b1e995b9e430cfbf993a19b838c
| 163,657 |
def apply_tf(data, tf):
"""Apply function `tf` (transformation function) if specified
and return result. Return unmodified data in other case"""
if tf:
return tf(data)
else:
return data
|
86d381f5df2362cd614ec252fd2650f2e0086d0d
| 40,878 |
def lines_to_list(list, columns, delimiter):
"""
    convert a list of lines read from a file to a list of lists by specifying the number of columns and the delimiter
parameters:
list: list, lines returned by open_text()
columns: integer, number of columns.
delimiter: the delimiter to split a line
"""
new_list = [list[i:i+columns] for i in range(0, len(list), columns)]
# new_list = zip(*[iter(list)]*columns)
header = []
for h in new_list[0]:
header.append(str(h.decode()).split(delimiter)[0])
content = [header]
for l in new_list:
line = []
for c in l:
line.append(''.join(str(c).split(delimiter)[1:]))
content.append(line)
return content
|
896351f2149f668259c807f1eec33cc0e1cd8ad7
| 402,844 |
def _generate_output_dataframe(data_subset, defaults):
"""
Generates an output dataframe from the given subset of user-provided
data, the given column names, and the given default values.
Parameters
----------
data_subset : DataFrame
A DataFrame, usually from an AssetData object,
that contains the user's input metadata for the asset type being
processed
defaults : dict
A dict where the keys are the names of the columns of the desired
output DataFrame and the values are a function from dataframe and
column name to the default values to insert in the DataFrame if no user
data is provided
Returns
-------
DataFrame
A DataFrame containing all user-provided metadata, and default values
wherever user-provided metadata was missing
"""
# The columns provided.
cols = set(data_subset.columns)
desired_cols = set(defaults)
# Drop columns with unrecognised headers.
data_subset.drop(cols - desired_cols,
axis=1,
inplace=True)
# Get those columns which we need but
# for which no data has been supplied.
for col in desired_cols - cols:
# write the default value for any missing columns
data_subset[col] = defaults[col](data_subset, col)
return data_subset
|
c5caa9ac6eb19eb6e6e9467ab59684aa8210fb60
| 585,075 |
def try_open_handle_file(filename: str) -> str:
"""Try to open & read a file. Panic with a helpful error message if the file cannot be found or opened.
:param filename: the filename of the file to open
:returns: the body of the file, if successful
"""
try:
        with open(filename) as f:
return f.read()
except FileNotFoundError:
print(
f'Fatal: The file `{filename}` does not exist and cannot be opened. Please check the filename!')
exit(1)
except IOError as error:
print(
            f'Fatal: The file `{filename}` cannot be opened! An exception was raised:\n\n', str(error))
exit(1)
|
858a7bd62cc6cf8c4c054db0b003a77c6e282c1f
| 246,572 |
def _format_float(input_float):
"""
Takes a float and returns a formatted String
:param input_float: Floating point number
:return: String from a floating point number,
rounded to two decimals
"""
rounded = round(input_float, 2)
as_string = str(rounded)
return as_string
|
ad994c97ef2a13192453d35f8174aeeef31d3bfc
| 192,284 |
def sum_rec(nums):
"""
Returns the sum of a list of numbers using recursion
Examples:
>>> sum_rec([3,1,4,1,5,9,2,6,5])
36
>>> sum_rec(range(101))
5050
>>> sum_rec(range(901))
405450
>>> sum_rec([x**3 - 2*x**2 + x - 13 for x in range(901)])
163903285937
>>> sum_rec([-1 for x in range(900)])
-900
"""
if nums == []:
return 0
else:
return sum_rec(list(nums[1:])) + nums[0]
|
ab6ff3f0203b844f2afc53af4d00be72d004c753
| 329,402 |
def choose(n, r):
"""
    Returns: How many ways there are to choose a subset of r things from a set of n things.
Computes n! / (r! (n-r)!) exactly. Returns a python long int.
From: http://stackoverflow.com/questions/3025162/statistics-combinations-in-python
"""
assert n >= 0
assert 0 <= r <= n
c = 1
for num, denom in zip(range(n, n-r, -1), range(1, r+1, 1)):
c = (c * num) // denom
return c
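# Illustrative usage (added example, not part of the original snippet):
print(choose(5, 2))   # 10, i.e. 5!/(2! * 3!)
print(choose(52, 5))  # 2598960 five-card poker hands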
|
a902324905a8e97e51460ae67a6bee4f13f533e5
| 251,693 |
import time, psutil
import warnings
def usage_log(pid, interval=1):
"""Regularly write resource usage to stdout."""
if psutil.MACOS:
warnings.warn('Disk I/O stats are not available on MacOS.')
p = psutil.Process(pid)
def get_io():
if psutil.MACOS:
# io_counters() not available on MacOS
return (0, 0, 0, 0)
else:
x = p.io_counters()
return (x.read_bytes, x.read_chars, x.write_bytes, x.write_chars)
print('time,cpu,memory,read_bytes,read_chars,write_bytes,write_chars')
p.cpu_percent()
io_before = get_io()
while True:
time.sleep(interval)
io_after = get_io()
io_rate = tuple((x1 - x0) / interval for x0, x1 in zip(io_before, io_after))
io_before = io_after
line = (time.time(), p.cpu_percent(), p.memory_info().rss) + io_rate
print(','.join(str(x) for x in line))
|
c9aa685bcfc9c9046b919309e12297c94d8e2a2d
| 405,214 |
def parse_csv_line(line):
"""
    Splits a line of comma separated values into a list of
    values. Removes leading and trailing quotes if there are
any.
"""
parsed_line = []
for x in line.split(","):
if x[0] == "\"" and x[-1] == "\"":
x = x[1:-1]
parsed_line.append(x.strip())
return parsed_line
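# Illustrative usage (added example, not part of the original snippet):
print(parse_csv_line('"a", b ,"c d"'))  # ['a', 'b', 'c d']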
|
3120e3e04480bceedf03a92ac457cebe5cbd545e
| 118,257 |
import copy
def merge_id_dicts(dicttuple):
"""
Merges two id dictionary (efficiently).
Args:
dicttuple (tuple): Tuple of two id dictionaries.
Returns:
dict: New merged dictionary
"""
dict1, dict2 = dicttuple
new = copy.deepcopy(dict1)
for key, value in dict2.items():
new.setdefault(key, []).extend(value)
return new
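# Illustrative usage (added example, not part of the original snippet):
print(merge_id_dicts(({'a': [1]}, {'a': [2], 'b': [3]})))
# {'a': [1, 2], 'b': [3]}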
|
39df02dcea2c6df35a7ef9fdd95b3076d753e657
| 579,705 |
def _dict_iteritems(dictionary):
"""Get an iterator or view on the items of the specified dictionary.
This method is Python 2 and Python 3 compatible.
"""
try:
return dictionary.iteritems()
except AttributeError:
return dictionary.items()
|
9bb95f263035bac6a2989eeb817c81ab15a4957f
| 109,767 |
def is_number(s):
"""
check if a string represents an integer or float number
parameter
--------
s: a string
return
------
    return True iff s represents a number (int or float)
"""
try:
float(s)
return True
except ValueError:
return False
|
428f71bf35df4ea99a68959ba5a20d35948a94c3
| 323,504 |
from typing import Mapping
from typing import Optional
from typing import Dict
def offsets_to_slices(
offsets: Mapping[str, int],
sizes: Mapping[str, int],
base: Optional[Mapping[str, int]] = None,
) -> Dict[str, slice]:
"""Convert offsets into slices with an optional base offset.
Args:
offsets: integer offsets from the origin along each axis.
sizes: dimension sizes for the corresponding chunks.
        base: optional base-offset to subtract from this key. This allows for
relative indexing, e.g., into a chunk of a larger Dataset.
Returns:
Slices suitable for indexing with xarray.Dataset.isel().
Raises:
ValueError: if an offset is specified for a dimension where there is no
corresponding size specified.
Example usage::
>>> offsets_to_slices({'x': 100}, sizes={'x': 10})
{'x': slice(100, 110, 1)}
>>> offsets_to_slices({'x': 100}, sizes={'x': 10}, base={'x': 100})
{'x': slice(0, 10, 1)}
"""
if base is None:
base = {}
slices = {}
for k in offsets.keys():
if k not in sizes:
raise ValueError(f"An offset was specified for dimension {k}, but we don't "
"have a chunk size for this dimension.")
for k, size in sizes.items():
offset = offsets.get(k, 0) - base.get(k, 0)
slices[k] = slice(offset, offset + size, 1)
return slices
|
a2b91b5491892bea50b70a2408b69ceacbf86d1c
| 517,453 |
def generateD3Object(wordCounts, objectLabel, wordLabel, countLabel):
"""
Generates a properly formatted JSON object for d3 use.
    Args:
        wordCounts: Dictionary mapping words to their counts.
        objectLabel: The label to identify this object.
        wordLabel: A label to identify all "words".
        countLabel: A label to identify all counts.
Returns:
The formatted JSON object.
"""
JSONObject = {}
JSONObject['name'] = str(objectLabel.encode('utf-8'))
JSONObject['children'] = []
for word, count in wordCounts.items():
JSONObject['children'].append({wordLabel: word.encode('utf-8'), countLabel: count})
return JSONObject
|
46895d25b2e713023f2cbebbd08e8683a32e7f31
| 611,034 |
def parse_generic(data, key):
"""
Returns a list of (potentially disabled) choices from a dictionary.
"""
choices = []
    for k, v in sorted(data[key].items(), key=lambda item: item[1]):
choices.append([v, k])
return choices
|
90d2f2188d5cca7adb53eebca80a80f2c46b04a7
| 13,986 |
def ret2dict(estimationresult):
"""
Convert the estimationresult to a single dictionary of key/value pairs.
The estimationresult is a list of dicts, where each dict corresponds to a head and has a `task_name`
    key. The return value is a merged dict in which every key is prefixed with its task name; the
    task-name keys, the "report" and "confusion*" keys, and any entries whose values are lists or dicts are removed.
"""
new = {}
for d in estimationresult:
name = d["task_name"]
for k, v in d.items():
if k != "task_name" and k != "report" and not k.startswith("confusion") and not isinstance(v, (list, dict)):
new[name+"_"+k] = v
return new
|
07ca212ce97f919145066036a418946df2e157a7
| 287,602 |
def make_response(data):
"""
Creates a 200-status JSON response with the provided data
and returns it to the caller
:data the data to include in the response
"""
response = { "meta": { 'status': 200 }, "data": data }
return response
|
5bfdbb137e2527ae046a48b47175147e80ad51fb
| 369,714 |
import requests
def get_status_code(host, path="/", auth=None):
""" This function retreives the status code of a website by requesting
HEAD data from the host. This means that it only requests the headers.
If the host cannot be reached or something else goes wrong, it returns
None instead.
"""
try:
if not auth:
r = requests.get(host + path)
else:
r = requests.get(host + path, auth=auth)
return r.status_code
except Exception:
return None
|
cdb0c2fd46c0f5cae6252cf13cf13670b62f0370
| 614,198 |
def fwd(coeffs, x, y):
""" Transforms projected coordinates from the Old Greek datum to GGRS87.
Params:
coeffs (float sequence): the 12 coefficients of the current Hatt map block [A0...B0...]
x, y (floats): Easting, Northing Hatt coordinates of a point
Returns:
(float tuple): Easting, Northing GGRS87 coordinates
"""
#
# | E | | A0 | | A1 + A3*x ' A2 + A4*y + A5*x | | x |
# | | = | | + |---------- ' ---------------- | | |
# | N | | B0 | | B1 + B3*x ' B2 + B4*y + B5*x | | y |
#
if len(coeffs) != 12:
raise ValueError("Wrong number of hatt coefficients.")
x2 = pow(x,2)
y2 = pow(y,2)
xy = x * y
e = coeffs[0] + coeffs[1]*x + coeffs[2]*y + coeffs[3]*x2 + coeffs[4]*y2 + coeffs[5]*xy
n = coeffs[6] + coeffs[7]*x + coeffs[8]*y + coeffs[9]*x2 + coeffs[10]*y2 + coeffs[11]*xy
return (e, n)
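# Illustrative usage (added example, not part of the original snippet): with
# A1 = B2 = 1 and all other coefficients zero, the transform is the identity.
identity_coeffs = [0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]
print(fwd(identity_coeffs, 100.0, 200.0))  # (100.0, 200.0)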
|
aa3df01059efffdc6833af43155abf228d016b8b
| 439,439 |
import re
def sanitize_id(x):
"""
Sanitize an ID similar to github_sanitize_id, but with the
following differences:
* no downcasing
* dots (.) are replaced with hyphens (which helps Python module
namespaces look better)
"""
    return re.sub(r'[^-\w ]', '', x.replace('.', '-'), flags=re.U).replace(' ', '-')
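# Illustrative usage (added example, not part of the original snippet):
print(sanitize_id('my.module v2!'))  # my-module-v2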
|
06248c0a4ae8924fe26299ecec9dceba75df26c6
| 488,334 |
def get_max_image_dimensions(img_list):
"""Find the maximum width and height of all images
Parameters
----------
img_list : list
List of images
Returns
-------
    max_hw : tuple
        Maximum (height, width) over all images
    """
    shapes = [img.shape[0:2] for img in img_list]  # numpy shape is (height, width)
    all_h, all_w = list(zip(*shapes))
    max_hw = (max(all_h), max(all_w))
    return max_hw
|
f0810e3db6ce0823e560d3a0451a066e6869e0d2
| 339,843 |
import re
def _convert_number_string(number_string):
"""
Function to convert mixed number character strings to a number.
Mapping: ``{'K': 1000, 'M': 1000000}``
Parameters
----------
number_string : str
Number string to be mapped
Returns
-------
number : int or float
Converted number, tries to return a integer if possible.
Examples
--------
    >>> _convert_number_string('64K')
    64000
    >>> _convert_number_string('0.2M')
    200000.0
"""
map_number = {'K': 1000, 'M': 1000000}
pure_number = re.sub('[A-Z]', '', number_string)
rv = int(pure_number) if pure_number.isdigit() else float(pure_number)
for i in number_string:
if i.isalpha():
rv *= map_number[i]
return rv
|
1c9afd75bbefd2877d641c3b3cf3397ae0d41c56
| 104,041 |
import torch
def local_to_global(R, t, p):
"""
Description:
q <- Rp + t
Args:
R: (N, L, 3, 3).
t: (N, L, 3).
p: Local coordinates, (N, L, ..., 3).
Returns:
q: Global coordinates, (N, L, ..., 3).
"""
assert p.size(-1) == 3
p_size = p.size()
N, L = p_size[0], p_size[1]
p = p.view(N, L, -1, 3).transpose(-1, -2) # (N, L, *, 3) -> (N, L, 3, *)
q = torch.matmul(R, p) + t.unsqueeze(-1) # (N, L, 3, *)
q = q.transpose(-1, -2).reshape(p_size) # (N, L, 3, *) -> (N, L, *, 3) -> (N, L, ..., 3)
return q
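# Illustrative sanity check (added example, not part of the original snippet):
# with identity rotations and zero translations, q equals p.
R = torch.eye(3).expand(2, 5, 3, 3)
t = torch.zeros(2, 5, 3)
p = torch.randn(2, 5, 4, 3)
assert torch.allclose(local_to_global(R, t, p), p)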
|
dc8ac861c265b186c52a93ed8c755754309fe59f
| 457,601 |
def tokenizer(sentence):
"""Split header text to tokens, add newline symbols"""
return " \n ".join(sentence.split("\n")).split(" ")
|
b64e29f0fcfa1c8cb21a101af57ac78337916d68
| 93,599 |
def aggregate_hourly(feature_df, aggs=["mean", "std"]):
"""Aggregates features to the floor of each hour using mean and standard deviation.
e.g. All values from "11:00:00" to "11:59:00" will be aggregated to "11:00:00".
"""
# group by the floor of each hour use timedelta index
agged = feature_df.groupby([feature_df.index.floor("H")]).agg(aggs)
    # flatten hierarchical column index
agged.columns = ["_".join(x) for x in agged.columns]
return agged
|
f6674d7d290c6002103dd9e3df0b26aab21a9f52
| 494,712 |
def fix_checkbox(params):
"""
Replace param_checkbox with param-indexed.
"""
pol_params = {}
    # rename checkbox parameters to their -indexed form.
for param, data in params.items():
if param.endswith("checkbox"):
base_param = param.split("_checkbox")[0]
pol_params[f"{base_param}-indexed"] = data
else:
pol_params[param] = data
return pol_params
|
92d7490d191a7708160802cf8ec565da8c6718f1
| 617,765 |
def r_size(x):
"""Returns the difference between the largest and smallest value in a collection.
Parameters
----------
x : collection of numbers
Examples
--------
>>> r_size([2,4,6,8])
6
>>> r_size({3,6,9,11})
8
"""
return max(x) - min(x)
|
9608c8e642225838155918cf188b814b24e7948f
| 510,136 |
import typing
def __read_commands(stream: typing.TextIO, n: int, func: typing.Callable[[str], str]) -> list[str]:
"""Read the last N commands from the stream excluding the current process' command.
:param stream: the TextIO object to read.
    :param n: the number of lines to read.
    :param func: a function to apply to each of the returned lines to extract a command; the string passed to func
        will have any trailing whitespace stripped.
    :return: the last n commands from the file, each processed by func.
"""
return list(map(lambda cmd: func(cmd.rstrip()),
stream.readlines()[-(n + 1):-1]))
|
c044b48db87593e786e1cdac4f86fdf1aa4612fa
| 563,347 |
from typing import Dict
from typing import Any
def convert_avg_params(checkpoint: Dict[str, Any]) -> Dict[str, Any]:
"""Converts average state dict to the format that can be loaded to a model.
Args:
checkpoint: model.
Returns:
Converted average state dict.
"""
state_dict = checkpoint["state_dict"]
avg_weights = {}
for k, v in state_dict.items():
if "avg_model" in k:
            avg_weights[k[10:]] = v  # strip the "avg_model." prefix (10 chars)
return avg_weights
|
5e6406fb9e30c9c01fe828892c9f4f507bd43b6a
| 219,639 |
from typing import Sequence
from typing import Optional
from typing import Tuple
import itertools
def dirac_notation(state: Sequence,
decimals: int = 2,
qid_shape: Optional[Tuple[int, ...]] = None) -> str:
"""Returns the wavefunction as a string in Dirac notation.
For example:
state = np.array([1/np.sqrt(2), 1/np.sqrt(2)], dtype=np.complex64)
print(dirac_notation(state)) -> 0.71|0⟩ + 0.71|1⟩
Args:
state: A sequence representing a wave function in which the ordering
mapping to qubits follows the standard Kronecker convention of
numpy.kron.
        decimals: How many decimals to include in the pretty print.
        qid_shape: Shape of the qudits; if None, a qubit register sized to
            fit the state is assumed.
Returns:
A pretty string consisting of a sum of computational basis kets
and non-zero floats of the specified accuracy.
"""
if qid_shape is None:
qid_shape = (2,) * (len(state).bit_length() - 1)
digit_separator = '' if max(qid_shape, default=0) < 10 else ','
perm_list = [
digit_separator.join(seq) for seq in itertools.product(*(
(str(i) for i in range(d)) for d in qid_shape))
]
components = []
ket = "|{}⟩"
for x in range(len(perm_list)):
format_str = "({:." + str(decimals) + "g})"
val = (round(state[x].real, decimals) +
1j * round(state[x].imag, decimals))
if round(val.real, decimals) == 0 and round(val.imag, decimals) != 0:
val = val.imag
format_str = "{:." + str(decimals) + "g}j"
elif round(val.imag, decimals) == 0 and round(val.real, decimals) != 0:
val = val.real
format_str = "{:." + str(decimals) + "g}"
if val != 0:
if round(state[x].real, decimals) == 1 and \
round(state[x].imag, decimals) == 0:
components.append(ket.format(perm_list[x]))
else:
components.append((format_str + ket).format(val, perm_list[x]))
if not components:
return '0'
return ' + '.join(components).replace(' + -', ' - ')
|
67fd30779848dbcbf818f3e34da38ece434c4ed1
| 544,550 |
from typing import Iterable
from typing import Tuple
import functools
def min_max(x: Iterable[int]) -> Tuple[int, int]:
""" Returns both the min and max of a sequence.
This, rather than calling min(x), max(x), will work when a generator (or other single-use iterator) is passed in
"""
try:
seq = iter(x)
start = next(seq)
except StopIteration:
raise ValueError('min_max() arg is an empty sequence')
return functools.reduce(lambda left, right: (min(left[0], right), max(left[1], right)), seq, (start, start))
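# Illustrative usage (added example, not part of the original snippet); works
# on a single-use iterator, unlike calling min() and max() separately:
print(min_max(iter([3, 1, 4, 1, 5])))  # (1, 5)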
|
381be58127d0e610bd9c3ac8998cda56fa172c6f
| 593,944 |
def rescale(
value: float, old_min: float, old_max: float, new_min: float, new_max: float
) -> float:
"""Map value to a new range.
Args:
value (float): Value to be mapped
old_min (float)
old_max (float)
new_min (float)
new_max (float)
Returns:
float:
"""
return (value - old_min) * (new_max - new_min) / (old_max - old_min) + new_min
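# Illustrative usage (added example, not part of the original snippet):
print(rescale(5, 0, 10, 0, 100))  # 50.0
print(rescale(0.5, 0, 1, -1, 1))  # 0.0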
|
8c91238e4555df442fad4acfba718ead02a08d20
| 397,696 |
def snap_to_cube(q_start, q_stop, chunk_depth=16, q_index=1):
"""
For any q in {x, y, z, t}
Takes in a q-range and returns a 1D bound that starts at a cube
boundary and ends at another cube boundary and includes the volume
    inside the bounds. For instance, snap_to_cube(2, 3) = [1, 18]
Arguments:
q_start (int): The lower bound of the q bounding box of volume
q_stop (int): The upper bound of the q bounding box of volume
chunk_depth (int : CHUNK_DEPTH) The size of the chunk in this
volume (use ``get_info()``)
q_index (int : 1): The starting index of the volume (in q)
Returns:
        list: [lo, hi] bounding box for the volume
"""
lo = 0
hi = 0
# Start by indexing everything at zero for our own sanity
q_start -= q_index
q_stop -= q_index
if q_start % chunk_depth == 0:
lo = q_start
else:
lo = q_start - (q_start % chunk_depth)
if q_stop % chunk_depth == 0:
hi = q_stop
else:
hi = q_stop + (chunk_depth - q_stop % chunk_depth)
return [lo + q_index, hi + q_index + 1]
|
ca4b1bc09d289ac3ecad4d8c0ceedb23ab121882
| 654,076 |
import pickle
def predict(dataX):
""" Predict dependent variable from a features vector """
    # load model
    with open('model.pickle', 'rb') as f:
        model = pickle.load(f)
# predict
predY = model.predict(dataX.values.reshape(-1, dataX.shape[1]))
return predY
|
85505d3435ad8593542851680aef7491058a0239
| 24,642 |
import six
def satisfies_requirements(obj, requirements):
"""
    Checks that an object satisfies given requirements.
Every key-value pair in the requirements object must be present in the
target object for it to be considered satisfied.
Returns True/False.
"""
for key, value in six.iteritems(requirements):
if key not in obj:
return False
if obj[key] != value:
return False
return True
|
780850f970e6320e2f388fc4e8ea5e9c77f144d1
| 386,129 |
import inspect
def bind_kwargs(kallable, **kwargs):
"""Bind keyword arguments to a callable and return as a dictionary
Args:
        kallable (callable): any callable
**kwargs: keyword arguments to bind
Returns: (dict)
"""
call_signature = inspect.signature(kallable).bind_partial(**kwargs).arguments
if 'kwargs' in call_signature:
passed_kwargs = call_signature['kwargs']
else:
passed_kwargs = call_signature
return passed_kwargs
|
005fe60a6aa418274ea2af187b3d18e392129d04
| 204,287 |
def cached_property(*args, **kwargs):
"""
Decorator that creates a property that is cached.
Keyword Arguments
-----------------
use_cache_ctrl : bool
If True, the property is cached using the Node cache_ctrl. If False, the property is only cached as a private
attribute. Default False.
expires : float, datetime, timedelta
Expiration date. If a timedelta is supplied, the expiration date will be calculated from the current time.
Ignored if use_cache_ctrl=False.
Notes
-----
Podpac caching using the cache_ctrl will be unreliable if the property depends on any non-tagged traits.
The property should only use node attrs (traits tagged with ``attr=True``).
Examples
--------
>>> class MyNode(Node):
# property that is recomputed every time
@property
def my_property(self):
return 0
# property is computed once for each object
@cached_property
def my_cached_property(self):
return 1
# property that is computed once and can be reused by other Nodes or sessions, depending on the cache_ctrl
@cached_property(use_cache_ctrl=True)
def my_persistent_cached_property(self):
return 2
"""
use_cache_ctrl = kwargs.pop("use_cache_ctrl", False)
expires = kwargs.pop("expires", None)
if args and (len(args) != 1 or not callable(args[0])):
raise TypeError("cached_property decorator does not accept any positional arguments")
if kwargs:
raise TypeError("cached_property decorator does not accept keyword argument '%s'" % list(kwargs.keys())[0])
def d(fn):
key = "_podpac_cached_property_%s" % fn.__name__
@property
def wrapper(self):
if hasattr(self, key):
value = getattr(self, key)
elif use_cache_ctrl and self.has_cache(key):
value = self.get_cache(key)
setattr(self, key, value)
else:
value = fn(self)
setattr(self, key, value)
if use_cache_ctrl:
self.put_cache(value, key, expires=expires)
return value
return wrapper
if args:
return d(args[0])
else:
return d
|
a7ba100956e42961670b679853f7090604974e72
| 455,416 |
def create_output(instance, item):
"""Create Terraform Module for Output the Defined Parameters."""
value = "${module.%s.%s}" % (instance, item)
tf_output = {"value": [value]}
return tf_output
|
6dc09767450e50d24647ef289b23d07f6b39dea6
| 112,321 |
def normalize_price(price: str) -> float:
"""Normalizes USD price with thousand separator into float value"""
return float(price.strip().replace(',', ''))
|
56d14d05ab7ef4490f4ec96b65f300558bc0cd4a
| 601,919 |
def get_exploding_values(list_of_values: list) -> tuple:
"""
    This function determines which value in the list is the largest and
    returns a tuple of offsets that makes the slice with the largest
    value "explode" (pop out) of the pie chart.
:param list_of_values:
:return: A tuple of 'exploding' values for a pie chart.
"""
exploding_values = [0.0 for value in list_of_values]
largest_value = list_of_values[0]
pop_out_index = 0
# iterate through the list of values and find the index that contains the largest value.
for i, value in enumerate(list_of_values):
if value > largest_value:
largest_value = value
pop_out_index = i
# set the popout value
exploding_values[pop_out_index] = 0.1
return tuple(exploding_values)
|
aebb9bf98c969871b99c4772ee55876431510b26
| 674,950 |
def get_terms_for_artist(conn,artistid):
"""
Returns the list of terms for a given artist ID
"""
q = "SELECT term FROM artist_term WHERE artist_id='"+artistid+"'"
res = conn.execute(q)
return map(lambda x: x[0],res.fetchall())
|
b0b3b09fabe90fd17373a38c6745f982fabe0729
| 68,114 |
def con_joule_to_kwh(energy_joule):
"""
Converts energy value from Joule to kWh
Parameters
----------
energy_joule : float
Energy demand value in Joule
Returns
-------
energy_kwh : float
Energy demand value in kWh
"""
energy_kwh = energy_joule / (1000 * 3600)
return energy_kwh
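# Illustrative usage (added example, not part of the original snippet):
# 1 kWh = 1000 W * 3600 s = 3.6e6 J, so the conversion divides by 3.6e6.
print(con_joule_to_kwh(3600000))  # 1.0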
|
11e267ee63d691502b6eee63f61ed3f847d4dee3
| 587,251 |
def get_num_replicas_in_sync(strategy):
""" Returns the number of replicas in sync. """
try:
return strategy.num_replicas_in_sync
except AttributeError:
return 1
|
7eec2fb00ec02c11dc729e185b15c9cbf01aa1e5
| 415,746 |
import re
def _checkremove_que(word):
"""If word ends in -que and if word is not in pass list, strip -que"""
in_que_pass_list = False
que_pass_list = [
"atque",
"quoque",
"neque",
"itaque",
"absque",
"apsque",
"abusque",
"adaeque",
"adusque",
"denique",
"deque",
"susque",
"oblique",
"peraeque",
"plenisque",
"quandoque",
"quisque",
"quaeque",
"cuiusque",
"cuique",
"quemque",
"quamque",
"quaque",
"quique",
"quorumque",
"quarumque",
"quibusque",
"quosque",
"quasque",
"quotusquisque",
"quousque",
"ubique",
"undique",
"usque",
"uterque",
"utique",
"utroque",
"utribique",
"torque",
"coque",
"concoque",
"contorque",
"detorque",
"decoque",
"excoque",
"extorque",
"obtorque",
"optorque",
"retorque",
"recoque",
"attorque",
"incoque",
"intorque",
"praetorque",
]
if word not in que_pass_list:
word = re.sub(r"que$", "", word)
else:
in_que_pass_list = True
return word, in_que_pass_list
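# Illustrative usage (added example, not part of the original snippet):
print(_checkremove_que('armaque'))  # ('arma', False) -- enclitic stripped
print(_checkremove_que('atque'))    # ('atque', True) -- word in pass list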
|
599404cf6b219c8578428fcd34eb87e6981d72c7
| 230,686 |
import re
def remove_amp_references(txt: str) -> str:
"""Remove references to amp."""
txt = re.sub("^amp/", "", txt, flags=re.MULTILINE)
txt = re.sub("/amp/", "/", txt, flags=re.MULTILINE)
txt = re.sub("/amp:", ":", txt, flags=re.MULTILINE)
return txt
|
7fee823504caf3bffb76540764a83ea2e4699950
| 646,406 |
def chao1_uncorrected(observed, singles, doubles):
"""Calculates chao1 given counts. Eq. 1 in EstimateS manual.
Formula: chao1 = S_obs + N_1^2/(2*N_2) where N_1 and N_2 are
count of singletons and doubletons respectively.
Note: this is the original formula from Chao 1984, not bias-corrected,
and is Equation 1 in the EstimateS manual.
"""
return observed + singles**2/float(doubles*2)
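# Illustrative usage (added example, not part of the original snippet):
# 10 observed species, 4 singletons, 2 doubletons: 10 + 4**2/(2*2) = 14.0
print(chao1_uncorrected(10, 4, 2))  # 14.0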
|
0f72c090d333b05f01c634aad7d3fd51b556946b
| 457,220 |
def user_to_color(user) -> int:
"""Gets the color for a given user.
Example:
`discord.Embed(color=user_to_color(ctx.author))`
:param user: Any user.
:type user: discord.User
:return: Any color.
:rtype: int
"""
return int(int(user.discriminator) / 9999 * 0xffffff)
|
32a2a7bc6936ecec2f9e8b7385a1726590b2a06d
| 219,619 |
from typing import Any
def find_security(content: Any, scheme: str) -> bool:
"""Check if security scheme is used in the provided content.
Arguments
---------
content
OpenAPI document to be cleaned up.
scheme
Security scheme to be searched.
Returns
-------
Flag determining presence of the security scheme in the content.
"""
if isinstance(content, list):
for item in content:
if find_security(item, scheme):
return True
if isinstance(content, dict):
for key, value in content.items():
if key == 'security':
for security in value:
if isinstance(security, dict) and scheme in security:
return True
if find_security(value, scheme):
return True
return False
|
0fe2485893fbf520cce10c7002346792bdd541e1
| 84,972 |
def get_class_name_attr(class_obj):
"""
Given a class object returns attribute that contains the full name or None.
:param class_obj: Class object (Class)
:return: full name attr or None (str/None)
"""
cl_full_name = None
for key in class_obj.__dict__:
if 'full_name' in key:
cl_full_name = key
break
return cl_full_name
|
99facff0bec35ee993dfcd4ca5cf72f65e97824b
| 158,606 |
def _cache(f):
"""Decorator to cache return value of a function."""
cached = {}
def cached_function():
"""Cache return value in closure before calling function."""
if 'value' not in cached:
cached['value'] = f()
return cached['value']
return cached_function
|
cf25a68f4ac7a5cb94e1db7c3ef36e36002fa422
| 650,007 |
import random
def get_random_smallint() -> str:
"""
    Returns a random smallint value as a string
    """
number = str(random.randint(-32767, 32767))
return number
|
c620e7972f776381879a10bd4cdb20a7ac21b14e
| 269,550 |
def getStringParams(param_str):
"""Determines which keys are required if param_str.format() function is used.
    Returns the required keys as a list. Note that only placeholders with a
    format spec (a ':', e.g. "{name:s}") are detected.
"""
params = []
index = 0
while True:
index = param_str.find("{", index)
if index >= 0:
end_index = param_str.find("}", index)
if end_index > index:
double_colon_index = param_str.find(":", index)
if index < double_colon_index < end_index:
params.append(param_str[index+1:double_colon_index])
index += 1
else:
break
return params
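# Illustrative usage (added example, not part of the original snippet):
print(getStringParams("{name:s} is {age:d}"))  # ['name', 'age']
print(getStringParams("{name}"))               # [] (no format spec, so not detected)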
|
7ef1f1ca89a0d11a0711d3a837226e16e4bad762
| 370,298 |
def get_collection_and_suffix(collection_name):
"""
Lookup collection and suffix based on the name of the collection as used by GEE
Parameters
==========
collection_name: str, GEE name of the collection, eg. 'COPERNICUS/S2'
Returns
=======
collection: str, user-friendly name of the collection, e.g. 'Sentinel2'
suffix: str, contraction of collection name, used in the filenames of plots
"""
if collection_name == 'COPERNICUS/S2':
collection = 'Sentinel2'
satellite_suffix = 'S2'
elif collection_name == 'LANDSAT8':
collection = 'Landsat8'
satellite_suffix = 'L8'
elif collection_name == 'LANDSAT7':
collection = 'Landsat7'
satellite_suffix = 'L7'
elif collection_name == 'LANDSAT5':
collection = 'Landsat5'
satellite_suffix = 'L5'
elif collection_name == 'LANDSAT4':
collection = 'Landsat4'
satellite_suffix = 'L4'
else:
raise RuntimeError("Unknown collection_name {}".format(collection_name))
return collection, satellite_suffix
|
c5b2a29c54cd582d0603725e99b6be07e347b323
| 242,541 |
def csWater45Me_308K_p(p=1):
"""
    Calculates the chemical shift of the water line in 45% MeOH at 308K as a function of pressure (Maciej).
"""
# Chemical shift of water line in 45% MeOH at 308K according to pressure (Maciej)
cs = -4.324584E-05*p + 4.661946E+00
return(cs)
|
7c9fee3a216957fcdbaba61a5f67e55ff6ef69c4
| 315,392 |
def _full_table_name(schema_name, name):
"""
Return the full name of a table, which includes the schema name.
"""
return "{}.{}".format(schema_name, name)
|
66092bf6fc3cc4a4bc7cb01dfc8de3689544b6cc
| 152,353 |
def ounces_to_gramms(oz: float) -> float:
"""
    Convert ounces to grams
"""
return oz * 28.3495
|
366003c4424cd693a0607236c02373301ee2229b
| 269,527 |
def filter_location(job):
"""Filter out job location."""
location_container = job.find('div', attrs={'class': '-location'})
company_location = location_container.text.strip().lstrip('-').strip()
return company_location
|
29e73ac354893b80b15ec087005d8c0135001c6e
| 463,672 |
def getCourseNums(majorNums, majorPages):
"""
Makes a set containing all Course Numbers.
"""
courseNums = set()
for majorNum in majorNums:
courses = [x[:x.index('"')] for x in majorPages[majorNum].split('<a name="')[1:]]
for course in courses:
courseNums.add(course)
return courseNums
|
daa687b98718402daf05299f1f098cacc082d397
| 378,898 |
def marginal_topic_distrib(doc_topic_distrib, doc_lengths):
"""
Return marginal topic distribution ``p(T)`` (topic proportions) given the document-topic distribution (theta)
`doc_topic_distrib` and the document lengths `doc_lengths`. The latter can be calculated with
:func:`~tmtoolkit.bow.bow_stats.doc_lengths`.
:param doc_topic_distrib: document-topic distribution; shape NxK, where N is the number of documents, K is the
number of topics
:param doc_lengths: array of size N (number of docs) with integers indicating the number of terms per document
:return: array of size K (number of topics) with marginal topic distribution
"""
unnorm = (doc_topic_distrib.T * doc_lengths).sum(axis=1)
return unnorm / unnorm.sum()
|
b9593782a176d4f98eeaf606dee8f0ab1a82f22b
| 366,050 |
def port_check(serverport):
""" Taken from EARCIS code.
Check if the server port passed in is a valid TCP port if not, return False; otherwise, return True.
Input: (serverport) % (int)
Output: Boolean
"""
if not (0 < serverport < 65536):
return False
return True
|
69baa747cb36ba77fd6ef388e95d27df9489af5a
| 265,492 |
def check_key_value(data, key, value):
"""Checks a key for the given value within a dictionary recursively."""
if isinstance(key, dict):
for k, v in key.items():
return check_key_value(data[k], v, value)
if data[key] == value:
return True
return False
|
e4a519cd6bff73cc3a077ca98550c130b9653f0f
| 94,587 |
import torch
def choose_forward_optimizer(args, net):
"""
Return the wished optimizer (based on inputs from args).
Args:
args: cli
net: neural network
Returns: optimizer
"""
    if args.network_type in ('BP',):
if args.shallow_training:
print('Shallow training')
forward_params = net.layers[-1].parameters()
elif args.only_train_first_layer:
print('Only training first layer')
forward_params = net.layers[0].parameters()
else:
forward_params = net.parameters()
else:
if args.only_train_first_layer:
print('Only training first layer')
forward_params = net.get_forward_parameter_list_first_layer()
else:
forward_params = net.get_forward_parameter_list()
if args.optimizer == 'SGD':
print('Using SGD optimizer')
forward_optimizer = torch.optim.SGD(forward_params,
lr=args.lr, momentum=args.momentum,
weight_decay=args.forward_wd)
elif args.optimizer == 'RMSprop':
print('Using RMSprop optimizer')
forward_optimizer = torch.optim.RMSprop(
forward_params,
lr=args.lr,
momentum=args.momentum,
alpha=0.95,
eps=0.03,
weight_decay=args.forward_wd,
centered=True
)
elif args.optimizer == 'Adam':
print('Using Adam optimizer.')
forward_optimizer = torch.optim.Adam(
forward_params,
lr=args.lr,
betas=(args.beta1, args.beta2),
eps=args.epsilon,
weight_decay=args.forward_wd
)
else:
raise ValueError('Provided optimizer "{}" is not supported'.format(
args.optimizer
))
return forward_optimizer
|
457d6e2d8b5badc89c31b7a7d0ebab516407c6d0
| 388,611 |
def resolver_has_tag(f, tag):
"""
Checks to see if a function has a specific tag.
"""
if not hasattr(f, '_resolver_tags'):
return False
return tag in f._resolver_tags
|
50257542d32da3aa25b3f4d3890e900bdd44e376
| 161,753 |
def train_model(model, data, epochs=1000):
"""Training Routine
:param model: Model
:type model: tf.keras.Model object
:param data: Data
:type data: tuple
:param epochs: Number of Epochs, defaults to 1000
:type epochs: int, optional
:return: History of training
:rtype: dict
"""
# Data
x_train = data['x_train']
y_train = data['y_train']
x_test = data['x_test']
y_test = data['y_test']
# Compile
model.compile(
loss='mse',
optimizer='adam',
metrics=['mae'],
)
# Train
history = model.fit(
x=x_train,
y=y_train,
batch_size=20,
epochs=epochs,
validation_data=(x_test, y_test),
# callbacks=[TqdmCallback()],
verbose=0,
)
return history
|
8c825d3572c4143aed5cb0edf146fc3661f724a7
| 78,430 |
def get_many(d, required=[], optional=[], one_of=[]):
"""
Returns a predictable number of elements out of ``d`` in a list for auto-expanding.
Keys in ``required`` will raise KeyError if not found in ``d``.
Keys in ``optional`` will return None if not found in ``d``.
Keys in ``one_of`` will raise KeyError if none exist, otherwise return the first in ``d``.
Example::
uid, action, limit, offset = get_many(request.params, required=['uid', 'action'], optional=['limit', 'offset'])
Note: This function has been added to the webhelpers package.
"""
d = d or {}
r = [d[k] for k in required]
    r += [d.get(k) for k in optional]
if one_of:
for k in (k for k in one_of if k in d):
return r + [d[k]]
raise KeyError("Missing a one_of value.")
return r
|
de52c9e243f06c5594e09df96e9ebb2c491cc9e2
| 470,375 |
def to_networkx(g, node_attrs=None, edge_attrs=None):
"""Convert to networkx graph.
The edge id will be saved as the 'id' edge attribute.
Parameters
----------
g : DGLGraph or DGLHeteroGraph
For DGLHeteroGraphs, we currently only support the
case of one node type and one edge type.
node_attrs : iterable of str, optional
The node attributes to be copied. (Default: None)
edge_attrs : iterable of str, optional
The edge attributes to be copied. (Default: None)
Returns
-------
networkx.DiGraph
The nx graph
"""
return g.to_networkx(node_attrs, edge_attrs)
|
3e23046eb39c41b69c721819aa41fba0b42fec27
| 421,299 |
import time
def elapsed_time(text: str,
start_t: int,
sept: int = 70
) -> str:
"""
Return elapsed time.
Parameters
----------
:text: str
Text to be printed
:start_t: int
Generated from time.time_ns()
:sept: int
Length of text
Returns
-------
str
A string containing arg "text" followed by hours, minutes, seconds,
milliseconds.
Example usage
-------------
>>> import time
>>> start = time.time_ns()
>>> time.sleep(2)
>>> elapsed_time("Time taken:", start)
Time taken: 00:00:02 000 ms
"""
second, ms = divmod(int(round((time.time_ns() / 1e6) - (start_t / 1e6),
0)), 1000)
minute, second = divmod(second, 60)
hour, minute = divmod(minute, 60)
fn_op = f'{text}' + f'{hour:02}:{minute:02}:{second:02} {ms:03} ms'\
.rjust(sept - len(text))
return fn_op
|
f127324ebf1459abcd817b6f35768488056067a9
| 254,400 |
def get_records(client, stream_name):
"""Return a list of records from given stream."""
shard_id = client.describe_stream(
StreamName=stream_name
)['StreamDescription']['Shards'][0]['ShardId']
shard_iterator = client.get_shard_iterator(
StreamName=stream_name,
ShardId=shard_id,
ShardIteratorType='TRIM_HORIZON'
)['ShardIterator']
result = client.get_records(
ShardIterator=shard_iterator,
Limit=1000
)
return result['Records']
|
ecd56d20e382dee8d83cb34c426e85c5b1f572e0
| 454,656 |
def set_index(df, index, append=False, inplace=True):
"""
Turns one, or multiple dataframe columns into an index
Arguments are the dataframe to be modified, as well as a column name
or multiple column names in a list
Optional arguments are;
- append (default False, can be True or False) which determines whether
to append the index to the existing index
- inplace (default True, can be True or False) which determines whether
to change the existing dataframe, or return a new dataframe
Note that if append is not true, the original index will be dropped
"""
    if inplace:
        df.set_index(index, append=append, inplace=True)
        return df
    return df.set_index(index, append=append, inplace=False)
|
5cecf419c6bbd282a741ac125295ce870f0be867
| 336,150 |
def get_time_type(var):
"""
Inspect a variable and return it's time type, which can be either
None: not a time variable
'datetime': is a date/time variable
'delta': is a timedelta variable
"""
vartype = str(type(var.values.ravel()[0]))
if 'time' in vartype:
if 'delta' in vartype:
return 'delta'
else:
return 'datetime'
else:
return None
|
700338470496d62f6f2d7d19e5077e107a22c0b3
| 409,036 |
def get_yes_no_answer(question):
"""
Ask user a yes/no-question.
:param question: The question.
:type question: str
:return: The answer.
:rtype: bool
"""
while True:
answer = input(question + " [y/n] ").lower()
if answer == "y" or answer == "yes":
return True
elif answer == "n" or answer == "no":
return False
else:
print("Invalid answer.")
|
5847e9aeccf80cbb45abe5188199a981be887035
| 475,890 |
def replace_inline(match):
"""Replace an inline unit expression, e.g. ``(1 m)``, by valid Python code
using a Quantity call.
"""
return '(Quantity(\'' + match.group(1) + '\'))'
|
6b7c186e3c5548c03653f1e3f05f04940b4841ec
| 366,945 |
def take_test_board(test, idx): # pylint: disable=redefined-outer-name
"""Take state from a test board of a test list from tests module
Args:
test (list): Test board list
idx (int): Test board id
    Returns:
        tuple: (state, e), where state is a list of lists defining the state
        and e is the board's header value board[0][1]
"""
board = test[idx]
e = board[0][1] # pylint: disable=redefined-outer-name disable=invalid-name
state = list() # pylint: disable=redefined-outer-name
for row in board[1:]:
state.append(row)
return state, e
|
6ba15bad313794f621ef709736eaee0df7985764
| 139,390 |
import glob
def find_excel(directory="./"):
""" Find the excel report. """
fcybl_files = glob.glob('{}/{}'.format(directory, './FCYBL*.xlsx'))
if (len(fcybl_files) != 1):
raise RuntimeError("Expected to find 1 FCYBL file, but found the following: {}".format(fcybl_files))
return fcybl_files[0]
|
1912527593d60586930c7272bc1a0defc966f157
| 497,307 |
import json
def read_file(filename):
"""
Return contents of a file as list.
"""
    lines = []
if filename.split('.')[-1] == 'json':
with open(filename, 'r') as f:
lines = json.load(f)
else:
with open(filename, 'r') as f:
while True:
line = f.readline()
if not line:
break
lines.append(line.strip())
return lines
|
1d539ec17f7003fa37ae3189b7bfd5ea51a3451e
| 458,354 |
def map_keys(function, dictionary):
"""
Transform each key using the given function. Return a new dict with transformed keys.
:param function: values map function
:param dictionary: dictionary to mapping
:return: dict with changed (mapped) keys
"""
return {function(key): value for key, value in dictionary.items()}
|
d6c294c3888e7e758a7c68ee850cad94af776149
| 262,056 |
def is_non_descending(knot_vector):
"""
Check whether a knot vector is in non-descending order.
Arguments
---------
knot_vector : array_like
The knot vector.
Returns
-------
bool
True if the knot vector is in non-descending order or False otherwise.
"""
for i in range(len(knot_vector) - 1):
if knot_vector[i + 1] < knot_vector[i]:
return False
return True
|
51c9823a69472028bcd60b77c37b8b8298c0c26f
| 252,929 |
def _part(f):
"""
Return a character representing a partly-filled cell with proportion
`f` (rounded down to width of nearest available character).
"""
return [" ", "▏", "▎", "▍", "▌", "▋", "▊", "▉", "█"][int(9*f)]
|
5919707099b8bd09467056a770a6b8904a991c32
| 681,855 |
def topitems(iterable):
""" Last (top) items from a list of lists, useful to get 'top' items from a list of stacks e.g.
from a list of locations on a stackable game board.
"""
return [x[-1] for x in iterable]
|
252173360c4941cfc02622ccff51acefc67563f0
| 370,797 |
import json
def open_json(json_file_path):
"""
:param json_file_path: path to open inference json file
:returns: the json data dictionary of localized polygon and their classifications
"""
with open(json_file_path) as jf:
json_data = json.load(jf)
inference_data = json_data["features"]["xy"]
return inference_data
|
9e513026364688b69ec5f01d11e05bcadfef5558
| 450,496 |