content (string, 39 to 9.28k chars) | sha1 (string, 40 chars) | id (int64, 8 to 710k) |
---|---|---|
def save_onnx_model(model, filename=None):
"""
Saves a model as a file or bytes.
:param model: *ONNX* model
:param filename: filename or None to return bytes
:return: bytes
"""
content = model.SerializeToString()
if filename is not None:
if hasattr(filename, 'write'):
filename.write(content)
else:
with open(filename, "wb") as f:
f.write(content)
return content
|
b15cf60e82163ce6dea0bf8618e723ca108e3f6b
| 306,070 |
def _median(lst):
"""Returns the median of the input list.
Args:
lst: the input list.
Returns:
The median of the list, or None if the list is empty/None.
"""
    if not lst:
        return None
    sorted_lst = sorted(lst)
length = len(sorted_lst)
if length % 2:
return sorted_lst[length // 2]
return (sorted_lst[length // 2 - 1] + sorted_lst[length // 2]) / 2
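A few illustrative calls (hypothetical inputs, not part of the original record):

_median([3, 1, 2])      # -> 2
_median([4, 1, 3, 2])   # -> 2.5
_median([])             # -> None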
|
66924bd3220b42c171d72d001f3f4b2059ac0737
| 560,583 |
import locale
def get_day_name(day):
"""
Get the short day name in the currently activated locale.
    :param day: day index, where day 0 = Sunday
    :return: abbreviated day name
"""
# Locale keys are 1=Sun, 2=Mon, ...
locale_key = "ABDAY_%d" % (1 + day)
option = getattr(locale, locale_key)
day_name = locale.nl_langinfo(option)
return day_name
|
b3326f4a1cab553fdd92a033002e70f4f69765c2
| 645,943 |
def get_domain(url):
""" Return the domain part of an url """
# Taken from https://www.quora.com/How-do-I-extract-only-the-domain-name-from-an-URL
if url is None:
return None
return url.split('//')[-1].split('/')[0]
|
ece089253dab8acd2c48564c1ad9837f7843e134
| 497,016 |
def le(x, y):
"""Implement `le`."""
return x <= y
|
e3c0601970460c90824a18be8a9ea2474f16e5a1
| 67,879 |
def normalize(r, construct=range):
"""
Return an object which is the normalization of range *r*.
The normalized range is either None (if r.start >= r.stop), or an object
constructed using *construct* with the arguments r.start, r.stop.
In case construct == range we try to avoid constructing new objects.
"""
if r is None:
return None
if r.stop <= r.start:
return None
else:
if construct == range and isinstance(r, range) and r.step == 1:
return r
else:
return construct(r.start, r.stop)
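A few illustrative calls (hypothetical ranges; slice is used here only as an example alternative constructor):

normalize(range(3, 3))                   # -> None (empty range)
normalize(range(1, 5))                   # -> range(1, 5), returned as-is
normalize(range(1, 5), construct=slice)  # -> slice(1, 5, None)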
|
81348dcf40dc1ee87d6a7fcc144cc32c4686deec
| 188,340 |
import gzip
import bz2
import lzma
def open_to_read(file, mode='rt'):
"""Version of 'open' that is smart about different types of compressed files
and file-like objects that are already open to read. 'mode' has to be a
valid read mode such as 'r', 'rb' or 'rt'.
"""
assert mode in ('r', 'rb', 'rt'), 'illegal read mode'
    enc = 'utf8' if 't' in mode else None
if isinstance(file, str) and file.endswith('.gz'):
return gzip.open(file, mode, encoding=enc)
elif isinstance(file, str) and file.endswith('.bz2'):
return bz2.open(file, mode, encoding=enc)
elif isinstance(file, str) and file.endswith('.xz'):
return lzma.open(file, mode, encoding=enc)
elif hasattr(file, 'read'):
return file
else:
return open(file, mode)
|
ddc8522bee92a3de3193acf79bd04073df8ccd4d
| 221,370 |
def possible_names(*names, last_name = 'Elizabeth'):
"""
>>> possible_names('Cindy', 'Lucy', last_name = 'F')
{'baby1': 'Cindy F', 'baby2': 'Lucy F'}
>>> possible_names('Cindy', 'Lucy')
{'baby1': 'Cindy Elizabeth', 'baby2': 'Lucy Elizabeth'}
"""
name = enumerate(names)
dct = {}
for i in name:
dct['baby' + str(i[0] + 1)] = i[1] + ' ' + last_name
return dct
|
4f730d2d3beb67fb163e00cfdf3a2dd817c46683
| 150,556 |
def en_segundos(tiempo: str) -> int:
"""Convierte un tiempo en segundos
:param tiempo: Tiempo expresado en dias:horas:minutos:segundos
:tiempo type: str
:return: Tiempo en segundos
:rtype: int
.. Nota::
u: unidad
t: tiempo(int)
>>> en_segundos('1:0:0:0')
86400
>>> en_segundos('1:0:10:4')
87004
>>> en_segundos('2:12:46:29')
218789
"""
unidades = [60*60*24, 60*60, 60, 1]
return sum([u*t for u, t in zip(unidades, map(int, tiempo.split(":")))])
|
03192b2b2bd528352ed5e997688f123054cca4ce
| 278,646 |
def minMax(a, b):
"""\
Return a tuple with the min value first, then the max value.
"""
return (a, b) if a <= b else (b, a)
|
ee5facb4154aa54d7aa8c66437d43a31276df5cf
| 445,044 |
def printout(data):
"""Print and then return the given data."""
print(data.decode('utf-8'))
return data
|
a14426d55fe62ceecd928dc27807185acd4c19ab
| 408,380 |
def ret(returnCode):
"""Return True if *returnCode* is 0."""
if returnCode == 0:
return True
else:
return False
|
2fede3a66dfa55f868f97fd8d1f59c7d0d580e52
| 512,230 |
from urllib.parse import urlencode
def oauth_url(client_id, permissions=None, server=None, redirect_uri=None):
"""A helper function that returns the OAuth2 URL for inviting the bot
into servers.
Parameters
-----------
client_id : str
The client ID for your bot.
permissions : :class:`Permissions`
The permissions you're requesting. If not given then you won't be requesting any
permissions.
server : :class:`Server`
The server to pre-select in the authorization screen, if available.
redirect_uri : str
An optional valid redirect URI.
"""
url = 'https://discordapp.com/oauth2/authorize?client_id={}&scope=bot'.format(client_id)
if permissions is not None:
url = url + '&permissions=' + str(permissions.value)
if server is not None:
url = url + "&guild_id=" + server.id
if redirect_uri is not None:
url = url + "&response_type=code&" + urlencode({'redirect_uri': redirect_uri})
return url
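An illustrative call with a hypothetical client ID (permissions, server and redirect_uri omitted):

oauth_url('1234')
# -> 'https://discordapp.com/oauth2/authorize?client_id=1234&scope=bot'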
|
bf7ca1957153ff938334927744804891010c0c26
| 20,579 |
import csv
def read_symbols(symbol_file='symbols.csv'):
"""
Read symbols.
Parameters
----------
symbol_file : str
Path to a CSV (with ',' as delimiter) which contains one symbol label
        in the second column
Returns
-------
list
Of symbol labels
"""
with open(symbol_file) as fp:
reader = csv.reader(fp, delimiter=',', quotechar='"')
data_read = [row for row in reader]
return [el[1] for el in data_read]
|
65cc8c41f860c779ab8d8a1f94266fbc9930ea5a
| 633,874 |
from typing import List
def get_primes(n: int) -> List[int]:
"""Return a list of all prime numbers less than n.
This function determines if each integer is prime with efficiency
O(sqrt(n)). Memory consumption for each prime is O(1).
Arguments:
n: int
Maximum number for the prime search
Returns:
List[int]: list of prime numbers
"""
prime_list = []
for i in range(2, n):
is_prime = True
for j in range(2, round(i ** (1 / 2)) + 1):
if i % j == 0:
is_prime = False
if is_prime:
prime_list.append(i)
return prime_list
|
ca5ddad965042477c313e7614947334888642e24
| 245,462 |
def detect_protocol_and_get_its_field_names(packet, field_names):
"""
Tries to find first protocol with defined field names.
:param packet: Packet from Packets Parser stage.
:param field_names: Dictionary with known protocols and their field names.
:return: Protocol field names of the first detected protocol, None otherwise.
"""
for field_name in field_names:
if field_name in packet:
return field_names[field_name]
return None
|
db62ca5426e76e16c648bba7e0238be16e338394
| 561,406 |
import math
def cone(individual, position, height, width):
"""The cone peak function to be used with scenario 2 and 3.
:math:`f(\mathbf{x}) = h - w \sqrt{\sum_{i=1}^N (x_i - p_i)^2}`
"""
value = 0.0
for x, p in zip(individual, position):
value += (x - p)**2
return height - width * math.sqrt(value)
|
02ed317b900b34e7d9dee1847a164df9a54417e6
| 594,357 |
def make_grid_row(length):
"""Create a row of length with each element set to value."""
row = []
for _ in range(0, length):
row.append(False)
return row
|
1aae7acdd314ac525e367400409740a22e3a1b27
| 249,857 |
def boxSeries(ax,ser):
"""
This function draws a vertical box and whisker plot
:Arguments:
:type ax: matplotlib Axis2D
        :param ax: Axis on which the box plot will be drawn
        :type ser: pandas Series
        :param ser: data to be plotted
    :Return:
        :type ax: Matplotlib Axis
        :param ax: axis with the box plot drawn onto it
"""
bp = ax.boxplot(ser,showfliers=True)
for flier in bp['fliers']:
flier.set(marker='+', color='#FFFFFF')
return ax
|
f26c14ca48916e3748651ac2bc6017f1a785d246
| 96,194 |
import math
def isfinite(n):
"""Return True if x is neither an infinity nor a NaN, and False otherwise.
(Note that 0.0 is considered finite.)
Backported from python 3
"""
return not (math.isinf(n) or math.isnan(n))
|
b3ced8dc58c4bced4ca16892f7be509408b4bb10
| 163,067 |
from typing import List
import itertools
def mark_and_toys(prices: List, budget: int):
"""
Compute max toys mark can buy.
    :param prices: toy prices.
:param budget: max budget.
:return: No of toys that can be bought.
"""
prices.sort()
accumulated_prices = list(itertools.accumulate(prices))
    # Default to len(prices) when every toy fits within the budget.
    return next(
        (agg_price[0]
         for agg_price in enumerate(accumulated_prices)
         if agg_price[1] > budget),
        len(prices))
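A small usage sketch (hypothetical prices and budget):

mark_and_toys([1, 12, 5, 111, 200, 1000, 10], 50)   # -> 4 (toys priced 1, 5, 10 and 12 fit in the budget)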
|
64828fa322098267b3be33eeff01fc3f1dc53395
| 295,454 |
def extract_subID(list_):
"""
Function to extract subject IDs
"""
res = []
for item in list_:
sub = item.split('_')[1]
res.append(sub)
return res
|
0594c3307988fd7cd1837f1ef04393d0b23a85b4
| 278,414 |
def result_for_runners(connection_observer):
"""
When runner takes result from connection-observer it should not
modify ConnectionObserver._not_raised_exceptions
:param connection_observer: observer to get result from
:return: result or raised exception
"""
if connection_observer._exception:
raise connection_observer._exception
return connection_observer.result()
|
0e640297504621101cd5901706524863c0a6677e
| 271,677 |
from pathlib import Path
def get_tlt_file(alignment_directory: Path, tilt_series_id: str) -> Path:
"""Get the tlt file containing tilt angles from an IMOD directory."""
return alignment_directory / f'{tilt_series_id}.tlt'
|
3580e90228464727bde158706665d35d1c1d4f47
| 438,957 |
def _draw_circle(data, obj, draw_options):
"""Return the PGFPlots code for circles.
"""
x, y = obj.center
cont = "\\draw[%s] (axis cs:%.15g,%.15g) circle (%.15g);\n" % (
",".join(draw_options),
x,
y,
obj.get_radius(),
)
return data, cont
|
684d6fbd06942de103dd17cbf52735cb60aef9d2
| 170,991 |
import torch
def is_complex_symmetric(z: torch.Tensor, atol=3e-5, rtol=1e-5):
"""
    Return whether the given complex matrices are symmetric or not.
Parameters
----------
z : tensor
Complex symmetric matrix
atol : float
absolute tolerance for allclose
rtol : float
relative tolerance for allclose
Returns
-------
boolean
        whether the matrices in z are complex symmetric or not
"""
real_z, imag_z = z.real, z.imag
return torch.allclose(
real_z, real_z.transpose(-1, -2), atol=atol, rtol=rtol
) and torch.allclose(imag_z, imag_z.transpose(-1, -2), atol=atol, rtol=rtol)
|
2e05087104253a8a25a7445263be3c969de9d474
| 155,491 |
def all_agents(stmts):
"""Return a list of all of the agents from a list of statements.
Only agents that are not None and have a TEXT entry are returned.
Parameters
----------
stmts : list of :py:class:`indra.statements.Statement`
Returns
-------
agents : list of :py:class:`indra.statements.Agent`
List of agents that appear in the input list of indra statements.
"""
agents = []
for stmt in stmts:
for agent in stmt.agent_list():
# Agents don't always have a TEXT db_refs entry (for instance
# in the case of Statements from databases) so we check for this.
if agent is not None and agent.db_refs.get('TEXT') is not None:
agents.append(agent)
return agents
|
6c4dc156a26efbc4f6d1945e2fc23b1a423d8432
| 133,121 |
def contar_caracteres(s):
"""Função que conta os caracteres de uma string
ex.:
>>> contar_caracteres('ola')
{'o': 1, 'l': 1, 'a': 1}
>>> contar_caracteres('maca')
{'m': 1, 'a': 2, 'c': 1}
:param s: string a ser contada
"""
resultado={}
for caracter in s:
        resultado[caracter] = resultado.get(caracter, 0) + 1  # if the character is not yet in the dictionary, start its count at 0 and increment
return resultado
|
9a7dea4e30052b2f5a1255bcb3db57c8c98246c4
| 113,543 |
from typing import Union
from typing import Mapping
from typing import Any
from typing import Sequence
def is_request_credentialed(headers: Union[Mapping[str, Any], Sequence[str]]) -> bool:
"""Utility function that checks if headers indicate credentialed request.
    This is done only by inspecting header fields so it does not take SSL
client certificate authentication into account. The argument may be either
headers dictionary-like object or a sequence of headers field names.
:param headers: header fields
:type headers: Union[Mapping[str, Any], Sequence[str]]
:return: flag indicating credentialed status
:rtype: bool
"""
return 'Cookie' in headers or 'Authorization' in headers
|
c0e391e4cc9af75084982769ef82582498117421
| 551,042 |
def download_comments(comments_url: str, request: dict, args: dict) -> list:
"""
Recursively download comments from a given GitHub URL
:param comments_url: GitHub URL containing comments
:param request: HTTP request parameters (headers, token, etc)
:param args: arguments passed on the command line + session
:return: list of strings containing comments
    Using the headers in request and the session in args['session'], connect to comments_url and download its content.
    Per the GitHub documentation the content is JSON; it is processed and the text of each comment is appended to ret.
    If there is a 'Link' HTTP header containing the word 'next', recursively follow the URL associated with that header,
    using it as the new comments_url.
"""
ret = []
response = args['session'].get(comments_url, headers=request['headers'])
if 'link' in response.headers:
links = response.headers['link'].split(',')
links = {x.split("rel=")[1].strip('"'): x.split(';')[0].strip('<').strip('>') for x in links}
if 'next' in links:
ret += download_comments(links['next'], request, args)
for comment in response.json():
ret += [comment['body']]
return ret
|
203de9ff80bf10acc67f4b4847ad1bf55b909434
| 504,453 |
def count_non_zeros(*iterables):
"""Returns total number of non 0 elements in given iterables.
Args:
*iterables: Any number of the value None or iterables of numeric values.
"""
result = 0
for iterable in iterables:
if iterable is not None:
result += sum(1 for element in iterable if element != 0)
return result
|
28d24fddc1cb307812f8c69dd858362ea76b1506
| 215,079 |
def set_size(width, fraction=1):
"""Set figure dimensions to avoid scaling in LaTeX.
Parameters
----------
width: float
Document textwidth or columnwidth in pts
fraction: float, optional
Fraction of the width which you wish the figure to occupy
Returns
-------
fig_dim: tuple
Dimensions of figure in inches
"""
# Width of figure (in pts)
fig_width_pt = width * fraction
# Convert from pt to inches
inches_per_pt = 1 / 72.27
# Golden ratio to set aesthetic figure height
# https://disq.us/p/2940ij3
    golden_ratio = (5**.5 - 1) / 1.5  # divide by 2 for the true golden ratio; 1.5 gives a slightly taller figure
# Figure width in inches
fig_width_in = fig_width_pt * inches_per_pt
# Figure height in inches
fig_height_in = fig_width_in * golden_ratio
fig_dim = (fig_width_in, fig_height_in)
return fig_dim
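A usage sketch (matplotlib and the 510 pt text width are assumptions, not from the original record):

import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=set_size(510, fraction=0.5))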
|
6f6bb99ed6bb5562ab322d38ce1238e94f974f29
| 524,087 |
def ffmt(val):
"""format None or floating point as string"""
if val is not None:
try:
return "%.5g" % val
except:
pass
return repr(val)
|
60c69268e4bfdc7dd62cab4b394ec835496184dc
| 167,391 |
def is_temp_disabled(abbr, temp_disabled_groups):
"""if abbr is temporarily disabled"""
for g in abbr.groups:
if g in temp_disabled_groups:
return True
return False
|
7bc40b06a7df0584d4d511ec87d06c4609390f27
| 487,607 |
def namesplit(s):
"""
Split package name at the position of dot '.'.
Examples:
'module.submodule' => ['module', 'submodule']
'module' => ['module']
'' => []
"""
rslt = []
# Ensure that for empty string '' an empty list is returned.
if s:
rslt = s.split('.')
return rslt
|
93533e828358bcfc2a966a96944815002ddeba16
| 142,709 |
def format_board(state):
"""Format the board"""
cells = []
for i, char in enumerate(state, start=1):
cells.append(str(i) if char == '.' else char)
bar = '-------------'
cells_tmpl = '| {} | {} | {} |'
return '\n'.join([
bar,
cells_tmpl.format(cells[0], cells[1], cells[2]), bar,
cells_tmpl.format(cells[3], cells[4], cells[5]), bar,
cells_tmpl.format(cells[6], cells[7], cells[8]), bar
])
|
538cddd09eaa68328e29330c592ec2e04e07eb6c
| 577,583 |
def czyMur(mapObj, x, y):
"""Zwraca True jesli (x,y) pozycja na mapie jest murem,
w.p.p. zwraca False"""
if x < 0 or x >= len(mapObj) or y < 0 or y >= len(mapObj[x]):
return False # (x,y) nie sa na mapie
elif mapObj[x][y] in ('#'):
return True # mur na drodze
return False
|
617ebf983c41fdcb5399f57b89d126469e93875e
| 35,825 |
def gain(xi=100.0, xf=110.0):
"""
    Calculate gain from initial to final value.
"""
return (xf - xi) / xi
|
41fb410c9382c5611f9a12b53148cc6c59585abd
| 610,541 |
def gini(reads):
"""Gini index for a set of reads.
Gini index, a common measure of income inequality in economics, can measure the evenness of sgRNA
read counts [14]. It is perfectly normal for later time points in positive selection experiments
to have higher Gini index since a few surviving clones (a few sgRNA with extreme high counts)
could dominate the final pool while most of the other cells die (more sgRNAs with zero-count).
In contrast, high Gini index in plasmid library, in early time points, or in negative selection
experiments may indicate CRISPR oligonucleotide synthesis unevenness, low viral transfection
efficiency, and over selection, respectively. At time 0 it should be between 0.1-0.2
Parameters
----------
reads : array_like
Returns
-------
float
"""
sorted_list = sorted(reads)
height, area = 0, 0
for value in sorted_list:
height += value
area += height - value / 2.
fair_area = height * len(reads) / 2.
return (fair_area - area) / fair_area
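Two illustrative calls with hypothetical read counts, showing even versus skewed distributions:

gini([1, 1, 1, 1])   # -> 0.0  (perfectly even counts)
gini([0, 0, 0, 4])   # -> 0.75 (one sgRNA dominates the pool)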
|
214103676a42dba4d758c207a7bf517e16aa4ee0
| 573,025 |
import enum
import socket
def sockfam_to_enum(num):
"""Convert a numeric socket family value to an IntEnum member.
If it's not a known member, return the numeric value itself.
"""
if enum is None:
return num
else: # pragma: no cover
try:
return socket.AddressFamily(num)
except ValueError:
return num
|
b834fadb5eae24672f05f050c9846af92af053a6
| 447,480 |
def makeVector(dim, initValue):
"""
Return a list of dim copies of initValue
"""
return [initValue]*dim
|
2d5d77c21ae24eb59ae70fbd6d4024bf446507c6
| 71,831 |
def append_prefix(prefix: str, name: str) -> str:
"""
Appends provided prefix to provided name.
"""
if prefix != "" and name != "":
return prefix + "." + name
else:
return prefix + name
|
d9bc895ab33ab0c2d35dbf61e1c549323a840675
| 330,127 |
def writeContactPoint(cp):
"""Writes a contact point's members x,n,kFriction"""
return ' '.join(str(v) for v in (cp.x+cp.n+[cp.kFriction]))
|
32a4eee95a5ac04d35f60ba9731783cac17c1e49
| 188,891 |
def isFinalState(state, goal):
"""
Check if a state is final
:param state: state to check
:param goal: goal state
:return: true or false
"""
return state.properties.intersection(goal.properties) == goal.properties
|
b99b1dc18f7b3d14128f02086c94f28523ed6ccd
| 68,553 |
def format_quote_text(text):
"""
    Process the quote text (replace tabs and newlines with spaces)
:param text: string or None
:return: string or None
"""
return None if text is None else text.replace('\t', ' ').replace('\n', ' ')
|
7895bd2349e22da5711da252852689b21dac4a75
| 187,018 |
import itertools
def sample_chains(chain_keys, exp, idx):
"""
Returns the `idx`th item in the chain of all chain keys to be sampled.
Args:
chain_keys (list): keys in the experimental configuration
that are to be used in the full chain
exp (dict): experimental configuration
idx (int): index of the current sample
Returns:
dict: sampled point in the cartesian space (with keys = chain_keys)
"""
conf = {}
chain_values = [[(key, value) for value in exp[key]["from"]] for key in chain_keys]
chain = list(itertools.chain(*chain_values))
k, v = chain[idx % len(chain)]
conf[k] = v
if exp[k].get("normalized"):
conf["normalization_folder"] = k
return conf
|
d08dab08f1231e002802005baa8d9b4a94bbb3d7
| 276,960 |
import re
def remove_digits(s: str) -> str:
""" removes digits in a string """
return re.sub("\d+", "", s)
|
d2c9ecf5d6b3d6c9bff33e09ebb0dc7f0b0c92f6
| 397,489 |
def val_or_none_key(getter_fcn):
"""Wraps getter_fcn, returning a key that is a tuple of (0 or 1, val) where
val=getter_fcn(obj), and the int is 0 if val is None."""
def result_key_fcn(obj):
val = getter_fcn(obj)
n = 0 if val is None else 1
return n, val
return result_key_fcn
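A usage sketch with hypothetical records, sorting values that may be None (Nones sort first):

key = val_or_none_key(lambda d: d.get('score'))
sorted([{'score': 3}, {'score': None}, {'score': 1}], key=key)
# -> [{'score': None}, {'score': 1}, {'score': 3}]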
|
e56270f48254a3b35393354ada00166564ecbfcb
| 684,330 |
def get_json_carts(response):
"""Get rendered cart's templates from JsonResponse."""
return response.json()['header'], response.json()['table']
|
5d80db9330390dbef74f27cc82d2a84de211b0e5
| 653,407 |
def zscore(dat, mean, sigma):
"""Calculates zscore of a data point in (or outside of) a dataset
zscore: how many sigmas away is a value from the mean of a dataset?
Parameters
----------
dat: float
Data point
mean: float
Mean of dataset
    sigma: float
Sigma of dataset
"""
zsc = (dat-mean)/sigma
return zsc
|
b11216e50632e2024af0a389184d5e1dba7ed4fd
| 2,963 |
import json
def from_json_string(my_str):
"""returns an object (Python data struct) represented by a JSON string"""
return json.loads(my_str)
|
be3cbb8218c092190014c8d6b1adf329ba15babd
| 435,108 |
import torch
def masked_temperature_softmax(x:torch.Tensor, idx:int, T:float=0.01):
"""Temperature scaled softmax on torch.Tensor masked by indices `idx`"""
return torch.exp(x[idx, :] / T) / torch.exp(x[idx, :] / T).sum()
|
4f9e1f448e99e6bc1fe51c08f50602eb34c08807
| 208,248 |
def should_trade(strategy, date, previous_trade_date):
"""Determines whether a trade is happening for the strategy."""
# We invest the whole value, so we can only trade once a day.
if (previous_trade_date and
previous_trade_date.replace(hour=0, minute=0, second=0) ==
date.replace(hour=0, minute=0, second=0)):
return False
# The strategy needs to be active.
if strategy["action"] == "hold":
return False
# We need to know the stock price.
if not strategy["price_at"] or not strategy["price_eod"]:
return False
return True
|
8c52eb554673bb0badff4a55c8e3a11cf9392a47
| 41,140 |
def valid_uris(uris: set, uri_type: str = 'track') -> bool:
"""Checks if given uris are valid. In this case, valid means
each uri is of the form: f"spotify:{uri_type}:{id_string}".
Args:
uris (set): Set of uris that the user would like to check
for validity.
uri_type (str): The type of uri the user would like to check
(Ex. 'track', 'artist', 'album', etc.).
Defaults to 'track'.
Returns:
bool: True if all uris are valid, else False.
"""
# Uris must be iterable
try:
iterator = iter(uris)
except TypeError:
return False
# Can't contain None type
if None in uris:
print(f'An element of "{uris}" has type "None", remove it and try again')
return False
# Spotify uris must be valid
for uri in uris:
uri_parts = uri.split(':')
if uri_parts[0] != 'spotify' or uri_parts[1] != uri_type:
print(f'Invalid uri {uri}')
return False
# Passed all checks; valid uri container
return True
|
d7d738dc7940ce2e14071a03d07b46582ef970cd
| 586,577 |
def count_hits_per_gene(gene_pos, hits, max_dist = 10):
"""
Args:
        gene_pos: A dictionary of gene positions
        hits: A list of positions of hits
        max_dist (optional): The maximum distance for a hit to be associated with a gene
    Returns:
        A dictionary that associates to each gene the number of hits within
        distance max_dist
"""
# You may find this useful: it creates a new dictionary with
# the same keys as the gene_pos dictionary, and values 0
# hit_count=dict.fromkeys(gene_pos.keys(),0)
# WRITE YOUR QUESTION 6 CODE HERE
hit_count=dict.fromkeys(gene_pos.keys(),0)
for key, val in gene_pos.items():
for elem in hits:
if val <= (elem + max_dist) and val >= (elem - max_dist):
                hit_count[key] += 1
return hit_count
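An illustrative call (hypothetical gene positions and hit positions, using the default max_dist of 10):

count_hits_per_gene({'geneA': 100, 'geneB': 500}, [95, 105, 510, 700])
# -> {'geneA': 2, 'geneB': 1}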
|
7ba9dcc74b2f8b847dd282e00ca4700f12087688
| 409,180 |
from copy import deepcopy
def xtb_equilibrate(sys, calc, prod_time=20, **kwargs):
"""
This function will automate the process of taking a system from zero
temperature up to room temperature (298.15 kelvin).
This routine will work by performing 10ps simulations at temperatures
from 0 to 298.15 kelvin, with each simulation spaced by 10 degrees
(310 ps total). Finally, the production run can be performed.
Args:
sys (BigDFT.Systems.System): the system to calculate.
calc (BigDFT.Interop.XTBInterop.XTBCalculator): a calculator to use.
prod_time (int): the number of production picoseconds to run at
room temperature.
**kwargs: any argument you would normally want to pass to the
run command.
Returns:
(list, list): a list of positions along the trajectory and a list of
energy values.
"""
positions = []
energies = []
param = kwargs.pop("inp", {})
if "md" not in param:
param["md"] = {}
# Loop over temperature values
usys = deepcopy(sys)
for temp in list(range(0, 300, 10)) + [298.15]:
param["md"]["temp"] = temp
param["md"]["time"] = 10
if temp == 0:
param["md"]["restart"] = "false"
log = calc.run(sys=usys, omd=True, inp=param, **kwargs)
else:
param["md"]["restart"] = "true"
log = calc.run(sys=usys, md=True, inp=param, **kwargs)
# Extract results
positions += log.md_traj
energies += log.md_energy
usys = positions[-1]
# Production Run
param["md"]["time"] = prod_time
param["md"]["restart"] = "true"
param["md"]["temp"] = 298.15
log = calc.run(sys=usys, md=True, inp=param, **kwargs)
positions += log.md_traj
energies += log.md_energy
usys = positions[-1]
return positions, energies
|
cc35cb815c44ca91284824bdaa1b384a500571a8
| 523,630 |
def comparisons(labels, compareall):
"""Return a list of tuples representing the treatments to
compare. If compareall is false, only comparisons against
the first treatment are performed. If true, all comparisons
are done."""
if compareall:
return [(x, y) for x in range(len(labels))
for y in range(len(labels))
if y < x]
else:
return [(x, 0) for x in range(len(labels)) if x != 0]
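Illustrative output for hypothetical treatment labels:

comparisons(['control', 'a', 'b'], compareall=False)  # -> [(1, 0), (2, 0)]
comparisons(['control', 'a', 'b'], compareall=True)   # -> [(1, 0), (2, 0), (2, 1)]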
|
925fd1fdf472732e68e5d0c55d1648ed33e1f340
| 256,683 |
from pathlib import Path
def files_to_process(folder: Path) -> list:
"""Make a list of files to process. File names must meet
the following criteria:
- file ends in ``.xls``
- filename has at least 1 underscore
- text before the first underscore can be converted to an integer
:param folder: folder where files are stored
:type folder: Path
:return: list of files that meet criteria
:rtype: list
"""
# Get a list of all .xls files in the folder
files = list(folder.glob('**/*.xls'))
# Remove any files that don't have proper naming conventions
    # Iterate over a copy so that removals don't skip elements of `files`
    for f in list(files):
        # Make sure there is at least 1 underscore
        if "_" not in str(f.name):
            print(f"No underscores, skipping {f.name}")
            files.remove(f)
            continue
        # Make sure that the Location ID is an integer
        parts = str(f.name).split("_")
        try:
            _ = int(parts[0])
        except ValueError:
            print(f"Bad Location ID, skipping {f.name}")
            files.remove(f)
return files
|
14c4cfd60ef9e254f8acc3e41940131e2936bb81
| 143,115 |
def cint(s):
"""Convert to integer"""
try: num = int(float(s))
except: num = 0
return num
|
05bc1207d35dff73ea69047d418eeccb1ecfcc72
| 286,344 |
def _get_input_output_names(onnx_model):
"""
Return input and output names of the ONNX graph.
"""
input_names = [input.name for input in onnx_model.graph.input]
output_names = [output.name for output in onnx_model.graph.output]
assert len(input_names) >= 1, "number of inputs should be at least 1"
assert len(output_names) == 1, "number of outputs should be 1"
return input_names, output_names
|
50b40334d69cb4fdaf7afdcd29809cbac675f27a
| 656,594 |
import requests
from bs4 import BeautifulSoup
def get_soup(request, element, class_value):
"""
Uses the BeautifulSoup library to retrieve the HTML text for a given webpage request.
:param request: The webpage request for which the HTML text is to be retrieved.
:param element: The element of the webpage. Ex: div, a, etc.
:param class_value: The class of the element. Used to identify which parts of the HTML page are to be returned.
:return: All instances of a given element and class (As an array).
"""
html_page = requests.get(request).text
soup = BeautifulSoup(html_page, 'lxml')
return soup.find_all(element, class_=class_value)
|
c93d82b978394119af240fcafdc6c9e5050751e5
| 665,253 |
def is_braces_sequence_correct(seq: str) -> bool:
"""
Check correctness of braces sequence in statement
>>> is_braces_sequence_correct('()(())')
True
>>> is_braces_sequence_correct('()[()]')
True
>>> is_braces_sequence_correct(')')
False
>>> is_braces_sequence_correct('[()')
False
>>> is_braces_sequence_correct('[(])')
False
"""
correspondent = dict(zip('([{', ')]}'))
stack = []
for brace in seq:
if brace in '([{':
stack.append(brace)
continue
elif brace in ')]}':
if len(stack) == 0:
return False
left = stack.pop()
if correspondent[left] != brace:
return False
    return len(stack) == 0
|
99122aef1d1901dd45c9180c93bd314e5c002dad
| 287,198 |
def predict_y(x0, m, c):
"""
Predict the value yhat for a regression line with the given parameters.
Parameters
----------
> `x0`: the value of predictor
> `m`: slope of the regression line
> `c`: y-intercept of the regression line
Returns
-------
The predicted value of y for the above given parameters.
"""
return (m * x0) + c
|
3dded1cb5c00a01933785f98f405f13582159f81
| 66,715 |
def option_to_dict(options):
"""Convert user input options into dictionary."""
new_opts = {}
for opt in options:
if "=" in opt:
            key, value = opt.split("=", 1)
            new_opts[key] = value
else:
new_opts[opt] = True
return new_opts
|
5b638522c9acdb2044e35ddf1dbabbbd099148ea
| 154,083 |
def https_in_url(url):
"""
Check if url startswith https
:param url: str: The url to check
:return: True if url startswith https else False
"""
    return url.startswith('https://')
|
34056546031504c9d16bee058d861f97080ddf82
| 338,419 |
def strip_suffix(string, suffix):
"""Strip the given suffix from the string if it's present
"""
    return string[:-len(suffix)] if suffix and string.endswith(suffix) else string
|
0a34ca5a189181f7479084ea758f140ecfe802b9
| 354,584 |
def f(x):
"""Assume x is an int > 0"""
ans = 0
#Loop that takes constant time
for i in range(1000):
ans += 1
print('Number of additions so far', ans)
#Loop that takes time x
for i in range(x):
ans += 1
print('Number of additions so far', ans)
#Nested loops take time x**2
for i in range(x):
for j in range(x):
ans += 1
ans += 1
print('Number of additions so far', ans)
return ans
|
638bac3138a67fcb142fc7fa863c5fb605d368ee
| 207,404 |
def cleanDate(fullDate):
"""
Function to clean date into date and time
:param fullDate: Raw format of date
    :return: [date, time] list
    """
    splitDate = fullDate.split(" ")
formatDate = str(splitDate[5]) + " " + str(splitDate[1]) + " " + str(splitDate[2])
return [formatDate, splitDate[3]]
|
c5ce6c9015b140fae0fe31543b83570991f71c8b
| 229,515 |
def flatten(nested_list):
"""Flatten arbitrarily nested lists.
Args:
nested_list (iterable): arbitrarily nested list.
Returns:
list: a flattened list.
Examples:
>>> flatten(range(5))
[0, 1, 2, 3, 4]
>>> flatten([1, [2, 2]])
[1, 2, 2]
>>> flatten([1, [2, 2], [3, 3, 3, [4, 4, 4, 4,], 3], 1])
[1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 3, 1]
Note that strings are not flattened:
>>> flatten(["one", ["two", "three", ["four"]]])
['one', 'two', 'three', 'four']
"""
if not hasattr(nested_list, "__iter__") or isinstance(nested_list, str):
return [nested_list]
result = []
for e in nested_list:
result.extend(flatten(e))
return result
|
4b9d05d5bf14c6139430e443b41150b63355376a
| 145,300 |
def filesafe(str_):
"""Convert a string to something safe for filenames."""
return "".join(c for c in str_ if c.isalnum() or c in (' ', '.', '_', '-')).rstrip()
|
925e163ef6e56e313360a3fd6e57bb1f84f38cb6
| 205,955 |
def isPotentialChessboardTopic(sub):
"""if url is imgur link, or url ends in .png/.jpg/.gif"""
    if sub.url is None:
return False
return ('imgur' in sub.url
or any([sub.url.lower().endswith(ending) for ending in ['.png', '.jpg', '.gif']]))
|
b6eb4919010c7bb4257093c6e6cd20f111da049f
| 149,412 |
def count_lines(string: str) -> int:
"""
Counts the number of lines in the given string
:param string: String to count
:return: int
Example:
>>> count_lines("Hello World")
    1
"""
return len(string.splitlines())
|
8ad1847f0d1f56462cd3c9af684b1fc52efe3e23
| 346,385 |
def append_correct_whitespace(token, pronoun):
"""
If the original pronoun was followed by whitespace, append whitespace to new pronoun.
"""
if token.text_with_ws != token.text:
whitespace = token.text_with_ws[-1]
pronoun = pronoun + whitespace
return pronoun
|
8e2aa839b2b457f118a2b015605c2bfe0461f971
| 640,945 |
def get_roles(decoded_token):
"""Get roles declared in the input token
Note: returns both the realm roles and client roles
Args:
decoded_token (dict): The user's decoded bearer token
Returns:
list[str]: List of role names
"""
# Extract realm scoped roles
try:
# Session logins and Bearer tokens from password Grant Types
if 'realm_access' in decoded_token:
roles = decoded_token['realm_access']['roles']
else: # Bearer tokens from authorization_code Grant Types
# DP ???: a session login uses an authorization_code code, not sure
# about the difference
roles = decoded_token['resource_access']['account']['roles']
except KeyError:
roles = []
# Extract all client scoped roles
for name, client in decoded_token.get('resource_access', {}).items():
        if name == 'account':
continue
try:
roles.extend(client['roles'])
except KeyError: # pragma no cover
pass
return roles
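An illustrative call with a hypothetical decoded token fragment:

token = {'realm_access': {'roles': ['admin']},
         'resource_access': {'account': {'roles': ['view-profile']},
                             'my-client': {'roles': ['editor']}}}
get_roles(token)   # -> ['admin', 'editor']  ('account' client roles are skipped)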
|
b585872e02993841b16a10e029a18884974625eb
| 572,471 |
from datetime import datetime
def get_date_time_from_usecs(time_in_usecs):
"""Get date time from epoch usecs"""
return datetime.fromtimestamp(time_in_usecs / 1000000.0)
|
907a0ddb4bded0bb8be93db504ea880327f93b5d
| 393,498 |
def quality(stat_collector, cov_level):
"""
Determine if variance is within an acceptable level.
:param stat_collector: StatCollector object
:param cov_level: acceptable limit for coefficient of variation
:return: True if coefficient of variation is within acceptable limit
"""
avg, stddev = stat_collector.eval(sampled=False)
return stddev / avg < cov_level
|
7711cb9783c87297f60be3f0f33d714c1a4d1d4b
| 328,670 |
def has_wiki_desc(resp: dict) -> bool:
"""
Check if a POI response contains a Wikipedia description.
"""
return any(b["type"] == "description" and b["source"] == "wikipedia" for b in resp["blocks"])
|
388c3a8dd7d52774f691ed54d300e85f06e442b2
| 348,007 |
def emoji_of_status(status: str) -> str:
"""Returns the emoji associated to a docker container status.
The emojis are as follows:
* ``exited``: ⏹,
* ``paused``: ⏸,
* ``restarting``: ↩,
* ``running``: ▶,
* otherwise: ❓.
"""
return {
"exited": "⏹",
"paused": "⏸",
"restarting": "↩",
"running": "▶"
}.get(status, "❓")
|
d6ba1e5b05bd1d4166d8f7a6c3965977a806cafa
| 619,627 |
def is_reserved_attribute(attr):
""" Returns True if the attribute name is a reserved, false otherwise. """
taken_attributes = ['x', 'y', 'z', 'uv',
'scalar_field']
return attr in taken_attributes
|
3427f5b93b8721664e8866b8bec331681a1c8a4b
| 507,824 |
def calc_cutoff(target, pairs):
"""Return read_length such that sum(lens for len >= rl) >= target.
Raise on empty pairs, which are (length, count) tuples.
"""
accum = 0
for length, count in reversed(sorted(pairs)):
accum += length*count
if accum >= target:
break
else:
raise Exception('Total=%d < target=%d' %(accum, target))
return length
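A small worked example with hypothetical (length, count) pairs (2 reads of 100 bp and 4 reads of 50 bp, 400 bp total):

pairs = [(100, 2), (50, 4)]
calc_cutoff(150, pairs)   # -> 100 (the two 100 bp reads already cover 200 >= 150)
calc_cutoff(250, pairs)   # -> 50  (the 50 bp reads are needed to reach 400 >= 250)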
|
cc05c0f7243d18462c5f08f5360f17a981388bcf
| 207,583 |
def output_transform_ped(process_output):
"""
Output transform for pedestrian presence metrics.
"""
y_pred = process_output[0]['pedestrian'].argmax(dim=1)
y = process_output[1]['pedestrian'].argmax(dim=1)
return dict(y_pred=y_pred, y=y)
|
15e2f63bce9ae85bcda7c72b01df9bba78cfbd69
| 148,795 |
def seed_dict(source_data, data_dictionary, seed):
"""Seed a dictionary with source data.
Goes through a list of items and adds items not currently in
data_dictionary to the dictionary. Added items are seeded with the passed
seed. Updated dictionary is returned.
Args:
        source_data (list): List of items to add to dictionary (if not present).
data_dictionary (dict): Dictionary to be updated.
seed (str or int): Seed value to be added to new dictionary items.
Returns:
data_dictionary (dict): Updated dictionary.
"""
    # Get unique items in source_data
unique = set(source_data)
unique_items = list(unique)
# Find missing values and add to dictionary
for item in unique_items:
if item not in data_dictionary.keys():
data_dictionary[item] = seed
return data_dictionary
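A quick illustrative call (hypothetical values):

seed_dict(['a', 'b', 'a'], {'a': 5}, 0)   # -> {'a': 5, 'b': 0}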
|
5c1cb9ced2c8c8e8c4f044c9394ee9333097b36a
| 176,120 |
def fatorial_3(number):
"""Fatorial calculation recursively."""
if number == 0:
return 1
else:
return number * fatorial_3(number - 1)
|
db31419f00806093d2f5b99c0f5d6e08496ed56f
| 51,890 |
def apply_deltas(start,deltas):
""" Return a list of the sums of DELTAS starting from START. """
result = []
current = start
for delta in deltas:
current = current + delta
result.append(current)
return result
|
69a21c64567a6bb722fdb9352613cf78824079eb
| 237,209 |
def EnablePrecompile(env, target_name):
"""Enable use of precompiled headers for target_name.
Args:
env: The environment.
target_name: Name of component.
Returns:
The pch .obj file.
"""
if env.Bit('use_precompiled_headers'):
# We enable all warnings on all levels. The goal is to fix the code that
# we have written and to programmatically disable the warnings for the
# code we do not control. This list of warnings should shrink as the code
# gets fixed.
env.FilterOut(CCFLAGS=['/W3'])
env.Append(
CCFLAGS=[
'/W4',
'/Wall',
],
INCLUDES=[
'$MAIN_DIR/precompile/precompile.h'
],
)
env['PCHSTOP'] = '$MAIN_DIR/precompile/precompile.h'
pch_env = env.Clone()
# Must manually force-include the header, as the precompilation step does
# not evaluate $INCLUDES
pch_env.Append(CCFLAGS=['/FI$MAIN_DIR/precompile/precompile.h'])
# Append '_pch' to the target base name to prevent target name collisions.
# One case where this might have occurred is when a .cc file has the same
# base name as the target program/library.
pch_output = pch_env.PCH(
target=target_name.replace('.', '_') + '_pch' + '.pch',
source='$MAIN_DIR/precompile/precompile.cc',
)
env['PCH'] = pch_output[0]
# Return the pch .obj file that is created, so it can be
# included with the inputs of a module
return [pch_output[1]]
|
576cefb85a055cf5e99bb1052d8035b32f37abdc
| 590,688 |
def rm(x, l):
"""List l without element x."""
return [y for y in l if x != y]
|
8a5967f446b3b514eeef5c1cd693c35d6cf927a6
| 576,213 |
import warnings
def rename_internal_nodes(tree, names=None, inplace=False):
""" Names the internal according to level ordering.
The tree will be traversed in level order (i.e. top-down, left to right).
If `names` is not specified, the node with the smallest label (y0)
will be located at the root of the tree, and the node with the largest
label will be located at bottom right corner of the tree.
Parameters
----------
tree : skbio.TreeNode
Tree object where the leafs correspond to the features.
names : list, optional
List of labels to rename the tip names. It is assumed that the
names are listed in level ordering, and the length of the list
is at least as long as the number of internal nodes.
inplace : bool, optional
Specifies if the operation should be done on the original tree or not.
Returns
-------
skbio.TreeNode
Tree with renamed internal nodes.
Raises
------
ValueError:
Raised if `tree` and `name` have incompatible sizes.
"""
if inplace:
_tree = tree
else:
_tree = tree.copy()
non_tips = [n for n in _tree.levelorder() if not n.is_tip()]
if names is not None and len(non_tips) != len(names):
raise ValueError("`_tree` and `names` have incompatible sizes, "
"`_tree` has %d tips, `names` has %d elements." %
(len(non_tips), len(names)))
i = 0
for n in _tree.levelorder():
if not n.is_tip():
if names is None:
label = 'y%i' % i
else:
label = names[i]
if n.name is not None and label == n.name:
warnings.warn("Warning. Internal node (%s) has been replaced "
"with (%s)" % (n.name, label), UserWarning)
n.name = label
i += 1
return _tree
|
d5df42023afe184af41d7553b2e1491b09d5edc1
| 13,286 |
def unf_heat_capacity_oil_Wes_Wright_JkgC(gamma_oil, t_c):
"""
Oil heat capacity in SI. Wes Wright method
:param gamma_oil: specific oil density(by water)
:param t_c: temperature in C
:return: heat capacity in SI - JkgC
ref1 https://www.petroskills.com/blog/entry/crude-oil-and-changing-temperature#.XQkEnogzaM8
"""
return ((2 * 10** (-3) * t_c - 1.429 ) * gamma_oil +
(2.67 * 10 ** (-3)) * t_c + 3.049) * 1000
|
c74a2351e15353e4a556bf1754e50c3bf5e25699
| 51,721 |
import socket
import gc
def get_free_port() -> int:
"""Get free port."""
sock = socket.socket()
sock.bind(('localhost', 0))
port = sock.getsockname()[1]
sock.close()
del sock
gc.collect()
return port
|
2195c087ca632c569edeece7692a182ff4861df5
| 248,864 |
import pickle
def pickle_from_file(path):
"""Load the pickle file from the provided path and returns the object."""
    with open(path, 'rb') as f:
        return pickle.load(f)
|
71cacc34457314853897f2835d52a893d6a56547
| 631,434 |
def is_gr_line(line):
"""Returns True if line is a GR line"""
return line.startswith('#=GR')
|
6d79ad17b9935ae50c19c7278a905daa31887fed
| 274,481 |
def getDimensions(name):
"""Gets the rows and columns of a matrix from a text file
Args:
name (string): filename for matrix
Returns:
tuple: a tuple of the rows and columns
"""
file = open(name, 'r')
    size = file.readline().split()  # split on whitespace
rows = size[0]
cols = size[1]
file.close()
print(rows,cols)
return (rows, cols)
|
339fda5cd34ed20b3c2aa1cd5908d642e3ca0d2e
| 624,370 |
def invalid_spelled_content_pack(pack):
"""
Create a pack with invalid spelled content.
"""
misspelled_files = set()
for i in range(3):
rn = pack.create_release_notes(
version=f"release-note-{i}",
content="\n#### Scipt\n##### SciptName\n- Added a feature"
)
misspelled_files.add(rn.path)
integration = pack.create_integration(
name=f"integration-{i}", yml={"display": "invalidd", "description": "invalidd", "category": "category"}
)
misspelled_files.add(integration.yml.path)
pack.create_incident_field(name=f"incident-field-{i}", content={"invalidd": "invalidd"})
script = pack.create_script(name=f"script-{i}", yml={"comment": "invalidd", "script": "script"})
misspelled_files.add(script.yml.path)
pack.create_layout(name=f"layout-{i}", content={"invalidd": "invalidd"})
return pack, misspelled_files
|
4f26d0b703490ff62e6da4cf438a8622a861a4c6
| 242,420 |
def get_item(dictionary, key):
"""
Return the value at dictionary[key].
For some reason this isn't allowed directly in Django templates.
"""
return dictionary.get(key)
|
f18c8a51f370a7cbee607e42a3a7f53f7ff8fe10
| 565,958 |
def clip(value, minimum, maximum):
"""
Function that clips a value between a minimum and a maximum.
:param value: value to clip
:param minimum: minimum value
:param maximum: maximum value
:return: clipped value
"""
if value < minimum:
return minimum
elif value > maximum:
return maximum
return value
|
17956dea94a70c1f60c21ad65db68a58af8fb75d
| 567,860 |
def get_contents(filename, encoding='utf-8'):
"""Retrieve the file content's as a decoded string."""
with open(filename, "rb") as f:
return f.read().decode(encoding)
|
ad5cec9e9272e0643e5a1bfa4c944900cd43e328
| 364,223 |
import collections
def load_empirical_regions_bed(empirical_regions_bed, sel_pop):
"""Load the list of empirical regions containing putatively selected variants"""
chrom2regions = collections.defaultdict(collections.OrderedDict)
with open(empirical_regions_bed) as empirical_regions_bed_in:
for line in empirical_regions_bed_in:
chrom, beg, end, *rest = line.strip().split('\t')
if sel_pop:
region_name, region_sel_pop = rest
if region_sel_pop != sel_pop:
continue
else:
region_sel_pop = None
chrom2regions[chrom].setdefault(f'{chrom}:{beg}-{end}', []).append(region_sel_pop)
return chrom2regions
|
f01dd15a47ac3eb7627f089c1c3c2729fd00fd86
| 302,034 |
from typing import Dict
from typing import Any
def _make_pod_envconfig(
config: Dict[str, Any], relation_state: Dict[str, Any]
) -> Dict[str, Any]:
"""Generate pod environment configuration.
Args:
config (Dict[str, Any]): configuration information.
relation_state (Dict[str, Any]): relation state information.
Returns:
Dict[str, Any]: pod environment configuration.
"""
envconfig = {
# General configuration
"ALLOW_ANONYMOUS_LOGIN": "yes",
"OSMMON_OPENSTACK_DEFAULT_GRANULARITY": config["openstack_default_granularity"],
"OSMMON_GLOBAL_REQUEST_TIMEOUT": config["global_request_timeout"],
"OSMMON_GLOBAL_LOGLEVEL": config["log_level"],
"OSMMON_COLLECTOR_INTERVAL": config["collector_interval"],
"OSMMON_EVALUATOR_INTERVAL": config["evaluator_interval"],
# Kafka configuration
"OSMMON_MESSAGE_DRIVER": "kafka",
"OSMMON_MESSAGE_HOST": relation_state["message_host"],
"OSMMON_MESSAGE_PORT": relation_state["message_port"],
# Database configuration
"OSMMON_DATABASE_DRIVER": "mongo",
"OSMMON_DATABASE_URI": relation_state["database_uri"],
"OSMMON_DATABASE_COMMONKEY": config["database_commonkey"],
# Prometheus configuration
"OSMMON_PROMETHEUS_URL": f"http://{relation_state['prometheus_host']}:{relation_state['prometheus_port']}",
# VCA configuration
"OSMMON_VCA_HOST": config["vca_host"],
"OSMMON_VCA_USER": config["vca_user"],
"OSMMON_VCA_SECRET": config["vca_password"],
"OSMMON_VCA_CACERT": config["vca_cacert"],
}
return envconfig
|
a4d5413c7606b2a75575c7f69d7b09e124f0bb1d
| 229,711 |
import platform
def get_system_type() -> str:
"""Attempts to determine what type of system we're on, limited to systems this addon can usefully do things with"""
system = platform.uname()[0].lower()
if system in ["windows"]:
return system
return "unknown"
|
a10f417089eecbff138cdfa3c7eb216bdb95828d
| 625,355 |
def pretty_file_size(size: float, precision: int = 2, align: str = ">",
width: int = 0) -> str:
"""Helper function to format size in human readable format.
Parameters
----------
size: float
The size in bytes to be converted into human readable format.
precision: int, optional
Define shown precision.
align: {'<', '^', '>'}, optional
Format align specifier.
width: int
Define maximum width for number.
Returns
-------
human_fmt: str
Human readable representation of given `size`.
Notes
-----
Credit to https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
""" # noqa: E501
template = "{size:{align}{width}.{precision}f} {unit}B"
kwargs = dict(width=width, precision=precision, align=align)
# iterate units (multiples of 1024 bytes)
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(size) < 1024.0:
return template.format(size=size, unit=unit, **kwargs)
size /= 1024.0
return template.format(size=size, unit='Yi', **kwargs)
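Illustrative calls with hypothetical sizes:

pretty_file_size(1536)            # -> '1.50 KiB'
pretty_file_size(3_000_000_000)   # -> '2.79 GiB'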
|
96d094a4959aa678a580686ff5ea20ed1f216f3a
| 122,424 |