index (int64, 0 - 731k) | package (string, lengths 2 - 98, ⌀) | name (string, lengths 1 - 76) | docstring (string, lengths 0 - 281k, ⌀) | code (string, lengths 4 - 1.07M, ⌀) | signature (string, lengths 2 - 42.8k, ⌀) |
---|---|---|---|---|---|
70,869 |
dvc_objects.fs.base
|
chdir
| null |
def chdir(self, path: str):
    raise NotImplementedError
|
(self, path: str)
|
70,870 |
dvc_objects.fs.base
|
checksum
| null |
def checksum(self, path: AnyFSPath) -> str:
    return self.fs.checksum(path)
|
(self, path: str) -> str
|
70,871 |
dvc_objects.fs.base
|
copy
| null |
def copy(self, from_info: AnyFSPath, to_info: AnyFSPath) -> None:
    self.makedirs(self.parent(to_info))
    self.fs.copy(from_info, to_info)
|
(self, from_info: str, to_info: str) -> NoneType
|
70,872 |
dvc_objects.fs.base
|
cp_file
| null |
def cp_file(self, from_info: AnyFSPath, to_info: AnyFSPath, **kwargs: Any) -> None:
    self.fs.cp_file(from_info, to_info, **kwargs)
|
(self, from_info: str, to_info: str, **kwargs: Any) -> NoneType
|
70,873 |
dvc_objects.fs.base
|
created
| null |
def created(self, path: AnyFSPath) -> datetime.datetime:
    return self.fs.created(path)
|
(self, path: str) -> datetime.datetime
|
70,874 |
dvc_objects.fs.base
|
du
| null |
def du(
    self,
    path: AnyFSPath,
    total: bool = True,
    maxdepth: Optional[int] = None,
    **kwargs: Any,
) -> Union[int, dict[AnyFSPath, int]]:
    return self.fs.du(path, total=total, maxdepth=maxdepth, **kwargs)
|
(self, path: str, total: bool = True, maxdepth: Optional[int] = None, **kwargs: Any) -> Union[int, dict[str, int]]
|
70,875 |
dvc_objects.fs.base
|
exists
| null |
def exists(
    self,
    path: Union[AnyFSPath, list[AnyFSPath]],
    callback: fsspec.Callback = DEFAULT_CALLBACK,
    batch_size: Optional[int] = None,
) -> Union[bool, list[bool]]:
    if isinstance(path, str):
        return self.fs.exists(path)

    callback.set_size(len(path))
    jobs = batch_size or self.jobs
    if self.fs.async_impl:
        loop = get_loop()
        fut = asyncio.run_coroutine_threadsafe(
            batch_coros(
                [self.fs._exists(p) for p in path],
                batch_size=jobs,
                callback=callback,
            ),
            loop,
        )
        return fut.result()

    with ThreadPoolExecutor(max_workers=jobs, cancel_on_error=True) as executor:
        it = executor.map(self.fs.exists, path)
        return list(callback.wrap(it))
|
(self, path: Union[str, list[str]], callback: fsspec.callbacks.Callback = <fsspec.callbacks.NoOpCallback object at 0x7f72c79766b0>, batch_size: Optional[int] = None) -> Union[bool, list[bool]]
|
70,876 |
dvc_objects.fs.base
|
find
| null |
def find(
    self,
    path: Union[AnyFSPath, list[AnyFSPath]],
    prefix: bool = False,
    batch_size: Optional[int] = None,
    **kwargs,
) -> Iterator[str]:
    if isinstance(path, str):
        paths = [path]
    else:
        paths = path

    def _make_args(paths: list[AnyFSPath]) -> Iterator[tuple[str, str]]:
        for path in paths:
            if prefix and not path.endswith(self.flavour.sep):
                parent = self.parent(path)
                yield parent, self.parts(path)[-1]
            else:
                yield path, ""

    args = list(_make_args(paths))
    if len(args) == 1:
        path, prefix_str = args[0]
        yield from self.fs.find(path, prefix=prefix_str)
        return

    jobs = batch_size or self.jobs
    if self.fs.async_impl:
        loop = get_loop()
        fut = asyncio.run_coroutine_threadsafe(
            batch_coros(
                [
                    self.fs._find(path, prefix=prefix_str)
                    for path, prefix_str in args
                ],
                batch_size=jobs,
            ),
            loop,
        )
        for result in fut.result():
            yield from result
        return

    # NOTE: this is not parallelized yet since imap_unordered does not
    # handle kwargs. We do not actually support any non-async object
    # storages, so this can be addressed when it is actually needed
    for path, prefix_str in args:
        yield from self.fs.find(path, prefix=prefix_str)
|
(self, path: Union[str, list[str]], prefix: bool = False, batch_size: Optional[int] = None, **kwargs) -> collections.abc.Iterator[str]
|
70,877 |
dvc_objects.fs.base
|
get
| null |
def get(
    self,
    from_info: Union[AnyFSPath, list[AnyFSPath]],
    to_info: Union[AnyFSPath, list[AnyFSPath]],
    callback: fsspec.Callback = DEFAULT_CALLBACK,
    recursive: bool = False,
    batch_size: Optional[int] = None,
) -> None:
    # Currently, the implementation is non-recursive if the paths are
    # provided as a list, and recursive if it's a single path.
    from .local import localfs

    def get_file(rpath, lpath, **kwargs):
        localfs.makedirs(localfs.parent(lpath), exist_ok=True)
        with callback.branched(rpath, lpath) as child:
            self.fs.get_file(rpath, lpath, callback=child, **kwargs)

    if isinstance(from_info, list) and isinstance(to_info, list):
        from_infos: list[AnyFSPath] = from_info
        to_infos: list[AnyFSPath] = to_info
    else:
        assert isinstance(from_info, str)
        assert isinstance(to_info, str)
        if not self.isdir(from_info):
            callback.set_size(1)
            get_file(from_info, to_info)
            callback.relative_update()
            return
        from_infos = list(self.find(from_info))
        if not from_infos:
            return localfs.makedirs(to_info, exist_ok=True)
        to_infos = [
            localfs.join(to_info, *self.relparts(info, from_info))
            for info in from_infos
        ]

    jobs = batch_size or self.jobs
    if self.fs.async_impl:
        return self.fs.get(
            from_infos,
            to_infos,
            callback=callback,
            batch_size=jobs,
        )

    callback.set_size(len(from_infos))
    executor = ThreadPoolExecutor(max_workers=jobs, cancel_on_error=True)
    with executor:
        it = executor.imap_unordered(get_file, from_infos, to_infos)
        list(callback.wrap(it))
|
(self, from_info: Union[str, list[str]], to_info: Union[str, list[str]], callback: fsspec.callbacks.Callback = <fsspec.callbacks.NoOpCallback object at 0x7f72c79766b0>, recursive: bool = False, batch_size: Optional[int] = None) -> NoneType
|
70,878 |
dvc_objects.fs.base
|
get_file
| null |
def get_file(
    self,
    from_info: AnyFSPath,
    to_info: AnyFSPath,
    callback: fsspec.Callback = DEFAULT_CALLBACK,
    **kwargs,
) -> None:
    self.fs.get_file(from_info, to_info, callback=callback, **kwargs)
|
(self, from_info: str, to_info: str, callback: fsspec.callbacks.Callback = <fsspec.callbacks.NoOpCallback object at 0x7f72c79766b0>, **kwargs) -> NoneType
|
70,879 |
dvc_objects.fs.base
|
getcwd
| null |
def getcwd(self) -> str:
    return ""
|
(self) -> str
|
70,880 |
dvc_objects.fs.base
|
glob
| null |
def glob(self, path: AnyFSPath, **kwargs: Any):
    return self.fs.glob(path, **kwargs)
|
(self, path: str, **kwargs: Any)
|
70,881 |
dvc_objects.fs.base
|
link
| null |
def link(self, from_info: AnyFSPath, to_info: AnyFSPath) -> None:
    try:
        return self.fs.link(from_info, to_info)
    except AttributeError as e:
        raise LinkError("hardlink", self, from_info) from e
|
(self, from_info: str, to_info: str) -> NoneType
|
70,882 |
dvc_objects.fs.base
|
head
| null |
def head(self, path: AnyFSPath, size: int = 1024) -> bytes:
    return self.fs.head(path, size=size)
|
(self, path: str, size: int = 1024) -> bytes
|
70,883 |
dvc_objects.fs.base
|
info
| null |
def info(self, path, callback=DEFAULT_CALLBACK, batch_size=None, **kwargs):
    if isinstance(path, str):
        return self.fs.info(path, **kwargs)

    callback.set_size(len(path))
    jobs = batch_size or self.jobs
    if self.fs.async_impl:
        loop = get_loop()
        fut = asyncio.run_coroutine_threadsafe(
            batch_coros(
                [self.fs._info(p, **kwargs) for p in path],
                batch_size=jobs,
                callback=callback,
            ),
            loop,
        )
        return fut.result()

    func = partial(self.fs.info, **kwargs)
    with ThreadPoolExecutor(max_workers=jobs, cancel_on_error=True) as executor:
        it = executor.map(func, path)
        return list(callback.wrap(it))
|
(self, path, callback=<fsspec.callbacks.NoOpCallback object at 0x7f72c79766b0>, batch_size=None, **kwargs)
|
70,884 |
dvc_objects.fs.base
|
is_empty
| null |
def is_empty(self, path: AnyFSPath) -> bool:
    entry = self.info(path)
    if entry["type"] == "directory":
        return not self.fs.ls(path)
    return entry["size"] == 0
|
(self, path: str) -> bool
|
70,885 |
dvc_objects.fs.base
|
is_hardlink
| null |
def is_hardlink(self, path: AnyFSPath) -> bool:
    try:
        return self.fs.is_hardlink(path)
    except AttributeError:
        return False
|
(self, path: str) -> bool
|
70,886 |
dvc_objects.fs.base
|
islink
| null |
def islink(self, path: AnyFSPath) -> bool:
    try:
        return self.fs.islink(path)
    except AttributeError:
        return False
|
(self, path: str) -> bool
|
70,887 |
dvc_objects.fs.base
|
iscopy
| null |
def iscopy(self, path: AnyFSPath) -> bool:
    return not (self.is_symlink(path) or self.is_hardlink(path))
|
(self, path: str) -> bool
|
70,888 |
dvc_objects.fs.base
|
isdir
| null |
def isdir(self, path: AnyFSPath) -> bool:
    return self.fs.isdir(path)
|
(self, path: str) -> bool
|
70,889 |
dvc_objects.fs.base
|
isfile
| null |
def isfile(self, path: AnyFSPath) -> bool:
    return self.fs.isfile(path)
|
(self, path: str) -> bool
|
70,891 |
dvc_objects.fs.base
|
lexists
| null |
def lexists(self, path: AnyFSPath) -> bool:
    return self.fs.lexists(path)
|
(self, path: str) -> bool
|
70,893 |
dvc_objects.fs.base
|
ls
| null |
def ls(self, path, detail=False, **kwargs):
    return self.fs.ls(path, detail=detail, **kwargs)
|
(self, path, detail=False, **kwargs)
|
70,894 |
dvc_objects.fs.base
|
makedirs
| null |
def makedirs(self, path: AnyFSPath, **kwargs: Any) -> None:
    # For object storages, make this method a no-op. The original
    # fs.makedirs() would only check whether the bucket exists and
    # create it if it doesn't; we don't want to support that behavior,
    # and the check costs time, so we simply ignore all
    # mkdir()/makedirs() calls.
    return None
|
(self, path: str, **kwargs: Any) -> NoneType
|
70,895 |
dvc_objects.fs.base
|
mkdir
| null |
def mkdir(
    self, path: AnyFSPath, create_parents: bool = True, **kwargs: Any
) -> None:
    return None
|
(self, path: str, create_parents: bool = True, **kwargs: Any) -> NoneType
|
70,896 |
dvc_objects.fs.base
|
modified
| null |
def modified(self, path: AnyFSPath) -> datetime.datetime:
    return self.fs.modified(path)
|
(self, path: str) -> datetime.datetime
|
70,897 |
dvc_objects.fs.base
|
mv
| null |
def mv(self, from_info: AnyFSPath, to_info: AnyFSPath, **kwargs: Any) -> None:
    self.fs.mv(from_info, to_info)
|
(self, from_info: str, to_info: str, **kwargs: Any) -> NoneType
|
70,899 |
dvc_objects.fs.base
|
normpath
| null |
def normpath(self, path: str) -> str:
    if self.flavour == ntpath:
        return self.flavour.normpath(path)
    parts = list(urlsplit(path))
    parts[2] = self.flavour.normpath(parts[2])
    return urlunsplit(parts)
|
(self, path: str) -> str
|
70,900 |
dvc_objects.fs.base
|
open
| null |
def open(
    self,
    path: AnyFSPath,
    mode: str = "r",
    **kwargs: Any,
) -> "IO[Any]":
    if "b" in mode:
        kwargs.pop("encoding", None)
    return self.fs.open(path, mode=mode, **kwargs)
|
(self, path: str, mode: str = 'r', **kwargs: Any) -> IO[Any]
|
70,901 |
dvc_objects.fs.base
|
pipe
| null |
def pipe(
    self,
    path: Union[AnyFSPath, dict[AnyFSPath, bytes]],
    value: Optional[bytes] = None,
    **kwargs: Any,
) -> None:
    return self.fs.pipe(path, value=value, **kwargs)
|
(self, path: Union[str, dict[str, bytes]], value: Optional[bytes] = None, **kwargs: Any) -> NoneType
|
70,902 |
dvc_objects.fs.base
|
pipe_file
| null |
def pipe_file(self, path: AnyFSPath, value: bytes, **kwargs: Any) -> None:
    return self.fs.pipe_file(path, value, **kwargs)
|
(self, path: str, value: bytes, **kwargs: Any) -> NoneType
|
70,903 |
dvc_objects.fs.base
|
put
| null |
def put(
    self,
    from_info: Union[AnyFSPath, list[AnyFSPath]],
    to_info: Union[AnyFSPath, list[AnyFSPath]],
    callback: fsspec.Callback = DEFAULT_CALLBACK,
    recursive: bool = False,
    batch_size: Optional[int] = None,
):
    jobs = batch_size or self.jobs
    if self.fs.async_impl:
        return self.fs.put(
            from_info,
            to_info,
            callback=callback,
            batch_size=jobs,
            recursive=recursive,
        )

    assert not recursive, "not implemented yet"
    from_infos = [from_info] if isinstance(from_info, str) else from_info
    to_infos = [to_info] if isinstance(to_info, str) else to_info
    callback.set_size(len(from_infos))
    executor = ThreadPoolExecutor(max_workers=jobs, cancel_on_error=True)

    def put_file(from_path, to_path):
        with callback.branched(from_path, to_path) as child:
            return self.put_file(from_path, to_path, callback=child)

    with executor:
        it = executor.imap_unordered(put_file, from_infos, to_infos)
        list(callback.wrap(it))
|
(self, from_info: Union[str, list[str]], to_info: Union[str, list[str]], callback: fsspec.callbacks.Callback = <fsspec.callbacks.NoOpCallback object at 0x7f72c79766b0>, recursive: bool = False, batch_size: Optional[int] = None)
|
70,904 |
dvc_objects.fs.base
|
put_file
| null |
def put_file(
    self,
    from_file: Union[AnyFSPath, "BinaryIO"],
    to_info: AnyFSPath,
    callback: fsspec.Callback = DEFAULT_CALLBACK,
    size: Optional[int] = None,
    **kwargs,
) -> None:
    if size:
        callback.set_size(size)
    if hasattr(from_file, "read"):
        stream = wrap_file(from_file, callback)
        self.upload_fobj(stream, to_info, size=size)
    else:
        assert isinstance(from_file, str)
        self.fs.put_file(os.fspath(from_file), to_info, callback=callback, **kwargs)
    self.fs.invalidate_cache(self.parent(to_info))
|
(self, from_file: Union[str, ForwardRef('BinaryIO')], to_info: str, callback: fsspec.callbacks.Callback = <fsspec.callbacks.NoOpCallback object at 0x7f72c79766b0>, size: Optional[int] = None, **kwargs) -> None
|
70,905 |
dvc_objects.fs.base
|
read_block
| null |
def read_block(
    self,
    path: AnyFSPath,
    offset: int,
    length: int,
    delimiter: Optional[bytes] = None,
) -> bytes:
    return self.fs.read_block(path, offset, length, delimiter=delimiter)
|
(self, path: str, offset: int, length: int, delimiter: Optional[bytes] = None) -> bytes
|
70,907 |
dvc_objects.fs.base
|
read_text
| null |
def read_text(
    self,
    path: AnyFSPath,
    encoding: Optional[str] = None,
    errors: Optional[str] = None,
    newline: Optional[str] = None,
    **kwargs: Any,
) -> str:
    return self.fs.read_text(
        path, encoding=encoding, errors=errors, newline=newline, **kwargs
    )
|
(self, path: str, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, **kwargs: Any) -> str
|
70,908 |
dvc_objects.fs.base
|
reflink
| null |
def reflink(self, from_info: AnyFSPath, to_info: AnyFSPath) -> None:
    try:
        return self.fs.reflink(from_info, to_info)
    except AttributeError as e:
        raise LinkError("reflink", self, from_info) from e
|
(self, from_info: str, to_info: str) -> NoneType
|
70,909 |
dvc_objects.fs.base
|
relparts
| null |
def relparts(self, path: str, start: Optional[str] = None) -> tuple[str, ...]:
    return self.parts(self.relpath(path, start=start))
|
(self, path: str, start: Optional[str] = None) -> tuple[str, ...]
|
70,910 |
dvc_objects.fs.base
|
relpath
| null |
def relpath(self, path: str, start: Optional[str] = None) -> str:
    if start is None:
        start = "."
    return self.flavour.relpath(self.abspath(path), start=self.abspath(start))
|
(self, path: str, start: Optional[str] = None) -> str
|
70,911 |
dvc_objects.fs.base
|
rm
| null |
def rm(
    self,
    path: Union[AnyFSPath, list[AnyFSPath]],
    recursive: bool = False,
    **kwargs,
) -> None:
    self.fs.rm(path, recursive=recursive, **kwargs)
|
(self, path: Union[str, list[str]], recursive: bool = False, **kwargs) -> NoneType
|
70,913 |
dvc_objects.fs.base
|
rm_file
| null |
def rm_file(self, path: AnyFSPath) -> None:
    self.fs.rm_file(path)
|
(self, path: str) -> NoneType
|
70,914 |
dvc_objects.fs.base
|
rmdir
| null |
def rmdir(self, path: AnyFSPath) -> None:
    self.fs.rmdir(path)
|
(self, path: str) -> NoneType
|
70,915 |
dvc_objects.fs.base
|
sign
| null |
def sign(self, path: AnyFSPath, expiration: int = 100, **kwargs: Any) -> str:
    return self.fs.sign(path, expiration=expiration, **kwargs)
|
(self, path: str, expiration: int = 100, **kwargs: Any) -> str
|
70,916 |
dvc_objects.fs.base
|
size
| null |
def size(self, path: AnyFSPath) -> Optional[int]:
    return self.fs.size(path)
|
(self, path: str) -> Optional[int]
|
70,917 |
dvc_objects.fs.base
|
sizes
| null |
def sizes(self, paths: list[AnyFSPath]) -> list[Optional[int]]:
    return self.fs.sizes(paths)
|
(self, paths: list[str]) -> list[typing.Optional[int]]
|
70,918 |
dvc_objects.fs.base
|
symlink
| null |
def symlink(self, from_info: AnyFSPath, to_info: AnyFSPath) -> None:
    try:
        return self.fs.symlink(from_info, to_info)
    except AttributeError as e:
        raise LinkError("symlink", self, from_info) from e
|
(self, from_info: str, to_info: str) -> NoneType
|
70,919 |
dvc_objects.fs.base
|
tail
| null |
def tail(self, path: AnyFSPath, size: int = 1024) -> bytes:
    return self.fs.tail(path, size=size)
|
(self, path: str, size: int = 1024) -> bytes
|
70,920 |
dvc_objects.fs.base
|
touch
| null |
def touch(self, path: AnyFSPath, truncate: bool = True, **kwargs: Any) -> None:
    return self.fs.touch(path, truncate=truncate, **kwargs)
|
(self, path: str, truncate: bool = True, **kwargs: Any) -> NoneType
|
70,921 |
dvc_objects.fs.base
|
ukey
| null |
def ukey(self, path: AnyFSPath) -> str:
    return self.fs.ukey(path)
|
(self, path: str) -> str
|
70,922 |
dvc_objects.fs.base
|
unstrip_protocol
| null |
def unstrip_protocol(self, path: str) -> str:
    return path
|
(self, path: str) -> str
|
70,923 |
dvc_objects.fs.base
|
upload_fobj
| null |
def upload_fobj(self, fobj: IO, to_info: AnyFSPath, **kwargs) -> None:
    self.makedirs(self.parent(to_info))
    with self.open(to_info, "wb") as fdest:
        shutil.copyfileobj(
            fobj,
            fdest,
            length=getattr(fdest, "blocksize", None),  # type: ignore[arg-type]
        )
|
(self, fobj: <class 'IO'>, to_info: str, **kwargs) -> NoneType
|
70,924 |
dvc_objects.fs.base
|
walk
| null |
def walk(self, path: AnyFSPath, **kwargs: Any):
    return self.fs.walk(path, **kwargs)
|
(self, path: str, **kwargs: Any)
|
70,926 |
dvc_objects.fs.base
|
write_text
| null |
def write_text(
    self,
    path: AnyFSPath,
    value: str,
    encoding: Optional[str] = None,
    errors: Optional[str] = None,
    newline: Optional[str] = None,
    **kwargs: Any,
) -> None:
    self.fs.write_text(
        path,
        value,
        encoding=encoding,
        errors=errors,
        newline=newline,
        **kwargs,
    )
|
(self, path: str, value: str, encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, **kwargs: Any) -> NoneType
|
70,927 |
dvc_s3
|
S3FileSystem
| null |
class S3FileSystem(ObjectFileSystem):
protocol = "s3"
REQUIRES: ClassVar[dict[str, str]] = {"s3fs": "s3fs", "boto3": "boto3"}
PARAM_CHECKSUM = "etag"
VERSION_ID_KEY = "versionId"
_GRANTS: ClassVar[dict[str, str]] = {
"grant_full_control": "GrantFullControl",
"grant_read": "GrantRead",
"grant_read_acp": "GrantReadACP",
"grant_write_acp": "GrantWriteACP",
}
_TRANSFER_CONFIG_ALIASES: ClassVar[dict[str, str]] = {
"max_queue_size": "max_io_queue",
"max_concurrent_requests": "max_concurrency",
"multipart_threshold": "multipart_threshold",
"multipart_chunksize": "multipart_chunksize",
}
def getcwd(self):
return self.fs.root_marker
@classmethod
def split_version(cls, path: str) -> tuple[str, Optional[str]]:
parts = list(urlsplit(path))
query = parse_qs(parts[3])
if cls.VERSION_ID_KEY in query:
version_id = first(query[cls.VERSION_ID_KEY])
del query[cls.VERSION_ID_KEY]
parts[3] = urlencode(query)
else:
version_id = None
return urlunsplit(parts), version_id
@classmethod
def join_version(cls, path: str, version_id: Optional[str]) -> str:
parts = list(urlsplit(path))
query = parse_qs(parts[3])
if cls.VERSION_ID_KEY in query:
raise ValueError("path already includes a version query")
parts[3] = f"{cls.VERSION_ID_KEY}={version_id}" if version_id else ""
return urlunsplit(parts)
@classmethod
def version_path(cls, path: str, version_id: Optional[str]) -> str:
path, _ = cls.split_version(path)
return cls.join_version(path, version_id)
@classmethod
def coalesce_version(
cls, path: str, version_id: Optional[str]
) -> tuple[str, Optional[str]]:
path, path_version_id = cls.split_version(path)
versions = {ver for ver in (version_id, path_version_id) if ver}
if len(versions) > 1:
raise ValueError("Path version mismatch: '{path}', '{version_id}'")
return path, (versions.pop() if versions else None)
@classmethod
def _get_kwargs_from_urls(cls, urlpath: str) -> dict[str, Any]:
ret = super()._get_kwargs_from_urls(urlpath)
url_query = ret.get("url_query")
if url_query is not None:
parsed = parse_qs(url_query)
if "versionId" in parsed:
ret["version_aware"] = True
return ret
def _split_s3_config(self, s3_config):
"""Splits the general s3 config into 2 different config
objects, one for transfer.TransferConfig and other is the
general session config"""
from boto3.s3.transfer import TransferConfig
config, transfer_config = {}, {}
for key, value in s3_config.items():
if key in self._TRANSFER_CONFIG_ALIASES:
if key in {"multipart_chunksize", "multipart_threshold"}:
# cast human readable sizes (like 24MiB) to integers
value = human_readable_to_bytes(value)
else:
value = int(value)
transfer_config[self._TRANSFER_CONFIG_ALIASES[key]] = value
else:
config[key] = value
# pylint: disable=attribute-defined-outside-init
self._transfer_config = TransferConfig(**transfer_config)
return config
def _load_aws_config_file(self, profile):
from botocore.configloader import load_config
# pylint: disable=attribute-defined-outside-init
self._transfer_config = None
config_path = os.environ.get("AWS_CONFIG_FILE", _AWS_CONFIG_PATH)
if not os.path.exists(config_path):
return {}
config = load_config(config_path)
profile_config = config["profiles"].get(profile or "default")
if not profile_config:
return {}
s3_config = profile_config.get("s3", {})
return self._split_s3_config(s3_config)
def _prepare_credentials(self, **config):
import base64
from flatten_dict import flatten, unflatten
from s3fs.utils import SSEParams
login_info = defaultdict(dict)
login_info["version_aware"] = config.get("version_aware", False)
# credentials
login_info["key"] = config.get("access_key_id")
login_info["secret"] = config.get("secret_access_key")
login_info["token"] = config.get("session_token")
# session configuration
login_info["profile"] = config.get("profile")
login_info["use_ssl"] = config.get("use_ssl", True)
login_info["anon"] = config.get("allow_anonymous_login")
# extra client configuration
client = login_info["client_kwargs"]
client["region_name"] = config.get("region")
client["endpoint_url"] = config.get("endpointurl")
client["verify"] = config.get("ssl_verify")
# timeout configuration
config_kwargs = login_info["config_kwargs"]
config_kwargs["read_timeout"] = config.get("read_timeout")
config_kwargs["connect_timeout"] = config.get("connect_timeout")
# encryptions
additional = login_info["s3_additional_kwargs"]
sse_customer_key = None
if config.get("sse_customer_key"):
if config.get("sse_kms_key_id"):
raise ConfigError(
"`sse_kms_key_id` and `sse_customer_key` AWS S3 config "
"options are mutually exclusive"
)
sse_customer_key = base64.b64decode(config.get("sse_customer_key"))
sse_customer_algorithm = config.get("sse_customer_algorithm")
if not sse_customer_algorithm and sse_customer_key:
sse_customer_algorithm = "AES256"
sse_params = SSEParams(
server_side_encryption=config.get("sse"),
sse_customer_algorithm=sse_customer_algorithm,
sse_customer_key=sse_customer_key,
sse_kms_key_id=config.get("sse_kms_key_id"),
)
additional.update(sse_params.to_kwargs())
additional["ACL"] = config.get("acl")
for grant_option, grant_key in self._GRANTS.items():
if config.get(grant_option):
if additional["ACL"]:
raise ConfigError(
"`acl` and `grant_*` AWS S3 config options "
"are mutually exclusive"
)
additional[grant_key] = config[grant_option]
# config kwargs
session_config = login_info["config_kwargs"]
session_config["s3"] = self._load_aws_config_file(login_info["profile"])
shared_creds = config.get("credentialpath")
if shared_creds:
os.environ.setdefault("AWS_SHARED_CREDENTIALS_FILE", shared_creds)
if (
client["region_name"] is None
and session_config["s3"].get("region_name") is None
and os.getenv("AWS_REGION") is None
):
# Enable bucket region caching
login_info["cache_regions"] = config.get("cache_regions", True)
config_path = config.get("configpath")
if config_path:
os.environ.setdefault("AWS_CONFIG_FILE", config_path)
d = flatten(login_info, reducer="dot")
return unflatten(
{key: value for key, value in d.items() if value is not None},
splitter="dot",
)
@wrap_prop(threading.Lock())
@cached_property
def fs(self):
from s3fs import S3FileSystem as _S3FileSystem
s3_filesystem = _S3FileSystem(**self.fs_args)
s3_filesystem.connect()
return s3_filesystem
@classmethod
def _strip_protocol(cls, path: str) -> str:
from fsspec.utils import infer_storage_options
return infer_storage_options(path)["path"]
def unstrip_protocol(self, path):
return "s3://" + path.lstrip("/")
|
(fs=None, **kwargs: Any)
|
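A minimal usage sketch for the version helpers shown above. They are classmethods, so no S3 connection is needed; the sketch assumes dvc_s3 is importable in the current environment and uses a made-up bucket and version id:

from dvc_s3 import S3FileSystem

path, version_id = S3FileSystem.split_version("s3://bucket/data.csv?versionId=abc123")
# path == "s3://bucket/data.csv", version_id == "abc123"
assert S3FileSystem.version_path(path, version_id) == "s3://bucket/data.csv?versionId=abc123"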
70,930 |
dvc_s3
|
_load_aws_config_file
| null |
def _load_aws_config_file(self, profile):
    from botocore.configloader import load_config

    # pylint: disable=attribute-defined-outside-init
    self._transfer_config = None
    config_path = os.environ.get("AWS_CONFIG_FILE", _AWS_CONFIG_PATH)
    if not os.path.exists(config_path):
        return {}
    config = load_config(config_path)
    profile_config = config["profiles"].get(profile or "default")
    if not profile_config:
        return {}
    s3_config = profile_config.get("s3", {})
    return self._split_s3_config(s3_config)
|
(self, profile)
|
70,931 |
dvc_s3
|
_prepare_credentials
| null |
def _prepare_credentials(self, **config):
import base64
from flatten_dict import flatten, unflatten
from s3fs.utils import SSEParams
login_info = defaultdict(dict)
login_info["version_aware"] = config.get("version_aware", False)
# credentials
login_info["key"] = config.get("access_key_id")
login_info["secret"] = config.get("secret_access_key")
login_info["token"] = config.get("session_token")
# session configuration
login_info["profile"] = config.get("profile")
login_info["use_ssl"] = config.get("use_ssl", True)
login_info["anon"] = config.get("allow_anonymous_login")
# extra client configuration
client = login_info["client_kwargs"]
client["region_name"] = config.get("region")
client["endpoint_url"] = config.get("endpointurl")
client["verify"] = config.get("ssl_verify")
# timeout configuration
config_kwargs = login_info["config_kwargs"]
config_kwargs["read_timeout"] = config.get("read_timeout")
config_kwargs["connect_timeout"] = config.get("connect_timeout")
# encryptions
additional = login_info["s3_additional_kwargs"]
sse_customer_key = None
if config.get("sse_customer_key"):
if config.get("sse_kms_key_id"):
raise ConfigError(
"`sse_kms_key_id` and `sse_customer_key` AWS S3 config "
"options are mutually exclusive"
)
sse_customer_key = base64.b64decode(config.get("sse_customer_key"))
sse_customer_algorithm = config.get("sse_customer_algorithm")
if not sse_customer_algorithm and sse_customer_key:
sse_customer_algorithm = "AES256"
sse_params = SSEParams(
server_side_encryption=config.get("sse"),
sse_customer_algorithm=sse_customer_algorithm,
sse_customer_key=sse_customer_key,
sse_kms_key_id=config.get("sse_kms_key_id"),
)
additional.update(sse_params.to_kwargs())
additional["ACL"] = config.get("acl")
for grant_option, grant_key in self._GRANTS.items():
if config.get(grant_option):
if additional["ACL"]:
raise ConfigError(
"`acl` and `grant_*` AWS S3 config options "
"are mutually exclusive"
)
additional[grant_key] = config[grant_option]
# config kwargs
session_config = login_info["config_kwargs"]
session_config["s3"] = self._load_aws_config_file(login_info["profile"])
shared_creds = config.get("credentialpath")
if shared_creds:
os.environ.setdefault("AWS_SHARED_CREDENTIALS_FILE", shared_creds)
if (
client["region_name"] is None
and session_config["s3"].get("region_name") is None
and os.getenv("AWS_REGION") is None
):
# Enable bucket region caching
login_info["cache_regions"] = config.get("cache_regions", True)
config_path = config.get("configpath")
if config_path:
os.environ.setdefault("AWS_CONFIG_FILE", config_path)
d = flatten(login_info, reducer="dot")
return unflatten(
{key: value for key, value in d.items() if value is not None},
splitter="dot",
)
|
(self, **config)
|
70,932 |
dvc_s3
|
_split_s3_config
|
Splits the general s3 config into 2 different config
objects, one for transfer.TransferConfig and other is the
general session config
|
def _split_s3_config(self, s3_config):
    """Splits the general s3 config into 2 different config
    objects, one for transfer.TransferConfig and other is the
    general session config"""
    from boto3.s3.transfer import TransferConfig

    config, transfer_config = {}, {}
    for key, value in s3_config.items():
        if key in self._TRANSFER_CONFIG_ALIASES:
            if key in {"multipart_chunksize", "multipart_threshold"}:
                # cast human readable sizes (like 24MiB) to integers
                value = human_readable_to_bytes(value)
            else:
                value = int(value)
            transfer_config[self._TRANSFER_CONFIG_ALIASES[key]] = value
        else:
            config[key] = value
    # pylint: disable=attribute-defined-outside-init
    self._transfer_config = TransferConfig(**transfer_config)
    return config
|
(self, s3_config)
|
70,947 |
dvc_s3
|
getcwd
| null |
def getcwd(self):
    return self.fs.root_marker
|
(self)
|
70,990 |
dvc_s3
|
unstrip_protocol
| null |
def unstrip_protocol(self, path):
    return "s3://" + path.lstrip("/")
|
(self, path)
|
70,995 |
funcy.objects
|
cached_property
|
Decorator that converts a method with a single self argument into
a property cached on the instance.
|
class cached_property(object):
    """
    Decorator that converts a method with a single self argument into
    a property cached on the instance.
    """
    # NOTE: implementation borrowed from Django.
    # NOTE: we use fget, fset and fdel attributes to mimic @property.
    fset = fdel = None

    def __init__(self, fget):
        self.fget = fget
        self.__doc__ = getattr(fget, '__doc__')

    def __get__(self, instance, type=None):
        if instance is None:
            return self
        res = instance.__dict__[self.fget.__name__] = self.fget(instance)
        return res
|
(fget)
|
70,996 |
funcy.objects
|
__get__
| null |
def __get__(self, instance, type=None):
    if instance is None:
        return self
    res = instance.__dict__[self.fget.__name__] = self.fget(instance)
    return res
|
(self, instance, type=None)
|
70,997 |
funcy.objects
|
__init__
| null |
def __init__(self, fget):
    self.fget = fget
    self.__doc__ = getattr(fget, '__doc__')
|
(self, fget)
|
70,999 |
funcy.seqs
|
first
|
Returns the first item in the sequence.
Returns None if the sequence is empty.
|
def first(seq):
    """Returns the first item in the sequence.
    Returns None if the sequence is empty."""
    return next(iter(seq), None)
|
(seq)
|
71,000 |
dvc_s3
|
human_readable_to_bytes
| null |
def human_readable_to_bytes(value: str) -> int:
    value = value.lower()
    suffix = ""
    if value.endswith(tuple(MULTIPLIERS.keys())):
        size = 2
        size += value[-2] == "i"  # KiB, MiB etc
        value, suffix = value[:-size], value[-size:]
    multiplier = MULTIPLIERS.get(suffix, 1)
    return int(value) * multiplier
|
(value: str) -> int
|
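A hedged sketch of how human_readable_to_bytes behaves. MULTIPLIERS is defined elsewhere in dvc_s3 and is not shown here; the table below is an assumption chosen only to match the 2- or 3-character suffix slicing in the function:

# Assumed suffix table (not the actual dvc_s3 constant).
MULTIPLIERS = {
    "kb": 1000, "mb": 1000**2, "gb": 1000**3,
    "kib": 1024, "mib": 1024**2, "gib": 1024**3,
}

# With the function above in scope and this assumed table:
# human_readable_to_bytes("24MiB") -> 24 * 1024**2
# human_readable_to_bytes("5kb")   -> 5 * 1000
# human_readable_to_bytes("1024")  -> 1024 (no suffix, multiplier defaults to 1)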
71,005 |
urllib.parse
|
urlsplit
|
Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
The result is a named 5-tuple with fields corresponding to the
above. It is either a SplitResult or SplitResultBytes object,
depending on the type of the url parameter.
The username, password, hostname, and port sub-components of netloc
can also be accessed as attributes of the returned object.
The scheme argument provides the default value of the scheme
component when no scheme is found in url.
If allow_fragments is False, no attempt is made to separate the
fragment component from the previous component, which can be either
path or query.
Note that % escapes are not expanded.
|
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
The result is a named 5-tuple with fields corresponding to the
above. It is either a SplitResult or SplitResultBytes object,
depending on the type of the url parameter.
The username, password, hostname, and port sub-components of netloc
can also be accessed as attributes of the returned object.
The scheme argument provides the default value of the scheme
component when no scheme is found in url.
If allow_fragments is False, no attempt is made to separate the
fragment component from the previous component, which can be either
path or query.
Note that % escapes are not expanded.
"""
url, scheme, _coerce_result = _coerce_args(url, scheme)
# Only lstrip url as some applications rely on preserving trailing space.
# (https://url.spec.whatwg.org/#concept-basic-url-parser would strip both)
url = url.lstrip(_WHATWG_C0_CONTROL_OR_SPACE)
scheme = scheme.strip(_WHATWG_C0_CONTROL_OR_SPACE)
for b in _UNSAFE_URL_BYTES_TO_REMOVE:
url = url.replace(b, "")
scheme = scheme.replace(b, "")
allow_fragments = bool(allow_fragments)
key = url, scheme, allow_fragments, type(url), type(scheme)
cached = _parse_cache.get(key, None)
if cached:
return _coerce_result(cached)
if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
clear_cache()
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
for c in url[:i]:
if c not in scheme_chars:
break
else:
scheme, url = url[:i].lower(), url[i+1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
_checknetloc(netloc)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return _coerce_result(v)
|
(url, scheme='', allow_fragments=True)
|
71,006 |
urllib.parse
|
urlunsplit
|
Combine the elements of a tuple as returned by urlsplit() into a
complete URL as a string. The data argument can be any five-item iterable.
This may result in a slightly different, but equivalent URL, if the URL that
was parsed originally had unnecessary delimiters (for example, a ? with an
empty query; the RFC states that these are equivalent).
|
def urlunsplit(components):
"""Combine the elements of a tuple as returned by urlsplit() into a
complete URL as a string. The data argument can be any five-item iterable.
This may result in a slightly different, but equivalent URL, if the URL that
was parsed originally had unnecessary delimiters (for example, a ? with an
empty query; the RFC states that these are equivalent)."""
scheme, netloc, url, query, fragment, _coerce_result = (
_coerce_args(*components))
if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
if url and url[:1] != '/': url = '/' + url
url = '//' + (netloc or '') + url
if scheme:
url = scheme + ':' + url
if query:
url = url + '?' + query
if fragment:
url = url + '#' + fragment
return _coerce_result(url)
|
(components)
|
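A short standard-library round trip illustrating the urlsplit/urlunsplit pair documented above, on the kind of S3 URL that split_version manipulates:

from urllib.parse import urlsplit, urlunsplit

parts = urlsplit("s3://bucket/data.csv?versionId=abc123#frag")
# SplitResult(scheme='s3', netloc='bucket', path='/data.csv', query='versionId=abc123', fragment='frag')
assert parts.scheme == "s3" and parts.query == "versionId=abc123"
assert urlunsplit(parts) == "s3://bucket/data.csv?versionId=abc123#frag"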
71,007 |
funcy.objects
|
wrap_prop
|
Wrap a property accessors with a context manager
|
def wrap_prop(ctx):
"""Wrap a property accessors with a context manager"""
def decorator(prop):
class WrapperProp(object):
def __repr__(self):
return repr(prop)
def __get__(self, instance, type=None):
if instance is None:
return self
with ctx:
return prop.__get__(instance, type)
if hasattr(prop, '__set__'):
def __set__(self, name, value):
with ctx:
return prop.__set__(name, value)
if hasattr(prop, '__del__'):
def __del__(self, name):
with ctx:
return prop.__del__(name)
return WrapperProp()
return decorator
|
(ctx)
|
71,008 |
uhashring.ring
|
HashRing
|
Implement a consistent hashing ring.
|
class HashRing:
"""Implement a consistent hashing ring."""
def __init__(self, nodes=[], **kwargs):
"""Create a new HashRing given the implementation.
:param nodes: nodes used to create the continuum (see doc for format).
:param hash_fn: use this callable function to hash keys, can be set to
'ketama' to use the ketama compatible implementation.
:param vnodes: default number of vnodes per node.
:param weight_fn: use this function to calculate the node's weight.
"""
hash_fn = kwargs.get("hash_fn", None)
vnodes = kwargs.get("vnodes", None)
weight_fn = kwargs.get("weight_fn", None)
if hash_fn == "ketama":
ketama_args = {k: v for k, v in kwargs.items() if k in ("replicas",)}
if vnodes is None:
vnodes = 40
self.runtime = KetamaRing(**ketama_args)
else:
if vnodes is None:
vnodes = 160
self.runtime = MetaRing(hash_fn)
self._default_vnodes = vnodes
self.hashi = self.runtime.hashi
if weight_fn and not hasattr(weight_fn, "__call__"):
raise TypeError("weight_fn should be a callable function")
self._weight_fn = weight_fn
if self._configure_nodes(nodes):
self.runtime._create_ring(self.runtime._nodes.items())
def _configure_nodes(self, nodes):
"""Parse and set up the given nodes.
:param nodes: nodes used to create the continuum (see doc for format).
"""
if isinstance(nodes, str):
nodes = [nodes]
elif not isinstance(nodes, (dict, list)):
raise ValueError(
"nodes configuration should be a list or a dict," " got {}".format(type(nodes))
)
conf_changed = False
for node in nodes:
conf = {
"hostname": node,
"instance": None,
"nodename": node,
"port": None,
"vnodes": self._default_vnodes,
"weight": 1,
}
current_conf = self.runtime._nodes.get(node, {})
nodename = node
# new node, trigger a ring update
if not current_conf:
conf_changed = True
# complex config
if isinstance(nodes, dict):
node_conf = nodes[node]
if isinstance(node_conf, int):
conf["weight"] = node_conf
elif isinstance(node_conf, dict):
for k, v in node_conf.items():
if k in conf:
conf[k] = v
# changing those config trigger a ring update
if k in ["nodename", "vnodes", "weight"]:
if current_conf.get(k) != v:
conf_changed = True
else:
raise ValueError(
"node configuration should be a dict or an int,"
" got {}".format(type(node_conf))
)
if self._weight_fn:
conf["weight"] = self._weight_fn(**conf)
# changing the weight of a node trigger a ring update
if current_conf.get("weight") != conf["weight"]:
conf_changed = True
self.runtime._nodes[nodename] = conf
return conf_changed
def __delitem__(self, nodename):
"""Remove the given node.
:param nodename: the node name.
"""
self.runtime._remove_node(nodename)
remove_node = __delitem__
def __getitem__(self, key):
"""Returns the instance of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "instance")
get_node_instance = __getitem__
def __setitem__(self, nodename, conf={"weight": 1}):
"""Add the given node with its associated configuration.
:param nodename: the node name.
:param conf: the node configuration.
"""
if self._configure_nodes({nodename: conf}):
self.runtime._create_ring([(nodename, self._nodes[nodename])])
add_node = __setitem__
def _get_pos(self, key):
"""Get the index of the given key in the sorted key list.
We return the position with the nearest hash based on
the provided key unless we reach the end of the continuum/ring
in which case we return the 0 (beginning) index position.
:param key: the key to hash and look for.
"""
p = bisect(self.runtime._keys, self.hashi(key))
if p == len(self.runtime._keys):
return 0
else:
return p
def _get(self, key, what):
"""Generic getter magic method.
The node with the nearest but not less hash value is returned.
:param key: the key to look for.
:param what: the information to look for in, allowed values:
- instance (default): associated node instance
- nodename: node name
- pos: index of the given key in the ring
- tuple: ketama compatible (pos, name) tuple
- weight: node weight
"""
if not self.runtime._ring:
return None
pos = self._get_pos(key)
if what == "pos":
return pos
nodename = self.runtime._ring[self.runtime._keys[pos]]
if what in ["hostname", "instance", "port", "weight"]:
return self.runtime._nodes[nodename][what]
elif what == "dict":
return self.runtime._nodes[nodename]
elif what == "nodename":
return nodename
elif what == "tuple":
return (self.runtime._keys[pos], nodename)
def get(self, key):
"""Returns the node object dict matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "dict")
def get_instances(self):
"""Returns a list of the instances of all the configured nodes."""
return [c.get("instance") for c in self.runtime._nodes.values() if c.get("instance")]
def get_key(self, key):
"""Alias of ketama hashi method, returns the hash of the given key.
This method is present for hash_ring compatibility.
:param key: the key to look for.
"""
return self.hashi(key)
def get_node(self, key):
"""Returns the node name of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "nodename")
def get_node_hostname(self, key):
"""Returns the hostname of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "hostname")
def get_node_port(self, key):
"""Returns the port of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "port")
def get_node_pos(self, key):
"""Returns the index position of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "pos")
def get_node_weight(self, key):
"""Returns the weight of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "weight")
def get_nodes(self):
"""Returns a list of the names of all the configured nodes."""
return self.runtime._nodes.keys()
def get_points(self):
"""Returns a ketama compatible list of (position, nodename) tuples."""
return [(k, self.runtime._ring[k]) for k in self.runtime._keys]
def get_server(self, key):
"""Returns a ketama compatible (position, nodename) tuple.
:param key: the key to look for.
"""
return self._get(key, "tuple")
def iterate_nodes(self, key, distinct=True):
"""hash_ring compatibility implementation.
Given a string key it returns the nodes as a generator that
can hold the key.
The generator iterates one time through the ring
starting at the correct position.
if `distinct` is set, then the nodes returned will be unique,
i.e. no virtual copies will be returned.
"""
if not self.runtime._ring:
yield None
else:
for node in self.range(key, unique=distinct):
yield node["nodename"]
def print_continuum(self):
"""Prints a ketama compatible continuum report."""
numpoints = len(self.runtime._keys)
if numpoints:
print(f"Numpoints in continuum: {numpoints}")
else:
print("Continuum empty")
for p in self.get_points():
point, node = p
print(f"{node} ({point})")
def range(self, key, size=None, unique=True):
"""Returns a generator of nodes' configuration available
in the continuum/ring.
:param key: the key to look for.
:param size: limit the list to at most this number of nodes.
:param unique: a node may only appear once in the list (default True).
"""
all_nodes = set()
if unique:
size = size or len(self.runtime._nodes)
else:
all_nodes = []
pos = self._get_pos(key)
for key in self.runtime._keys[pos:]:
nodename = self.runtime._ring[key]
if unique:
if nodename in all_nodes:
continue
all_nodes.add(nodename)
else:
all_nodes.append(nodename)
yield self.runtime._nodes[nodename]
if len(all_nodes) == size:
break
else:
for i, key in enumerate(self.runtime._keys):
if i < pos:
nodename = self.runtime._ring[key]
if unique:
if nodename in all_nodes:
continue
all_nodes.add(nodename)
else:
all_nodes.append(nodename)
yield self.runtime._nodes[nodename]
if len(all_nodes) == size:
break
def regenerate(self):
self.runtime._create_ring(self.runtime._nodes.items())
@property
def conf(self):
return self.runtime._nodes
nodes = conf
@property
def distribution(self):
return self.runtime._distribution
@property
def ring(self):
return self.runtime._ring
continuum = ring
@property
def size(self):
return len(self.runtime._ring)
@property
def _ring(self):
return self.runtime._ring
@property
def _nodes(self):
return self.runtime._nodes
@property
def _keys(self):
return self.runtime._keys
|
(nodes=[], **kwargs)
|
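A small usage sketch for HashRing, assuming the package's usual top-level export (from uhashring import HashRing) and made-up node names:

from uhashring import HashRing

ring = HashRing(nodes=["cache1", "cache2", "cache3"])
owner = ring.get_node("user:42")          # name of the node that owns this key
print(owner, ring.get_node_pos("user:42"), ring.size)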
71,009 |
uhashring.ring
|
__delitem__
|
Remove the given node.
:param nodename: the node name.
|
def __delitem__(self, nodename):
"""Remove the given node.
:param nodename: the node name.
"""
self.runtime._remove_node(nodename)
|
(self, nodename)
|
71,010 |
uhashring.ring
|
__getitem__
|
Returns the instance of the node matching the hashed key.
:param key: the key to look for.
|
def __getitem__(self, key):
"""Returns the instance of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "instance")
|
(self, key)
|
71,011 |
uhashring.ring
|
__init__
|
Create a new HashRing given the implementation.
:param nodes: nodes used to create the continuum (see doc for format).
:param hash_fn: use this callable function to hash keys, can be set to
'ketama' to use the ketama compatible implementation.
:param vnodes: default number of vnodes per node.
:param weight_fn: use this function to calculate the node's weight.
|
def __init__(self, nodes=[], **kwargs):
"""Create a new HashRing given the implementation.
:param nodes: nodes used to create the continuum (see doc for format).
:param hash_fn: use this callable function to hash keys, can be set to
'ketama' to use the ketama compatible implementation.
:param vnodes: default number of vnodes per node.
:param weight_fn: use this function to calculate the node's weight.
"""
hash_fn = kwargs.get("hash_fn", None)
vnodes = kwargs.get("vnodes", None)
weight_fn = kwargs.get("weight_fn", None)
if hash_fn == "ketama":
ketama_args = {k: v for k, v in kwargs.items() if k in ("replicas",)}
if vnodes is None:
vnodes = 40
self.runtime = KetamaRing(**ketama_args)
else:
if vnodes is None:
vnodes = 160
self.runtime = MetaRing(hash_fn)
self._default_vnodes = vnodes
self.hashi = self.runtime.hashi
if weight_fn and not hasattr(weight_fn, "__call__"):
raise TypeError("weight_fn should be a callable function")
self._weight_fn = weight_fn
if self._configure_nodes(nodes):
self.runtime._create_ring(self.runtime._nodes.items())
|
(self, nodes=[], **kwargs)
|
71,012 |
uhashring.ring
|
__setitem__
|
Add the given node with its associated configuration.
:param nodename: the node name.
:param conf: the node configuration.
|
def __setitem__(self, nodename, conf={"weight": 1}):
"""Add the given node with its associated configuration.
:param nodename: the node name.
:param conf: the node configuration.
"""
if self._configure_nodes({nodename: conf}):
self.runtime._create_ring([(nodename, self._nodes[nodename])])
|
(self, nodename, conf={'weight': 1})
|
71,013 |
uhashring.ring
|
_configure_nodes
|
Parse and set up the given nodes.
:param nodes: nodes used to create the continuum (see doc for format).
|
def _configure_nodes(self, nodes):
"""Parse and set up the given nodes.
:param nodes: nodes used to create the continuum (see doc for format).
"""
if isinstance(nodes, str):
nodes = [nodes]
elif not isinstance(nodes, (dict, list)):
raise ValueError(
"nodes configuration should be a list or a dict," " got {}".format(type(nodes))
)
conf_changed = False
for node in nodes:
conf = {
"hostname": node,
"instance": None,
"nodename": node,
"port": None,
"vnodes": self._default_vnodes,
"weight": 1,
}
current_conf = self.runtime._nodes.get(node, {})
nodename = node
# new node, trigger a ring update
if not current_conf:
conf_changed = True
# complex config
if isinstance(nodes, dict):
node_conf = nodes[node]
if isinstance(node_conf, int):
conf["weight"] = node_conf
elif isinstance(node_conf, dict):
for k, v in node_conf.items():
if k in conf:
conf[k] = v
# changing those config trigger a ring update
if k in ["nodename", "vnodes", "weight"]:
if current_conf.get(k) != v:
conf_changed = True
else:
raise ValueError(
"node configuration should be a dict or an int,"
" got {}".format(type(node_conf))
)
if self._weight_fn:
conf["weight"] = self._weight_fn(**conf)
# changing the weight of a node trigger a ring update
if current_conf.get("weight") != conf["weight"]:
conf_changed = True
self.runtime._nodes[nodename] = conf
return conf_changed
|
(self, nodes)
|
71,014 |
uhashring.ring
|
_get
|
Generic getter magic method.
The node with the nearest but not less hash value is returned.
:param key: the key to look for.
:param what: the information to look for in, allowed values:
- instance (default): associated node instance
- nodename: node name
- pos: index of the given key in the ring
- tuple: ketama compatible (pos, name) tuple
- weight: node weight
|
def _get(self, key, what):
"""Generic getter magic method.
The node with the nearest but not less hash value is returned.
:param key: the key to look for.
:param what: the information to look for in, allowed values:
- instance (default): associated node instance
- nodename: node name
- pos: index of the given key in the ring
- tuple: ketama compatible (pos, name) tuple
- weight: node weight
"""
if not self.runtime._ring:
return None
pos = self._get_pos(key)
if what == "pos":
return pos
nodename = self.runtime._ring[self.runtime._keys[pos]]
if what in ["hostname", "instance", "port", "weight"]:
return self.runtime._nodes[nodename][what]
elif what == "dict":
return self.runtime._nodes[nodename]
elif what == "nodename":
return nodename
elif what == "tuple":
return (self.runtime._keys[pos], nodename)
|
(self, key, what)
|
71,015 |
uhashring.ring
|
_get_pos
|
Get the index of the given key in the sorted key list.
We return the position with the nearest hash based on
the provided key unless we reach the end of the continuum/ring
in which case we return the 0 (beginning) index position.
:param key: the key to hash and look for.
|
def _get_pos(self, key):
"""Get the index of the given key in the sorted key list.
We return the position with the nearest hash based on
the provided key unless we reach the end of the continuum/ring
in which case we return the 0 (beginning) index position.
:param key: the key to hash and look for.
"""
p = bisect(self.runtime._keys, self.hashi(key))
if p == len(self.runtime._keys):
return 0
else:
return p
|
(self, key)
|
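A standalone illustration (hypothetical keys, not taken from uhashring) of the wrap-around rule _get_pos describes: a hash past the last ring key maps back to position 0.

from bisect import bisect

keys = [10, 50, 90]  # pretend these are the sorted ring hashes
def pos(h):
    p = bisect(keys, h)
    return 0 if p == len(keys) else p

print(pos(5), pos(60), pos(95))  # 0 2 0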
71,017 |
uhashring.ring
|
get
|
Returns the node object dict matching the hashed key.
:param key: the key to look for.
|
def get(self, key):
"""Returns the node object dict matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "dict")
|
(self, key)
|
71,018 |
uhashring.ring
|
get_instances
|
Returns a list of the instances of all the configured nodes.
|
def get_instances(self):
"""Returns a list of the instances of all the configured nodes."""
return [c.get("instance") for c in self.runtime._nodes.values() if c.get("instance")]
|
(self)
|
71,019 |
uhashring.ring
|
get_key
|
Alias of ketama hashi method, returns the hash of the given key.
This method is present for hash_ring compatibility.
:param key: the key to look for.
|
def get_key(self, key):
"""Alias of ketama hashi method, returns the hash of the given key.
This method is present for hash_ring compatibility.
:param key: the key to look for.
"""
return self.hashi(key)
|
(self, key)
|
71,020 |
uhashring.ring
|
get_node
|
Returns the node name of the node matching the hashed key.
:param key: the key to look for.
|
def get_node(self, key):
"""Returns the node name of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "nodename")
|
(self, key)
|
71,021 |
uhashring.ring
|
get_node_hostname
|
Returns the hostname of the node matching the hashed key.
:param key: the key to look for.
|
def get_node_hostname(self, key):
"""Returns the hostname of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "hostname")
|
(self, key)
|
71,023 |
uhashring.ring
|
get_node_port
|
Returns the port of the node matching the hashed key.
:param key: the key to look for.
|
def get_node_port(self, key):
"""Returns the port of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "port")
|
(self, key)
|
71,024 |
uhashring.ring
|
get_node_pos
|
Returns the index position of the node matching the hashed key.
:param key: the key to look for.
|
def get_node_pos(self, key):
"""Returns the index position of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "pos")
|
(self, key)
|
71,025 |
uhashring.ring
|
get_node_weight
|
Returns the weight of the node matching the hashed key.
:param key: the key to look for.
|
def get_node_weight(self, key):
"""Returns the weight of the node matching the hashed key.
:param key: the key to look for.
"""
return self._get(key, "weight")
|
(self, key)
|
71,026 |
uhashring.ring
|
get_nodes
|
Returns a list of the names of all the configured nodes.
|
def get_nodes(self):
"""Returns a list of the names of all the configured nodes."""
return self.runtime._nodes.keys()
|
(self)
|
71,027 |
uhashring.ring
|
get_points
|
Returns a ketama compatible list of (position, nodename) tuples.
|
def get_points(self):
"""Returns a ketama compatible list of (position, nodename) tuples."""
return [(k, self.runtime._ring[k]) for k in self.runtime._keys]
|
(self)
|
71,028 |
uhashring.ring
|
get_server
|
Returns a ketama compatible (position, nodename) tuple.
:param key: the key to look for.
|
def get_server(self, key):
"""Returns a ketama compatible (position, nodename) tuple.
:param key: the key to look for.
"""
return self._get(key, "tuple")
|
(self, key)
|
71,029 |
uhashring.ring
|
iterate_nodes
|
hash_ring compatibility implementation.
Given a string key it returns the nodes as a generator that
can hold the key.
The generator iterates one time through the ring
starting at the correct position.
if `distinct` is set, then the nodes returned will be unique,
i.e. no virtual copies will be returned.
|
def iterate_nodes(self, key, distinct=True):
"""hash_ring compatibility implementation.
Given a string key it returns the nodes as a generator that
can hold the key.
The generator iterates one time through the ring
starting at the correct position.
if `distinct` is set, then the nodes returned will be unique,
i.e. no virtual copies will be returned.
"""
if not self.runtime._ring:
yield None
else:
for node in self.range(key, unique=distinct):
yield node["nodename"]
|
(self, key, distinct=True)
|
71,030 |
uhashring.ring
|
print_continuum
|
Prints a ketama compatible continuum report.
|
def print_continuum(self):
"""Prints a ketama compatible continuum report."""
numpoints = len(self.runtime._keys)
if numpoints:
print(f"Numpoints in continuum: {numpoints}")
else:
print("Continuum empty")
for p in self.get_points():
point, node = p
print(f"{node} ({point})")
|
(self)
|
71,031 |
uhashring.ring
|
range
|
Returns a generator of nodes' configuration available
in the continuum/ring.
:param key: the key to look for.
:param size: limit the list to at most this number of nodes.
:param unique: a node may only appear once in the list (default True).
|
def range(self, key, size=None, unique=True):
"""Returns a generator of nodes' configuration available
in the continuum/ring.
:param key: the key to look for.
:param size: limit the list to at most this number of nodes.
:param unique: a node may only appear once in the list (default True).
"""
all_nodes = set()
if unique:
size = size or len(self.runtime._nodes)
else:
all_nodes = []
pos = self._get_pos(key)
for key in self.runtime._keys[pos:]:
nodename = self.runtime._ring[key]
if unique:
if nodename in all_nodes:
continue
all_nodes.add(nodename)
else:
all_nodes.append(nodename)
yield self.runtime._nodes[nodename]
if len(all_nodes) == size:
break
else:
for i, key in enumerate(self.runtime._keys):
if i < pos:
nodename = self.runtime._ring[key]
if unique:
if nodename in all_nodes:
continue
all_nodes.add(nodename)
else:
all_nodes.append(nodename)
yield self.runtime._nodes[nodename]
if len(all_nodes) == size:
break
|
(self, key, size=None, unique=True)
|
71,032 |
uhashring.ring
|
regenerate
| null |
def regenerate(self):
self.runtime._create_ring(self.runtime._nodes.items())
|
(self)
|
71,037 |
dna_kmer
|
dna_kmer
| null |
def dna_kmer(dna_string, kmer, step):
    print(dna_string)
    kmerized = ""
    for i in range(0, len(dna_string)-kmer+1, step):
        kmerized = kmerized + dna_string[i:i+kmer] + " "
    return kmerized
|
(dna_string, kmer, step)
|
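A quick usage illustration for dna_kmer above (hypothetical input); note the function also prints its input before returning:

print(dna_kmer("ATCGGA", 3, 1))
# prints "ATCGGA", then "ATC TCG CGG GGA "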
71,038 |
vodscrepe.scrape
|
Scraper
| null |
class Scraper:
__slots__ = "base_url", "num_workers", "num_page_workers", "session", "num_pages", "verbose"
base_url: URL
num_workers: int
num_page_workers: int
session: FuturesSession
num_pages: int
verbose: bool
def __init__(
self,
video_game: str,
event: str = "",
player1: str = "",
player2: str = "",
character1: str = "",
character2: str = "",
caster1: str = "",
caster2: str = "",
num_workers: int = 10,
num_page_workers: int = 2,
verbose: bool = False,
):
self.base_url = self.URL(video_game, event, player1, player2, character1, character2, caster1, caster2)
self.num_workers = num_workers
self.num_page_workers = min(num_page_workers, self.num_workers)
self.session = FuturesSession(max_workers=self.num_workers)
page_content = self.request(str(self.base_url)).result().content
page_soup = BeautifulSoup(page_content, "lxml")
self.num_pages = 1
last_page_tag = page_soup.findChild("a", title="Go to last page")
if last_page_tag:
self.num_pages = int(re.search(r"page=([\d]+)", last_page_tag["href"]).group(1))
self.verbose = verbose
def request(self, url: str) -> Future:
return self.session.get(url, headers={"Accept-Encoding": "gzip"})
def scrape_vod_page(self, vod_id: str, vod_request: Future) -> Tuple[List[str], List[Vod.Caster]]:
vod_content = vod_request.result().content
vod_strainer = SoupStrainer("div", class_="region-inner clearfix")
vod_soup = BeautifulSoup(vod_content, "lxml", parse_only=vod_strainer)
content = vod_soup.findChild(recursive=False)
try:
video_ids = [
re.search(r"^([^?]*)", v["data-vod"]).group(1)
for v in content.findChildren("div", class_="js-video widescreen", recursive=False)
]
if len(video_ids) == 0:
raise InvalidVideoError(vod_id)
casters = []
casters_tag = content.findChild("div", class_="field-items")
if casters_tag:
casters = [Vod.Caster(c.getText()) for c in casters_tag.findChildren(recursive=False)]
return (video_ids, casters)
except KeyError:
raise InvalidVideoError(vod_id)
def scrape_page(self, page_request: Future) -> Generator[Vod, None, None]:
page_content = page_request.result().content
page_strainer = SoupStrainer("table")
page_soup = BeautifulSoup(page_content, "lxml", parse_only=page_strainer)
vod_requests = [self.request(tr.findChild("a")["href"]) for tr in page_soup.findChildren("tr")]
for table in page_soup.findChildren(recursive=False):
date = table.caption.span.getText()
for i, row in enumerate(table.tbody.findChildren(recursive=False)):
cells = row.findChildren(recursive=False)
try:
vod_id = re.search(r".*\/(.*)", cells[1].a["href"]).group(1)
try:
best_of = int(re.search(r"Bo([\d]*)", cells[3].getText()).group(1))
except AttributeError:
continue
players = []
player = Vod.Player("Unknown", [])
for tag in cells[1].a.span.findChildren(recursive=False):
if tag.name == "b":
if len(player.characters) != 0:
players.append(player)
player = Vod.Player("Unknown", [])
player.alias = tag.getText()
elif tag.name == "img":
player.characters.append(guess_character(tag["src"][24:-4]))
players.append(player)
video_ids, casters = self.scrape_vod_page(vod_id, vod_requests[i])
tournament = re.search(r"[^\s].*[^\s]", cells[0].getText()).group()
_round = re.search(r"[^\s].*[^\s]", cells[4].getText()).group()
yield Vod(vod_id, video_ids, date, tournament, players, casters, _round, best_of)
except InvalidVideoError as e:
if self.verbose:
print(e, file=sys.stderr)
def scrape(self, pages: Sequence[int] = [], show_progress: bool = False) -> Generator[Vod, None, None]:
if not pages:
pages = range(self.num_pages - 1)
self.num_page_workers = min(self.num_page_workers, len(pages))
request_queue: Queue[Future] = Queue(self.num_page_workers)
for i in range(self.num_page_workers):
request_queue.put(self.request(f"{self.base_url}?page={pages[i]}"))
if show_progress:
pages = tqdm(pages, position=1, unit="pages", desc="All vods")
for page in pages:
vods = self.scrape_page(request_queue.get())
if show_progress:
vods = tqdm(vods, position=0, unit="vods", desc=f"Page {page}", total=60)
for vod in vods:
yield vod
request_queue.put(self.request(f"{self.base_url}?page={page + self.num_page_workers}"))
@dataclass
class URL:
video_game: str
event: str = ""
player1: str = ""
player2: str = ""
character1: str = ""
character2: str = ""
caster1: str = ""
caster2: str = ""
def __str__(self) -> str:
url = "https://vods.co/" + self.video_game
if self.player1:
url += "/player/" + self.player1
if self.player2:
url += "/player2/" + self.player2
if self.event:
url += "/event/" + self.event
if self.character1:
url += "/character/" + self.character1
if self.character2:
url += "/character2/" + self.character2
if self.caster1:
url += "/caster/" + self.character1
if self.caster2:
url += "/caster2/" + self.character2
return url
|
(video_game: 'str', event: 'str' = '', player1: 'str' = '', player2: 'str' = '', character1: 'str' = '', character2: 'str' = '', caster1: 'str' = '', caster2: 'str' = '', num_workers: 'int' = 10, num_page_workers: 'int' = 2, verbose: 'bool' = False)
|
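A hedged usage sketch for the class above, not taken from the source: it assumes the module path `vodscrepe.scrape`, live network access to vods.co, and that "melee" is a valid game slug on that site.

from vodscrepe.scrape import Scraper

if __name__ == "__main__":
    # The constructor already issues one request to discover the page count.
    scraper = Scraper("melee", num_workers=4, num_page_workers=2, verbose=True)
    print(f"{scraper.num_pages} result pages found")

    # Scrape just the first two result pages and print each parsed Vod.
    for vod in scraper.scrape(pages=[0, 1]):
        print(vod)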
71,039 |
vodscrepe.scrape
|
__init__
| null |
def __init__(
self,
video_game: str,
event: str = "",
player1: str = "",
player2: str = "",
character1: str = "",
character2: str = "",
caster1: str = "",
caster2: str = "",
num_workers: int = 10,
num_page_workers: int = 2,
verbose: bool = False,
):
self.base_url = self.URL(video_game, event, player1, player2, character1, character2, caster1, caster2)
self.num_workers = num_workers
self.num_page_workers = min(num_page_workers, self.num_workers)
self.session = FuturesSession(max_workers=self.num_workers)
page_content = self.request(str(self.base_url)).result().content
page_soup = BeautifulSoup(page_content, "lxml")
self.num_pages = 1
last_page_tag = page_soup.findChild("a", title="Go to last page")
if last_page_tag:
self.num_pages = int(re.search(r"page=([\d]+)", last_page_tag["href"]).group(1))
self.verbose = verbose
|
(self, video_game: str, event: str = '', player1: str = '', player2: str = '', character1: str = '', character2: str = '', caster1: str = '', caster2: str = '', num_workers: int = 10, num_page_workers: int = 2, verbose: bool = False)
|
71,040 |
vodscrepe.scrape
|
request
| null |
def request(self, url: str) -> Future:
return self.session.get(url, headers={"Accept-Encoding": "gzip"})
|
(self, url: str) -> concurrent.futures._base.Future
|
71,041 |
vodscrepe.scrape
|
scrape
| null |
def scrape(self, pages: Sequence[int] = [], show_progress: bool = False) -> Generator[Vod, None, None]:
if not pages:
pages = range(self.num_pages - 1)
self.num_page_workers = min(self.num_page_workers, len(pages))
request_queue: Queue[Future] = Queue(self.num_page_workers)
for i in range(self.num_page_workers):
request_queue.put(self.request(f"{self.base_url}?page={pages[i]}"))
if show_progress:
pages = tqdm(pages, position=1, unit="pages", desc="All vods")
for page in pages:
vods = self.scrape_page(request_queue.get())
if show_progress:
vods = tqdm(vods, position=0, unit="vods", desc=f"Page {page}", total=60)
for vod in vods:
yield vod
request_queue.put(self.request(f"{self.base_url}?page={page + self.num_page_workers}"))
|
(self, pages: Sequence[int] = [], show_progress: bool = False) -> Generator[vodscrepe.vod.Vod, NoneType, NoneType]
|
71,042 |
vodscrepe.scrape
|
scrape_page
| null |
def scrape_page(self, page_request: Future) -> Generator[Vod, None, None]:
page_content = page_request.result().content
page_strainer = SoupStrainer("table")
page_soup = BeautifulSoup(page_content, "lxml", parse_only=page_strainer)
vod_requests = [self.request(tr.findChild("a")["href"]) for tr in page_soup.findChildren("tr")]
for table in page_soup.findChildren(recursive=False):
date = table.caption.span.getText()
for i, row in enumerate(table.tbody.findChildren(recursive=False)):
cells = row.findChildren(recursive=False)
try:
vod_id = re.search(r".*\/(.*)", cells[1].a["href"]).group(1)
try:
best_of = int(re.search(r"Bo([\d]*)", cells[3].getText()).group(1))
except AttributeError:
continue
players = []
player = Vod.Player("Unknown", [])
for tag in cells[1].a.span.findChildren(recursive=False):
if tag.name == "b":
if len(player.characters) != 0:
players.append(player)
player = Vod.Player("Unknown", [])
player.alias = tag.getText()
elif tag.name == "img":
player.characters.append(guess_character(tag["src"][24:-4]))
players.append(player)
video_ids, casters = self.scrape_vod_page(vod_id, vod_requests[i])
tournament = re.search(r"[^\s].*[^\s]", cells[0].getText()).group()
_round = re.search(r"[^\s].*[^\s]", cells[4].getText()).group()
yield Vod(vod_id, video_ids, date, tournament, players, casters, _round, best_of)
except InvalidVideoError as e:
if self.verbose:
print(e, file=sys.stderr)
|
(self, page_request: concurrent.futures._base.Future) -> Generator[vodscrepe.vod.Vod, NoneType, NoneType]
|
71,043 |
vodscrepe.scrape
|
scrape_vod_page
| null |
def scrape_vod_page(self, vod_id: str, vod_request: Future) -> Tuple[List[str], List[Vod.Caster]]:
vod_content = vod_request.result().content
vod_strainer = SoupStrainer("div", class_="region-inner clearfix")
vod_soup = BeautifulSoup(vod_content, "lxml", parse_only=vod_strainer)
content = vod_soup.findChild(recursive=False)
try:
video_ids = [
re.search(r"^([^?]*)", v["data-vod"]).group(1)
for v in content.findChildren("div", class_="js-video widescreen", recursive=False)
]
if len(video_ids) == 0:
raise InvalidVideoError(vod_id)
casters = []
casters_tag = content.findChild("div", class_="field-items")
if casters_tag:
casters = [Vod.Caster(c.getText()) for c in casters_tag.findChildren(recursive=False)]
return (video_ids, casters)
except KeyError:
raise InvalidVideoError(vod_id)
|
(self, vod_id: str, vod_request: concurrent.futures._base.Future) -> Tuple[List[str], List[vodscrepe.vod.Vod.Caster]]
|
71,048 |
evaluate.evaluator.audio_classification
|
AudioClassificationEvaluator
|
Audio classification evaluator.
This audio classification evaluator can currently be loaded from [`evaluator`] using the default task name
`audio-classification`.
Methods in this class assume a data format compatible with the [`transformers.AudioClassificationPipeline`].
|
class AudioClassificationEvaluator(Evaluator):
"""
Audio classification evaluator.
This audio classification evaluator can currently be loaded from [`evaluator`] using the default task name
`audio-classification`.
Methods in this class assume a data format compatible with the [`transformers.AudioClassificationPipeline`].
"""
PIPELINE_KWARGS = {}
def __init__(self, task="audio-classification", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
def predictions_processor(self, predictions, label_mapping):
pred_label = [max(pred, key=lambda x: x["score"])["label"] for pred in predictions]
pred_label = [label_mapping[pred] if label_mapping is not None else pred for pred in pred_label]
return {"predictions": pred_label}
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: Optional[str] = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: int = None,
random_state: Optional[int] = None,
input_column: str = "file",
label_column: str = "label",
label_mapping: Optional[Dict[str, Number]] = None,
) -> Tuple[Dict[str, float], Any]:
"""
input_column (`str`, defaults to `"file"`):
The name of the column containing either the audio files or a raw waveform, represented as a numpy array, in the dataset specified by `data`.
label_column (`str`, defaults to `"label"`):
The name of the column containing the labels in the dataset specified by `data`.
label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`):
We want to map class labels defined by the model in the pipeline to values consistent with those
defined in the `label_column` of the `data` dataset.
"""
result = super().compute(
model_or_pipeline=model_or_pipeline,
data=data,
subset=subset,
split=split,
metric=metric,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
device=device,
random_state=random_state,
input_column=input_column,
label_column=label_column,
label_mapping=label_mapping,
)
return result
|
(task='audio-classification', default_metric_name=None)
|
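A hedged sketch of the workflow the docstring describes, loading the evaluator through the `evaluator` factory. The checkpoint and dataset below are illustrative placeholders, and the sketch assumes the pipeline's label strings match the dataset's label names.

from datasets import load_dataset
from evaluate import evaluator

task_evaluator = evaluator("audio-classification")

# Placeholder dataset/split and checkpoint; both are fetched from the Hub.
data = load_dataset("superb", "ks", split="test[:64]")
label_names = data.features["label"].names

results = task_evaluator.compute(
    model_or_pipeline="superb/wav2vec2-base-superb-ks",
    data=data,
    input_column="file",
    label_column="label",
    metric="accuracy",
    # Map the pipeline's string labels back to the dataset's integer ids,
    # assuming both use the same label names.
    label_mapping={name: i for i, name in enumerate(label_names)},
)
print(results)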
71,049 |
evaluate.evaluator.audio_classification
|
__init__
| null |
def __init__(self, task="audio-classification", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
|
(self, task='audio-classification', default_metric_name=None)
|
71,050 |
evaluate.evaluator.base
|
_compute_confidence_interval
|
A utility function enabling the confidence interval calculation for metrics computed
by the evaluator based on `scipy`'s `bootstrap` method.
|
@staticmethod
def _compute_confidence_interval(
metric,
metric_inputs,
metric_keys: List[str],
confidence_level: float = 0.95,
n_resamples: int = 9999,
random_state: Optional[int] = None,
) -> Dict[str, Any]:
"""
A utility function enabling the confidence interval calculation for metrics computed
by the evaluator based on `scipy`'s `bootstrap` method.
"""
# bootstrap only works with functions that use args and no kwargs
def build_args_metric(metric, key, **kwargs):
def args_metric(*args):
return metric.compute(**{k: v for k, v in zip(kwargs.keys(), args)})[key]
return args_metric
bootstrap_dict = {}
for key in metric_keys:
bs = bootstrap(
data=list(metric_inputs.values()),
statistic=build_args_metric(metric, key, **metric_inputs),
paired=True,
vectorized=False,
confidence_level=confidence_level,
n_resamples=n_resamples,
random_state=random_state,
)
bootstrap_dict[key] = {
"confidence_interval": (bs.confidence_interval.low, bs.confidence_interval.high),
"standard_error": bs.standard_error,
}
return bootstrap_dict
|
(metric, metric_inputs, metric_keys: List[str], confidence_level: float = 0.95, n_resamples: int = 9999, random_state: Optional[int] = None) -> Dict[str, Any]
|
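A hedged sketch of calling this helper directly. It assumes the class is importable as `evaluate.evaluator.base.Evaluator` and that the "accuracy" metric can be loaded with `evaluate.load` (fetched from the Hub, so network access is assumed).

import evaluate
from evaluate.evaluator.base import Evaluator

metric = evaluate.load("accuracy")
metric_inputs = {
    "predictions": [1, 0, 1, 1, 0, 1],
    "references": [1, 0, 0, 1, 0, 1],
}

ci = Evaluator._compute_confidence_interval(
    metric,
    metric_inputs,
    metric_keys=["accuracy"],
    n_resamples=500,  # fewer resamples than the default, to keep the sketch fast
    random_state=0,
)
print(ci["accuracy"]["confidence_interval"], ci["accuracy"]["standard_error"])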
71,051 |
evaluate.evaluator.base
|
_compute_time_perf
|
A utility function computing time performance metrics:
- `total_time_in_seconds` - pipeline inference runtime for the evaluation data in seconds,
- `samples_per_second` - pipeline throughput in the number of samples per second,
- `latency_in_seconds` - pipeline inference runtime for the evaluation data in seconds per sample.
|
@staticmethod
def _compute_time_perf(start_time: float, end_time: float, num_samples: int) -> Dict[str, Any]:
"""
A utility function computing time performance metrics:
- `total_time_in_seconds` - pipeline inference runtime for the evaluation data in seconds,
- `samples_per_second` - pipeline throughput in the number of samples per second,
- `latency_in_seconds` - pipeline inference runtime for the evaluation data in seconds per sample.
"""
latency = end_time - start_time
throughput = num_samples / latency
latency_sample = 1.0 / throughput
return {
"total_time_in_seconds": latency,
"samples_per_second": throughput,
"latency_in_seconds": latency_sample,
}
|
(start_time: float, end_time: float, num_samples: int) -> Dict[str, Any]
|
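A worked example of the arithmetic (same `Evaluator` import assumption as above): 8 samples processed between t=10.0 s and t=12.0 s give 2.0 s total, 4 samples per second, and 0.25 s of latency per sample.

from evaluate.evaluator.base import Evaluator

perf = Evaluator._compute_time_perf(start_time=10.0, end_time=12.0, num_samples=8)
assert perf == {
    "total_time_in_seconds": 2.0,
    "samples_per_second": 4.0,
    "latency_in_seconds": 0.25,
}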