index
int64 0
731k
| package
stringlengths 2
98
⌀ | name
stringlengths 1
76
| docstring
stringlengths 0
281k
⌀ | code
stringlengths 4
1.07M
⌀ | signature
stringlengths 2
42.8k
⌀ |
---|---|---|---|---|---|
68,718 |
fs.mountfs
|
getsize
| null |
def getsize(self, path):
    # type: (Text) -> int
    """Return the size in bytes of the resource at ``path``.

    The call is forwarded to whichever filesystem ``path`` resolves to.
    """
    self.check()
    mounted_fs, real_path = self._delegate(path)
    return mounted_fs.getsize(real_path)
|
(self, path)
|
68,719 |
fs.mountfs
|
getsyspath
| null |
def getsyspath(self, path):
    # type: (Text) -> Text
    """Return the system path of ``path`` on the delegated filesystem."""
    self.check()
    mounted_fs, real_path = self._delegate(path)
    return mounted_fs.getsyspath(real_path)
|
(self, path)
|
68,721 |
fs.mountfs
|
gettype
| null |
def gettype(self, path):
    # type: (Text) -> ResourceType
    """Return the resource type of ``path`` on the delegated filesystem."""
    self.check()
    mounted_fs, real_path = self._delegate(path)
    return mounted_fs.gettype(real_path)
|
(self, path)
|
68,722 |
fs.mountfs
|
geturl
| null |
def geturl(self, path, purpose="download"):
    # type: (Text, Text) -> Text
    """Return a URL for ``path``, delegated to the mounted filesystem."""
    self.check()
    mounted_fs, real_path = self._delegate(path)
    return mounted_fs.geturl(real_path, purpose=purpose)
|
(self, path, purpose='download')
|
68,725 |
fs.mountfs
|
hasurl
| null |
def hasurl(self, path, purpose="download"):
    # type: (Text, Text) -> bool
    """Report whether ``path`` has a URL for ``purpose`` (delegated)."""
    self.check()
    mounted_fs, real_path = self._delegate(path)
    return mounted_fs.hasurl(real_path, purpose=purpose)
|
(self, path, purpose='download')
|
68,727 |
fs.mountfs
|
isdir
| null |
def isdir(self, path):
    # type: (Text) -> bool
    """Report whether ``path`` is a directory on the delegated filesystem."""
    self.check()
    mounted_fs, real_path = self._delegate(path)
    return mounted_fs.isdir(real_path)
|
(self, path)
|
68,728 |
fs.base
|
isempty
|
Check if a directory is empty.
A directory is considered empty when it does not contain
any file or any directory.
Arguments:
path (str): A path to a directory on the filesystem.
Returns:
bool: `True` if the directory is empty.
Raises:
errors.DirectoryExpected: If ``path`` is not a directory.
errors.ResourceNotFound: If ``path`` does not exist.
|
def isempty(self, path):
    # type: (Text) -> bool
    """Check if a directory is empty.
    A directory is considered empty when it does not contain
    any file or any directory.
    Arguments:
        path (str): A path to a directory on the filesystem.
    Returns:
        bool: `True` if the directory is empty.
    Raises:
        errors.DirectoryExpected: If ``path`` is not a directory.
        errors.ResourceNotFound: If ``path`` does not exist.
    """
    # Pull at most one entry; an exhausted scan means the dir is empty.
    first_entry = next(iter(self.scandir(path)), None)
    return first_entry is None
|
(self, path)
|
68,729 |
fs.mountfs
|
isfile
| null |
def isfile(self, path):
    # type: (Text) -> bool
    """Report whether ``path`` is a file on the delegated filesystem."""
    self.check()
    mounted_fs, real_path = self._delegate(path)
    return mounted_fs.isfile(real_path)
|
(self, path)
|
68,731 |
fs.mountfs
|
listdir
| null |
def listdir(self, path):
    # type: (Text) -> List[Text]
    """List entry names under ``path`` on the delegated filesystem."""
    self.check()
    mounted_fs, real_path = self._delegate(path)
    return mounted_fs.listdir(real_path)
|
(self, path)
|
68,733 |
fs.mountfs
|
makedir
| null |
def makedir(self, path, permissions=None, recreate=False):
    # type: (Text, Optional[Permissions], bool) -> SubFS[FS]
    """Create a directory at ``path`` on the delegated filesystem."""
    self.check()
    mounted_fs, real_path = self._delegate(path)
    return mounted_fs.makedir(real_path, permissions=permissions, recreate=recreate)
|
(self, path, permissions=None, recreate=False)
|
68,734 |
triad.collections.fs
|
makedirs
|
Make a directory, and any missing intermediate directories.
.. note::
This overrides the base ``makedirs``
:param path: path to directory from root.
:param permissions: initial permissions, or `None` to use defaults.
:recreate: if `False` (the default), attempting to
create an existing directory will raise an error. Set
to `True` to ignore existing directories.
:return: a sub-directory filesystem.
:raises fs.errors.DirectoryExists: if the path is already
a directory, and ``recreate`` is `False`.
:raises fs.errors.DirectoryExpected: if one of the ancestors
in the path is not a directory.
|
def makedirs(
    self, path: str, permissions: Any = None, recreate: bool = False
) -> SubFS:
    """Make a directory, creating any missing intermediate directories.

    .. note::
        This overrides the base ``makedirs``

    :param path: path to directory from root.
    :param permissions: initial permissions, or `None` to use defaults.
    :param recreate: if `False` (the default), attempting to create an
        existing directory raises; `True` ignores existing directories.
    :return: a sub-directory filesystem.
    :raises fs.errors.DirectoryExists: if the path is already
        a directory, and ``recreate`` is `False`.
    :raises fs.errors.DirectoryExpected: if one of the ancestors
        in the path is not a directory.
    """
    self.check()
    target_fs, target_path = self._delegate(path)
    return target_fs.makedirs(target_path, permissions=permissions, recreate=recreate)
|
(self, path: str, permissions: Optional[Any] = None, recreate: bool = False) -> fs.subfs.SubFS
|
68,736 |
fs.mountfs
|
mount
|
Mounts a host FS object on a given path.
Arguments:
path (str): A path within the MountFS.
fs (FS or str): A filesystem (instance or URL) to mount.
|
def mount(self, path, fs):
    # type: (Text, Union[FS, Text]) -> None
    """Mounts a host FS object on a given path.
    Arguments:
        path (str): A path within the MountFS.
        fs (FS or str): A filesystem (instance or URL) to mount.
    """
    # A string is treated as an FS URL and opened into a concrete FS.
    if isinstance(fs, text_type):
        from .opener import open_fs
        fs = open_fs(fs)
    if not isinstance(fs, FS):
        raise TypeError("fs argument must be an FS object or a FS URL")
    # Mounting self would create an infinite delegation loop.
    if fs is self:
        raise ValueError("Unable to mount self")
    # Normalize to an absolute directory path (with trailing slash) so
    # the prefix comparison below is unambiguous.
    _path = forcedir(abspath(normpath(path)))
    # Reject a new mount point nested under an existing one.
    # NOTE(review): only checks new-under-existing, not the reverse
    # direction — confirm whether existing-under-new is allowed.
    for mount_path, _ in self.mounts:
        if _path.startswith(mount_path):
            raise MountError("mount point overlaps existing mount")
    self.mounts.append((_path, fs))
    # Ensure the mount point directory exists on the default filesystem.
    self.default_fs.makedirs(_path, recreate=True)
|
(self, path, fs)
|
68,737 |
fs.base
|
move
|
Move a file from ``src_path`` to ``dst_path``.
Arguments:
src_path (str): A path on the filesystem to move.
dst_path (str): A path on the filesystem where the source
file will be written to.
overwrite (bool): If `True`, destination path will be
overwritten if it exists.
preserve_time (bool): If `True`, try to preserve mtime of the
resources (defaults to `False`).
Raises:
fs.errors.FileExpected: If ``src_path`` maps to a
directory instead of a file.
fs.errors.DestinationExists: If ``dst_path`` exists,
and ``overwrite`` is `False`.
fs.errors.ResourceNotFound: If a parent directory of
``dst_path`` does not exist.
|
def move(self, src_path, dst_path, overwrite=False, preserve_time=False):
    # type: (Text, Text, bool, bool) -> None
    """Move a file from ``src_path`` to ``dst_path``.
    Arguments:
        src_path (str): A path on the filesystem to move.
        dst_path (str): A path on the filesystem where the source
            file will be written to.
        overwrite (bool): If `True`, destination path will be
            overwritten if it exists.
        preserve_time (bool): If `True`, try to preserve mtime of the
            resources (defaults to `False`).
    Raises:
        fs.errors.FileExpected: If ``src_path`` maps to a
            directory instead of a file.
        fs.errors.DestinationExists: If ``dst_path`` exists,
            and ``overwrite`` is `False`.
        fs.errors.ResourceNotFound: If a parent directory of
            ``dst_path`` does not exist.
    """
    if not overwrite and self.exists(dst_path):
        raise errors.DestinationExists(dst_path)
    if self.getinfo(src_path).is_dir:
        raise errors.FileExpected(src_path)
    # Fast path: when the backend advertises native rename and both
    # paths map to system paths, use os.rename (an atomic move on the
    # same device). Any failure falls through to the copy+delete path.
    if self.getmeta().get("supports_rename", False):
        try:
            src_sys_path = self.getsyspath(src_path)
            dst_sys_path = self.getsyspath(dst_path)
        except errors.NoSysPath:  # pragma: no cover
            pass
        else:
            try:
                os.rename(src_sys_path, dst_sys_path)
            except OSError:
                # e.g. cross-device rename; fall back to copy+delete.
                pass
            else:
                if preserve_time:
                    copy_modified_time(self, src_path, self, dst_path)
                return
    # Slow path: copy the bytes then remove the source, under the
    # filesystem lock so the pair of operations is not interleaved.
    with self._lock:
        with self.open(src_path, "rb") as read_file:
            # FIXME(@althonos): typing complains because open return IO
            self.upload(dst_path, read_file)  # type: ignore
        if preserve_time:
            copy_modified_time(self, src_path, self, dst_path)
        self.remove(src_path)
|
(self, src_path, dst_path, overwrite=False, preserve_time=False)
|
68,739 |
fs.mountfs
|
open
| null |
def open(
    self,
    path,  # type: Text
    mode="r",  # type: Text
    buffering=-1,  # type: int
    encoding=None,  # type: Optional[Text]
    errors=None,  # type: Optional[Text]
    newline="",  # type: Text
    **options  # type: Any
):
    # type: (...) -> IO
    """Open a file at ``path`` on whichever filesystem it resolves to."""
    validate_open_mode(mode)
    self.check()
    mounted_fs, real_path = self._delegate(path)
    return mounted_fs.open(
        real_path,
        mode=mode,
        buffering=buffering,
        encoding=encoding,
        errors=errors,
        newline=newline,
        **options
    )
|
(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline='', **options)
|
68,740 |
fs.mountfs
|
openbin
| null |
def openbin(self, path, mode="r", buffering=-1, **kwargs):
    # type: (Text, Text, int, **Any) -> BinaryIO
    """Open a binary file at ``path`` on the delegated filesystem.

    Fix: forward the caller's ``buffering`` argument to the delegate.
    Previously the literal ``-1`` was passed, silently discarding any
    buffering value the caller supplied.
    """
    validate_openbin_mode(mode)
    self.check()
    fs, _path = self._delegate(path)
    return fs.openbin(_path, mode=mode, buffering=buffering, **kwargs)
|
(self, path, mode='r', buffering=-1, **kwargs)
|
68,742 |
fs.mountfs
|
readbytes
| null |
def readbytes(self, path):
    # type: (Text) -> bytes
    """Read the full contents of ``path`` as bytes (delegated)."""
    self.check()
    mounted_fs, real_path = self._delegate(path)
    return mounted_fs.readbytes(real_path)
|
(self, path)
|
68,743 |
fs.mountfs
|
readtext
| null |
def readtext(
    self,
    path,  # type: Text
    encoding=None,  # type: Optional[Text]
    errors=None,  # type: Optional[Text]
    newline="",  # type: Text
):
    # type: (...) -> Text
    """Read the full contents of ``path`` as text (delegated)."""
    self.check()
    mounted_fs, real_path = self._delegate(path)
    return mounted_fs.readtext(
        real_path, encoding=encoding, errors=errors, newline=newline
    )
|
(self, path, encoding=None, errors=None, newline='')
|
68,744 |
fs.mountfs
|
remove
| null |
def remove(self, path):
    # type: (Text) -> None
    """Remove the file at ``path`` on the delegated filesystem."""
    self.check()
    mounted_fs, real_path = self._delegate(path)
    return mounted_fs.remove(real_path)
|
(self, path)
|
68,745 |
fs.mountfs
|
removedir
| null |
def removedir(self, path):
    # type: (Text) -> None
    """Remove the directory at ``path`` on the delegated filesystem."""
    self.check()
    path = normpath(path)
    # The mount root itself can never be removed.
    if path in ("", "/"):
        raise errors.RemoveRootError(path)
    mounted_fs, real_path = self._delegate(path)
    return mounted_fs.removedir(real_path)
|
(self, path)
|
68,747 |
fs.mountfs
|
scandir
| null |
def scandir(
    self,
    path,  # type: Text
    namespaces=None,  # type: Optional[Collection[Text]]
    page=None,  # type: Optional[Tuple[int, int]]
):
    # type: (...) -> Iterator[Info]
    """Iterate resource info under ``path`` on the delegated filesystem."""
    self.check()
    mounted_fs, real_path = self._delegate(path)
    return mounted_fs.scandir(real_path, namespaces=namespaces, page=page)
|
(self, path, namespaces=None, page=None)
|
68,751 |
fs.mountfs
|
setinfo
| null |
def setinfo(self, path, info):
    # type: (Text, RawInfo) -> None
    """Set resource info for ``path`` on the delegated filesystem."""
    self.check()
    mounted_fs, real_path = self._delegate(path)
    return mounted_fs.setinfo(real_path, info)
|
(self, path, info)
|
68,756 |
fs.mountfs
|
upload
| null |
def upload(self, path, file, chunk_size=None, **options):
    # type: (Text, BinaryIO, Optional[int], **Any) -> None
    """Write the binary ``file`` object to ``path`` (delegated)."""
    self.check()
    mounted_fs, real_path = self._delegate(path)
    return mounted_fs.upload(real_path, file, chunk_size=chunk_size, **options)
|
(self, path, file, chunk_size=None, **options)
|
68,757 |
fs.mountfs
|
validatepath
| null |
def validatepath(self, path):
    # type: (Text) -> Text
    """Validate ``path`` against the delegated filesystem and normalize it."""
    self.check()
    mounted_fs, real_path = self._delegate(path)
    mounted_fs.validatepath(real_path)
    # Return the normalized absolute form of the original path.
    return abspath(normpath(path))
|
(self, path)
|
68,758 |
fs.mountfs
|
writebytes
| null |
def writebytes(self, path, contents):
    # type: (Text, bytes) -> None
    """Write ``contents`` bytes to ``path`` on the delegated filesystem."""
    self.check()
    mounted_fs, real_path = self._delegate(path)
    return mounted_fs.writebytes(real_path, contents)
|
(self, path, contents)
|
68,760 |
fs.mountfs
|
writetext
| null |
def writetext(
    self,
    path,  # type: Text
    contents,  # type: Text
    encoding="utf-8",  # type: Text
    errors=None,  # type: Optional[Text]
    newline="",  # type: Text
):
    # type: (...) -> None
    """Write ``contents`` as text to ``path`` on the delegated filesystem.

    Fix: call ``self.check()`` before delegating, matching every other
    delegation method of this class — previously this method skipped
    the closed-filesystem check.
    """
    self.check()
    fs, _path = self._delegate(path)
    return fs.writetext(
        _path, contents, encoding=encoding, errors=errors, newline=newline
    )
|
(self, path, contents, encoding='utf-8', errors=None, newline='')
|
68,761 |
triad.collections.dict
|
IndexedOrderedDict
|
Subclass of OrderedDict that can get and set with index
|
class IndexedOrderedDict(OrderedDict, Dict[KT, VT]):
    """Subclass of OrderedDict that can get and set with index"""

    def __init__(self, *args: Any, **kwds: Any):
        # Same constructor contract as OrderedDict.
        self._readonly = False  # flipped to True by set_readonly()
        self._need_reindex = True  # index caches are rebuilt lazily
        self._key_index: Dict[Any, int] = {}  # key -> position cache
        self._index_key: List[Any] = []  # position -> key cache
        super().__init__(*args, **kwds)

    @property
    def readonly(self) -> bool:
        """Whether this dict is readonly"""
        return self._readonly

    def set_readonly(self) -> None:
        """Make this dict readonly"""
        # One-way switch: there is no method to clear the flag.
        self._readonly = True

    def index_of_key(self, key: Any) -> int:
        """Get index of key
        :param key: key value
        :return: index of the key value
        """
        self._build_index()
        return self._key_index[key]

    def get_key_by_index(self, index: int) -> KT:
        """Get key by index
        :param index: index of the key
        :return: key value at the index
        """
        self._build_index()
        return self._index_key[index]

    def get_value_by_index(self, index: int) -> VT:
        """Get value by index
        :param index: index of the item
        :return: value at the index
        """
        key = self.get_key_by_index(index)
        return self[key]

    def get_item_by_index(self, index: int) -> Tuple[KT, VT]:
        """Get key value pair by index
        :param index: index of the item
        :return: key value tuple at the index
        """
        key = self.get_key_by_index(index)
        return key, self[key]

    def set_value_by_index(self, index: int, value: VT) -> None:
        """Set value by index
        :param index: index of the item
        :param value: new value
        """
        key = self.get_key_by_index(index)
        self[key] = value

    def pop_by_index(self, index: int) -> Tuple[KT, VT]:
        """Pop item at index
        :param index: index of the item
        :return: key value tuple at the index
        """
        key = self.get_key_by_index(index)
        return key, self.pop(key)

    def equals(self, other: Any, with_order: bool):
        """Compare with another object
        :param other: for possible types, see :func:`~triad.utils.iter.to_kv_iterable`
        :param with_order: whether to compare order
        :return: whether they equal
        """
        if with_order:
            if isinstance(other, OrderedDict):
                return self == other
            return self == OrderedDict(to_kv_iterable(other))
        else:
            if isinstance(other, OrderedDict) or not isinstance(other, Dict):
                return self == dict(to_kv_iterable(other))
            return self == other

    # ----------------------------------- Wrappers over OrderedDict
    # Each wrapper funnels through _pre_update so that mutation honors
    # the readonly flag and invalidates the index caches when needed.

    def __setitem__(  # type: ignore
        self, key: KT, value: VT, *args: Any, **kwds: Any
    ) -> None:
        # Reindex only when a brand-new key changes the insertion order.
        self._pre_update("__setitem__", key not in self)
        super().__setitem__(key, value, *args, **kwds)  # type: ignore

    def __delitem__(self, *args: Any, **kwds: Any) -> None:  # type: ignore
        self._pre_update("__delitem__")
        super().__delitem__(*args, **kwds)  # type: ignore

    def clear(self) -> None:
        self._pre_update("clear")
        super().clear()

    def copy(self) -> "IndexedOrderedDict":
        # Shallow copy; the copy is always writable even when the
        # source is readonly.
        other = super().copy()
        assert isinstance(other, IndexedOrderedDict)
        other._need_reindex = self._need_reindex
        other._index_key = self._index_key.copy()
        other._key_index = self._key_index.copy()
        other._readonly = False
        return other

    def __copy__(self) -> "IndexedOrderedDict":
        # Hook for copy.copy(); delegates to the public copy().
        return self.copy()

    def __deepcopy__(self, arg: Any) -> "IndexedOrderedDict":
        # Hook for copy.deepcopy(); deep-copies keys and values.
        it = [(copy.deepcopy(k), copy.deepcopy(v)) for k, v in self.items()]
        return IndexedOrderedDict(it)

    def popitem(self, *args: Any, **kwds: Any) -> Tuple[KT, VT]:  # type: ignore
        self._pre_update("popitem")
        return super().popitem(*args, **kwds)  # type: ignore

    def move_to_end(self, *args: Any, **kwds: Any) -> None:  # type: ignore
        self._pre_update("move_to_end")
        super().move_to_end(*args, **kwds)  # type: ignore

    def __sizeof__(self) -> int:  # pragma: no cover
        # Account for the extra bookkeeping on top of OrderedDict.
        return super().__sizeof__() + sys.getsizeof(self._need_reindex)

    def pop(self, *args: Any, **kwds: Any) -> VT:  # type: ignore
        self._pre_update("pop")
        return super().pop(*args, **kwds)  # type: ignore

    def _build_index(self) -> None:
        # Rebuild both index caches only when marked stale.
        if self._need_reindex:
            self._index_key = list(self.keys())
            self._key_index = {x: i for i, x in enumerate(self._index_key)}
            self._need_reindex = False

    def _pre_update(self, op: str, need_reindex: bool = True) -> None:
        # Called before every mutation: enforce the readonly flag and
        # mark the caches stale when the operation can change order.
        if self.readonly:
            raise InvalidOperationError("This dict is readonly")
        self._need_reindex = need_reindex
|
(*args: Any, **kwds: Any)
|
68,762 |
triad.collections.dict
|
__copy__
| null |
def __copy__(self) -> "IndexedOrderedDict":
    # Hook for copy.copy(); delegates to the public copy().
    return self.copy()
|
(self) -> triad.collections.dict.IndexedOrderedDict
|
68,763 |
triad.collections.dict
|
__deepcopy__
| null |
def __deepcopy__(self, arg: Any) -> "IndexedOrderedDict":
    """Hook for ``copy.deepcopy``: deep-copy every key and value."""
    pairs = [
        (copy.deepcopy(key), copy.deepcopy(value)) for key, value in self.items()
    ]
    return IndexedOrderedDict(pairs)
|
(self, arg: Any) -> triad.collections.dict.IndexedOrderedDict
|
68,764 |
triad.collections.dict
|
__delitem__
| null |
def __delitem__(self, *args: Any, **kwds: Any) -> None:  # type: ignore
    # Deletion changes order, so the index caches are invalidated.
    self._pre_update("__delitem__")
    super().__delitem__(*args, **kwds)  # type: ignore
|
(self, *args: Any, **kwds: Any) -> NoneType
|
68,765 |
triad.collections.dict
|
__init__
| null |
def __init__(self, *args: Any, **kwds: Any):
    # Same constructor contract as OrderedDict.
    self._readonly = False  # flipped to True by set_readonly()
    self._need_reindex = True  # index caches are rebuilt lazily
    self._key_index: Dict[Any, int] = {}  # key -> position cache
    self._index_key: List[Any] = []  # position -> key cache
    super().__init__(*args, **kwds)
|
(self, *args: Any, **kwds: Any)
|
68,766 |
triad.collections.dict
|
__setitem__
| null |
def __setitem__(  # type: ignore
    self, key: KT, value: VT, *args: Any, **kwds: Any
) -> None:
    # Reindex only when a brand-new key changes the insertion order;
    # overwriting an existing key keeps positions intact.
    self._pre_update("__setitem__", key not in self)
    super().__setitem__(key, value, *args, **kwds)  # type: ignore
|
(self, key: ~KT, value: ~VT, *args: Any, **kwds: Any) -> NoneType
|
68,767 |
triad.collections.dict
|
__sizeof__
| null |
def __sizeof__(self) -> int:  # pragma: no cover
    # Include the extra bookkeeping attribute on top of OrderedDict.
    return super().__sizeof__() + sys.getsizeof(self._need_reindex)
|
(self) -> int
|
68,768 |
triad.collections.dict
|
_build_index
| null |
def _build_index(self) -> None:
    """Rebuild the key/index caches when they have been marked stale."""
    if not self._need_reindex:
        return
    self._index_key = list(self.keys())
    self._key_index = {key: pos for pos, key in enumerate(self._index_key)}
    self._need_reindex = False
|
(self) -> NoneType
|
68,769 |
triad.collections.dict
|
_pre_update
| null |
def _pre_update(self, op: str, need_reindex: bool = True) -> None:
    # Called before every mutating operation ``op``: enforce the
    # readonly flag and mark the index caches stale when the
    # operation can change key order.
    if self.readonly:
        raise InvalidOperationError("This dict is readonly")
    self._need_reindex = need_reindex
|
(self, op: str, need_reindex: bool = True) -> NoneType
|
68,770 |
triad.collections.dict
|
clear
| null |
def clear(self) -> None:
    # Remove all items; honors the readonly guard via _pre_update.
    self._pre_update("clear")
    super().clear()
|
(self) -> NoneType
|
68,771 |
triad.collections.dict
|
copy
| null |
def copy(self) -> "IndexedOrderedDict":
    # Shallow copy: index caches are duplicated, and the copy is
    # always writable even when the source dict is readonly.
    other = super().copy()
    assert isinstance(other, IndexedOrderedDict)
    other._need_reindex = self._need_reindex
    other._index_key = self._index_key.copy()
    other._key_index = self._key_index.copy()
    other._readonly = False
    return other
|
(self) -> triad.collections.dict.IndexedOrderedDict
|
68,772 |
triad.collections.dict
|
equals
|
Compare with another object
:param other: for possible types, see :func:`~triad.utils.iter.to_kv_iterable`
:param with_order: whether to compare order
:return: whether they equal
|
def equals(self, other: Any, with_order: bool):
    """Check equality with ``other``, optionally requiring matching key order.

    :param other: for possible types, see :func:`~triad.utils.iter.to_kv_iterable`
    :param with_order: whether key order must also match
    :return: whether they equal
    """
    if with_order:
        rhs = (
            other
            if isinstance(other, OrderedDict)
            else OrderedDict(to_kv_iterable(other))
        )
        return self == rhs
    if isinstance(other, OrderedDict) or not isinstance(other, Dict):
        return self == dict(to_kv_iterable(other))
    return self == other
|
(self, other: Any, with_order: bool)
|
68,773 |
triad.collections.dict
|
get_item_by_index
|
Get key value pair by index
:param index: index of the item
:return: key value tuple at the index
|
def get_item_by_index(self, index: int) -> Tuple[KT, VT]:
    """Return the ``(key, value)`` pair stored at ``index``.

    :param index: index of the item
    :return: key value tuple at the index
    """
    k = self.get_key_by_index(index)
    return (k, self[k])
|
(self, index: int) -> Tuple[~KT, ~VT]
|
68,774 |
triad.collections.dict
|
get_key_by_index
|
Get key by index
:param index: index of the key
:return: key value at the index
|
def get_key_by_index(self, index: int) -> KT:
    """Return the key stored at position ``index``.

    :param index: index of the key
    :return: key value at the index
    """
    self._build_index()
    ordered_keys = self._index_key
    return ordered_keys[index]
|
(self, index: int) -> ~KT
|
68,775 |
triad.collections.dict
|
get_value_by_index
|
Get value by index
:param index: index of the item
:return: value at the index
|
def get_value_by_index(self, index: int) -> VT:
    """Return the value stored at position ``index``.

    :param index: index of the item
    :return: value at the index
    """
    return self[self.get_key_by_index(index)]
|
(self, index: int) -> ~VT
|
68,776 |
triad.collections.dict
|
index_of_key
|
Get index of key
:param key: key value
:return: index of the key value
|
def index_of_key(self, key: Any) -> int:
    """Return the position of ``key`` in insertion order.

    :param key: key value
    :return: index of the key value
    """
    self._build_index()
    positions = self._key_index
    return positions[key]
|
(self, key: Any) -> int
|
68,777 |
triad.collections.dict
|
move_to_end
| null |
def move_to_end(self, *args: Any, **kwds: Any) -> None:  # type: ignore
    # Reordering invalidates the cached key/index mappings.
    self._pre_update("move_to_end")
    super().move_to_end(*args, **kwds)  # type: ignore
|
(self, *args: Any, **kwds: Any) -> NoneType
|
68,778 |
triad.collections.dict
|
pop
| null |
def pop(self, *args: Any, **kwds: Any) -> VT:  # type: ignore
    # Removal invalidates the cached key/index mappings.
    self._pre_update("pop")
    return super().pop(*args, **kwds)  # type: ignore
|
(self, *args: Any, **kwds: Any) -> ~VT
|
68,779 |
triad.collections.dict
|
pop_by_index
|
Pop item at index
:param index: index of the item
:return: key value tuple at the index
|
def pop_by_index(self, index: int) -> Tuple[KT, VT]:
    """Remove and return the ``(key, value)`` pair stored at ``index``.

    :param index: index of the item
    :return: key value tuple at the index
    """
    k = self.get_key_by_index(index)
    return (k, self.pop(k))
|
(self, index: int) -> Tuple[~KT, ~VT]
|
68,780 |
triad.collections.dict
|
popitem
| null |
def popitem(self, *args: Any, **kwds: Any) -> Tuple[KT, VT]:  # type: ignore
    # Removal invalidates the cached key/index mappings.
    self._pre_update("popitem")
    return super().popitem(*args, **kwds)  # type: ignore
|
(self, *args: Any, **kwds: Any) -> Tuple[~KT, ~VT]
|
68,781 |
triad.collections.dict
|
set_readonly
|
Make this dict readonly
|
def set_readonly(self) -> None:
    """Make this dict readonly"""
    # One-way switch: no method exists to clear the flag again.
    self._readonly = True
|
(self) -> NoneType
|
68,782 |
triad.collections.dict
|
set_value_by_index
|
Set value by index
:param index: index of the item
:param value: new value
|
def set_value_by_index(self, index: int, value: VT) -> None:
    """Replace the value stored at position ``index`` with ``value``.

    :param index: index of the item
    :param value: new value
    """
    self[self.get_key_by_index(index)] = value
|
(self, index: int, value: ~VT) -> NoneType
|
68,783 |
triad.collections.dict
|
ParamDict
|
Parameter dictionary, a subclass of ``IndexedOrderedDict``, keys must be string
:param data: for possible types, see :func:`~triad.utils.iter.to_kv_iterable`
:param deep: whether to deep copy ``data``
|
class ParamDict(IndexedOrderedDict[str, Any]):
    """Parameter dictionary, a subclass of ``IndexedOrderedDict``, keys must be string
    :param data: for possible types, see :func:`~triad.utils.iter.to_kv_iterable`
    :param deep: whether to deep copy ``data``
    """

    # Duplicate-key policies accepted by update()'s ``on_dup``.
    OVERWRITE = 0  # replace the existing value
    THROW = 1  # raise KeyError on an existing key
    IGNORE = 2  # keep the existing value

    def __init__(self, data: Any = None, deep: bool = True):
        super().__init__()
        self.update(data, deep=deep)

    def __setitem__(  # type: ignore
        self, key: str, value: Any, *args: Any, **kwds: Any
    ) -> None:
        # Keys are restricted to strings.
        # NOTE(review): validation uses ``assert``, which is stripped
        # under ``python -O`` — consider raising TypeError instead.
        assert isinstance(key, str)
        super().__setitem__(key, value, *args, **kwds)  # type: ignore

    def __getitem__(self, key: Union[str, int]) -> Any:  # type: ignore
        # An int key is treated as a position and resolved to its key.
        if isinstance(key, int):
            key = self.get_key_by_index(key)
        return super().__getitem__(key)  # type: ignore

    def get(self, key: Union[int, str], default: Any) -> Any:  # type: ignore
        """Get value by ``key``, and the value must be a subtype of the type of
        ``default``(which can't be None). If the ``key`` is not found,
        return ``default``.
        :param key: the key to search
        :raises NoneArgumentError: if default is None
        :raises TypeError: if the value can't be converted to the type of ``default``
        :return: the value by ``key``, and the value must be a subtype of the type of
          ``default``. If ``key`` is not found, return `default`
        """
        assert_arg_not_none(default, "default")
        if (isinstance(key, str) and key in self) or isinstance(key, int):
            return as_type(self[key], type(default))
        return default

    def get_or_none(self, key: Union[int, str], expected_type: type) -> Any:
        """Get value by `key`, and the value must be a subtype of ``expected_type``
        :param key: the key to search
        :param expected_type: expected return value type
        :raises TypeError: if the value can't be converted to ``expected_type``
        :return: if ``key`` is not found, None. Otherwise if the value can be converted
          to ``expected_type``, return the converted value, otherwise raise exception
        """
        return self._get_or(key, expected_type, throw=False)

    def get_or_throw(self, key: Union[int, str], expected_type: type) -> Any:
        """Get value by ``key``, and the value must be a subtype of ``expected_type``.
        If ``key`` is not found or value can't be converted to ``expected_type``, raise
        exception
        :param key: the key to search
        :param expected_type: expected return value type
        :raises KeyError: if ``key`` is not found
        :raises TypeError: if the value can't be converted to ``expected_type``
        :return: only when ``key`` is found and can be converted to ``expected_type``,
          return the converted value
        """
        return self._get_or(key, expected_type, throw=True)

    def to_json(self, indent: bool = False) -> str:
        """Generate json expression string for the dictionary
        :param indent: whether to have indent
        :return: json string
        """
        if not indent:
            return json.dumps(self, separators=(",", ":"))
        else:
            return json.dumps(self, indent=4)

    def update(  # type: ignore
        self, other: Any, on_dup: int = 0, deep: bool = True
    ) -> "ParamDict":
        """Update dictionary with another object (for possible types,
        see :func:`~triad.utils.iter.to_kv_iterable`)
        :param other: for possible types, see :func:`~triad.utils.iter.to_kv_iterable`
        :param on_dup: one of ``ParamDict.OVERWRITE``, ``ParamDict.THROW``
          and ``ParamDict.IGNORE``
        :raises KeyError: if using ``ParamDict.THROW`` and other contains existing keys
        :raises ValueError: if ``on_dup`` is invalid
        :return: itself
        """
        self._pre_update("update", True)
        for k, v in to_kv_iterable(other):
            if on_dup == ParamDict.OVERWRITE or k not in self:
                self[k] = copy.deepcopy(v) if deep else v
            elif on_dup == ParamDict.THROW:
                raise KeyError(f"{k} exists in dict")
            elif on_dup == ParamDict.IGNORE:
                continue
            else:
                raise ValueError(f"{on_dup} is not supported")
        return self

    def _get_or(
        self, key: Union[int, str], expected_type: type, throw: bool = True
    ) -> Any:
        # Shared lookup for get_or_none / get_or_throw: int keys are
        # positional, str keys must be present; otherwise miss.
        if (isinstance(key, str) and key in self) or isinstance(key, int):
            return as_type(self[key], expected_type)
        if throw:
            raise KeyError(f"{key} not found")
        return None
|
(data: Any = None, deep: bool = True)
|
68,787 |
triad.collections.dict
|
__getitem__
| null |
def __getitem__(self, key: Union[str, int]) -> Any:  # type: ignore
    """Look up by string key, or by position when ``key`` is an int."""
    actual_key = self.get_key_by_index(key) if isinstance(key, int) else key
    return super().__getitem__(actual_key)  # type: ignore
|
(self, key: Union[str, int]) -> Any
|
68,788 |
triad.collections.dict
|
__init__
| null |
def __init__(self, data: Any = None, deep: bool = True):
    # ``data`` accepts anything to_kv_iterable understands; ``deep``
    # controls whether values are deep-copied on insertion.
    super().__init__()
    self.update(data, deep=deep)
|
(self, data: Optional[Any] = None, deep: bool = True)
|
68,789 |
triad.collections.dict
|
__setitem__
| null |
def __setitem__(  # type: ignore
    self, key: str, value: Any, *args: Any, **kwds: Any
) -> None:
    # Keys are restricted to strings.
    # NOTE(review): validation uses ``assert``, which is stripped
    # under ``python -O`` — consider raising TypeError instead.
    assert isinstance(key, str)
    super().__setitem__(key, value, *args, **kwds)  # type: ignore
|
(self, key: str, value: Any, *args: Any, **kwds: Any) -> NoneType
|
68,792 |
triad.collections.dict
|
_get_or
| null |
def _get_or(
    self, key: Union[int, str], expected_type: type, throw: bool = True
) -> Any:
    """Shared lookup for :meth:`get_or_none` / :meth:`get_or_throw`."""
    # Int keys are positional and always attempted; str keys must exist.
    present = isinstance(key, int) or (isinstance(key, str) and key in self)
    if present:
        return as_type(self[key], expected_type)
    if not throw:
        return None
    raise KeyError(f"{key} not found")
|
(self, key: Union[int, str], expected_type: type, throw: bool = True) -> Any
|
68,797 |
triad.collections.dict
|
get
|
Get value by ``key``, and the value must be a subtype of the type of
``default``(which can't be None). If the ``key`` is not found,
return ``default``.
:param key: the key to search
:raises NoneArgumentError: if default is None
:raises TypeError: if the value can't be converted to the type of ``default``
:return: the value by ``key``, and the value must be a subtype of the type of
``default``. If ``key`` is not found, return `default`
|
def get(self, key: Union[int, str], default: Any) -> Any:  # type: ignore
    """Get value by ``key``; the value must be a subtype of the type of
    ``default`` (which can't be None). Return ``default`` on a miss.

    :param key: the key to search
    :raises NoneArgumentError: if default is None
    :raises TypeError: if the value can't be converted to the type of ``default``
    :return: the value by ``key`` converted to ``type(default)``, or
        ``default`` when ``key`` is not found
    """
    assert_arg_not_none(default, "default")
    found = isinstance(key, int) or (isinstance(key, str) and key in self)
    if found:
        return as_type(self[key], type(default))
    return default
|
(self, key: Union[int, str], default: Any) -> Any
|
68,800 |
triad.collections.dict
|
get_or_none
|
Get value by `key`, and the value must be a subtype of ``expected_type``
:param key: the key to search
:param expected_type: expected return value type
:raises TypeError: if the value can't be converted to ``expected_type``
:return: if ``key`` is not found, None. Otherwise if the value can be converted
to ``expected_type``, return the converted value, otherwise raise exception
|
def get_or_none(self, key: Union[int, str], expected_type: type) -> Any:
    """Get value by ``key`` converted to ``expected_type``, or None on a miss.

    :param key: the key to search
    :param expected_type: expected return value type
    :raises TypeError: if the value can't be converted to ``expected_type``
    :return: ``None`` when ``key`` is not found, otherwise the converted value
    """
    return self._get_or(key, expected_type, throw=False)
|
(self, key: Union[int, str], expected_type: type) -> Any
|
68,801 |
triad.collections.dict
|
get_or_throw
|
Get value by ``key``, and the value must be a subtype of ``expected_type``.
If ``key`` is not found or value can't be converted to ``expected_type``, raise
exception
:param key: the key to search
:param expected_type: expected return value type
:raises KeyError: if ``key`` is not found
:raises TypeError: if the value can't be converted to ``expected_type``
:return: only when ``key`` is found and can be converted to ``expected_type``,
return the converted value
|
def get_or_throw(self, key: Union[int, str], expected_type: type) -> Any:
    """Get value by ``key`` converted to ``expected_type``; raise on a miss
    or on a failed conversion.

    :param key: the key to search
    :param expected_type: expected return value type
    :raises KeyError: if ``key`` is not found
    :raises TypeError: if the value can't be converted to ``expected_type``
    :return: the converted value
    """
    return self._get_or(key, expected_type, throw=True)
|
(self, key: Union[int, str], expected_type: type) -> Any
|
68,810 |
triad.collections.dict
|
to_json
|
Generate json expression string for the dictionary
:param indent: whether to have indent
:return: json string
|
def to_json(self, indent: bool = False) -> str:
    """Serialize the dictionary to a JSON string.

    :param indent: whether to pretty-print with indentation
    :return: json string
    """
    if indent:
        return json.dumps(self, indent=4)
    return json.dumps(self, separators=(",", ":"))
|
(self, indent: bool = False) -> str
|
68,811 |
triad.collections.dict
|
update
|
Update dictionary with another object (for possible types,
see :func:`~triad.utils.iter.to_kv_iterable`)
:param other: for possible types, see :func:`~triad.utils.iter.to_kv_iterable`
:param on_dup: one of ``ParamDict.OVERWRITE``, ``ParamDict.THROW``
and ``ParamDict.IGNORE``
:raises KeyError: if using ``ParamDict.THROW`` and other contains existing keys
:raises ValueError: if ``on_dup`` is invalid
:return: itself
|
def update(  # type: ignore
    self, other: Any, on_dup: int = 0, deep: bool = True
) -> "ParamDict":
    """Update dictionary with another object (for possible types,
    see :func:`~triad.utils.iter.to_kv_iterable`)

    :param other: for possible types, see :func:`~triad.utils.iter.to_kv_iterable`
    :param on_dup: one of ``ParamDict.OVERWRITE``, ``ParamDict.THROW``
      and ``ParamDict.IGNORE``
    :param deep: if True (default), store deep copies of the incoming values
      so later mutation of ``other`` does not affect this dict
    :raises KeyError: if using ``ParamDict.THROW`` and other contains existing keys
    :raises ValueError: if ``on_dup`` is invalid
    :return: itself
    """
    self._pre_update("update", True)
    for k, v in to_kv_iterable(other):
        # OVERWRITE always wins; any policy accepts keys that are new
        if on_dup == ParamDict.OVERWRITE or k not in self:
            self[k] = copy.deepcopy(v) if deep else v
        elif on_dup == ParamDict.THROW:
            raise KeyError(f"{k} exists in dict")
        elif on_dup == ParamDict.IGNORE:
            continue
        else:
            raise ValueError(f"{on_dup} is not supported")
    return self
|
(self, other: Any, on_dup: int = 0, deep: bool = True) -> triad.collections.dict.ParamDict
|
68,812 |
triad.collections.schema
|
Schema
|
A Schema wrapper on top of pyarrow.Fields. This has more features
than pyarrow.Schema, and they can convert to each other.
This class can be initialized from schema like objects. Here is a list of
schema like objects:
* pyarrow.Schema or Schema objects
* pyarrow.Field: single field will be treated as a single column schema
* schema expressions: :func:`~triad.utils.pyarrow.expression_to_schema`
* Dict[str,Any]: key will be the columns, and value will be type like objects
* Tuple[str,Any]: first item will be the only column name of the schema,
and the second has to be a type like object
* List[Any]: a list of Schema like objects
* pandas.DataFrame: it will extract the dataframe's schema
Here is a list of data type like objects:
* pyarrow.DataType
* pyarrow.Field: will only use the type attribute of the field
* type expression or other objects: for :func:`~triad.utils.pyarrow.to_pa_datatype`
.. admonition:: Examples
.. code-block:: python
Schema("a:int,b:int")
Schema("a:int","b:int")
Schema(a=int,b=str) # == Schema("a:long,b:str")
Schema(dict(a=int,b=str)) # == Schema("a:long,b:str")
Schema([(a,int),(b,str)]) # == Schema("a:long,b:str")
Schema((a,int),(b,str)) # == Schema("a:long,b:str")
Schema("a:[int],b:{x:int,y:{z:[str],w:byte}},c:[{x:str}]")
.. note::
* For supported pyarrow.DataTypes see :func:`~triad.utils.pyarrow.is_supported`
* If you use python type as data type (e.g. `Schema(a=int,b=str)`) be aware
  the data types can be different (e.g. python `int` type -> pyarrow
  `long`/`int64` type)
* When not readonly, only `append` is allowed, `update` or `remove` are
disallowed
* When readonly, no modification on the existing schema is allowed
* `append`, `update` and `remove` are always allowed when creating a new object
* InvalidOperationError will be raised for disallowed operations
* At most one of `*args` and `**kwargs` can be set
:param args: one or multiple schema like objects, which will be combined in order
:param kwargs: key value pairs for the schema
|
class Schema(IndexedOrderedDict[str, pa.Field]):
    """A Schema wrapper on top of pyarrow.Fields. This has more features
    than pyarrow.Schema, and they can convert to each other.

    This class can be initialized from schema like objects. Here is a list of
    schema like objects:

    * pyarrow.Schema or Schema objects
    * pyarrow.Field: single field will be treated as a single column schema
    * schema expressions: :func:`~triad.utils.pyarrow.expression_to_schema`
    * Dict[str,Any]: key will be the columns, and value will be type like objects
    * Tuple[str,Any]: first item will be the only column name of the schema,
      and the second has to be a type like object
    * List[Any]: a list of Schema like objects
    * pandas.DataFrame: it will extract the dataframe's schema

    Here is a list of data type like objects:

    * pyarrow.DataType
    * pyarrow.Field: will only use the type attribute of the field
    * type expression or other objects: for :func:`~triad.utils.pyarrow.to_pa_datatype`

    .. admonition:: Examples

        .. code-block:: python

            Schema("a:int,b:int")
            Schema("a:int","b:int")
            Schema(a=int,b=str) # == Schema("a:long,b:str")
            Schema(dict(a=int,b=str)) # == Schema("a:long,b:str")
            Schema([(a,int),(b,str)]) # == Schema("a:long,b:str")
            Schema((a,int),(b,str)) # == Schema("a:long,b:str")
            Schema("a:[int],b:{x:int,y:{z:[str],w:byte}},c:[{x:str}]")

    .. note::

        * For supported pyarrow.DataTypes see :func:`~triad.utils.pyarrow.is_supported`
        * If you use python type as data type (e.g. `Schema(a=int,b=str)`) be aware
          the data types can be different (e.g. python `int` type -> pyarrow
          `long`/`int64` type)
        * When not readonly, only `append` is allowed, `update` or `remove` are
          disallowed
        * When readonly, no modification on the existing schema is allowed
        * `append`, `update` and `remove` are always allowed when creating a new object
        * InvalidOperationError will be raised for disallowed operations
        * At most one of `*args` and `**kwargs` can be set

    :param args: one or multiple schema like objects, which will be combined in order
    :param kwargs: key value pairs for the schema
    """
    def __init__(self, *args: Any, **kwargs: Any):
        if len(args) > 0 and len(kwargs) > 0:
            raise SchemaError("Can't set both *args and **kwargs")
        if len(args) == 1:  # duplicate code for better performance
            # fast paths for the common single-argument constructions that
            # can bypass the generic append() machinery
            if isinstance(args[0], Schema):
                super().__init__(args[0])  # type: ignore
                return
            fields: Optional[List[pa.Field]] = None
            if isinstance(args[0], str):
                fields = list(expression_to_schema(args[0]))
            if isinstance(args[0], pa.Schema):
                fields = list(args[0])
            if isinstance(args[0], pa.Field):
                fields = [args[0]]
            if fields is not None:
                fields = [self._validate_field(f) for f in fields]
                super().__init__([(x.name, x) for x in fields])
                return
        super().__init__()
        if len(args) > 0:
            self.append(list(args))
        elif len(kwargs) > 0:
            self.append(kwargs)
    @property
    def names(self) -> List[str]:
        """List of column names"""
        self._build_index()
        return self._index_key  # type: ignore
    @property
    def fields(self) -> List[pa.Field]:
        """List of pyarrow.Fields"""
        return list(self.values())
    @property
    def types(self) -> List[pa.DataType]:
        """List of pyarrow.DataTypes"""
        return [v.type for v in self.values()]
    @property
    def pyarrow_schema(self) -> pa.Schema:
        """convert as pyarrow.Schema"""
        return pa.schema(self.fields)
    @property
    def pa_schema(self) -> pa.Schema:
        """convert as pyarrow.Schema"""
        return self.pyarrow_schema
    @property
    def pandas_dtype(self) -> Dict[str, np.dtype]:
        """Convert as `dtype` dict for pandas dataframes.
        Currently, struct type is not supported
        """
        # NOTE(review): ``self.pa_schema`` is passed positionally as
        # ``use_extension_types`` (a bool parameter) of the method below —
        # this looks wrong; confirm whether the module-level function
        # ``to_pandas_dtype(self.pa_schema)`` was intended instead.
        return self.to_pandas_dtype(self.pa_schema)
    def to_pandas_dtype(
        self, use_extension_types: bool = False, use_arrow_dtype: bool = False
    ) -> Dict[str, np.dtype]:
        """Convert as `dtype` dict for pandas dataframes.

        :param use_extension_types: if True, use pandas extension types,
          default False
        :param use_arrow_dtype: if True and when pandas supports ``ArrowDType``,
          use pyarrow types, default False

        .. note::

            * If ``use_extension_types`` is False and ``use_arrow_dtype`` is True,
              it converts all types to ``ArrowDType``
            * If both are true, it converts types to the numpy backend nullable
              dtypes if possible, otherwise, it converts to ``ArrowDType``
        """
        return to_pandas_dtype(
            self.pa_schema,
            use_extension_types=use_extension_types,
            use_arrow_dtype=use_arrow_dtype,
        )
    @property
    def pd_dtype(self) -> Dict[str, np.dtype]:
        """convert as `dtype` dict for pandas dataframes.
        Currently, struct type is not supported
        """
        return self.pandas_dtype
    def assert_not_empty(self) -> "Schema":
        """Raise exception if schema is empty"""
        if len(self) > 0:
            return self
        raise SchemaError("Schema can't be empty")
    def copy(self) -> "Schema":
        """Clone Schema object

        :return: cloned object
        """
        other = super().copy()
        assert isinstance(other, Schema)
        return other
    def __repr__(self) -> str:
        """Convert to the string representation of the schema"""
        return schema_to_expression(self.pyarrow_schema)
    def __iadd__(self, obj: Any) -> "Schema":
        """Append a schema like object to the current schema

        :param obj: a schema like object

        .. admonition:: Examples

            .. code-block:: python

                s = Schema("a:int,b:str")
                s += "c:long"
                s += ("d",int)
        """
        return self.append(obj)
    def __isub__(self, obj: Any) -> "Schema":
        """Removing columns from a schema in place is not allowed"""
        raise SchemaError("'-=' is not allowed for Schema")
    def __add__(self, obj: Any) -> "Schema":
        """Add a schema like object to the current schema

        :param obj: a schema like object

        .. admonition:: Examples

            .. code-block:: python

                s = Schema("a:int,b:str")
                s = s + "c:long" + ("d",int)
        """
        return self.copy().append(obj)
    def __sub__(self, obj: Any) -> "Schema":
        """Remove columns or schema from the schema.

        :param obj: one column name, a list/set of column names or
          a schema like object
        :return: a schema excluding the columns in ``obj``

        .. note::

            If ``obj`` violates any of the following conditions, ``SchemaError``
            will be raised:

            * all columns in ``obj`` must be found in the schema
            * If ``obj`` is a schema like object, the types must also match
        """
        return self.remove(
            obj,
            ignore_key_mismatch=False,
            require_type_match=True,
            ignore_type_mismatch=False,
        )
    def __setitem__(  # type: ignore
        self, name: str, value: Any, *args: List[Any], **kwds: Dict[str, Any]
    ) -> None:
        # Add a new column; overwriting an existing column is forbidden.
        assert_arg_not_none(value, "value")
        if name in self:  # update existing value is not allowed
            raise SchemaError(f"{name} already exists in {self}")
        if isinstance(value, pa.Field):
            assert_or_throw(
                name == value.name, SchemaError(f"{name} doesn't match {value}")
            )
        elif isinstance(value, pa.DataType):
            value = pa.field(name, value)
        else:
            # any other type-like object is normalized through to_pa_datatype
            value = pa.field(name, to_pa_datatype(value))
        assert_or_throw(
            is_supported(value.type), SchemaError(f"{value} is not supported")
        )
        super().__setitem__(name, value, *args, **kwds)  # type: ignore
    def __eq__(self, other: Any) -> bool:
        """Check if the two schemas are equal

        :param other: a schema like object
        :return: True if the two schemas are equal
        """
        return self.is_like(other)
    def __contains__(self, key: Any) -> bool:  # noqa: C901
        """Check if the schema contains the key.

        :param key: a column name, a list of column names, a data
          type like object or a schema like object
        :return: True if the schema contains the object
        """
        if key is None:
            return False
        if isinstance(key, str):
            if ":" not in key:
                # plain column name; expressions fall through to the bottom
                return super().__contains__(key)
        elif isinstance(key, pa.Field):
            res = super().get(key.name, None)
            if res is None:
                return False
            return key.type == res.type
        elif isinstance(key, Schema):
            for f in key.values():
                if f not in self:
                    return False
            return True
        elif isinstance(key, List):
            for f in key:
                if f not in self:
                    return False
            return True
        # schema expressions and other schema like objects
        return Schema(key) in self
    def is_like(
        self,
        other: Any,
        equal_groups: Optional[List[List[Callable[[pa.DataType], bool]]]] = None,
    ) -> bool:
        """Check if the two schemas are equal or similar

        :param other: a schema like object
        :param equal_groups: a list of list of functions to check if two types
          are equal, default None
        :return: True if the two schemas are equal

        .. admonition:: Examples

            .. code-block:: python

                s = Schema("a:int,b:str")
                assert s.is_like("a:int,b:str")
                assert not s.is_like("a:long,b:str")
                assert s.is_like("a:long,b:str", equal_groups=[(pa.types.is_integer,)])
        """
        if other is None:
            return False
        if other is self:
            return True
        if isinstance(other, Schema):
            _other = other
        elif isinstance(other, str):
            if equal_groups is None:
                # cheap string comparison when exact equality is requested
                return self.__repr__() == other
            _other = Schema(other)
        else:
            _other = Schema(other)
        return pa_schemas_equal(
            self.pa_schema, _other.pa_schema, equal_groups=equal_groups
        )
    def append(self, obj: Any) -> "Schema":  # noqa: C901
        """Append schema like object to the current schema. Only new columns
        are allowed.

        :raises SchemaError: if a column exists or is invalid or obj is not convertible
        :return: the Schema object itself
        """
        try:
            if obj is None:
                return self
            elif isinstance(obj, pa.Field):
                self[obj.name] = obj.type
            elif isinstance(obj, str):
                self._append_pa_schema(expression_to_schema(obj))
            elif isinstance(obj, Dict):
                for k, v in obj.items():
                    self[k] = v
            elif isinstance(obj, pa.Schema):
                self._append_pa_schema(obj)
            elif isinstance(obj, pd.DataFrame):
                self._append_pa_schema(PD_UTILS.to_schema(obj))
            elif isinstance(obj, Tuple):  # type: ignore
                self[obj[0]] = obj[1]
            elif isinstance(obj, List):
                for x in obj:
                    self.append(x)
            else:
                raise SchemaError(f"Invalid schema to add {obj}")
            return self
        except SchemaError:
            # propagate SchemaError unchanged instead of re-wrapping below
            raise
        except Exception as e:
            raise SchemaError(str(e))
    def remove(  # noqa: C901
        self,
        obj: Any,
        ignore_key_mismatch: bool = False,
        require_type_match: bool = True,
        ignore_type_mismatch: bool = False,
    ) -> "Schema":
        """Remove columns or schema from the schema

        :param obj: one column name, a list/set of column names or
          a schema like object
        :param ignore_key_mismatch: if True, ignore the non-existing keys,
          default False
        :param require_type_match: if True, a match requires the same key
          and same type (if ``obj`` contains type), otherwise, only the
          key needs to match, default True
        :param ignore_type_mismatch: if False, when keys match but types don't
          (if ``obj`` contains type), raise an exception ``SchemaError``,
          default False
        :return: a schema excluding the columns in ``obj``
        """
        if obj is None:
            return self.copy()
        target = self
        if isinstance(obj, str):
            if ":" in obj:  # expression
                ps = expression_to_schema(obj)
                pairs: List[Tuple[str, pa.DataType]] = list(zip(ps.names, ps.types))
            else:
                pairs = [(obj, None)]  # single key
        elif isinstance(obj, (pa.Schema, Schema)):
            pairs = list(zip(obj.names, obj.types))
        elif isinstance(obj, (List, Set)):
            # split plain column names from schema like items; the latter are
            # removed recursively one by one
            keys: List[str] = []
            other: List[Any] = []
            for x in obj:
                if isinstance(x, str) and ":" not in x:
                    keys.append(x)
                else:
                    other.append(x)
            pairs = [(x, None) for x in keys]
            for o in other:
                target = target.remove(
                    o,
                    ignore_key_mismatch=ignore_key_mismatch,
                    require_type_match=require_type_match,
                    ignore_type_mismatch=ignore_type_mismatch,
                )
        else:
            return self.remove(
                Schema(obj),
                ignore_key_mismatch=ignore_key_mismatch,
                require_type_match=require_type_match,
                ignore_type_mismatch=ignore_type_mismatch,
            )
        od = OrderedDict(target)
        for k, v in pairs:
            # k = k.strip()
            # if k == "":
            #     continue
            if k not in od:
                if ignore_key_mismatch:
                    continue
                raise SchemaError(
                    f"Can't remove {quote_name(k)} from {target} (not found)"
                )
            if v is None:
                # name-only removal: no type to check
                del od[k]
            else:
                tp = od[k].type
                if not require_type_match or tp == v:
                    del od[k]
                elif not ignore_type_mismatch:
                    raise SchemaError(
                        f"Unable to remove {k}:{v} from {self}, type mismatch"
                    )
        return Schema(od)
    def alter(self, subschema: Any) -> "Schema":
        """Alter the schema with a subschema

        :param subschema: a schema like object
        :return: the altered schema
        """
        if subschema is None:
            return self
        sub = Schema(subschema)
        assert_or_throw(
            sub.names in self,
            lambda: ValueError(f"{sub.names} are not all in {self}"),
        )
        # keep original column order; take types from ``sub`` where present
        return Schema([(k, sub.get(k, v)) for k, v in self.items()])
    def extract(  # noqa: C901
        self,
        obj: Any,
        ignore_key_mismatch: bool = False,
        require_type_match: bool = True,
        ignore_type_mismatch: bool = False,
    ) -> "Schema":
        """Extract a sub schema from the schema based on the columns in ``obj``

        :param obj: one column name, a list/set of column names or
          a schema like object
        :param ignore_key_mismatch: if True, ignore the non-existing keys,
          default False
        :param require_type_match: if True, a match requires the same key
          and same type (if ``obj`` contains type), otherwise, only the
          key needs to match, default True
        :param ignore_type_mismatch: if False, when keys match but types don't
          (if ``obj`` contains type), raise an exception ``SchemaError``,
          default False
        :return: a sub-schema containing the columns in ``obj``
        """
        if obj is None:
            return Schema()
        if isinstance(obj, str):
            if ":" in obj:  # expression
                ps = expression_to_schema(obj)
                pairs: List[Tuple[str, pa.DataType]] = list(zip(ps.names, ps.types))
            else:
                pairs = [(obj, None)]  # single key
        elif isinstance(obj, (pa.Schema, Schema)):
            pairs = list(zip(obj.names, obj.types))
        elif isinstance(obj, List):
            fields: List[pa.Field] = []
            for x in obj:
                if isinstance(x, str) and ":" not in x:
                    if x not in self:
                        if not ignore_key_mismatch:
                            raise SchemaError(f"Can't extract {x} from {self}")
                    else:
                        fields.append(self[x])
                else:
                    # schema like item: extract recursively and collect fields
                    fields += self.extract(
                        x,
                        ignore_key_mismatch=ignore_key_mismatch,
                        require_type_match=require_type_match,
                        ignore_type_mismatch=ignore_type_mismatch,
                    ).fields
            return Schema(pa.schema(fields))
        else:
            return self.extract(
                Schema(obj),
                ignore_key_mismatch=ignore_key_mismatch,
                require_type_match=require_type_match,
                ignore_type_mismatch=ignore_type_mismatch,
            )
        fields = []
        for k, v in pairs:
            if k not in self:
                if ignore_key_mismatch:
                    continue
                raise SchemaError(f"Can't extract {k} from {self}")
            if v is None:
                fields.append(self[k])
            else:
                tp = self[k].type
                if not require_type_match or tp == v:
                    fields.append(self[k])
                elif not ignore_type_mismatch:
                    raise SchemaError(
                        f"Unable to extract {k}:{v} from {self}, type mismatch"
                    )
        return Schema(pa.schema(fields))
    def exclude(
        self,
        other: Any,
        require_type_match: bool = True,
        ignore_type_mismatch: bool = False,
    ) -> "Schema":
        """Exclude columns from the current schema which are also in ``other``.
        ``other`` can contain columns that are not in the current schema, they
        will be ignored.

        :param other: one column name, a list/set of column names or
          a schema like object
        :param require_type_match: if True, a match requires the same key
          and same type (if ``obj`` contains type), otherwise, only the
          key needs to match, default True
        :param ignore_type_mismatch: if False, when keys match but types don't
          (if ``obj`` contains type), raise an exception ``SchemaError``,
          default False
        :return: a schema excluding the columns in ``other``
        """
        return self.remove(
            other,
            ignore_key_mismatch=True,
            require_type_match=require_type_match,
            ignore_type_mismatch=ignore_type_mismatch,
        )
    def intersect(
        self,
        other: Any,
        require_type_match: bool = True,
        ignore_type_mismatch: bool = True,
        use_other_order: bool = False,
    ) -> "Schema":
        """Extract the sub-schema from the current schema which are also in
        ``other``. ``other`` can contain columns that are not in the current schema,
        they will be ignored.

        :param other: one column name, a list/set of column names or
          a schema like object
        :param require_type_match: if True, a match requires the same key
          and same type (if ``obj`` contains type), otherwise, only the
          key needs to match, default True
        :param ignore_type_mismatch: if False, when keys match but types don't
          (if ``obj`` contains type), raise an exception ``SchemaError``,
          default False
        :param use_other_order: if True, the output schema will use the column order
          of ``other``, default False
        :return: the intersected schema
        """
        if not use_other_order:
            # keep self's order: remove what is NOT in other
            diff = self.exclude(
                other,
                require_type_match=require_type_match,
                ignore_type_mismatch=ignore_type_mismatch,
            )
            return self - diff
        else:
            return self.extract(
                other,
                require_type_match=require_type_match,
                ignore_type_mismatch=ignore_type_mismatch,
            )
    def union(self, other: Any, require_type_match: bool = False) -> "Schema":
        """Union the ``other`` schema

        :param other: a schema like object
        :param require_type_match: if True, a match requires the same key
          and same type (if ``obj`` contains type), otherwise, only the
          key needs to match, default False
        :return: the new unioned schema
        """
        return self.copy().union_with(other, require_type_match=require_type_match)
    def union_with(self, other: Any, require_type_match: bool = False) -> "Schema":
        """Union the ``other`` schema into the current schema

        :param other: a schema like object
        :param require_type_match: if True, a match requires the same key
          and same type (if ``obj`` contains type), otherwise, only the
          key needs to match, default False
        :return: the current schema
        """
        if not isinstance(other, Schema):
            other = Schema(other)
        try:
            # only append the part of ``other`` not already in self
            other = other.exclude(
                self, require_type_match=require_type_match, ignore_type_mismatch=False
            )
            self += other
            return self
        except Exception as e:
            raise SchemaError(f"Unable to union {self} with {other}: {str(e)}")
    def rename(self, columns: Dict[str, str], ignore_missing: bool = False) -> "Schema":
        """Rename the current schema and generate a new one

        :param columns: dictionary to map from old to new column names
        :param ignore_missing: if True, ignore the keys in ``columns`` that
          don't exist in the schema, default False
        :return: renamed schema object
        """
        if not ignore_missing:
            for x in columns:
                if x not in self:
                    raise SchemaError(f"Failed to rename: {x} not in {self}")
        pairs = [
            (k if k not in columns else columns[k], v.type) for k, v in self.items()
        ]
        return Schema(pairs)
    def transform(self, *args: Any, **kwargs: Any) -> "Schema":
        """Transform the current schema to a new schema

        :raises SchemaError: if there is any exception
        :return: transformed schema

        .. admonition:: Examples

            .. code-block:: python

                s=Schema("a:int,b:int,c:str")
                s.transform("x:str") # x:str
                # add
                s.transform("*,x:str") # a:int,b:int,c:str,x:str
                s.transform("*","x:str") # a:int,b:int,c:str,x:str
                s.transform("*",x=str) # a:int,b:int,c:str,x:str
                # subtract
                s.transform("*-c,a") # b:int
                s.transform("*-c-a") # b:int
                s.transform("*~c,a,x") # b:int # ~ means exclude if exists
                s.transform("*~c~a~x") # b:int # ~ means exclude if exists
                # + means overwrite existing and append new
                s.transform("*+e:str,b:str,d:str") # a:int,b:str,c:str,e:str,d:str
                # you can have multiple operations
                s.transform("*+b:str-a") # b:str,c:str
                # callable
                s.transform(lambda s:s.fields[0]) # a:int
                s.transform(lambda s:s.fields[0], lambda s:s.fields[2]) # a:int,c:str
        """
        try:
            result = Schema()
            for a in args:
                if callable(a):
                    result += a(self)
                elif isinstance(a, str):
                    # positions of the -, ~ and + operators outside quotes
                    op_pos = [x[0] for x in safe_search_out_of_quote(a, "-~+")]
                    op_pos.append(len(a))
                    # the leading segment, with "*" expanded to this schema
                    s = Schema(
                        safe_replace_out_of_quote(a[: op_pos[0]], "*", str(self))
                    )
                    for i in range(0, len(op_pos) - 1):
                        op, expr = a[op_pos[i]], a[(op_pos[i] + 1) : op_pos[i + 1]]
                        if op in ["-", "~"]:
                            cols = safe_split_and_unquote(
                                expr, ",", on_unquoted_empty="ignore"
                            )
                            # "~" is lenient (exclude if exists), "-" is strict
                            s = s.exclude(cols) if op == "~" else s - cols
                        else:  # +
                            overwrite = Schema(expr)
                            s = Schema(
                                [(k, overwrite.get(k, v)) for k, v in s.items()]
                            ).union_with(overwrite)
                    result += s
                else:
                    result += a
            return result + Schema(kwargs)
        except SchemaError:
            raise
        except Exception as e:
            raise SchemaError(e)
    def create_empty_arrow_table(self) -> pa.Table:
        """Create an empty pyarrow table based on the schema"""
        if not hasattr(pa.Table, "from_pylist"):  # pragma: no cover
            # fallback for older pyarrow without Table.from_pylist
            arr = [pa.array([])] * len(self)
            return pa.Table.from_arrays(arr, schema=self.pa_schema)
        return pa.Table.from_pylist([], schema=self.pa_schema)
    def create_empty_pandas_df(
        self, use_extension_types: bool = False, use_arrow_dtype: bool = False
    ) -> pd.DataFrame:
        """Create an empty pandas dataframe based on the schema

        :param use_extension_types: if True, use pandas extension types,
          default False
        :param use_arrow_dtype: if True and when pandas supports ``ArrowDType``,
          use pyarrow types, default False
        :return: empty pandas dataframe
        """
        dtypes = self.to_pandas_dtype(
            use_extension_types=use_extension_types,
            use_arrow_dtype=use_arrow_dtype,
        )
        return pd.DataFrame({k: pd.Series(dtype=v) for k, v in dtypes.items()})
    def _pre_update(self, op: str, need_reindex: bool = True) -> None:
        # Only __setitem__ (appending a new column) may mutate a Schema.
        if op == "__setitem__":
            super()._pre_update(op, need_reindex)
        else:
            raise SchemaError(f"{op} is not allowed in Schema")
    def _append_pa_schema(self, other: pa.Schema) -> "Schema":
        for k, v in zip(other.names, other.types):
            self[k] = v
        return self
    def _validate_field(self, field: pa.Field) -> pa.Field:
        assert_or_throw(
            is_supported(field.type), SchemaError(f"{field} type is not supported")
        )
        return field
|
(*args: Any, **kwargs: Any)
|
68,813 |
triad.collections.schema
|
__add__
|
Add a schema like object to the current schema
:param obj: a schema like object
.. admonition:: Examples
.. code-block:: python
s = Schema("a:int,b:str")
s = s + "c:long" + ("d",int)
|
def __add__(self, obj: Any) -> "Schema":
    """Return a new schema combining this schema with ``obj``;
    ``self`` is left unmodified.

    :param obj: a schema like object
    :return: the combined schema

    .. admonition:: Examples

        .. code-block:: python

            s = Schema("a:int,b:str")
            s = s + "c:long" + ("d",int)
    """
    combined = self.copy()
    combined.append(obj)
    return combined
|
(self, obj: Any) -> triad.collections.schema.Schema
|
68,814 |
triad.collections.schema
|
__contains__
|
Check if the schema contains the key.
:param key: a column name, a list of column names, a data
type like object or a schema like object
:return: True if the schema contains the object
|
def __contains__(self, key: Any) -> bool:  # noqa: C901
    """Check if the schema contains the key.

    :param key: a column name, a list of column names, a data
      type like object or a schema like object
    :return: True if the schema contains the object
    """
    if key is None:
        return False
    if isinstance(key, str) and ":" not in key:
        # plain column name lookup
        return super().__contains__(key)
    if isinstance(key, pa.Field):
        # present only when both the name and the type match
        existing = super().get(key.name, None)
        return existing is not None and key.type == existing.type
    if isinstance(key, Schema):
        return all(f in self for f in key.values())
    if isinstance(key, List):
        return all(f in self for f in key)
    # schema expressions and any other schema like object
    return Schema(key) in self
|
(self, key: Any) -> bool
|
68,818 |
triad.collections.schema
|
__eq__
|
Check if the two schemas are equal
:param other: a schema like object
:return: True if the two schemas are equal
|
def __eq__(self, other: Any) -> bool:
    """Check if the two schemas are equal

    :param other: a schema like object
    :return: True if the two schemas are equal
    """
    # Equality is delegated to ``is_like`` with no equal_groups (exact match).
    return self.is_like(other)
|
(self, other: Any) -> bool
|
68,819 |
triad.collections.schema
|
__iadd__
|
Append a schema like object to the current schema
:param obj: a schema like object
.. admonition:: Examples
.. code-block:: python
s = Schema("a:int,b:str")
s += "c:long"
s += ("d",int)
|
def __iadd__(self, obj: Any) -> "Schema":
    """In-place append of a schema like object (``+=``).

    :param obj: a schema like object

    .. admonition:: Examples

        .. code-block:: python

            s = Schema("a:int,b:str")
            s += "c:long"
            s += ("d",int)
    """
    self.append(obj)
    return self
|
(self, obj: Any) -> triad.collections.schema.Schema
|
68,820 |
triad.collections.schema
|
__init__
| null |
def __init__(self, *args: Any, **kwargs: Any):
    """Construct a Schema from schema like objects.

    :param args: one or multiple schema like objects, combined in order
    :param kwargs: column name to data type like object pairs
    :raises SchemaError: if both ``args`` and ``kwargs`` are provided
    """
    if len(args) > 0 and len(kwargs) > 0:
        raise SchemaError("Can't set both *args and **kwargs")
    if len(args) == 1:  # duplicate code for better performance
        # fast paths for common single-argument constructions that can
        # bypass the generic append() machinery
        if isinstance(args[0], Schema):
            super().__init__(args[0])  # type: ignore
            return
        fields: Optional[List[pa.Field]] = None
        if isinstance(args[0], str):
            fields = list(expression_to_schema(args[0]))
        if isinstance(args[0], pa.Schema):
            fields = list(args[0])
        if isinstance(args[0], pa.Field):
            fields = [args[0]]
        if fields is not None:
            fields = [self._validate_field(f) for f in fields]
            super().__init__([(x.name, x) for x in fields])
            return
    # generic path: build empty, then append everything
    super().__init__()
    if len(args) > 0:
        self.append(list(args))
    elif len(kwargs) > 0:
        self.append(kwargs)
|
(self, *args: Any, **kwargs: Any)
|
68,821 |
triad.collections.schema
|
__isub__
|
Removing columns from a schema in place is not allowed
|
def __isub__(self, obj: Any) -> "Schema":
    """Removing columns from a schema in place is not allowed"""
    # Use ``-`` (__sub__) to obtain a new schema without the columns instead.
    raise SchemaError("'-=' is not allowed for Schema")
|
(self, obj: Any) -> triad.collections.schema.Schema
|
68,822 |
triad.collections.schema
|
__repr__
|
Convert to the string representation of the schema
|
def __repr__(self) -> str:
    """Return the schema expression string representation."""
    pa_schema = self.pyarrow_schema
    return schema_to_expression(pa_schema)
|
(self) -> str
|
68,823 |
triad.collections.schema
|
__setitem__
| null |
def __setitem__(  # type: ignore
    self, name: str, value: Any, *args: List[Any], **kwds: Dict[str, Any]
) -> None:
    """Add a new column; overwriting an existing column is not allowed.

    :param name: column name
    :param value: a pyarrow.Field, a pyarrow.DataType or a type like object
    :raises SchemaError: if ``name`` already exists, a given field's name
      doesn't match ``name``, or the resulting type is not supported
    """
    assert_arg_not_none(value, "value")
    if name in self:  # update existing value is not allowed
        raise SchemaError(f"{name} already exists in {self}")
    if isinstance(value, pa.Field):
        assert_or_throw(
            name == value.name, SchemaError(f"{name} doesn't match {value}")
        )
    else:
        # normalize a DataType or any type like object into a pa.Field
        dtype = value if isinstance(value, pa.DataType) else to_pa_datatype(value)
        value = pa.field(name, dtype)
    assert_or_throw(
        is_supported(value.type), SchemaError(f"{value} is not supported")
    )
    super().__setitem__(name, value, *args, **kwds)  # type: ignore
|
(self, name: str, value: Any, *args: List[Any], **kwds: Dict[str, Any]) -> NoneType
|
68,825 |
triad.collections.schema
|
__sub__
|
Remove columns or schema from the schema.
:param obj: one column name, a list/set of column names or
a schema like object
:return: a schema excluding the columns in ``obj``
.. note::
If ``obj`` violates any of the following conditions, ``SchemaError``
will be raised:
* all columns in ``obj`` must be found in the schema
* If ``obj`` is a schema like object, the types must also match
|
def __sub__(self, obj: Any) -> "Schema":
    """Return a new schema excluding the columns in ``obj``.

    :param obj: one column name, a list/set of column names or
      a schema like object
    :return: a schema excluding the columns in ``obj``

    .. note::

        If ``obj`` violates any of the following conditions, ``SchemaError``
        will be raised:

        * all columns in ``obj`` must be found in the schema
        * If ``obj`` is a schema like object, the types must also match
    """
    # Strict removal: keys must exist, and types (when given) must match.
    return self.remove(
        obj,
        require_type_match=True,
        ignore_key_mismatch=False,
        ignore_type_mismatch=False,
    )
|
(self, obj: Any) -> triad.collections.schema.Schema
|
68,826 |
triad.collections.schema
|
_append_pa_schema
| null |
def _append_pa_schema(self, other: pa.Schema) -> "Schema":
    # Add every (name, type) pair from ``other``; duplicate names raise
    # through __setitem__.
    for name, dtype in zip(other.names, other.types):
        self[name] = dtype
    return self
|
(self, other: pyarrow.lib.Schema) -> triad.collections.schema.Schema
|
68,828 |
triad.collections.schema
|
_pre_update
| null |
def _pre_update(self, op: str, need_reindex: bool = True) -> None:
    # Only __setitem__ (i.e. appending a new column) may mutate a Schema.
    if op != "__setitem__":
        raise SchemaError(f"{op} is not allowed in Schema")
    super()._pre_update(op, need_reindex)
|
(self, op: str, need_reindex: bool = True) -> NoneType
|
68,829 |
triad.collections.schema
|
_validate_field
| null |
def _validate_field(self, field: pa.Field) -> pa.Field:
    # Reject pyarrow types that triad does not support.
    if not is_supported(field.type):
        raise SchemaError(f"{field} type is not supported")
    return field
|
(self, field: pyarrow.lib.Field) -> pyarrow.lib.Field
|
68,830 |
triad.collections.schema
|
alter
|
Alter the schema with a subschema
:param subschema: a schema like object
:return: the altered schema
|
def alter(self, subschema: Any) -> "Schema":
    """Alter the schema with a subschema

    :param subschema: a schema like object
    :return: the altered schema
    """
    if subschema is None:
        return self
    sub = Schema(subschema)
    # every column of the subschema must exist in this schema
    if sub.names not in self:
        raise ValueError(f"{sub.names} are not all in {self}")
    # keep original column order; take types from ``sub`` where present
    return Schema([(k, sub.get(k, v)) for k, v in self.items()])
|
(self, subschema: Any) -> triad.collections.schema.Schema
|
68,831 |
triad.collections.schema
|
append
|
Append schema like object to the current schema. Only new columns
are allowed.
:raises SchemaError: if a column exists or is invalid or obj is not convertible
:return: the Schema object itself
|
def append(self, obj: Any) -> "Schema":  # noqa: C901
    """Append schema like object to the current schema. Only new columns
    are allowed.

    :param obj: a schema like object (None is a no-op)
    :raises SchemaError: if a column exists or is invalid or obj is not convertible
    :return: the Schema object itself
    """
    try:
        if obj is None:
            return self
        elif isinstance(obj, pa.Field):
            self[obj.name] = obj.type
        elif isinstance(obj, str):
            # schema expression such as "a:int,b:str"
            self._append_pa_schema(expression_to_schema(obj))
        elif isinstance(obj, Dict):
            for k, v in obj.items():
                self[k] = v
        elif isinstance(obj, pa.Schema):
            self._append_pa_schema(obj)
        elif isinstance(obj, pd.DataFrame):
            # infer the schema from the dataframe
            self._append_pa_schema(PD_UTILS.to_schema(obj))
        elif isinstance(obj, Tuple):  # type: ignore
            self[obj[0]] = obj[1]
        elif isinstance(obj, List):
            for x in obj:
                self.append(x)
        else:
            raise SchemaError(f"Invalid schema to add {obj}")
        return self
    except SchemaError:
        # propagate SchemaError unchanged instead of re-wrapping below
        raise
    except Exception as e:
        raise SchemaError(str(e))
|
(self, obj: Any) -> triad.collections.schema.Schema
|
68,832 |
triad.collections.schema
|
assert_not_empty
|
Raise exception if schema is empty
|
def assert_not_empty(self) -> "Schema":
    """Raise exception if schema is empty"""
    if len(self) == 0:
        raise SchemaError("Schema can't be empty")
    return self
|
(self) -> triad.collections.schema.Schema
|
68,834 |
triad.collections.schema
|
copy
|
Clone Schema object
:return: cloned object
|
def copy(self) -> "Schema":
"""Clone Schema object
:return: cloned object
"""
other = super().copy()
assert isinstance(other, Schema)
return other
|
(self) -> triad.collections.schema.Schema
|
68,835 |
triad.collections.schema
|
create_empty_arrow_table
|
Create an empty pyarrow table based on the schema
|
def create_empty_arrow_table(self) -> pa.Table:
"""Create an empty pyarrow table based on the schema"""
if not hasattr(pa.Table, "from_pylist"): # pragma: no cover
arr = [pa.array([])] * len(self)
return pa.Table.from_arrays(arr, schema=self.pa_schema)
return pa.Table.from_pylist([], schema=self.pa_schema)
|
(self) -> pyarrow.lib.Table
|
68,836 |
triad.collections.schema
|
create_empty_pandas_df
|
Create an empty pandas dataframe based on the schema
:param use_extension_types: if True, use pandas extension types,
default False
:param use_arrow_dtype: if True and when pandas supports ``ArrowDType``,
use pyarrow types, default False
:return: empty pandas dataframe
|
def create_empty_pandas_df(
self, use_extension_types: bool = False, use_arrow_dtype: bool = False
) -> pd.DataFrame:
"""Create an empty pandas dataframe based on the schema
:param use_extension_types: if True, use pandas extension types,
default False
:param use_arrow_dtype: if True and when pandas supports ``ArrowDType``,
use pyarrow types, default False
:return: empty pandas dataframe
"""
dtypes = self.to_pandas_dtype(
use_extension_types=use_extension_types,
use_arrow_dtype=use_arrow_dtype,
)
return pd.DataFrame({k: pd.Series(dtype=v) for k, v in dtypes.items()})
|
(self, use_extension_types: bool = False, use_arrow_dtype: bool = False) -> pandas.core.frame.DataFrame
|
68,838 |
triad.collections.schema
|
exclude
|
Exclude columns from the current schema which are also in ``other``.
``other`` can contain columns that are not in the current schema, they
will be ignored.
:param other: one column name, a list/set of column names or
a schema like object
:param require_type_match: if True, a match requires the same key
and same type (if ``obj`` contains type), otherwise, only the
key needs to match, default True
:param ignore_type_mismatch: if False, when keys match but types don't
(if ``obj`` contains type), raise an exception ``SchemaError``,
default False
:return: a schema excluding the columns in ``other``
|
def exclude(
self,
other: Any,
require_type_match: bool = True,
ignore_type_mismatch: bool = False,
) -> "Schema":
"""Exclude columns from the current schema which are also in ``other``.
``other`` can contain columns that are not in the current schema, they
will be ignored.
:param other: one column name, a list/set of column names or
a schema like object
:param require_type_match: if True, a match requires the same key
and same type (if ``obj`` contains type), otherwise, only the
key needs to match, default True
:param ignore_type_mismatch: if False, when keys match but types don't
(if ``obj`` contains type), raise an exception ``SchemaError``,
default False
:return: a schema excluding the columns in ``other``
"""
return self.remove(
other,
ignore_key_mismatch=True,
require_type_match=require_type_match,
ignore_type_mismatch=ignore_type_mismatch,
)
|
(self, other: Any, require_type_match: bool = True, ignore_type_mismatch: bool = False) -> triad.collections.schema.Schema
|
68,839 |
triad.collections.schema
|
extract
|
Extract a sub schema from the schema based on the columns in ``obj``
:param obj: one column name, a list/set of column names or
a schema like object
:param ignore_key_mismatch: if True, ignore the non-existing keys,
default False
:param require_type_match: if True, a match requires the same key
and same type (if ``obj`` contains type), otherwise, only the
key needs to match, default True
:param ignore_type_mismatch: if False, when keys match but types don't
(if ``obj`` contains type), raise an exception ``SchemaError``,
default False
:return: a sub-schema containing the columns in ``obj``
|
def extract( # noqa: C901
self,
obj: Any,
ignore_key_mismatch: bool = False,
require_type_match: bool = True,
ignore_type_mismatch: bool = False,
) -> "Schema":
"""Extract a sub schema from the schema based on the columns in ``obj``
:param obj: one column name, a list/set of column names or
a schema like object
:param ignore_key_mismatch: if True, ignore the non-existing keys,
default False
:param require_type_match: if True, a match requires the same key
and same type (if ``obj`` contains type), otherwise, only the
key needs to match, default True
:param ignore_type_mismatch: if False, when keys match but types don't
(if ``obj`` contains type), raise an exception ``SchemaError``,
default False
:return: a sub-schema containing the columns in ``obj``
"""
if obj is None:
return Schema()
if isinstance(obj, str):
if ":" in obj: # expression
ps = expression_to_schema(obj)
pairs: List[Tuple[str, pa.DataType]] = list(zip(ps.names, ps.types))
else:
pairs = [(obj, None)] # single key
elif isinstance(obj, (pa.Schema, Schema)):
pairs = list(zip(obj.names, obj.types))
elif isinstance(obj, List):
fields: List[pa.Field] = []
for x in obj:
if isinstance(x, str) and ":" not in x:
if x not in self:
if not ignore_key_mismatch:
raise SchemaError(f"Can't extract {x} from {self}")
else:
fields.append(self[x])
else:
fields += self.extract(
x,
ignore_key_mismatch=ignore_key_mismatch,
require_type_match=require_type_match,
ignore_type_mismatch=ignore_type_mismatch,
).fields
return Schema(pa.schema(fields))
else:
return self.extract(
Schema(obj),
ignore_key_mismatch=ignore_key_mismatch,
require_type_match=require_type_match,
ignore_type_mismatch=ignore_type_mismatch,
)
fields = []
for k, v in pairs:
if k not in self:
if ignore_key_mismatch:
continue
raise SchemaError(f"Can't extract {k} from {self}")
if v is None:
fields.append(self[k])
else:
tp = self[k].type
if not require_type_match or tp == v:
fields.append(self[k])
elif not ignore_type_mismatch:
raise SchemaError(
f"Unable to extract {k}:{v} from {self}, type mismatch"
)
return Schema(pa.schema(fields))
|
(self, obj: Any, ignore_key_mismatch: bool = False, require_type_match: bool = True, ignore_type_mismatch: bool = False) -> triad.collections.schema.Schema
|
68,844 |
triad.collections.schema
|
intersect
|
Extract the sub-schema from the current schema which are also in
``other``. ``other`` can contain columns that are not in the current schema,
they will be ignored.
:param other: one column name, a list/set of column names or
a schema like object
:param require_type_match: if True, a match requires the same key
and same type (if ``obj`` contains type), otherwise, only the
key needs to match, default True
:param ignore_type_mismatch: if False, when keys match but types don't
(if ``obj`` contains type), raise an exception ``SchemaError``,
default False
:param use_other_order: if True, the output schema will use the column order
of ``other``, default False
:return: the intersected schema
|
def intersect(
self,
other: Any,
require_type_match: bool = True,
ignore_type_mismatch: bool = True,
use_other_order: bool = False,
) -> "Schema":
"""Extract the sub-schema from the current schema which are also in
``other``. ``other`` can contain columns that are not in the current schema,
they will be ignored.
:param other: one column name, a list/set of column names or
a schema like object
:param require_type_match: if True, a match requires the same key
and same type (if ``obj`` contains type), otherwise, only the
key needs to match, default True
:param ignore_type_mismatch: if False, when keys match but types don't
(if ``obj`` contains type), raise an exception ``SchemaError``,
default False
:param use_other_order: if True, the output schema will use the column order
of ``other``, default False
:return: the intersected schema
"""
if not use_other_order:
diff = self.exclude(
other,
require_type_match=require_type_match,
ignore_type_mismatch=ignore_type_mismatch,
)
return self - diff
else:
return self.extract(
other,
require_type_match=require_type_match,
ignore_type_mismatch=ignore_type_mismatch,
)
|
(self, other: Any, require_type_match: bool = True, ignore_type_mismatch: bool = True, use_other_order: bool = False) -> triad.collections.schema.Schema
|
68,845 |
triad.collections.schema
|
is_like
|
Check if the two schemas are equal or similar
:param other: a schema like object
:param equal_groups: a list of list of functions to check if two types
are equal, default None
:return: True if the two schemas are equal
.. admonition:: Examples
.. code-block:: python
s = Schema("a:int,b:str")
assert s.is_like("a:int,b:str")
assert not s.is_like("a:long,b:str")
assert s.is_like("a:long,b:str", equal_groups=[(pa.types.is_integer,)])
|
def is_like(
self,
other: Any,
equal_groups: Optional[List[List[Callable[[pa.DataType], bool]]]] = None,
) -> bool:
"""Check if the two schemas are equal or similar
:param other: a schema like object
:param equal_groups: a list of list of functions to check if two types
are equal, default None
:return: True if the two schemas are equal
.. admonition:: Examples
.. code-block:: python
s = Schema("a:int,b:str")
assert s.is_like("a:int,b:str")
assert not s.is_like("a:long,b:str")
assert s.is_like("a:long,b:str", equal_groups=[(pa.types.is_integer,)])
"""
if other is None:
return False
if other is self:
return True
if isinstance(other, Schema):
_other = other
elif isinstance(other, str):
if equal_groups is None:
return self.__repr__() == other
_other = Schema(other)
else:
_other = Schema(other)
return pa_schemas_equal(
self.pa_schema, _other.pa_schema, equal_groups=equal_groups
)
|
(self, other: Any, equal_groups: Optional[List[List[Callable[[pyarrow.lib.DataType], bool]]]] = None) -> bool
|
68,850 |
triad.collections.schema
|
remove
|
Remove columns or schema from the schema
:param obj: one column name, a list/set of column names or
a schema like object
:param ignore_key_mismatch: if True, ignore the non-existing keys,
default False
:param require_type_match: if True, a match requires the same key
and same type (if ``obj`` contains type), otherwise, only the
key needs to match, default True
:param ignore_type_mismatch: if False, when keys match but types don't
(if ``obj`` contains type), raise an exception ``SchemaError``,
default False
:return: a schema excluding the columns in ``obj``
|
def remove( # noqa: C901
self,
obj: Any,
ignore_key_mismatch: bool = False,
require_type_match: bool = True,
ignore_type_mismatch: bool = False,
) -> "Schema":
"""Remove columns or schema from the schema
:param obj: one column name, a list/set of column names or
a schema like object
:param ignore_key_mismatch: if True, ignore the non-existing keys,
default False
:param require_type_match: if True, a match requires the same key
and same type (if ``obj`` contains type), otherwise, only the
key needs to match, default True
:param ignore_type_mismatch: if False, when keys match but types don't
(if ``obj`` contains type), raise an exception ``SchemaError``,
default False
:return: a schema excluding the columns in ``obj``
"""
if obj is None:
return self.copy()
target = self
if isinstance(obj, str):
if ":" in obj: # expression
ps = expression_to_schema(obj)
pairs: List[Tuple[str, pa.DataType]] = list(zip(ps.names, ps.types))
else:
pairs = [(obj, None)] # single key
elif isinstance(obj, (pa.Schema, Schema)):
pairs = list(zip(obj.names, obj.types))
elif isinstance(obj, (List, Set)):
keys: List[str] = []
other: List[Any] = []
for x in obj:
if isinstance(x, str) and ":" not in x:
keys.append(x)
else:
other.append(x)
pairs = [(x, None) for x in keys]
for o in other:
target = target.remove(
o,
ignore_key_mismatch=ignore_key_mismatch,
require_type_match=require_type_match,
ignore_type_mismatch=ignore_type_mismatch,
)
else:
return self.remove(
Schema(obj),
ignore_key_mismatch=ignore_key_mismatch,
require_type_match=require_type_match,
ignore_type_mismatch=ignore_type_mismatch,
)
od = OrderedDict(target)
for k, v in pairs:
# k = k.strip()
# if k == "":
# continue
if k not in od:
if ignore_key_mismatch:
continue
raise SchemaError(
f"Can't remove {quote_name(k)} from {target} (not found)"
)
if v is None:
del od[k]
else:
tp = od[k].type
if not require_type_match or tp == v:
del od[k]
elif not ignore_type_mismatch:
raise SchemaError(
f"Unable to remove {k}:{v} from {self}, type mismatch"
)
return Schema(od)
|
(self, obj: Any, ignore_key_mismatch: bool = False, require_type_match: bool = True, ignore_type_mismatch: bool = False) -> triad.collections.schema.Schema
|
68,851 |
triad.collections.schema
|
rename
|
Rename the current schema and generate a new one
:param columns: dictionary to map from old to new column names
:return: renamed schema object
|
def rename(self, columns: Dict[str, str], ignore_missing: bool = False) -> "Schema":
"""Rename the current schema and generate a new one
:param columns: dictionary to map from old to new column names
:return: renamed schema object
"""
if not ignore_missing:
for x in columns:
if x not in self:
raise SchemaError(f"Failed to rename: {x} not in {self}")
pairs = [
(k if k not in columns else columns[k], v.type) for k, v in self.items()
]
return Schema(pairs)
|
(self, columns: Dict[str, str], ignore_missing: bool = False) -> triad.collections.schema.Schema
|
68,854 |
triad.collections.schema
|
to_pandas_dtype
|
Convert as `dtype` dict for pandas dataframes.
:param use_extension_types: if True, use pandas extension types,
default False
:param use_arrow_dtype: if True and when pandas supports ``ArrowDType``,
use pyarrow types, default False
.. note::
* If ``use_extension_types`` is False and ``use_arrow_dtype`` is True,
it converts all types to ``ArrowDType``
* If both are true, it converts types to the numpy backend nullable
dtypes if possible, otherwise, it converts to ``ArrowDType``
|
def to_pandas_dtype(
self, use_extension_types: bool = False, use_arrow_dtype: bool = False
) -> Dict[str, np.dtype]:
"""Convert as `dtype` dict for pandas dataframes.
:param use_extension_types: if True, use pandas extension types,
default False
:param use_arrow_dtype: if True and when pandas supports ``ArrowDType``,
use pyarrow types, default False
.. note::
* If ``use_extension_types`` is False and ``use_arrow_dtype`` is True,
it converts all types to ``ArrowDType``
* If both are true, it converts types to the numpy backend nullable
dtypes if possible, otherwise, it converts to ``ArrowDType``
"""
return to_pandas_dtype(
self.pa_schema,
use_extension_types=use_extension_types,
use_arrow_dtype=use_arrow_dtype,
)
|
(self, use_extension_types: bool = False, use_arrow_dtype: bool = False) -> Dict[str, numpy.dtype]
|
68,855 |
triad.collections.schema
|
transform
|
Transform the current schema to a new schema
:raises SchemaError: if there is any exception
:return: transformed schema
.. admonition:: Examples
.. code-block:: python
s=Schema("a:int,b:int,c:str")
s.transform("x:str") # x:str
# add
s.transform("*,x:str") # a:int,b:int,c:str,x:str
s.transform("*","x:str") # a:int,b:int,c:str,x:str
s.transform("*",x=str) # a:int,b:int,c:str,x:str
# subtract
s.transform("*-c,a") # b:int
s.transform("*-c-a") # b:int
s.transform("*~c,a,x") # b:int # ~ means exlcude if exists
s.transform("*~c~a~x") # b:int # ~ means exlcude if exists
# + means overwrite existing and append new
s.transform("*+e:str,b:str,d:str") # a:int,b:str,c:str,e:str,d:str
# you can have multiple operations
s.transform("*+b:str-a") # b:str,c:str
# callable
s.transform(lambda s:s.fields[0]) # a:int
s.transform(lambda s:s.fields[0], lambda s:s.fields[2]) # a:int,c:str
|
def transform(self, *args: Any, **kwargs: Any) -> "Schema":
"""Transform the current schema to a new schema
:raises SchemaError: if there is any exception
:return: transformed schema
.. admonition:: Examples
.. code-block:: python
s=Schema("a:int,b:int,c:str")
s.transform("x:str") # x:str
# add
s.transform("*,x:str") # a:int,b:int,c:str,x:str
s.transform("*","x:str") # a:int,b:int,c:str,x:str
s.transform("*",x=str) # a:int,b:int,c:str,x:str
# subtract
s.transform("*-c,a") # b:int
s.transform("*-c-a") # b:int
s.transform("*~c,a,x") # b:int # ~ means exlcude if exists
s.transform("*~c~a~x") # b:int # ~ means exlcude if exists
# + means overwrite existing and append new
s.transform("*+e:str,b:str,d:str") # a:int,b:str,c:str,e:str,d:str
# you can have multiple operations
s.transform("*+b:str-a") # b:str,c:str
# callable
s.transform(lambda s:s.fields[0]) # a:int
s.transform(lambda s:s.fields[0], lambda s:s.fields[2]) # a:int,c:str
"""
try:
result = Schema()
for a in args:
if callable(a):
result += a(self)
elif isinstance(a, str):
op_pos = [x[0] for x in safe_search_out_of_quote(a, "-~+")]
op_pos.append(len(a))
s = Schema(
safe_replace_out_of_quote(a[: op_pos[0]], "*", str(self))
)
for i in range(0, len(op_pos) - 1):
op, expr = a[op_pos[i]], a[(op_pos[i] + 1) : op_pos[i + 1]]
if op in ["-", "~"]:
cols = safe_split_and_unquote(
expr, ",", on_unquoted_empty="ignore"
)
s = s.exclude(cols) if op == "~" else s - cols
else: # +
overwrite = Schema(expr)
s = Schema(
[(k, overwrite.get(k, v)) for k, v in s.items()]
).union_with(overwrite)
result += s
else:
result += a
return result + Schema(kwargs)
except SchemaError:
raise
except Exception as e:
raise SchemaError(e)
|
(self, *args: Any, **kwargs: Any) -> triad.collections.schema.Schema
|
68,856 |
triad.collections.schema
|
union
|
Union the ``other`` schema
:param other: a schema like object
:param require_type_match: if True, a match requires the same key
and same type (if ``obj`` contains type), otherwise, only the
key needs to match, default True
:return: the new unioned schema
|
def union(self, other: Any, require_type_match: bool = False) -> "Schema":
"""Union the ``other`` schema
:param other: a schema like object
:param require_type_match: if True, a match requires the same key
and same type (if ``obj`` contains type), otherwise, only the
key needs to match, default True
:return: the new unioned schema
"""
return self.copy().union_with(other, require_type_match=require_type_match)
|
(self, other: Any, require_type_match: bool = False) -> triad.collections.schema.Schema
|
68,857 |
triad.collections.schema
|
union_with
|
Union the ``other`` schema into the current schema
:param other: a schema like object
:param require_type_match: if True, a match requires the same key
and same type (if ``obj`` contains type), otherwise, only the
key needs to match, default True
:return: the current schema
|
def union_with(self, other: Any, require_type_match: bool = False) -> "Schema":
"""Union the ``other`` schema into the current schema
:param other: a schema like object
:param require_type_match: if True, a match requires the same key
and same type (if ``obj`` contains type), otherwise, only the
key needs to match, default True
:return: the current schema
"""
if not isinstance(other, Schema):
other = Schema(other)
try:
other = other.exclude(
self, require_type_match=require_type_match, ignore_type_mismatch=False
)
self += other
return self
except Exception as e:
raise SchemaError(f"Unable to union {self} with {other}: {str(e)}")
|
(self, other: Any, require_type_match: bool = False) -> triad.collections.schema.Schema
|
68,858 |
triad.utils.threading
|
SerializableRLock
|
A serialization safe wrapper of :external+python:class:`threading.RLock`
|
class SerializableRLock:
"""A serialization safe wrapper of :external+python:class:`threading.RLock`"""
def __init__(self):
self._lock = RLock()
def __enter__(self) -> Any:
return self._lock.__enter__()
def __exit__(
self, exception_type: Any, exception_value: Any, exception_traceback: Any
) -> Any:
return self._lock.__exit__(exception_type, exception_value, exception_traceback)
def __getstate__(self) -> Dict[str, Any]:
return {}
def __setstate__(self, data: Dict[str, Any]) -> None:
self._lock = RLock()
|
()
|
68,859 |
triad.utils.threading
|
__enter__
| null |
def __enter__(self) -> Any:
return self._lock.__enter__()
|
(self) -> Any
|
68,860 |
triad.utils.threading
|
__exit__
| null |
def __exit__(
self, exception_type: Any, exception_value: Any, exception_traceback: Any
) -> Any:
return self._lock.__exit__(exception_type, exception_value, exception_traceback)
|
(self, exception_type: Any, exception_value: Any, exception_traceback: Any) -> Any
|
68,861 |
triad.utils.threading
|
__getstate__
| null |
def __getstate__(self) -> Dict[str, Any]:
return {}
|
(self) -> Dict[str, Any]
|
68,862 |
triad.utils.threading
|
__init__
| null |
def __init__(self):
self._lock = RLock()
|
(self)
|
68,863 |
triad.utils.threading
|
__setstate__
| null |
def __setstate__(self, data: Dict[str, Any]) -> None:
self._lock = RLock()
|
(self, data: Dict[str, Any]) -> NoneType
|
68,864 |
triad.utils.assertion
|
assert_arg_not_none
|
Assert an argument is not None, otherwise raise exception
:param obj: argument value
:param arg_name: argument name, if None or empty, it will use `msg`
:param msg: only when `arg_name` is None or empty, this value is used
:raises NoneArgumentError: with `arg_name` or `msg`
|
def assert_arg_not_none(obj: Any, arg_name: str = "", msg: str = "") -> None:
"""Assert an argument is not None, otherwise raise exception
:param obj: argument value
:param arg_name: argument name, if None or empty, it will use `msg`
:param msg: only when `arg_name` is None or empty, this value is used
:raises NoneArgumentError: with `arg_name` or `msg`
"""
if obj is None:
if arg_name != "" and arg_name is not None:
msg = f"{arg_name} can't be None"
msg = msg or ""
raise NoneArgumentError(msg)
|
(obj: Any, arg_name: str = '', msg: str = '') -> NoneType
|
68,865 |
triad.utils.assertion
|
assert_or_throw
|
Assert on expression and throw custom exception
:param bool_exp: boolean expression to assert on
:param exception: a custom Exception instance, or any other object that
will be stringified to instantiate an AssertionError, or a function
that can generate the supported data types
.. admonition:: Examples
.. code-block:: python
assert_or_throw(True, "assertion error")
assert_or_throw(False) # raise AssertionError
assert_or_throw(False, "assertion error") # raise AssertionError
assert_or_throw(False, TypeError("assertion error")) # raise TypeError
# Lazy evaluations is useful when constructing the error
# itself is expensive or error-prone. With lazy evaluations, happy
# path will be fast and error free.
def fail(): # a function that is slow and wrong
sleep(10)
raise TypeError
assert_or_throw(True, fail()) # (unexpectedly) raise TypeError
assert_or_throw(True, fail) # no exception
assert_or_throw(True, lambda: "a" + fail()) # no exception
assert_or_throw(False, lambda: "a" + fail()) # raise TypeError
|
def assert_or_throw(bool_exp: bool, exception: Any = None) -> None:
"""Assert on expression and throw custom exception
:param bool_exp: boolean expression to assert on
:param exception: a custom Exception instance, or any other object that
will be stringfied and instantiate an AssertionError, or a function
that can generate the supported data types
.. admonition:: Examples
.. code-block:: python
assert_or_throw(True, "assertion error")
assert_or_throw(False) # raise AssertionError
assert_or_throw(False, "assertion error") # raise AssertionError
assert_or_throw(False, TypeError("assertion error")) # raise TypeError
# Lazy evaluations is useful when constructing the error
# itself is expensive or error-prone. With lazy evaluations, happy
# path will be fast and error free.
def fail(): # a function that is slow and wrong
sleep(10)
raise TypeError
assert_or_throw(True, fail()) # (unexpectedly) raise TypeError
assert_or_throw(True, fail) # no exception
assert_or_throw(True, lambda: "a" + fail()) # no exception
assert_or_throw(False, lambda: "a" + fail()) # raise TypeError
"""
if not bool_exp:
_exception: Any = exception
if callable(exception):
_exception = exception()
if _exception is None:
raise AssertionError()
if isinstance(_exception, Exception):
raise _exception
if isinstance(_exception, str):
raise AssertionError(_exception)
raise AssertionError(str(_exception))
|
(bool_exp: bool, exception: Optional[Any] = None) -> NoneType
|
68,867 |
triad.utils.dispatcher
|
conditional_broadcaster
|
Decorating a conditional broadcaster that will run **all** registered functions in
other modules/packages.
.. admonition:: Examples
Assume in ``pkg1.module1``, you have:
.. code-block:: python
from triad import conditional_broadcaster
@conditional_broadcaster(entry_point="my.plugins")
def myprint(obj):
raise NotImplementedError
@conditional_broadcaster(entry_point="my.plugins")
def myprint2(obj):
raise NotImplementedError
In another package ``pkg2``, in ``setup.py``, you define
an entry point as:
.. code-block:: python
setup(
...,
entry_points={
"my.plugins": [
"my = pkg2.module2"
]
},
)
And in ``pkg2.module2``:
.. code-block:: python
from pkg1.module1 import get_len
@myprint.candidate(lambda obj: isinstance(obj, str))
def myprinta(obj:str) -> None:
print(obj, "a")
@myprint.candidate(lambda obj: isinstance(obj, str) and obj == "x")
def myprintb(obj:str) -> None:
print(obj, "b")
Now, both functions will be automatically registered when ``pkg2``
is installed in the environment. In another ``pkg3``:
.. code-block:: python
from pkg1.module1 import get_len
myprint("x") # calling both myprinta and myprinta
myprint("y") # calling myprinta only
myprint2("x") # raise NotImplementedError due to no matching candidates
.. note::
Only when no matching candidate found, the implementation of the original
function will be used. If you don't want to throw an error, then use ``pass`` in
the original function instead.
.. seealso::
Please read :meth:`~.ConditionalDispatcher.candidate` for details about the
matching function and priority settings.
:param default_func: the function to decorate
:param entry_point: the entry point to preload dispatchers, defaults to None
|
def conditional_broadcaster(
default_func: Optional[Callable[..., Any]] = None, entry_point: Optional[str] = None
) -> Callable:
"""Decorating a conditional broadcaster that will run **all** registered functions in
other modules/packages.
.. admonition:: Examples
Assume in ``pkg1.module1``, you have:
.. code-block:: python
from triad import conditional_broadcaster
@conditional_broadcaster(entry_point="my.plugins")
def myprint(obj):
raise NotImplementedError
@conditional_broadcaster(entry_point="my.plugins")
def myprint2(obj):
raise NotImplementedError
In another package ``pkg2``, in ``setup.py``, you define
an entry point as:
.. code-block:: python
setup(
...,
entry_points={
"my.plugins": [
"my = pkg2.module2"
]
},
)
And in ``pkg2.module2``:
.. code-block:: python
from pkg1.module1 import get_len
@myprint.candidate(lambda obj: isinstance(obj, str))
def myprinta(obj:str) -> None:
print(obj, "a")
@myprint.candidate(lambda obj: isinstance(obj, str) and obj == "x")
def myprintb(obj:str) -> None:
print(obj, "b")
Now, both functions will be automatically registered when ``pkg2``
is installed in the environement. In another ``pkg3``:
.. code-block:: python
from pkg1.module1 import get_len
myprint("x") # calling both myprinta and myprinta
myprint("y") # calling myprinta only
myprint2("x") # raise NotImplementedError due to no matching candidates
.. note::
Only when no matching candidate found, the implementation of the original
function will be used. If you don't want to throw an error, then use ``pass`` in
the original function instead.
.. seealso::
Please read :meth:`~.ConditionalDispatcher.candidate` for details about the
matching function and priority settings.
:param default_func: the function to decorate
:param entry_point: the entry point to preload dispatchers, defaults to None
"""
return (
( # type: ignore
lambda func: ConditionalDispatcher(
func, is_broadcast=True, entry_point=entry_point
)
)
if default_func is None
else ConditionalDispatcher(
default_func, is_broadcast=True, entry_point=entry_point
)
)
|
(default_func: Optional[Callable[..., Any]] = None, entry_point: Optional[str] = None) -> Callable
|
68,868 |
triad.utils.dispatcher
|
conditional_dispatcher
|
Decorating a conditional dispatcher that will run the **first matching** registered
functions in other modules/packages. This is a more general solution compared to
``functools.singledispatch``. You can write arbitrary matching functions according
to all the inputs of the function.
.. admonition:: Examples
Assume in ``pkg1.module1``, you have:
.. code-block:: python
from triad import conditional_dispatcher
@conditional_dispatcher(entry_point="my.plugins")
def get_len(obj):
raise NotImplementedError
In another package ``pkg2``, in ``setup.py``, you define
an entry point as:
.. code-block:: python
setup(
...,
entry_points={
"my.plugins": [
"my = pkg2.module2"
]
},
)
And in ``pkg2.module2``:
.. code-block:: python
from pkg1.module1 import get_len
@get_len.candidate(lambda obj: isinstance(obj, str))
def get_str_len(obj:str) -> int:
return len(obj)
@get_len.candidate(lambda obj: isinstance(obj, int) and obj == 10)
def get_int_len(obj:int) -> int:
return obj
Now, both functions will be automatically registered when ``pkg2``
is installed in the environment. In another ``pkg3``:
.. code-block:: python
from pkg1.module1 import get_len
assert get_len("abc") == 3 # calling get_str_len
assert get_len(10) == 10 # calling get_int_len
get_len(20) # raise NotImplementedError due to no matching candidates
.. seealso::
Please read :meth:`~.ConditionalDispatcher.candidate` for details about the
matching function and priority settings.
:param default_func: the function to decorate
:param entry_point: the entry point to preload dispatchers, defaults to None
|
def conditional_dispatcher(
default_func: Optional[Callable[..., Any]] = None, entry_point: Optional[str] = None
) -> Callable:
"""Decorating a conditional dispatcher that will run the **first matching** registered
functions in other modules/packages. This is a more general solution compared to
``functools.singledispatch``. You can write arbitrary matching functions according
to all the inputs of the function.
.. admonition:: Examples
Assume in ``pkg1.module1``, you have:
.. code-block:: python
from triad import conditional_dispatcher
@conditional_dispatcher(entry_point="my.plugins")
def get_len(obj):
raise NotImplementedError
In another package ``pkg2``, in ``setup.py``, you define
an entry point as:
.. code-block:: python
setup(
...,
entry_points={
"my.plugins": [
"my = pkg2.module2"
]
},
)
And in ``pkg2.module2``:
.. code-block:: python
from pkg1.module1 import get_len
@get_len.candidate(lambda obj: isinstance(obj, str))
def get_str_len(obj:str) -> int:
return len(obj)
@get_len.candidate(lambda obj: isinstance(obj, int) and obj == 10)
def get_int_len(obj:int) -> int:
return obj
Now, both functions will be automatically registered when ``pkg2``
is installed in the environement. In another ``pkg3``:
.. code-block:: python
from pkg1.module1 import get_len
assert get_len("abc") == 3 # calling get_str_len
assert get_len(10) == 10 # calling get_int_len
get_len(20) # raise NotImplementedError due to no matching candidates
.. seealso::
Please read :meth:`~.ConditionalDispatcher.candidate` for details about the
matching function and priority settings.
:param default_func: the function to decorate
:param entry_point: the entry point to preload dispatchers, defaults to None
"""
return (
( # type: ignore
lambda func: ConditionalDispatcher(
func, is_broadcast=False, entry_point=entry_point
)
)
if default_func is None
else ConditionalDispatcher(
default_func, is_broadcast=False, entry_point=entry_point
)
)
|
(default_func: Optional[Callable[..., Any]] = None, entry_point: Optional[str] = None) -> Callable
|
68,871 |
triad.utils.class_extension
|
extensible_class
|
The decorator making classes extensible by external methods
:param class_type: the class under the decorator
:return: the ``class_type``
.. admonition:: Examples
.. code-block:: python
@extensible_class
class A:
# It's recommended to implement __getattr__ so that
# PyLint will not complain about the dynamically added methods
def __getattr__(self, name):
raise NotImplementedError
@extension_method
def method(obj:A):
return 1
assert 1 == A().method()
.. note::
If the method name is already in the original class, a ValueError will be
thrown. You can't modify any built-in attribute.
|
def extensible_class(class_type: Type) -> Type:
    """Class decorator that opts ``class_type`` in to external method extension.

    Once decorated, functions annotated with :func:`extension_method` can be
    attached to the class as if they were regular instance methods.

    :param class_type: the class being decorated
    :return: the same ``class_type``, unchanged except for registration

    .. admonition:: Examples

        .. code-block:: python

            @extensible_class
            class A:
                # It's recommended to implement __getattr__ so that
                # PyLint will not complain about the dynamically added methods
                def __getattr__(self, name):
                    raise NotImplementedError

            @extension_method
            def method(obj:A):
                return 1

            assert 1 == A().method()

    .. note::

        If the method name is already in the original class, a ValueError will be
        thrown. You can't modify any built-in attribute.
    """
    # Record the class in the global extension registry; the class object
    # itself is returned untouched so the decorator is transparent.
    _CLASS_EXTENSIONS.register_type(class_type)
    return class_type
|
(class_type: Type) -> Type
|
68,872 |
triad.utils.class_extension
|
extension_method
|
The decorator to add functions as members of the
correspondent classes.
:param func: the function under the decorator
:param class_type: the parent class type, defaults to None
:param name: the specified class method name, defaults to None. If None
then ``func.__name__`` will be used as the method name
:param on_dup: action on name duplication, defaults to ``error``. ``error``
will throw a ValueError; ``ignore`` will take no action; ``overwrite``
will use the current method to overwrite.
:return: the underlying function
.. admonition:: Examples
.. code-block:: python
@extensible_class
class A:
# It's recommended to implement __getattr__ so that
# PyLint will not complain about the dynamically added methods
def __getattr__(self, name):
raise NotImplementedError
# The simplest way to use this decorator, the first argument of
# the method must be annotated, and the annotated type is the
# class type to add this method to.
@extension_method
def method1(obj:A):
return 1
assert 1 == A().method1()
# Or you can be explicit of the class type and the name of the
# method in the class. In this case, you don't have to annotate
# the first argument.
@extension_method(class_type=A, name="m3")
def method2(obj, b):
return 2 + b
assert 5 == A().m3(3)
.. note::
If the method name is already in the original class, a ValueError will be
thrown. You can't modify any built-in attribute.
|
def extension_method(
    func: Optional[Callable] = None,
    class_type: Optional[Type] = None,
    name: Optional[str] = None,
    on_dup: str = "error",
) -> Callable:
    """The decorator to add functions as members of the
    correspondent classes.

    :param func: the function under the decorator
    :param class_type: the parent class type, defaults to None
    :param name: the specified class method name, defaults to None. If None
        then ``func.__name__`` will be used as the method name
    :param on_dup: action on name duplication, defaults to ``error``. ``error``
        will throw a ValueError; ``ignore`` will take no action; ``overwrite``
        will use the current method to overwrite.
    :return: the underlying function

    .. admonition:: Examples

        .. code-block:: python

            @extensible_class
            class A:
                # It's recommended to implement __getattr__ so that
                # PyLint will not complain about the dynamically added methods
                def __getattr__(self, name):
                    raise NotImplementedError

            # The simplest way to use this decorator, the first argument of
            # the method must be annotated, and the annotated type is the
            # class type to add this method to.
            @extension_method
            def method1(obj:A):
                return 1

            assert 1 == A().method1()

            # Or you can be explicit of the class type and the name of the
            # method in the class. In this case, you don't have to annotate
            # the first argument.
            @extension_method(class_type=A, name="m3")
            def method2(obj, b):
                return 2 + b

            assert 5 == A().m3(3)

    .. note::

        If the method name is already in the original class, a ValueError will be
        thrown. You can't modify any built-in attribute.
    """

    def _register(_func: Callable) -> Callable:
        # Single registration path shared by both decorator forms
        # (@extension_method and @extension_method(...)); previously this
        # call was duplicated in each branch.
        _CLASS_EXTENSIONS.add_method(
            # When class_type is not given, infer it from the annotation
            # of the function's first argument.
            _get_first_arg_type(_func) if class_type is None else class_type,
            func=_func,
            name=name,
            on_dup=on_dup,
        )
        return _func

    # Bare usage (@extension_method) passes the function directly;
    # parameterized usage (@extension_method(...)) returns the registrar.
    return _register if func is None else _register(func)
|
(func: Optional[Callable] = None, class_type: Optional[Type] = None, name: Optional[str] = None, on_dup: str = 'error') -> Callable
|
68,873 |
triad.utils.iter
|
make_empty_aware
|
Make an iterable empty aware, or return itself if already empty aware
:param it: underlying iterable
:return: EmptyAwareIterable[T]
|
def make_empty_aware(it: Union[Iterable[T], Iterator[T]]) -> "EmptyAwareIterable[T]":
    """Wrap ``it`` in an :class:`EmptyAwareIterable` unless it already is one.

    :param it: underlying iterable or iterator
    :return: EmptyAwareIterable[T]
    """
    if isinstance(it, EmptyAwareIterable):
        # Already empty-aware; avoid double wrapping.
        return it
    return EmptyAwareIterable(it)
|
(it: Union[Iterable[~T], Iterator[~T]]) -> triad.utils.iter.EmptyAwareIterable[~T]
|
68,874 |
triad.utils.dispatcher
|
run_at_def
|
Decorator to run the function at declaration. This is useful when we want import
to trigger a function run (which can guarantee it runs only once).
.. admonition:: Examples
Assume the following python file is a module in your package,
then when you ``import package.module``, the two functions will run.
.. code-block:: python
from triad import run_at_def
@run_at_def
def register_something():
print("registered")
@run_at_def(a=1)
def register_something2(a):
print("registered", a)
:param run_at_def_func: the function to decorate
:param kwargs: the parameters to call this function
|
def run_at_def(run_at_def_func: Optional[Callable] = None, **kwargs: Any) -> Callable:
    """Decorator that invokes the decorated function once, at definition time.
    Useful when importing a module should trigger a registration-style call
    (guaranteeing it runs exactly once per import).

    .. admonition:: Examples

        Assume the following python file is a module in your package,
        then when you ``import package.module``, the two functions will run.

        .. code-block:: python

            from triad import run_at_def

            @run_at_def
            def register_something():
                print("registered")

            @run_at_def(a=1)
            def register_something2(a):
                print("registered", a)

    :param run_at_def_func: the function to decorate
    :param kwargs: the parameters to call this function
    """

    def _invoke(fn: Callable) -> Callable:
        # Call immediately with the captured keyword arguments, then hand
        # the original function back unchanged.
        fn(**kwargs)
        return fn

    if run_at_def_func is None:
        # Parameterized form: @run_at_def(a=1)
        return _invoke
    # Bare form: @run_at_def
    return _invoke(run_at_def_func)
|
(run_at_def_func: Optional[Callable] = None, **kwargs: Any) -> Callable
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.