diff --git a/.gitattributes b/.gitattributes index 6271c832984471ecf8d4c5a0f15f775768d56ace..705cfba6d8f7f530f3c9cd56a374ca216449e6f8 100644 --- a/.gitattributes +++ b/.gitattributes @@ -198,3 +198,4 @@ llmeval-env/lib/python3.10/site-packages/pandas/_libs/groupby.cpython-310-x86_64 llmeval-env/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/pandas/_libs/hashtable.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/nvidia/cublas/lib/libcublasLt.so.12 filter=lfs diff=lfs merge=lfs -text +llmeval-env/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/llmeval-env/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..cfd29701a6d022512849a387cf73c3adc51c0632 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6345c716b5d67adb2d3f2477c07c0b1a214a70aa7cb71101d99327aba0bfaa0 +size 4438576 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_config_module.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_config_module.py new file mode 100644 index 0000000000000000000000000000000000000000..ef0478535772c9523f743675a95a1a9fbb0998d7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_config_module.py @@ -0,0 +1,369 @@ +import contextlib + +import copy +import hashlib +import inspect +import io +import pickle +import tokenize +import unittest +import warnings +from types import FunctionType, ModuleType +from typing import Any, Dict, Optional, Set, Union +from unittest import mock + +# Types saved/loaded in configs +CONFIG_TYPES = (int, float, bool, type(None), str, list, set, tuple, dict) + + +def install_config_module(module): + """ + Converts a module-level config into a `ConfigModule()`. + + See _config_typing.pyi for instructions on how to get the converted module to typecheck. 
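+
+    A minimal usage sketch (``enable_feature`` is a hypothetical config name;
+    real config modules may differ):
+
+        import sys
+
+        from torch.utils._config_module import install_config_module
+
+        enable_feature = False  # captured into the config as "enable_feature"
+
+        install_config_module(sys.modules[__name__])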
+ """ + + class ConfigModuleInstance(ConfigModule): + _bypass_keys = set({"_is_dirty", "_hash_digest"}) + + def visit(source, dest, prefix): + """Walk the module structure and move everything to module._config""" + for key, value in list(source.__dict__.items()): + if ( + key.startswith("__") + or isinstance(value, (ModuleType, FunctionType)) + or (hasattr(value, "__module__") and value.__module__ == "typing") + ): + continue + + name = f"{prefix}{key}" + if isinstance(value, CONFIG_TYPES): + config[name] = value + default[name] = value + if dest is module: + delattr(module, key) + elif isinstance(value, type): + assert value.__module__ == module.__name__ + # a subconfig with `class Blah:` syntax + proxy = SubConfigProxy(module, f"{name}.") + visit(value, proxy, f"{name}.") + setattr(dest, key, proxy) + else: + raise AssertionError(f"Unhandled config {key}={value} ({type(value)})") + + config: Dict[str, Any] = dict() + default: Dict[str, Any] = dict() + + compile_ignored_keys = get_assignments_with_compile_ignored_comments(module) + + visit(module, module, "") + module._config = config + module._default = default + module._allowed_keys = set(config.keys()) + module._compile_ignored_keys = compile_ignored_keys + module.__class__ = ConfigModuleInstance + module._is_dirty = True + module._hash_digest = None + + +COMPILE_IGNORED_MARKER = "@compile_ignored" + + +# Gets all the keys (i.e. assignments) with a @compile_ignored comment +def get_assignments_with_compile_ignored_comments(module): + source_code = inspect.getsource(module) + assignments = set() + + # Tokenize the source code to retrieve comments + tokens = tokenize.tokenize(io.BytesIO(source_code.encode("utf-8")).readline) + current_comment = "", -1 + prev_name = "" + + for token in tokens: + if token.type == tokenize.COMMENT: + prev_name = "" + maybe_current = token.string.strip() + if COMPILE_IGNORED_MARKER in maybe_current: + assert current_comment == ( + "", + -1, + ), f"unconsumed {COMPILE_IGNORED_MARKER}" + current_comment = maybe_current, token.start[0] + elif token.type == tokenize.NAME: + # Only accept the first name token, to handle if you have + # something like foo: Bar = ... + if not prev_name: + prev_name = token.string + elif token.type == tokenize.OP and token.string == "=": + # Check if the current assignment follows a comment + # with COMPILE_IGNORED_MARKER + if ( + COMPILE_IGNORED_MARKER in current_comment[0] + and current_comment[1] == token.start[0] - 1 + ): + assignments.add(prev_name) + current_comment = "", -1 # reset + prev_name = "" + assert current_comment == ("", -1), f"unconsumed {COMPILE_IGNORED_MARKER}" + return assignments + + +class ConfigModule(ModuleType): + # NOTE: This should be kept in sync with _config_typing.pyi. + + # The default values of the configuration settings. This can be used to + # determine if the config has been changed or not. + _default: Dict[str, Any] + # The actual configuration settings. 
E.g., torch._dynamo.config.debug + # would live as "debug" in the key, and torch._inductor.config.triton.cudagraphs + # maps as "triton.cudagraphs" + _config: Dict[str, Any] + _allowed_keys: Set[str] + _bypass_keys: Set[str] + _compile_ignored_keys: Set[str] + _is_dirty: bool + _hash_digest: Optional[bytes] + + def __init__(self): + raise NotImplementedError( + f"use {__name__}.install_config_module(sys.modules[__name__])" + ) + + def __setattr__(self, name, value): + if name in self._bypass_keys: + super().__setattr__(name, value) + elif name not in self._allowed_keys: + raise AttributeError(f"{self.__name__}.{name} does not exist") + else: + self._config[name] = value + + def __getattr__(self, name): + try: + return self._config[name] + except KeyError as e: + # make hasattr() work properly + raise AttributeError(f"{self.__name__}.{name} does not exist") from e + + def __delattr__(self, name): + # must support delete because unittest.mock.patch deletes + # then recreate things + del self._config[name] + + def save_config(self) -> bytes: + """Convert config to a pickled blob""" + config = dict(self._config) + for key in config.get("_save_config_ignore", ()): + config.pop(key) + return pickle.dumps(config, protocol=2) + + def codegen_config(self) -> str: + """Convert config to Python statements that replicate current config. + This does NOT include config settings that are at default values. + """ + lines = [] + mod = self.__name__ + for k, v in self._config.items(): + if k in self._config.get("_save_config_ignore", ()): + continue + if v == self._default[k]: + continue + lines.append(f"{mod}.{k} = {v!r}") + return "\n".join(lines) + + def get_hash(self) -> bytes: + """Hashes the configs that are not compile_ignored""" + if self._is_dirty or self._hash_digest is None: + dict_to_hash = { + k: v + for k, v in self._config.items() + if k not in self._compile_ignored_keys + } + string_to_hash = repr(sorted(dict_to_hash.items())) + self._hash_digest = hashlib.md5(string_to_hash.encode("utf-8")).digest() + self._is_dirty = False + return self._hash_digest + + def to_dict(self) -> Dict[str, Any]: + warnings.warn( + "config.to_dict() has been deprecated. It may no longer change the underlying config." + " use config.shallow_copy_dict() or config.get_config_copy() instead", + DeprecationWarning, + ) + return self.shallow_copy_dict() + + def shallow_copy_dict(self) -> Dict[str, Any]: + return {**self._config} + + def load_config(self, maybe_pickled_config: Union[bytes, Dict[str, Any]]) -> None: + """Restore from a prior call to save_config() or shallow_copy_dict()""" + if not isinstance(maybe_pickled_config, dict): + config = pickle.loads(maybe_pickled_config) + else: + config = maybe_pickled_config + self._config.update(config) + + def get_config_copy(self) -> Dict[str, Any]: + return copy.deepcopy(self._config) + + def patch( + self, + arg1: Optional[Union[str, Dict[str, Any]]] = None, + arg2: Any = None, + **kwargs, + ): + """ + Decorator and/or context manager to make temporary changes to a config. + + As a decorator: + + @config.patch("name", val) + @config.patch(name1=val1, name2=val2) + @config.patch({"name1": val1, "name2", val2}) + def foo(...): + ... + + As a context manager: + + with config.patch("name", val): + ... 
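+
+        In both forms the prior values are restored on exit, even if the
+        wrapped code raises. Illustrative sketch (``verbose`` is a
+        hypothetical config key; any registered key behaves the same way):
+
+            with config.patch(verbose=True):
+                ...   # sees the patched value
+            # config.verbose is back to its prior value here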
+ """ + changes: Dict[str, Any] + if arg1 is not None: + if arg2 is not None: + assert isinstance(arg1, str) + # patch("key", True) syntax + changes = {arg1: arg2} + else: + assert isinstance(arg1, dict) + # patch({"key": True}) syntax + changes = arg1 + assert not kwargs + else: + # patch(key=True) syntax + changes = kwargs + assert arg2 is None + assert isinstance(changes, dict), f"expected `dict` got {type(changes)}" + prior: Dict[str, Any] = {} + config = self + dirty = False + + class ConfigPatch(ContextDecorator): + def __enter__(self): + assert not prior + nonlocal dirty + for key in changes.keys(): + # KeyError on invalid entry + prior[key] = config._config[key] + dirty = key not in config._compile_ignored_keys + config._config.update(changes) + config._is_dirty = dirty + + def __exit__(self, exc_type, exc_val, exc_tb): + nonlocal dirty + config._config.update(prior) + config._is_dirty = dirty + prior.clear() + + return ConfigPatch() + + def _make_closure_patcher(self, **changes): + """ + A lower-overhead version of patch() for things on the critical path. + + Usage: + + # do this off the critical path + change_fn = config.make_closure_patcher(foo=True) + + ... + + revert = change_fn() + try: + ... + finally: + revert() + + """ + config = self._config + + def change(): + prior = {k: config[k] for k in changes} + config.update(changes) + + def revert(): + config.update(prior) + + return revert + + return change + + +class ContextDecorator(contextlib.ContextDecorator): + """ + Same as contextlib.ContextDecorator, but with support for + `unittest.TestCase` + """ + + def __enter__(self): + raise NotImplementedError("NYI") + + def __exit__(self, exc_type, exc_val, exc_tb): + raise NotImplementedError("NYI") + + def __call__(self, func): + if isinstance(func, type) and issubclass(func, unittest.TestCase): + + class _TestCase(func): # type: ignore[valid-type, misc] + @classmethod + def setUpClass(cls): + self.__enter__() + try: + super().setUpClass() + except Exception: + self.__exit__(None, None, None) + raise + + @classmethod + def tearDownClass(cls): + try: + super().tearDownClass() + finally: + self.__exit__(None, None, None) + + _TestCase.__name__ = func.__name__ + _TestCase.__qualname__ = func.__qualname__ + _TestCase.__module__ = func.__module__ + + return _TestCase + + return super().__call__(func) + + +class SubConfigProxy: + """ + Shim to redirect to main config. 
+ `config.triton.cudagraphs` maps to _config["triton.cudagraphs"] + """ + + def __init__(self, config, prefix): + # `super().__setattr__` to bypass custom `__setattr__` + super().__setattr__("_config", config) + super().__setattr__("_prefix", prefix) + + def __setattr__(self, name, value): + return self._config.__setattr__(self._prefix + name, value) + + def __getattr__(self, name): + return self._config.__getattr__(self._prefix + name) + + def __delattr__(self, name): + return self._config.__delattr__(self._prefix + name) + + +def patch_object(obj, name, value): + """ + Workaround `mock.patch.object` issue with ConfigModule + """ + if isinstance(obj, ConfigModule): + return obj.patch(name, value) + return mock.patch.object(obj, name, value) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_contextlib.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_contextlib.py new file mode 100644 index 0000000000000000000000000000000000000000..c55e6961857524d705dac1d9e6baefe5d19d95c0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_contextlib.py @@ -0,0 +1,152 @@ +# Extra utilities for working with context managers that should have been +# in the standard library but are not + +import functools +import inspect +import warnings +import sys +from typing import Any, Callable, TypeVar, cast + +# Used for annotating the decorator usage of _DecoratorContextManager (e.g., +# 'no_grad' and 'enable_grad'). +# See https://mypy.readthedocs.io/en/latest/generics.html#declaring-decorators +FuncType = Callable[..., Any] +F = TypeVar('F', bound=FuncType) + + +def _wrap_generator(ctx_factory, func): + """ + Wrap each generator invocation with the context manager factory. + + The input should be a function that returns a context manager, + not a context manager itself, to handle one-shot context managers. + """ + @functools.wraps(func) + def generator_context(*args, **kwargs): + gen = func(*args, **kwargs) + + # Generators are suspended and unsuspended at `yield`, hence we + # make sure the grad mode is properly set every time the execution + # flow returns into the wrapped generator and restored when it + # returns through our `yield` to our caller (see PR #49017). + try: + # Issuing `None` to a generator fires it up + with ctx_factory(): + response = gen.send(None) + + while True: + try: + # Forward the response to our caller and get its next request + request = yield response + + except GeneratorExit: + # Inform the still active generator about its imminent closure + with ctx_factory(): + gen.close() + raise + + except BaseException: + # Propagate the exception thrown at us by the caller + with ctx_factory(): + response = gen.throw(*sys.exc_info()) + + else: + # Pass the last request to the generator and get its response + with ctx_factory(): + response = gen.send(request) + + # We let the exceptions raised above by the generator's `.throw` or + # `.send` methods bubble up to our caller, except for StopIteration + except StopIteration as e: + # The generator informed us that it is done: take whatever its + # returned value (if any) was and indicate that we're done too + # by returning it (see docs for python's return-statement). + return e.value + + return generator_context + + +def context_decorator(ctx, func): + """ + Like contextlib.ContextDecorator. + + But with the following differences: + 1. Is done by wrapping, rather than inheritance, so it works with context + managers that are implemented from C and thus cannot easily inherit from + Python classes + 2. 
Wraps generators in the intuitive way (c.f. https://bugs.python.org/issue37743) + 3. Errors out if you try to wrap a class, because it is ambiguous whether + or not you intended to wrap only the constructor + + The input argument can either be a context manager (in which case it must + be a multi-shot context manager that can be directly invoked multiple times) + or a callable that produces a context manager. + """ + assert not (callable(ctx) and hasattr(ctx, '__enter__')), ( + f"Passed in {ctx} is both callable and also a valid context manager " + "(has __enter__), making it ambiguous which interface to use. If you " + "intended to pass a context manager factory, rewrite your call as " + "context_decorator(lambda: ctx()); if you intended to pass a context " + "manager directly, rewrite your call as context_decorator(lambda: ctx)" + ) + + if not callable(ctx): + def ctx_factory(): + return ctx + else: + ctx_factory = ctx + + if inspect.isclass(func): + raise RuntimeError( + "Cannot decorate classes; it is ambiguous whether or not only the " + "constructor or all methods should have the context manager applied; " + "additionally, decorating a class at definition-site will prevent " + "use of the identifier as a conventional type. " + "To specify which methods to decorate, decorate each of them " + "individually." + ) + + if inspect.isgeneratorfunction(func): + return _wrap_generator(ctx_factory, func) + + @functools.wraps(func) + def decorate_context(*args, **kwargs): + with ctx_factory(): + return func(*args, **kwargs) + + return decorate_context + + +class _DecoratorContextManager: + """Allow a context manager to be used as a decorator.""" + + def __call__(self, orig_func: F) -> F: + if inspect.isclass(orig_func): + warnings.warn("Decorating classes is deprecated and will be disabled in " + "future versions. You should only decorate functions or methods. 
" + "To preserve the current behavior of class decoration, you can " + "directly decorate the `__init__` method and nothing else.") + func = cast(F, lambda *args, **kwargs: orig_func(*args, **kwargs)) + else: + func = orig_func + + return cast(F, context_decorator(self.clone, func)) + + def __enter__(self) -> None: + raise NotImplementedError + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + raise NotImplementedError + + def clone(self): + # override this method if your children class takes __init__ parameters + return self.__class__() + + +class _NoParamDecoratorContextManager(_DecoratorContextManager): + """Allow a context manager to be used as a decorator without parentheses.""" + + def __new__(cls, orig_func=None): + if orig_func is None: + return super().__new__(cls) + return cls()(orig_func) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_cpp_extension_versioner.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_cpp_extension_versioner.py new file mode 100644 index 0000000000000000000000000000000000000000..0c09a82413fec8ceb9c277d3e036b7f8061fc3da --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_cpp_extension_versioner.py @@ -0,0 +1,58 @@ +import collections + + +Entry = collections.namedtuple('Entry', 'version, hash') + + +def update_hash(seed, value): + # Good old boost::hash_combine + # https://www.boost.org/doc/libs/1_35_0/doc/html/boost/hash_combine_id241013.html + return seed ^ (hash(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2)) + + +def hash_source_files(hash_value, source_files): + for filename in source_files: + with open(filename) as file: + hash_value = update_hash(hash_value, file.read()) + return hash_value + + +def hash_build_arguments(hash_value, build_arguments): + for group in build_arguments: + if group: + for argument in group: + hash_value = update_hash(hash_value, argument) + return hash_value + + +class ExtensionVersioner: + def __init__(self): + self.entries = {} + + def get_version(self, name): + entry = self.entries.get(name) + return None if entry is None else entry.version + + def bump_version_if_changed(self, + name, + source_files, + build_arguments, + build_directory, + with_cuda, + is_python_module, + is_standalone): + hash_value = 0 + hash_value = hash_source_files(hash_value, source_files) + hash_value = hash_build_arguments(hash_value, build_arguments) + hash_value = update_hash(hash_value, build_directory) + hash_value = update_hash(hash_value, with_cuda) + hash_value = update_hash(hash_value, is_python_module) + hash_value = update_hash(hash_value, is_standalone) + + entry = self.entries.get(name) + if entry is None: + self.entries[name] = entry = Entry(0, hash_value) + elif hash_value != entry.hash: + self.entries[name] = entry = Entry(entry.version + 1, hash_value) + + return entry.version diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_cuda_trace.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_cuda_trace.py new file mode 100644 index 0000000000000000000000000000000000000000..18c8ba4e4a99a82b360c67325893b4b307d70fc1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_cuda_trace.py @@ -0,0 +1,99 @@ +import logging +from typing import Callable, Generic, List + +from typing_extensions import ParamSpec # Python 3.10+ + +logger = logging.getLogger(__name__) +P = ParamSpec("P") + + +class CallbackRegistry(Generic[P]): + def __init__(self, name: str): + self.name = name + self.callback_list: List[Callable[P, None]] = 
[] + + def add_callback(self, cb: Callable[P, None]) -> None: + self.callback_list.append(cb) + + def fire_callbacks(self, *args: P.args, **kwargs: P.kwargs) -> None: + for cb in self.callback_list: + try: + cb(*args, **kwargs) + except Exception as e: + logger.exception( + "Exception in callback for %s registered with CUDA trace", self.name + ) + + +CUDAEventCreationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "CUDA event creation" +) +CUDAEventDeletionCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "CUDA event deletion" +) +CUDAEventRecordCallbacks: "CallbackRegistry[int, int]" = CallbackRegistry( + "CUDA event record" +) +CUDAEventWaitCallbacks: "CallbackRegistry[int, int]" = CallbackRegistry( + "CUDA event wait" +) +CUDAMemoryAllocationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "CUDA memory allocation" +) +CUDAMemoryDeallocationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "CUDA memory deallocation" +) +CUDAStreamCreationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "CUDA stream creation" +) +CUDADeviceSynchronizationCallbacks: "CallbackRegistry[[]]" = CallbackRegistry( + "CUDA device synchronization" +) +CUDAStreamSynchronizationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "CUDA stream synchronization" +) +CUDAEventSynchronizationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( + "CUDA event synchronization" +) + + +def register_callback_for_cuda_event_creation(cb: Callable[[int], None]) -> None: + CUDAEventCreationCallbacks.add_callback(cb) + + +def register_callback_for_cuda_event_deletion(cb: Callable[[int], None]) -> None: + CUDAEventDeletionCallbacks.add_callback(cb) + + +def register_callback_for_cuda_event_record(cb: Callable[[int, int], None]) -> None: + CUDAEventRecordCallbacks.add_callback(cb) + + +def register_callback_for_cuda_event_wait(cb: Callable[[int, int], None]) -> None: + CUDAEventWaitCallbacks.add_callback(cb) + + +def register_callback_for_cuda_memory_allocation(cb: Callable[[int], None]) -> None: + CUDAMemoryAllocationCallbacks.add_callback(cb) + + +def register_callback_for_cuda_memory_deallocation(cb: Callable[[int], None]) -> None: + CUDAMemoryDeallocationCallbacks.add_callback(cb) + + +def register_callback_for_cuda_stream_creation(cb: Callable[[int], None]) -> None: + CUDAStreamCreationCallbacks.add_callback(cb) + + +def register_callback_for_cuda_device_synchronization(cb: Callable[[], None]) -> None: + CUDADeviceSynchronizationCallbacks.add_callback(cb) + + +def register_callback_for_cuda_stream_synchronization( + cb: Callable[[int], None] +) -> None: + CUDAStreamSynchronizationCallbacks.add_callback(cb) + + +def register_callback_for_cuda_event_synchronization(cb: Callable[[int], None]) -> None: + CUDAEventSynchronizationCallbacks.add_callback(cb) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_cxx_pytree.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_cxx_pytree.py new file mode 100644 index 0000000000000000000000000000000000000000..93605d3b0ba8490b1cf1892839b27a4c95b6713c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_cxx_pytree.py @@ -0,0 +1,970 @@ +""" +Contains utility functions for working with nested python data structures. + +A *pytree* is Python nested data structure. It is a tree in the sense that +nodes are Python collections (e.g., list, tuple, dict) and the leaves are +Python values. Furthermore, a pytree should not contain reference cycles. + +pytrees are useful for working with nested collections of Tensors. 
For example, +one can use `tree_map` to map a function over all Tensors inside some nested +collection of Tensors and `tree_leaves` to get a flat list of all Tensors +inside some nested collection. pytrees are helpful for implementing nested +collection support for PyTorch APIs. +""" + +import functools +import sys +import types +import warnings +from typing import ( + Any, + Callable, + Iterable, + List, + Optional, + overload, + Tuple, + Type, + TypeVar, + Union, +) + +import torch + +if torch._running_with_deploy(): # type: ignore[no-untyped-call] + raise ImportError("C++ pytree utilities do not work with torch::deploy.") + +import optree +from optree import PyTreeSpec # direct import for type annotations + +from torch.utils._pytree import KeyEntry + + +__all__ = [ + "PyTree", + "Context", + "FlattenFunc", + "UnflattenFunc", + "DumpableContext", + "ToDumpableContextFn", + "FromDumpableContextFn", + "TreeSpec", + "LeafSpec", + "keystr", + "key_get", + "register_pytree_node", + "tree_flatten", + "tree_flatten_with_path", + "tree_unflatten", + "tree_leaves", + "tree_leaves_with_path", + "tree_structure", + "tree_map", + "tree_map_with_path", + "tree_map_", + "tree_map_only", + "tree_map_only_", + "tree_all", + "tree_any", + "tree_all_only", + "tree_any_only", + "treespec_dumps", + "treespec_loads", + "treespec_pprint", +] + + +T = TypeVar("T") +S = TypeVar("S") +U = TypeVar("U") +R = TypeVar("R") + + +Context = Any +PyTree = Any +TreeSpec = PyTreeSpec +FlattenFunc = Callable[[PyTree], Tuple[List[Any], Context]] +UnflattenFunc = Callable[[Iterable[Any], Context], PyTree] +OpTreeUnflattenFunc = Callable[[Context, Iterable[Any]], PyTree] +DumpableContext = Any # Any json dumpable text +ToDumpableContextFn = Callable[[Context], DumpableContext] +FromDumpableContextFn = Callable[[DumpableContext], Context] +KeyPath = Tuple[KeyEntry, ...] +FlattenWithKeysFunc = Callable[[PyTree], Tuple[List[Tuple[KeyEntry, Any]], Any]] + + +def _reverse_args(func: UnflattenFunc) -> OpTreeUnflattenFunc: + @functools.wraps(func) + def wrapped(*args: Any, **kwargs: Any) -> Any: + return func(*reversed(args), **kwargs) + + return wrapped + + +def register_pytree_node( + cls: Type[Any], + flatten_fn: FlattenFunc, + unflatten_fn: UnflattenFunc, + *, + serialized_type_name: Optional[str] = None, + to_dumpable_context: Optional[ToDumpableContextFn] = None, + from_dumpable_context: Optional[FromDumpableContextFn] = None, + flatten_with_keys_fn: Optional[FlattenWithKeysFunc] = None, +) -> None: + """Register a container-like type as pytree node. + + Args: + cls (type): A Python type to treat as an internal pytree node. + flatten_fn (callable): A function to be used during flattening, taking an instance of + ``cls`` and returning a pair, with (1) an iterable for the children to be flattened + recursively, and (2) some hashable auxiliary data to be stored in the treespec and to be + passed to the ``unflatten_fn``. + unflatten_fn (callable): A function taking two arguments: the auxiliary data that was + returned by ``flatten_fn`` and stored in the treespec, and the unflattened children. + The function should return an instance of ``cls``. + serialized_type_name (str, optional): A keyword argument used to specify the fully + qualified name used when serializing the tree spec. + to_dumpable_context (callable, optional): An optional keyword argument to custom specify how + to convert the context of the pytree to a custom json dumpable representation. 
This is + used for json serialization, which is being used in :mod:`torch.export` right now. + from_dumpable_context (callable, optional): An optional keyword argument to custom specify + how to convert the custom json dumpable representation of the context back to the + original context. This is used for json deserialization, which is being used in + :mod:`torch.export` right now. + + Example:: + + >>> # xdoctest: +SKIP + >>> # Registry a Python type with lambda functions + >>> register_pytree_node( + ... set, + ... lambda s: (sorted(s), None, None), + ... lambda children, _: set(children), + ... ) + """ + if flatten_with_keys_fn is not None: + raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.") + + _private_register_pytree_node( + cls, + flatten_fn, + unflatten_fn, + serialized_type_name=serialized_type_name, + to_dumpable_context=to_dumpable_context, + from_dumpable_context=from_dumpable_context, + ) + + from . import _pytree as python + + python._private_register_pytree_node( + cls, + flatten_fn, + unflatten_fn, + serialized_type_name=serialized_type_name, + to_dumpable_context=to_dumpable_context, + from_dumpable_context=from_dumpable_context, + ) + + +def _register_pytree_node( + cls: Type[Any], + flatten_fn: FlattenFunc, + unflatten_fn: UnflattenFunc, + *, + serialized_type_name: Optional[str] = None, + to_dumpable_context: Optional[ToDumpableContextFn] = None, + from_dumpable_context: Optional[FromDumpableContextFn] = None, +) -> None: + """Register a container-like type as pytree node for the C++ pytree only. + + The ``namespace`` argument is used to avoid collisions that occur when different libraries + register the same Python type with different behaviors. It is recommended to add a unique prefix + to the namespace to avoid conflicts with other libraries. Namespaces can also be used to specify + the same class in different namespaces for different use cases. + + .. warning:: + For safety reasons, a ``namespace`` must be specified while registering a custom type. It is + used to isolate the behavior of flattening and unflattening a pytree node type. This is to + prevent accidental collisions between different libraries that may register the same type. + + Args: + cls (type): A Python type to treat as an internal pytree node. + flatten_fn (callable): A function to be used during flattening, taking an instance of + ``cls`` and returning a pair, with (1) an iterable for the children to be flattened + recursively, and (2) some hashable auxiliary data to be stored in the treespec and to be + passed to the ``unflatten_fn``. + unflatten_fn (callable): A function taking two arguments: the auxiliary data that was + returned by ``flatten_fn`` and stored in the treespec, and the unflattened children. + The function should return an instance of ``cls``. + serialized_type_name (str, optional): A keyword argument used to specify the fully + qualified name used when serializing the tree spec. + to_dumpable_context (callable, optional): An optional keyword argument to custom specify how + to convert the context of the pytree to a custom json dumpable representation. This is + used for json serialization, which is being used in :mod:`torch.export` right now. + from_dumpable_context (callable, optional): An optional keyword argument to custom specify + how to convert the custom json dumpable representation of the context back to the + original context. This is used for json deserialization, which is being used in + :mod:`torch.export` right now. 
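+
+    .. deprecated::
+        Calling this private helper emits a warning; use
+        :func:`register_pytree_node` instead.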
+ """ + warnings.warn( + "torch.utils._cxx_pytree._register_pytree_node is deprecated. " + "Please use torch.utils._cxx_pytree.register_pytree_node instead.", + stacklevel=2, + ) + + _private_register_pytree_node( + cls, + flatten_fn, + unflatten_fn, + serialized_type_name=serialized_type_name, + to_dumpable_context=to_dumpable_context, + from_dumpable_context=from_dumpable_context, + ) + + +def _private_register_pytree_node( + cls: Type[Any], + flatten_fn: FlattenFunc, + unflatten_fn: UnflattenFunc, + *, + serialized_type_name: Optional[str] = None, + to_dumpable_context: Optional[ToDumpableContextFn] = None, + from_dumpable_context: Optional[FromDumpableContextFn] = None, +) -> None: + """This is an internal function that is used to register a pytree node type + for the C++ pytree only. End-users should use :func:`register_pytree_node` + instead. + """ + # TODO(XuehaiPan): remove this condition when we make Python pytree out-of-box support + # PyStructSequence types + if not optree.is_structseq_class(cls): + optree.register_pytree_node( + cls, + flatten_fn, + _reverse_args(unflatten_fn), + namespace="torch", + ) + + +def tree_flatten( + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> Tuple[List[Any], TreeSpec]: + """Flatten a pytree. + + See also :func:`tree_unflatten`. + + The flattening order (i.e., the order of elements in the output list) is deterministic, + corresponding to a left-to-right depth-first tree traversal. + + >>> tree = {'b': (2, [3, 4]), 'a': 1, 'c': None, 'd': 5} + >>> tree_flatten(tree) + ([1, 2, 3, 4, None, 5], PyTreeSpec({'a': *, 'b': (*, [*, *]), 'c': *, 'd': *}, NoneIsLeaf)) + >>> tree_flatten(1) + ([1], PyTreeSpec(*, NoneIsLeaf)) + >>> tree_flatten(None) + ([None], PyTreeSpec(*, NoneIsLeaf)) + + For unordered dictionaries, :class:`dict` and :class:`collections.defaultdict`, the order is + dependent on the **sorted** keys in the dictionary. Please use :class:`collections.OrderedDict` + if you want to keep the keys in the insertion order. + + >>> from collections import OrderedDict + >>> tree = OrderedDict([('b', (2, [3, 4])), ('a', 1), ('c', None), ('d', 5)]) + >>> tree_flatten(tree) + ([2, 3, 4, 1, None, 5], PyTreeSpec(OrderedDict([('b', (*, [*, *])), ('a', *), ('c', *), ('d', *)]), NoneIsLeaf)) + + Args: + tree (pytree): A pytree to flatten. + is_leaf (callable, optional): An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + + Returns: + A pair ``(leaves, treespec)`` where the first element is a list of leaf values and the + second element is a treespec representing the structure of the pytree. + """ + return optree.tree_flatten( # type: ignore[return-value] + tree, + is_leaf=is_leaf, + none_is_leaf=True, + namespace="torch", + ) + + +def tree_unflatten(leaves: Iterable[Any], treespec: TreeSpec) -> PyTree: + """Reconstruct a pytree from the treespec and the leaves. + + The inverse of :func:`tree_flatten`. + + >>> tree = {'b': (2, [3, 4]), 'a': 1, 'c': None, 'd': 5} + >>> leaves, treespec = tree_flatten(tree) + >>> tree == tree_unflatten(leaves, treespec) + True + + Args: + leaves (iterable): The list of leaves to use for reconstruction. 
The list must match the + number of leaves of the treespec. + treespec (TreeSpec): The treespec to reconstruct. + + Returns: + The reconstructed pytree, containing the ``leaves`` placed in the structure described by + ``treespec``. + """ + if not isinstance(treespec, TreeSpec): + raise TypeError( + f"tree_unflatten(values, spec): Expected `spec` to be instance of " + f"TreeSpec but got item of type {type(treespec)}." + ) + return optree.tree_unflatten(treespec, leaves) # type: ignore[arg-type] + + +def tree_leaves( + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> List[Any]: + """Get the leaves of a pytree. + + See also :func:`tree_flatten`. + + >>> tree = {'b': (2, [3, 4]), 'a': 1, 'c': None, 'd': 5} + >>> tree_leaves(tree) + [1, 2, 3, 4, None, 5] + >>> tree_leaves(1) + [1] + >>> tree_leaves(None) + [None] + + Args: + tree (pytree): A pytree to flatten. + is_leaf (callable, optional): An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + + Returns: + A list of leaf values. + """ + return optree.tree_leaves( + tree, + is_leaf=is_leaf, + none_is_leaf=True, + namespace="torch", + ) + + +def tree_structure( + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> TreeSpec: + """Get the treespec for a pytree. + + See also :func:`tree_flatten`. + + >>> tree = {'b': (2, [3, 4]), 'a': 1, 'c': None, 'd': 5} + >>> tree_structure(tree) + PyTreeSpec({'a': *, 'b': (*, [*, *]), 'c': *, 'd': *}, NoneIsLeaf) + >>> tree_structure(1) + PyTreeSpec(*, NoneIsLeaf) + >>> tree_structure(None) + PyTreeSpec(*, NoneIsLeaf) + + Args: + tree (pytree): A pytree to flatten. + is_leaf (callable, optional): An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + + Returns: + A treespec object representing the structure of the pytree. + """ + return optree.tree_structure( # type: ignore[return-value] + tree, + is_leaf=is_leaf, + none_is_leaf=True, + namespace="torch", + ) + + +def tree_map( + func: Callable[..., Any], + tree: PyTree, + *rests: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + """Map a multi-input function over pytree args to produce a new pytree. + + See also :func:`tree_map_`. + + >>> tree_map(lambda x: x + 1, {'x': 7, 'y': (42, 64)}) + {'x': 8, 'y': (43, 65)} + >>> tree_map(lambda x: x is None, {'x': 7, 'y': (42, 64), 'z': None}) + {'x': False, 'y': (False, False), 'z': True} + + If multiple inputs are given, the structure of the tree is taken from the first input; + subsequent inputs need only have ``tree`` as a prefix: + + >>> tree_map(lambda x, y: [x] + y, [5, 6], [[7, 9], [1, 2]]) + [[5, 7, 9], [6, 1, 2]] + + Args: + func (callable): A function that takes ``1 + len(rests)`` arguments, to be applied at the + corresponding leaves of the pytrees. 
+ tree (pytree): A pytree to be mapped over, with each leaf providing the first positional + argument to function ``func``. + rests (tuple of pytree): A tuple of pytrees, each of which has the same structure as + ``tree`` or has ``tree`` as a prefix. + is_leaf (callable, optional): An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + + Returns: + A new pytree with the same structure as ``tree`` but with the value at each leaf given by + ``func(x, *xs)`` where ``x`` is the value at the corresponding leaf in ``tree`` and ``xs`` + is the tuple of values at corresponding nodes in ``rests``. + """ + return optree.tree_map( + func, + tree, + *rests, + is_leaf=is_leaf, + none_is_leaf=True, + namespace="torch", + ) + + +def tree_map_( + func: Callable[..., Any], + tree: PyTree, + *rests: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + """Like :func:`tree_map`, but do an inplace call on each leaf and return the original tree. + + See also :func:`tree_map`. + + Args: + func (callable): A function that takes ``1 + len(rests)`` arguments, to be applied at the + corresponding leaves of the pytrees. + tree (pytree): A pytree to be mapped over, with each leaf providing the first positional + argument to function ``func``. + rests (tuple of pytree): A tuple of pytrees, each of which has the same structure as + ``tree`` or has ``tree`` as a prefix. + is_leaf (callable, optional): An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + + Returns: + The original ``tree`` with the value at each leaf is given by the side-effect of function + ``func(x, *xs)`` (not the return value) where ``x`` is the value at the corresponding leaf + in ``tree`` and ``xs`` is the tuple of values at values at corresponding nodes in ``rests``. + """ + return optree.tree_map_( + func, + tree, + *rests, + is_leaf=is_leaf, + none_is_leaf=True, + namespace="torch", + ) + + +Type2 = Tuple[Type[T], Type[S]] +Type3 = Tuple[Type[T], Type[S], Type[U]] +if sys.version_info >= (3, 10): + TypeAny = Union[Type[Any], Tuple[Type[Any], ...], types.UnionType] +else: + TypeAny = Union[Type[Any], Tuple[Type[Any], ...]] + +Fn2 = Callable[[Union[T, S]], R] +Fn3 = Callable[[Union[T, S, U]], R] +Fn = Callable[[T], R] +FnAny = Callable[[Any], R] + +MapOnlyFn = Callable[[T], Callable[[Any], Any]] + + +# These specializations help with type inference on the lambda passed to this +# function +@overload +def map_only(__type_or_types_or_pred: Type2[T, S]) -> MapOnlyFn[Fn2[T, S, Any]]: + ... + + +@overload +def map_only(__type_or_types_or_pred: Type3[T, S, U]) -> MapOnlyFn[Fn3[T, S, U, Any]]: + ... + + +@overload +def map_only(__type_or_types_or_pred: Type[T]) -> MapOnlyFn[Fn[T, Any]]: + ... 
+ + +# This specialization is needed for the implementations below that call +@overload +def map_only(__type_or_types_or_pred: TypeAny) -> MapOnlyFn[FnAny[Any]]: + ... + + +@overload +def map_only(__type_or_types_or_pred: Callable[[Any], bool]) -> MapOnlyFn[FnAny[Any]]: + ... + + +def map_only( + __type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]] +) -> MapOnlyFn[FnAny[Any]]: + """ + Suppose you are writing a tree_map over tensors, leaving everything + else unchanged. Ordinarily you would have to write: + + def go(t): + if isinstance(t, Tensor): + return ... + else: + return t + + With this function, you only need to write: + + @map_only(Tensor) + def go(t): + return ... + + You can also directly use 'tree_map_only' + """ + if isinstance(__type_or_types_or_pred, (type, tuple)) or ( + sys.version_info >= (3, 10) + and isinstance(__type_or_types_or_pred, types.UnionType) + ): + + def pred(x: Any) -> bool: + return isinstance(x, __type_or_types_or_pred) # type: ignore[arg-type] + + elif callable(__type_or_types_or_pred): + pred = __type_or_types_or_pred # type: ignore[assignment] + else: + raise TypeError("Argument must be a type, a tuple of types, or a callable.") + + def wrapper(func: Callable[[T], Any]) -> Callable[[Any], Any]: + @functools.wraps(func) + def wrapped(x: T) -> Any: + if pred(x): + return func(x) + return x + + return wrapped + + return wrapper + + +@overload +def tree_map_only( + __type_or_types_or_pred: Type[T], + func: Fn[T, Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +@overload +def tree_map_only( + __type_or_types_or_pred: Type2[T, S], + func: Fn2[T, S, Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +@overload +def tree_map_only( + __type_or_types_or_pred: Type3[T, S, U], + func: Fn3[T, S, U, Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +@overload +def tree_map_only( + __type_or_types_or_pred: Callable[[Any], bool], + func: FnAny[Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +def tree_map_only( + __type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]], + func: FnAny[Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + return tree_map(map_only(__type_or_types_or_pred)(func), tree, is_leaf=is_leaf) + + +@overload +def tree_map_only_( + __type_or_types_or_pred: Type[T], + func: Fn[T, Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +@overload +def tree_map_only_( + __type_or_types_or_pred: Type2[T, S], + func: Fn2[T, S, Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +@overload +def tree_map_only_( + __type_or_types_or_pred: Type3[T, S, U], + func: Fn3[T, S, U, Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... + + +@overload +def tree_map_only_( + __type_or_types_or_pred: Callable[[Any], bool], + func: FnAny[Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + ... 
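+
+# Illustrative example (``tree`` is a placeholder pytree): mutate every Tensor
+# leaf in place, leave all other leaves untouched, and get back the original
+# tree object:
+#
+#     tree_map_only_(torch.Tensor, lambda t: t.zero_(), tree)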
+ + +def tree_map_only_( + __type_or_types_or_pred: Union[TypeAny, Callable[[Any], bool]], + func: FnAny[Any], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + return tree_map_(map_only(__type_or_types_or_pred)(func), tree, is_leaf=is_leaf) + + +def tree_all( + pred: Callable[[Any], bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + flat_args = tree_leaves(tree, is_leaf=is_leaf) + return all(map(pred, flat_args)) + + +def tree_any( + pred: Callable[[Any], bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + flat_args = tree_leaves(tree, is_leaf=is_leaf) + return any(map(pred, flat_args)) + + +@overload +def tree_all_only( + __type_or_types: Type[T], + pred: Fn[T, bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + ... + + +@overload +def tree_all_only( + __type_or_types: Type2[T, S], + pred: Fn2[T, S, bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + ... + + +@overload +def tree_all_only( + __type_or_types: Type3[T, S, U], + pred: Fn3[T, S, U, bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + ... + + +def tree_all_only( + __type_or_types: TypeAny, + pred: FnAny[bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + flat_args = tree_leaves(tree, is_leaf=is_leaf) + return all(pred(x) for x in flat_args if isinstance(x, __type_or_types)) + + +@overload +def tree_any_only( + __type_or_types: Type[T], + pred: Fn[T, bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + ... + + +@overload +def tree_any_only( + __type_or_types: Type2[T, S], + pred: Fn2[T, S, bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + ... + + +@overload +def tree_any_only( + __type_or_types: Type3[T, S, U], + pred: Fn3[T, S, U, bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + ... + + +def tree_any_only( + __type_or_types: TypeAny, + pred: FnAny[bool], + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> bool: + flat_args = tree_leaves(tree, is_leaf=is_leaf) + return any(pred(x) for x in flat_args if isinstance(x, __type_or_types)) + + +def broadcast_prefix( + prefix_tree: PyTree, + full_tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> List[Any]: + """Return a list of broadcasted leaves in ``prefix_tree`` to match the number of leaves in ``full_tree``. + + If a ``prefix_tree`` is a prefix of a ``full_tree``, this means the ``full_tree`` can be + constructed by replacing the leaves of ``prefix_tree`` with appropriate **subtrees**. + + This function returns a list of leaves with the same size as ``full_tree``. The leaves are + replicated from ``prefix_tree``. The number of replicas is determined by the corresponding + subtree in ``full_tree``. + + >>> broadcast_prefix(1, [1, 2, 3]) + [1, 1, 1] + >>> broadcast_prefix([1, 2, 3], [1, 2, 3]) + [1, 2, 3] + >>> broadcast_prefix([1, 2, 3], [1, 2, 3, 4]) + Traceback (most recent call last): + ... + ValueError: list arity mismatch; expected: 3, got: 4; list: [1, 2, 3, 4]. + >>> broadcast_prefix([1, 2, 3], [1, 2, (3, 4)]) + [1, 2, 3, 3] + >>> broadcast_prefix([1, 2, 3], [1, 2, {'a': 3, 'b': 4, 'c': (None, 5)}]) + [1, 2, 3, 3, 3, 3] + + Args: + prefix_tree (pytree): A pytree with the same structure as a prefix of ``full_tree``. 
+ full_tree (pytree): A pytree with the same structure as a suffix of ``prefix_tree``. + is_leaf (callable, optional): An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + + Returns: + A list of leaves in ``prefix_tree`` broadcasted to match the number of leaves in ``full_tree``. + """ + return optree.broadcast_prefix( + prefix_tree, + full_tree, + is_leaf=is_leaf, + none_is_leaf=True, + namespace="torch", + ) + + +# Broadcasts a pytree to the provided TreeSpec and returns the flattened +# values. If this is not possible, then this function returns None. +# +# For example, given pytree=0 and spec=TreeSpec(list, None, [LeafSpec(), LeafSpec()]), +# would return [0, 0]. This is useful for part of the vmap implementation: +# a user can pass in vmap(fn, in_dims)(*inputs). `in_dims` should be +# broadcastable to the tree structure of `inputs` and we use +# _broadcast_to_and_flatten to check this. +def _broadcast_to_and_flatten( + tree: PyTree, + treespec: TreeSpec, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> Optional[List[Any]]: + assert isinstance(treespec, TreeSpec) + full_tree = tree_unflatten([0] * treespec.num_leaves, treespec) + try: + return broadcast_prefix(tree, full_tree, is_leaf=is_leaf) + except ValueError: + return None + + +def treespec_dumps(treespec: TreeSpec, protocol: Optional[int] = None) -> str: + """Serialize a treespec to a JSON string.""" + if not isinstance(treespec, TreeSpec): + raise TypeError( + f"treespec_dumps(spec): Expected `spec` to be instance of " + f"TreeSpec but got item of type {type(treespec)}." + ) + from ._pytree import ( + tree_structure as _tree_structure, + treespec_dumps as _treespec_dumps, + ) + + orig_treespec = _tree_structure(tree_unflatten([0] * treespec.num_leaves, treespec)) + return _treespec_dumps(orig_treespec, protocol=protocol) + + +def treespec_loads(serialized: str) -> TreeSpec: + """Deserialize a treespec from a JSON string.""" + from ._pytree import ( + tree_unflatten as _tree_unflatten, + treespec_loads as _treespec_loads, + ) + + orig_treespec = _treespec_loads(serialized) + dummy_tree = _tree_unflatten([0] * orig_treespec.num_leaves, orig_treespec) + treespec = tree_structure(dummy_tree) + return treespec + + +class _DummyLeaf: + def __repr__(self) -> str: + return "*" + + +def treespec_pprint(treespec: TreeSpec) -> str: + dummy_tree = tree_unflatten( + [_DummyLeaf() for _ in range(treespec.num_leaves)], + treespec, + ) + return repr(dummy_tree) + + +class LeafSpecMeta(type(TreeSpec)): # type: ignore[misc] + def __instancecheck__(self, instance: object) -> bool: + return isinstance(instance, TreeSpec) and instance.is_leaf() + + +class LeafSpec(TreeSpec, metaclass=LeafSpecMeta): + def __new__(cls) -> "LeafSpec": + return optree.treespec_leaf(none_is_leaf=True) # type: ignore[return-value] + + +def tree_flatten_with_path( + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> Tuple[List[Tuple[KeyPath, Any]], TreeSpec]: + """Flattens a pytree like :func:`tree_flatten`, but also returns each leaf's key path. + + Args: + tree: a pytree to flatten. 
If it contains a custom type, that type must be + registered with an appropriate `tree_flatten_with_path_fn` when registered + with :func:`register_pytree_node`. + is_leaf: An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + Returns: + A tuple where the first element is a list of (key path, leaf) pairs, and the + second element is a :class:`TreeSpec` representing the structure of the flattened + tree. + """ + raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.") + + +def tree_leaves_with_path( + tree: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> List[Tuple[KeyPath, Any]]: + """Gets the leaves of a pytree like ``tree_leaves`` and returns each leaf's key path. + + Args: + tree: a pytree. If it contains a custom type, that type must be + registered with an appropriate `tree_flatten_with_path_fn` when registered + with :func:`register_pytree_node`. + is_leaf: An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + Returns: + A list of (key path, leaf) pairs. + """ + raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.") + + +def tree_map_with_path( + func: Callable[..., Any], + tree: PyTree, + *rests: PyTree, + is_leaf: Optional[Callable[[PyTree], bool]] = None, +) -> PyTree: + """Like :func:`tree_map`, but the provided callable takes an additional key path argument. + + Args: + func: A function that takes ``2 + len(rests)`` arguments, to be applied at the + corresponding leaves of the pytrees. The first positional argument + to ``func`` is the key path of the leaf in question. The second + positional argument is the value of the leaf. + tree: A pytree to be mapped over, with each leaf providing the first positional + argument to function ``func``. + rests: A tuple of pytrees, each of which has the same structure as + ``tree`` or has ``tree`` as a prefix. + is_leaf: An extra leaf predicate function that will be called at each + flattening step. The function should have a single argument with signature + ``is_leaf(node) -> bool``. If it returns :data:`True`, the whole subtree being treated + as a leaf. Otherwise, the default pytree registry will be used to determine a node is a + leaf or not. If the function is not specified, the default pytree registry will be used. + + Returns + A new pytree with the same structure as ``tree`` but with the value at each leaf given by + ``func(keypath, x, *xs)`` where ``keypath`` is the key path at the + corresponding leaf in ``tree``, ``x`` is the value at that leaf, and + ``xs`` is the tuple of values at corresponding nodes in ``rests``. 
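+
+    .. note::
+        Key paths are not yet supported in ``cxx_pytree``; calling this
+        function currently raises ``NotImplementedError``.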
+ """ + raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.") + + +def keystr(kp: KeyPath) -> str: + """Given a key path, return a pretty-printed representation.""" + raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.") + + +def key_get(obj: Any, kp: KeyPath) -> Any: + """Given an object and a key path, return the value at the key path.""" + raise NotImplementedError("KeyPaths are not yet supported in cxx_pytree.") diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_import_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_import_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b7756a6fa62f94d76ad0149a93511aa56c4468d4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_import_utils.py @@ -0,0 +1,42 @@ +import functools +import importlib.util + +import torch + + +def _check_module_exists(name: str) -> bool: + r"""Returns if a top-level module with :attr:`name` exists *without** + importing it. This is generally safer than try-catch block around a + `import X`. It avoids third party libraries breaking assumptions of some of + our tests, e.g., setting multiprocessing start method when imported + (see librosa/#747, torchvision/#544). + """ + try: + spec = importlib.util.find_spec(name) + return spec is not None + except ImportError: + return False + + +@functools.lru_cache +def dill_available(): + return ( + _check_module_exists("dill") + # dill fails to import under torchdeploy + and not torch._running_with_deploy() + ) + + +@functools.lru_cache +def import_dill(): + if not dill_available(): + return None + + import dill + + # XXX: By default, dill writes the Pickler dispatch table to inject its + # own logic there. This globally affects the behavior of the standard library + # pickler for any user who transitively depends on this module! + # Undo this extension to avoid altering the behavior of the pickler globally. + dill.extend(use_dill=False) + return dill diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_stats.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_stats.py new file mode 100644 index 0000000000000000000000000000000000000000..5b33f7b8cb025cfc2d0f249fc45a6f25bb1eea26 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_stats.py @@ -0,0 +1,21 @@ +# NOTE! PLEASE KEEP THIS FILE *FREE* OF TORCH DEPS! IT SHOULD BE IMPORTABLE ANYWHERE. +# IF YOU FEEL AN OVERWHELMING URGE TO ADD A TORCH DEP, MAKE A TRAMPOLINE FILE A LA torch._dynamo.utils +# AND SCRUB AWAY TORCH NOTIONS THERE. 
+import collections +import functools +from typing import OrderedDict + +simple_call_counter: OrderedDict[str, int] = collections.OrderedDict() + +def count_label(label): + prev = simple_call_counter.setdefault(label, 0) + simple_call_counter[label] = prev + 1 + +def count(fn): + @functools.wraps(fn) + def wrapper(*args, **kwargs): + if fn.__qualname__ not in simple_call_counter: + simple_call_counter[fn.__qualname__] = 0 + simple_call_counter[fn.__qualname__] = simple_call_counter[fn.__qualname__] + 1 + return fn(*args, **kwargs) + return wrapper diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/_traceback.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/_traceback.py new file mode 100644 index 0000000000000000000000000000000000000000..fa73b9f41cd66fb21bdb30bdcc100ca9cd648816 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/_traceback.py @@ -0,0 +1,254 @@ +from types import TracebackType +from typing import List, Optional +import tempfile +import traceback +import contextlib +import inspect +import os.path + +# This file contains utilities for ensuring dynamically compile()'d +# code fragments display their line numbers in backtraces. +# +# The constraints: +# +# - We don't have control over the user exception printer (in particular, +# we cannot assume the linecache trick will work, c.f. +# https://stackoverflow.com/q/50515651/23845 ) +# +# - We don't want to create temporary files every time we compile() +# some code; file creation should happen lazily only at exception +# time. Arguably, you *should* be willing to write out your +# generated Python code to file system, but in some situations +# (esp. library code) it would violate user expectation to write +# to the file system, so we try to avoid it. In particular, we'd +# like to keep the files around, so users can open up the files +# mentioned in the trace; if the file is invisible, we want to +# avoid clogging up the filesystem. +# +# If this is not a constraint for you, there is a substantially simpler +# way to implement the functionality in this PR: instead of using +# eval/exec directly, just always write a Python file to filesystem +# and compile that. +# +# - You have control over a context where the compiled code will get +# executed, so that we can interpose while the stack is unwinding +# (otherwise, we have no way to interpose on the exception printing +# process.) +# +# There are two things you have to do to make use of the utilities here: +# +# - When you compile your source code, you must save its string source +# in its f_globals under the magic name "__compile_source__" +# +# - Before running the compiled code, enter the +# report_compile_source_on_error() context manager. + +@contextlib.contextmanager +def report_compile_source_on_error(): + try: + yield + except Exception as exc: + tb = exc.__traceback__ + + # Walk the traceback, looking for frames that have + # source attached + stack = [] + while tb is not None: + filename = tb.tb_frame.f_code.co_filename + source = tb.tb_frame.f_globals.get("__compile_source__") + + if filename == "" and source is not None: + # What black magic are we doing here? Intuitively, what + # we would like to do is overwrite the co_filename on any + # frames that were generated from exec/eval so that they + # point to a temporary file that has the actual line + # information, so Python's default error printer can print + # useful line information on it. + # + # Writing out the temporary file is easy. 
But overwriting + # co_filename is not! You can't modify the code object + # associated with a frame. You can, however, reconstruct + # a traceback with entirely new frames from scratch, so that's + # what we do. But there's another problem, which is how to + # make the frame? + # + # The black magic is we make a frankenstein frame and code + # object which resembles the original frame/code enough so + # that it will print properly under traceback and the default + # error printer, but IT IS NOT THE ORIGINAL FRAME (you + # couldn't, e.g., execute its code with different variables + # and expect it to work.) + + # Don't delete the temporary file so the user can inspect it + # TODO: This creates a temporary file for every frame, but we + # technically only need one per distinct __compile_source__ + with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix=".py") as f: + f.write(source) + # Create a frame. Python doesn't let you construct + # FrameType directly, so just make one with compile + frame = tb.tb_frame + code = compile('__inspect_currentframe()', f.name, 'eval') + code = code.replace(co_name=frame.f_code.co_name) + # Python 3.11 only + if hasattr(frame.f_code, 'co_linetable'): + # We can't copy ALL of the metadata over, because you + # can cause Python to segfault this way. What exactly + # do we need? We need enough information for + # traceback to be able to print the exception + # correctly. Code reading Lib/traceback.py reveals + # that traceback calls code.co_positions() in order to + # get the augmented line/col numbers. Objects/codeobject.c, + # specifically _PyCode_InitAddressRange, reveals that + # this iterator is initialized from co_linetable and + # co_firstfileno. So copy these we must! + code = code.replace( # type: ignore[call-arg] + co_linetable=frame.f_code.co_linetable, # type: ignore[attr-defined] + co_firstlineno=frame.f_code.co_firstlineno, # type: ignore[attr-defined] + ) + fake_frame = eval( + code, + frame.f_globals, + { + **frame.f_locals, + '__inspect_currentframe': inspect.currentframe + } + ) + fake_tb = TracebackType( + None, fake_frame, tb.tb_lasti, tb.tb_lineno + ) + stack.append(fake_tb) + else: + stack.append(tb) + + tb = tb.tb_next + + # Reconstruct the linked list + tb_next = None + for tb in reversed(stack): + tb.tb_next = tb_next + tb_next = tb + + raise exc.with_traceback(tb_next) # noqa: TRY200 + +def shorten_filename(fn, *, base=None): + """Shorten a source filepath, with the assumption that torch/ subdirectories don't need to be shown to user.""" + if base is None: + base = os.path.dirname(os.path.dirname(__file__)) + # Truncate torch/foo.py to foo.py + try: + prefix = os.path.commonpath([fn, base]) + except ValueError: + return fn + else: + return fn[len(prefix) + 1:] + +def format_frame(frame, *, base=None, line=False): + """ + Format a FrameSummary in a short way, without printing full absolute path or code. + + The idea is the result fits on a single line. 
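+
+    Example (illustrative; the exact output depends on where the file lives)::
+
+        >>> # xdoctest: +SKIP("output is environment dependent")
+        >>> frame = traceback.extract_stack()[-1]
+        >>> format_frame(frame, line=True)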
+ """ + extra_line = "" + if line: + extra_line = f"{frame.line} # " + return f"{extra_line}{shorten_filename(frame.filename, base=base)}:{frame.lineno} in {frame.name}" + +def format_traceback_short(tb): + """Format a TracebackType in a short way, printing only the inner-most frame.""" + return format_frame(traceback.extract_tb(tb)[-1]) + +class CapturedTraceback: + __slots__ = ['tb', 'skip'] + + def __init__(self, tb, skip=0): + self.tb = tb + self.skip = skip + + def cleanup(self): + self.tb = None + + def summary(self): + import torch._C._profiler + + if self.tb is None: + # TODO: Maybe indicate that the traceback was elided? + return traceback.StackSummary() + + return _extract_symbolized_tb( + torch._C._profiler.symbolize_tracebacks([self.tb])[0], + self.skip + ) + + def __getstate__(self): + return (None, { + 'tb': None, # TB is not pickleable + 'skip': self.skip, + }) + + @staticmethod + def extract(*, script=False, cpp=False, skip=0): + """ + Like traceback.extract_stack(), but faster (approximately 20x faster); it + is fast enough that you can unconditionally log stacks this way as part of + normal execution. It returns a torch._C._profiler.CapturedTraceback + object that must be formatted specially with format_captured_tb. + + By default, this only reports Python backtraces (like extract_stack). You + can set the script/cpp kwargs to also turn on TorchScript/C++ trace + reporting. + """ + import torch._C._profiler + + if script or cpp: + assert skip == 0, "skip with script/cpp NYI" + + return CapturedTraceback( + torch._C._profiler.gather_traceback(python=True, script=script, cpp=cpp), + # Elide extract() frame if we don't have script/cpp frames. If + # we do have those frames, it doesn't work so force zero. + 0 if script or cpp else skip + 1 + ) + + def format(self): + """ + Formats a single torch._C._profiler.CapturedTraceback into a list of + strings equivalent to the output of traceback.format_list. Note that if + pass it CapturedTraceback with C++ traces, it is better not to use this + function and use the batch formatting API format_captured_tbs to amortize + the cost of symbolization + """ + return traceback.format_list(self.summary()) + + @staticmethod + def format_all(tbs): + """ + Bulk version of CapturedTraceback.format. Returns a list of list of strings. + """ + import torch._C._profiler + + # Directly populate tracebacks that already have cached summaries + rs: List[Optional[List[str]]] = [] + delayed_idxs = [] + for i, tb in enumerate(tbs): + if tb.tb is None: + rs.append([]) + else: + rs.append(None) + delayed_idxs.append(i) + + stbs = torch._C._profiler.symbolize_tracebacks([tbs[i].tb for i in delayed_idxs]) + for i, stb in zip(delayed_idxs, stbs): + rs[i] = traceback.format_list(tbs[i].summary()) + + return rs + + +def _extract_symbolized_tb(tb, skip): + """ + Given a symbolized traceback from symbolize_tracebacks, return a StackSummary object of + pre-processed stack trace entries. 
+ """ + stack = traceback.StackSummary() + for f in reversed(tb[skip:]): + stack.append(traceback.FrameSummary(f['filename'], f['line'], f['name'])) + return stack diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/backend_registration.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/backend_registration.py new file mode 100644 index 0000000000000000000000000000000000000000..aee7964c42589acd01ade950caede06671df5861 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/backend_registration.py @@ -0,0 +1,339 @@ +import torch +from torch._C import _rename_privateuse1_backend, _get_privateuse1_backend_name +from typing import List, Optional, Union + +__all__ = ["rename_privateuse1_backend", "generate_methods_for_privateuse1_backend"] + +# TODO: Should use `torch._C._get_privateuse1_backend_name()` to get +# renamed-backend name for `privateuse1`, but the func will cause an +# error with torch.jit.script, so we use the global variable named +# `_privateuse1_backend_name`. +_privateuse1_backend_name = "privateuseone" + +def rename_privateuse1_backend(backend_name: str) -> None: + r""" + Rename the privateuse1 backend device to make it more convenient to use as a device name within PyTorch APIs. + + The steps are: + + (1) (In C++) implement kernels for various torch operations, and register them + to the PrivateUse1 dispatch key. + (2) (In python) call torch.utils.rename_privateuse1_backend("foo") + + You can now use "foo" as an ordinary device string in python. + + Note: this API can only be called once per process. Attempting to change + the external backend after it's already been set will result in an error. + + Note(AMP): If you want to support AMP on your device, you can register a custom backend module. + The backend must register a custom backend module with ``torch._register_device_module("foo", BackendModule)``. + BackendModule needs to have the following API's: + + (1) ``get_amp_supported_dtype() -> List[torch.dtype]`` + get the supported dtypes on your "foo" device in AMP, maybe the "foo" device supports one more dtype. + + (2) ``is_autocast_enabled() -> bool`` + check the AMP is enabled or not on your "foo" device. + + (3) ``get_autocast_dtype() -> torch.dtype`` + get the supported dtype on your "foo" device in AMP, which is set by ``set_autocast_dtype`` or the + default dtype, and the default dtype is ``torch.float16``. + + (4) ``set_autocast_enabled(bool) -> None`` + enable the AMP or not on your "foo" device. + + (5) ``set_autocast_dtype(dtype) -> None`` + set the supported dtype on your "foo" device in AMP, and the dtype be contained in the dtypes got + from ``get_amp_supported_dtype``. + + Note(random): If you want to support to set seed for your device, BackendModule needs to have the following API's: + + (1) ``_is_in_bad_fork() -> bool`` + Return ``True`` if now it is in bad_fork, else return ``False``. + + (2) ``manual_seed_all(seed int) -> None`` + Sets the seed for generating random numbers for your devices. + + (3) ``device_count() -> int`` + Returns the number of "foo"s available. + + (4) ``get_rng_state(device: Union[int, str, torch.device] = 'foo') -> Tensor`` + Returns a list of ByteTensor representing the random number states of all devices. + + (5) ``set_rng_state(new_state: Tensor, device: Union[int, str, torch.device] = 'foo') -> None`` + Sets the random number generator state of the specified "foo" device. 
+ + And there are some common funcs: + + (1) ``is_available() -> bool`` + Returns a bool indicating if "foo" is currently available. + + (2) ``current_device() -> int`` + Returns the index of a currently selected device. + + For more details, see https://pytorch.org/tutorials/advanced/extend_dispatcher.html#get-a-dispatch-key-for-your-backend + For an existing example, see https://github.com/bdhirsh/pytorch_open_registration_example + + Example:: + + >>> # xdoctest: +SKIP("failing") + >>> torch.utils.rename_privateuse1_backend("foo") + # This will work, assuming that you've implemented the right C++ kernels + # to implement torch.ones. + >>> a = torch.ones(2, device="foo") + + """ + _rename_privateuse1_backend(backend_name) + global _privateuse1_backend_name + _privateuse1_backend_name = backend_name + +def _check_register_once(module, attr): + if hasattr(module, attr): + raise RuntimeError(f"The custom device module of {module} has already been registered with {attr}") + + +def _normalization_device(custom_backend_name: str, device: Optional[Union[int, str, torch.device]] = None) -> int: + def _get_current_device_index(): + _get_device_index = "current_device" + if hasattr(torch, custom_backend_name) and \ + hasattr(getattr(torch, custom_backend_name), _get_device_index): + return getattr(getattr(torch, custom_backend_name), _get_device_index)() + else: + # The default device index is 0. + return 0 + + if device is None: + return _get_current_device_index() + # if isinstance(device, str), this means that the parameter passed in is in the string format "foo:0" + # convert str object to torch.device object, and then process it uniformly + elif isinstance(device, str): + device = torch.device(device) + + # variable devcie can only be torch.device type or int type + if isinstance(device, torch.device): + if device.type != custom_backend_name: + raise RuntimeError(f"Invalid device, must be {custom_backend_name} device") + elif device.index is None: + device_idx = _get_current_device_index() + else: + device_idx = device.index + # if isinstance(device, int), we can take the index number directly + else: + device_idx = device + return device_idx + + +def _generate_tensor_methods_for_privateuse1_backend(custom_backend_name: str) -> None: + @property # type: ignore[misc] + def wrap_tensor_backend(self: torch.Tensor) -> bool: + return self.device.type == custom_backend_name + + _check_register_once(torch.Tensor, f'is_{custom_backend_name}') + setattr(torch.Tensor, f'is_{custom_backend_name}', wrap_tensor_backend) + + def wrap_tensor_to(self: torch.Tensor, device: Optional[Union[int, torch.device]] = None, non_blocking=False, + **kwargs) -> torch.Tensor: + r"""Perform Tensor device conversion. Call the to operator implementation. + + .. note:: + If the ``self`` Tensor already + has the correct :class:`torch.device`, then ``self`` is returned. + Otherwise, the returned tensor is a copy of ``self`` with the desired :class:`torch.device`. + + Args: + device (int, optional): if specified, all parameters will be copied to that device + non_blocking (bool): If ``True`` and the source is in pinned memory, + the copy will be asynchronous with respect to the host. Otherwise, + the argument has no effect. + **kwargs (dict): For compatibility, may contain the key ``memory_format`` argument. 
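+
+            Example (illustrative sketch; assumes the backend has been renamed to
+            "foo" and that real kernels are registered for it)::
+
+                >>> # xdoctest: +SKIP("requires a registered custom backend")
+                >>> x = torch.randn(2)
+                >>> y = x.foo(0, non_blocking=False)  # equivalent to x.to("foo:0")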
+ """ + device_idx = _normalization_device(custom_backend_name, device) + return self.to(device=torch.device(f'{custom_backend_name}:{device_idx}'), non_blocking=non_blocking, **kwargs) + + _check_register_once(torch.Tensor, custom_backend_name) + setattr(torch.Tensor, custom_backend_name, wrap_tensor_to) + + +def _generate_module_methods_for_privateuse1_backend(custom_backend_name: str) -> None: + # Generate Module attributes and methods depends on Tensor methods, + # so we need to check whether Tensor methods is already registered. + if not hasattr(torch.Tensor, custom_backend_name): + raise RuntimeError( + f"Can not automatically generate {custom_backend_name}() method for torch.nn.Module." + f"Because torch.Tensor doesn't has the method {custom_backend_name}()." + f"For this error, you can try setting for_tensor=True.") + + def wrap_module_to(self: torch.nn.modules.module.T, + device: Optional[Union[int, torch.device]] = None) -> torch.nn.modules.module.T: + r"""Move all model parameters and buffers to the custom device. + + This also makes associated parameters and buffers different objects. So + it should be called before constructing optimizer if the module will + live on device while being optimized. + + .. note:: + This method modifies the module in-place. + + Args: + device (int, optional): if specified, all parameters will be copied to that device + """ + return self._apply(lambda t: getattr(t, custom_backend_name)(device)) + + _check_register_once(torch.nn.Module, custom_backend_name) + setattr(torch.nn.Module, custom_backend_name, wrap_module_to) + + +def _generate_storage_methods_for_privateuse1_backend(custom_backend_name: str, + unsupported_dtype: Optional[List[torch.dtype]] = None) -> None: + # Attribute is registered in the _StorageBase class + # and UntypedStorage obtains through inheritance. + @property # type: ignore[misc] + def wrap_storage_backend(self: torch.storage._StorageBase) -> bool: + r"""Return the internal :class:`torch.UntypedStorage`.""" + return self.device.type == custom_backend_name + + _check_register_once(torch.storage._StorageBase, f'is_{custom_backend_name}') + setattr(torch.storage._StorageBase, f'is_{custom_backend_name}', wrap_storage_backend) + + def wrap_storage_to(self, device=None, non_blocking=False): + r"""Return a copy of this object in custom device memory. + + If this object is already in device memory and on the correct device, then + no copy is performed and the original object is returned. + + Args: + device (int): The destination device id. Defaults to the current device. + non_blocking (bool): If ``True`` and the source is in pinned memory, + the copy will be asynchronous with respect to the host. Otherwise, + the argument has no effect. + """ + # There should be a judgment related to storage device and a judgment related to storage type, + # but it depends on the extended function, so this part is temporarily omitted in the automatic generation. + device_idx = _normalization_device(custom_backend_name, device) + + if getattr(self, f'is_{custom_backend_name}'): + # storage has already on expected device. + if self.get_device() == device_idx: + return self + # For sparse storage, custom need to extend the implementation by themselves. 
+ if self.is_sparse: + raise RuntimeError(f"Can not support a sparse storage move to {custom_backend_name} backend") + # create untyped_storage and copy data + untyped_storage = torch.UntypedStorage( + self.size(), device=torch.device(f'{custom_backend_name}:{device_idx}') + ) + untyped_storage.copy_(self, non_blocking) + return untyped_storage + + _check_register_once(torch.storage._StorageBase, custom_backend_name) + setattr(torch.storage._StorageBase, custom_backend_name, wrap_storage_to) + + # Register the corresponding attribute for the TypedStorage class. + # When the TypedStorage class is removed, the registration is also removed. + + @property # type: ignore[misc] + def wrap_typed_storage_backend(self: torch.storage.TypedStorage) -> bool: + torch.storage._warn_typed_storage_removal() + return self._untyped_storage.device.type == custom_backend_name + + _check_register_once(torch.TypedStorage, f'is_{custom_backend_name}') + setattr(torch.storage.TypedStorage, f'is_{custom_backend_name}', wrap_typed_storage_backend) + + def wrap_typed_storage_to(self: torch.storage.TypedStorage, + device=None, non_blocking=False, **kwargs) -> torch.storage.TypedStorage: + torch.storage._warn_typed_storage_removal() + if unsupported_dtype and self.dtype in unsupported_dtype: + raise RuntimeError(f"Cannot create {custom_backend_name} storage " + f"as {self.dtype} dtype is not supported by this backend") + custom_backend_storage: torch.UntypedStorage = getattr( + self._untyped_storage, custom_backend_name)(device, non_blocking, **kwargs) + return self._new_wrapped_storage(custom_backend_storage) + + _check_register_once(torch.TypedStorage, custom_backend_name) + setattr(torch.TypedStorage, custom_backend_name, wrap_typed_storage_to) + + +def generate_methods_for_privateuse1_backend(for_tensor: bool = True, for_module: bool = True, + for_storage: bool = False, + unsupported_dtype: Optional[List[torch.dtype]] = None) -> None: + r""" + Automatically generate attributes and methods for the custom backend after rename privateuse1 backend. + + In the default scenario, storage-related methods will not be generated automatically. + + When you implement kernels for various torch operations, and register them to the PrivateUse1 dispatch key. + And call the function torch.rename_privateuse1_backend("foo") to rename your backend name. + At this point, you can easily register specific methods and attributes by calling this function. + Just like torch.Tensor.foo(), torch.Tensor.is_foo, torch.Storage.foo(), torch.Storage.is_foo. + + Note: We recommend you use generic functions (check devices are equal or to(device=)). + We provide these methods for convenience only and they will be "monkey patched" onto the objects + and so will not be properly typed. For Storage methods generate, if you need to support sparse data storage, + you need to extend the implementation yourself. + + Args: + for_tensor (bool): whether register related methods for torch.Tensor class. + for_module (bool): whether register related methods for torch.nn.Module class. + for_storage (bool): whether register related methods for torch.Storage class. + unsupported_dtype (List[torch.dtype]): takes effect only when the storage method needs to be generated, + indicating that the storage does not support the torch.dtype type. 
+ + Example:: + + >>> # xdoctest: +SKIP("failing") + >>> torch.utils.rename_privateuse1_backend("foo") + >>> torch.utils.generate_methods_for_privateuse1_backend() + # Then automatically generate backend-related attributes and methods. + >>> a = torch.tensor(2).foo() + >>> a.is_foo + >>> hasattr(torch.nn.Module, 'foo') + """ + custom_backend_name = _get_privateuse1_backend_name() + + if for_tensor: + _generate_tensor_methods_for_privateuse1_backend(custom_backend_name) + + if for_module: + _generate_module_methods_for_privateuse1_backend(custom_backend_name) + + if for_storage: + _generate_storage_methods_for_privateuse1_backend(custom_backend_name, unsupported_dtype) + +def _get_custom_mod_func(func_name: str): + r""" + Return the func named `func_name` defined in custom device module. If not defined, + return `None`. And the func is registered with `torch.utils.rename_privateuse1_backend('foo')` + and `torch._register_device_module('foo', BackendModule)`. + If the custom device module or the func is not defined, it will give warning or error message. + Args: + func_name (str): return the callable func named func_name defined in custom device module. + Example:: + class DummyfooModule: + @staticmethod + def is_available(): + return True + @staticmethod + def func_name(*args, **kwargs): + .... + torch.utils.rename_privateuse1_backend("foo") + torch._register_device_module("foo", DummyfooModule) + foo_is_available_func = torch.utils.backend_registration._get_custom_mod_func("is_available") + if foo_is_available_func: + foo_is_available = foo_is_available_func() + func_ = torch.utils.backend_registration._get_custom_mod_func("func_name") + if func_: + result = func_(*args, **kwargs) + Attention: This function is not meant to be used directly by users, which is why + it is marked as private. It is a convenience function for backend implementers to + more easily call the hooks into their backend extensions. + """ + assert isinstance(func_name, str), f"func_name must be `str`, but got `{type(func_name)}`." + backend_name = _get_privateuse1_backend_name() + custom_device_mod = getattr(torch, backend_name, None) # type: ignore[arg-type] + function = getattr(custom_device_mod, func_name, None) # type: ignore[arg-type] + if custom_device_mod is None or function is None: + message = f'Try to call torch.{backend_name}.{func_name}. The backend must register a custom backend ' + message += f"module with `torch._register_device_module('{backend_name}', BackendModule)`. And " + message += f"BackendModule needs to have the following API's:\n `{func_name}(*args, **kwargs)`. 
\n" + raise RuntimeError(message) + return function diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/checkpoint.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..aa2c2c513c24e979a09a86772a9d16e3e07d3565 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/checkpoint.py @@ -0,0 +1,1439 @@ +import contextlib +import platform +import uuid +import warnings +import weakref +from collections import defaultdict +from itertools import count +from typing import ( + Any, + Callable, + ContextManager, + DefaultDict, + Dict, + Iterable, + List, + Optional, + Tuple, +) +from weakref import ReferenceType + +import torch +import torch.fx.traceback as fx_traceback +from torch._functorch._aot_autograd.functional_utils import is_fun +from torch.utils._pytree import tree_map +from torch.testing._internal.logging_tensor import capture_logs, LoggingTensorMode +from torch.utils._python_dispatch import TorchDispatchMode + +__all__ = [ + "checkpoint", + "checkpoint_sequential", + "CheckpointError", + "CheckpointFunction", + "check_backward_validity", + "detach_variable", + "get_device_states", + "set_device_states", + "noop_context_fn", + "set_checkpoint_early_stop", + "DefaultDeviceType", + "set_checkpoint_debug_enabled", +] + +_DEFAULT_DETERMINISM_MODE = "default" + +_checkpoint_debug_enabled: Optional[bool] = None + + +@contextlib.contextmanager +def set_checkpoint_debug_enabled(enabled: Optional[bool]): + """ + Context manager that sets whether checkpoint should print additional debug + information when running. See the ``debug`` flag for + :func:`~torch.utils.checkpoint.checkpoint` for more information. Note that + when set, this context manager overrides the value of ``debug`` passed to + checkpoint. To defer to the local setting, pass ``None`` to this context. + + Args: + enabled (bool): Whether checkpoint should print debug information. + Default is 'None'. + """ + global _checkpoint_debug_enabled + try: + prev = _checkpoint_debug_enabled + _checkpoint_debug_enabled = enabled + yield + finally: + _checkpoint_debug_enabled = prev + + +def detach_variable(inputs: Tuple[Any, ...]) -> Tuple[torch.Tensor, ...]: + if isinstance(inputs, tuple): + out = [] + for inp in inputs: + if not isinstance(inp, torch.Tensor): + out.append(inp) + continue + + x = inp.detach() + x.requires_grad = inp.requires_grad + out.append(x) + return tuple(out) + else: + raise RuntimeError( + "Only tuple of tensors is supported. Got Unsupported input type: ", + type(inputs).__name__, + ) + + +def check_backward_validity(inputs: Iterable[Any]) -> None: + if not any(inp.requires_grad for inp in inputs if isinstance(inp, torch.Tensor)): + warnings.warn( + "None of the inputs have requires_grad=True. Gradients will be None" + ) + + +def _get_device_module(device="cuda"): + device_module = getattr(torch, device) + return device_module + + +class DefaultDeviceType: + r""" + A class that manages the default device type for checkpointing. + + If no non-CPU tensors are present, the default device type will + be used. The default value is 'cuda'. The device type is used in + the checkpointing process when determining which device states + to save and restore for recomputation. + """ + + _default_device_type = "cuda" + + @staticmethod + def set_device_type(device: str = "cuda"): + """ + Set the default device type for checkpointing. + + Args: + device (str): The device type to be set as default. Default is 'cuda'. 
+ """ + DefaultDeviceType._default_device_type = device + + @staticmethod + def get_device_type() -> str: + """ + Get the current default device type for checkpointing. + + Returns: + str: The current default device type. + """ + return DefaultDeviceType._default_device_type + + +def _infer_device_type(*args): + device_types = list( + { + arg.device.type + for arg in args + if isinstance(arg, torch.Tensor) and not arg.device.type == "cpu" + } + ) + if len(device_types) > 1: + warnings.warn( + "Tensor arguments, excluding CPU tensors, are detected on at least two types of devices. " + "Device state will only be saved for devices of a single device type, and the remaining " + "devices will be ignored. Consequently, if any checkpointed functions involve randomness, " + "this may result in incorrect gradients. (Note that if CUDA devices are among the devices " + "detected, it will be prioritized; otherwise, the first device encountered will be selected.)" + ) + if len(device_types) == 0: + return DefaultDeviceType.get_device_type() + elif "cuda" in device_types: + return "cuda" + else: + return device_types[0] + + +# We can't know if the run_fn will internally move some args to different devices, +# which would require logic to preserve rng states for those devices as well. +# We could paranoically stash and restore ALL the rng states for all visible devices, +# but that seems very wasteful for most cases. Compromise: Stash the RNG state for +# the device of all Tensor args. +# +# To consider: maybe get_device_states and set_device_states should reside in torch/random.py? +def get_device_states(*args) -> Tuple[List[int], List[torch.Tensor]]: + # This will not error out if "arg" is a CPU tensor or a non-tensor type because + # the conditionals short-circuit. 
+ fwd_device_ids = list( + { + arg.get_device() + for arg in args + if isinstance(arg, torch.Tensor) and not arg.device.type == "cpu" + } + ) + + fwd_device_states = [] + device_module = _get_device_module(_infer_device_type(*args)) + + for device_id in fwd_device_ids: + with device_module.device(device_id): + fwd_device_states.append(device_module.get_rng_state()) + + return fwd_device_ids, fwd_device_states + + +def set_device_states(devices, states) -> None: + device_module = _get_device_module(_infer_device_type(*states)) + for device, state in zip(devices, states): + with device_module.device(device): + device_module.set_rng_state(state) + + +def _get_autocast_kwargs(device="cuda"): + if device == "cuda": + device_autocast_kwargs = { + "enabled": torch.is_autocast_enabled(), + "dtype": torch.get_autocast_gpu_dtype(), + "cache_enabled": torch.is_autocast_cache_enabled(), + } + elif _supports_autocast(device): + device_module = _get_device_module(device) + device_autocast_kwargs = { + "enabled": device_module.is_autocast_enabled(), + "dtype": device_module.get_autocast_dtype(), + "cache_enabled": torch.is_autocast_cache_enabled(), + } + else: + device_autocast_kwargs = None + + cpu_autocast_kwargs = { + "enabled": torch.is_autocast_cpu_enabled(), + "dtype": torch.get_autocast_cpu_dtype(), + "cache_enabled": torch.is_autocast_cache_enabled(), + } + + return device_autocast_kwargs, cpu_autocast_kwargs + +def _supports_autocast(device): + device_module = _get_device_module(device) + return device == "cuda" or (hasattr(device_module, "is_autocast_enabled") + and hasattr(device_module, "get_autocast_dtype")) + +class CheckpointFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, run_function, preserve_rng_state, *args): + check_backward_validity(args) + ctx.run_function = run_function + ctx.preserve_rng_state = preserve_rng_state + # Accommodates the (remote) possibility that autocast is enabled for cpu AND gpu. + ctx.device = _infer_device_type(*args) + ctx.device_autocast_kwargs, ctx.cpu_autocast_kwargs = _get_autocast_kwargs( + ctx.device + ) + if preserve_rng_state: + ctx.fwd_cpu_state = torch.get_rng_state() + # Don't eagerly initialize the cuda context by accident. + # (If the user intends that the context is initialized later, within their + # run_function, we SHOULD actually stash the cuda state here. Unfortunately, + # we have no way to anticipate this will happen before we run the function.) + ctx.had_device_in_fwd = False + device_module = _get_device_module(ctx.device) + if getattr(device_module, "_initialized", False): + ctx.had_device_in_fwd = True + ctx.fwd_devices, ctx.fwd_device_states = get_device_states(*args) + + # Save non-tensor inputs in ctx, keep a placeholder None for tensors + # to be filled out during the backward. + ctx.inputs = [] + ctx.tensor_indices = [] + tensor_inputs = [] + for i, arg in enumerate(args): + if torch.is_tensor(arg): + tensor_inputs.append(arg) + ctx.tensor_indices.append(i) + ctx.inputs.append(None) + else: + ctx.inputs.append(arg) + + ctx.save_for_backward(*tensor_inputs) + + with torch.no_grad(): + outputs = run_function(*args) + return outputs + + @staticmethod + def backward(ctx, *args): + if not torch.autograd._is_checkpoint_valid(): + raise RuntimeError( + "Checkpointing is not compatible with .grad() or when an `inputs` parameter" + " is passed to .backward(). Please use .backward() and do not pass its `inputs`" + " argument." + ) + # Copy the list to avoid modifying original list. 
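+        # Outline of the recompute path below: restore the stashed CPU/device RNG
+        # states under fork_rng, re-run ctx.run_function on detached inputs with grad
+        # enabled (and the saved autocast settings), then backprop only through the
+        # recomputed outputs that require grad and return gradients for the inputs.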
+ inputs = list(ctx.inputs) + tensor_indices = ctx.tensor_indices + tensors = ctx.saved_tensors + device_module = _get_device_module(ctx.device) + + # Fill in inputs with appropriate saved tensors. + for i, idx in enumerate(tensor_indices): + inputs[idx] = tensors[i] + + # Stash the surrounding rng state, and mimic the state that was + # present at this time during forward. Restore the surrounding state + # when we're done. + rng_devices = [] + if ctx.preserve_rng_state and ctx.had_device_in_fwd: + rng_devices = ctx.fwd_devices + with torch.random.fork_rng( + devices=rng_devices, enabled=ctx.preserve_rng_state, device_type=ctx.device + ): + if ctx.preserve_rng_state: + torch.set_rng_state(ctx.fwd_cpu_state) + if ctx.had_device_in_fwd: + set_device_states(ctx.fwd_devices, ctx.fwd_device_states) + detached_inputs = detach_variable(tuple(inputs)) + + device_autocast_ctx = device_module.amp.autocast( + **ctx.device_autocast_kwargs + ) if _supports_autocast(ctx.device) else contextlib.nullcontext() + with torch.enable_grad(), device_autocast_ctx, \ + torch.cpu.amp.autocast(**ctx.cpu_autocast_kwargs): + outputs = ctx.run_function(*detached_inputs) + + if isinstance(outputs, torch.Tensor): + outputs = (outputs,) + + # run backward() with only tensor that requires grad + outputs_with_grad = [] + args_with_grad = [] + for i in range(len(outputs)): + if torch.is_tensor(outputs[i]) and outputs[i].requires_grad: + outputs_with_grad.append(outputs[i]) + args_with_grad.append(args[i]) + if len(outputs_with_grad) == 0: + raise RuntimeError( + "none of output has requires_grad=True," + " this checkpoint() is not necessary" + ) + torch.autograd.backward(outputs_with_grad, args_with_grad) + grads = tuple( + inp.grad if isinstance(inp, torch.Tensor) else None + for inp in detached_inputs + ) + + return (None, None) + grads + + +def noop_context_fn(): + return contextlib.nullcontext(), contextlib.nullcontext() + +# TorchDynamo does not step inside utils.checkpoint function. The flow +# looks likes this +# 1) TorchDynamo tries to wrap utils.checkpoint in a HigherOrderOp by +# speculatively checking if the forward function is safe to trace. +# 2) If yes, then Dynamo-generated Fx graph has the wrapped higher +# order op. As a result, TorchDynamo does not look inside utils.checkpoint. +# 3) If not, then TorchDynamo falls back to eager by performing a graph +# break. And here, the following disable wrapper ensures that +# TorchDynamo does not trigger again on the frames created by +# utils.checkpoint innards. +@torch._disable_dynamo +def checkpoint( + function, + *args, + use_reentrant: Optional[bool] = None, + context_fn: Callable[[], Tuple[ContextManager, ContextManager]] = noop_context_fn, + determinism_check: str = _DEFAULT_DETERMINISM_MODE, + debug: bool = False, + **kwargs +): + r"""Checkpoint a model or part of the model. + + Activation checkpointing is a technique that trades compute for memory. + Instead of keeping tensors needed for backward alive until they are used in + gradient computation during backward, forward computation in checkpointed + regions omits saving tensors for backward and recomputes them during the + backward pass. Activation checkpointing can be applied to any part of a + model. + + There are currently two checkpointing implementations available, determined + by the :attr:`use_reentrant` parameter. It is recommended that you use + ``use_reentrant=False``. Please refer the note below for a discussion of + their differences. + + .. 
warning:: + + If the :attr:`function` invocation during the backward pass differs + from the forward pass, e.g., due to a global variable, the checkpointed + version may not be equivalent, potentially causing an + error being raised or leading to silently incorrect gradients. + + .. warning:: + + The ``use_reentrant`` parameter should be passed explicitly. In version + 2.4 we will raise an exception if ``use_reentrant`` is not passed. + If you are using the ``use_reentrant=True`` variant, please refer to the + note below for important considerations and potential limitations. + + .. note:: + + The reentrant variant of checkpoint (``use_reentrant=True``) and + the non-reentrant variant of checkpoint (``use_reentrant=False``) + differ in the following ways: + + * Non-reentrant checkpoint stops recomputation as soon as all needed + intermediate activations have been recomputed. This feature is enabled + by default, but can be disabled with :func:`set_checkpoint_early_stop`. + Reentrant checkpoint always recomputes :attr:`function` in its + entirety during the backward pass. + + * The reentrant variant does not record the autograd graph during the + forward pass, as it runs with the forward pass under + :func:`torch.no_grad`. The non-reentrant version does record the + autograd graph, allowing one to perform backward on the graph within + checkpointed regions. + + * The reentrant checkpoint only supports the + :func:`torch.autograd.backward` API for the backward pass without its + `inputs` argument, while the non-reentrant version supports all ways + of performing the backward pass. + + * At least one input and output must have ``requires_grad=True`` for the + reentrant variant. If this condition is unmet, the checkpointed part + of the model will not have gradients. The non-reentrant version does + not have this requirement. + + * The reentrant version does not consider tensors in nested structures + (e.g., custom objects, lists, dicts, etc) as participating in + autograd, while the non-reentrant version does. + + * The reentrant checkpoint does not support checkpointed regions with + detached tensors from the computational graph, whereas the + non-reentrant version does. For the reentrant variant, if the + checkpointed segment contains tensors detached using ``detach()`` or + with :func:`torch.no_grad`, the backward pass will raise an error. + This is because ``checkpoint`` makes all the outputs require gradients + and this causes issues when a tensor is defined to have no gradient in + the model. To avoid this, detach the tensors outside of the + ``checkpoint`` function. + + Args: + function: describes what to run in the forward pass of the model or + part of the model. It should also know how to handle the inputs + passed as the tuple. For example, in LSTM, if user passes + ``(activation, hidden)``, :attr:`function` should correctly use the + first input as ``activation`` and the second input as ``hidden`` + preserve_rng_state(bool, optional): Omit stashing and restoring + the RNG state during each checkpoint. Note that under torch.compile, + this flag doesn't take effect and we always preserve RNG state. + Default: ``True`` + use_reentrant(bool): + specify whether to use the activation checkpoint variant that + requires reentrant autograd. This parameter should be passed + explicitly. In version 2.4 we will raise an exception if + ``use_reentrant`` is not passed. If ``use_reentrant=False``, + ``checkpoint`` will use an implementation that does not require + reentrant autograd. 
This allows ``checkpoint`` to support additional + functionality, such as working as expected with + ``torch.autograd.grad`` and support for keyword arguments input into + the checkpointed function. + context_fn(Callable, optional): A callable returning a tuple of two + context managers. The function and its recomputation will be run + under the first and second context managers respectively. + This argument is only supported if ``use_reentrant=False``. + determinism_check(str, optional): A string specifying the determinism + check to perform. By default it is set to ``"default"`` which + compares the shapes, dtypes, and devices of the recomputed tensors + against those the saved tensors. To turn off this check, specify + ``"none"``. Currently these are the only two supported values. + Please open an issue if you would like to see more determinism + checks. This argument is only supported if ``use_reentrant=False``, + if ``use_reentrant=True``, the determinism check is always disabled. + debug(bool, optional): If ``True``, error messages will also include + a trace of the operators ran during the original forward computation + as well as the recomputation. This argument is only supported if + ``use_reentrant=False``. + args: tuple containing inputs to the :attr:`function` + + Returns: + Output of running :attr:`function` on :attr:`*args` + """ + if use_reentrant is None: + warnings.warn( + "torch.utils.checkpoint: the use_reentrant parameter should be " + "passed explicitly. In version 2.4 we will raise an exception " + "if use_reentrant is not passed. use_reentrant=False is " + "recommended, but if you need to preserve the current default " + "behavior, you can pass use_reentrant=True. Refer to docs for more " + "details on the differences between the two variants." + ) + use_reentrant = True + + # Hack to mix *args with **kwargs in a python 2.7-compliant way + preserve = kwargs.pop("preserve_rng_state", True) + if kwargs and use_reentrant: + raise ValueError( + "Unexpected keyword arguments: " + ",".join(arg for arg in kwargs) + ) + + if use_reentrant: + if context_fn is not noop_context_fn or debug is not False: + raise ValueError( + "Passing `context_fn` or `debug` is only supported when " + "use_reentrant=False." + ) + return CheckpointFunction.apply(function, preserve, *args) + else: + gen = _checkpoint_without_reentrant_generator( + function, preserve, context_fn, determinism_check, debug, *args, **kwargs + ) + # Runs pre-forward logic + next(gen) + ret = function(*args, **kwargs) + # Runs post-forward logic + try: + next(gen) + except StopIteration: + return ret + + +def checkpoint_sequential(functions, segments, input, use_reentrant=None, **kwargs): + r"""Checkpoint a sequential model to save memory. + + Sequential models execute a list of modules/functions in order + (sequentially). Therefore, we can divide such a model in various segments + and checkpoint each segment. All segments except the last will not store + the intermediate activations. The inputs of each checkpointed segment will + be saved for re-running the segment in the backward pass. + + .. warning:: + The ``use_reentrant`` parameter should be passed explicitly. In version + 2.4 we will raise an exception if ``use_reentrant`` is not passed. + If you are using the ``use_reentrant=True` variant, please see + :func:`~torch.utils.checkpoint.checkpoint` for + the important considerations and limitations of this variant. It is + recommended that you use ``use_reentrant=False``. + + .. 
warning: + Since PyTorch 1.4, it allows only one Tensor as the input and + intermediate outputs, just like :class:`torch.nn.Sequential`. + + Args: + functions: A :class:`torch.nn.Sequential` or the list of modules or + functions (comprising the model) to run sequentially. + segments: Number of chunks to create in the model + input: A Tensor that is input to :attr:`functions` + preserve_rng_state(bool, optional): Omit stashing and restoring + the RNG state during each checkpoint. + Default: ``True`` + use_reentrant(bool): + specify whether to use the activation checkpoint variant that + requires reentrant autograd. This parameter should be passed + explicitly. In version 2.4 we will raise an exception if + ``use_reentrant`` is not passed. If ``use_reentrant=False``, + ``checkpoint`` will use an implementation that does not require + reentrant autograd. This allows ``checkpoint`` to support additional + functionality, such as working as expected with + ``torch.autograd.grad`` and support for keyword arguments input into + the checkpointed function. + + Returns: + Output of running :attr:`functions` sequentially on :attr:`*inputs` + + Example: + >>> # xdoctest: +SKIP("stub") + >>> model = nn.Sequential(...) + >>> input_var = checkpoint_sequential(model, chunks, input_var) + """ + if use_reentrant is None: + warnings.warn( + "torch.utils.checkpoint.checkpoint_sequential: the use_reentrant " + "parameter should be passed explicitly. " + "In version 2.4 we will raise an exception if use_reentrant " + "is not passed. use_reentrant=False is " + "recommended, but if you need to preserve the current default " + "behavior, you can pass use_reentrant=True. Refer to docs for more " + "details on the differences between the two variants." + ) + use_reentrant = True + + # Hack for keyword-only parameter in a python 2.7-compliant way + preserve = kwargs.pop("preserve_rng_state", True) + if kwargs: + raise ValueError( + "Unexpected keyword arguments: " + ",".join(arg for arg in kwargs) + ) + + def run_function(start, end, functions): + def forward(input): + for j in range(start, end + 1): + input = functions[j](input) + return input + + return forward + + if isinstance(functions, torch.nn.Sequential): + functions = list(functions.children()) + + segment_size = len(functions) // segments + # the last chunk has to be non-volatile + end = -1 + for start in range(0, segment_size * (segments - 1), segment_size): + end = start + segment_size - 1 + input = checkpoint( + run_function(start, end, functions), + input, + use_reentrant=use_reentrant, + preserve_rng_state=preserve, + ) + return run_function(end + 1, len(functions) - 1, functions)(input) + + +def _internal_assert(cond): + if not cond: + raise AssertionError( + "Something went unexpectedly wrong in activation checkpoint. " + "Please report this bug by filing an issue to PyTorch." + ) + + +# NOTE [ Nestable Checkpoint ] +# +# The semantics of nested checkpoint can be defined by two basic rules. +# Following the two rules leads to an important implication that is central +# to motivating the design. +# +# Rule 1. Saved tensors are managed by inner-most checkpoint only and hidden +# from any outer layers of checkpoint. +# +# Rule 2. The inputs of inner checkpoints are treated as tensors saved to its +# parent checkpoint. +# +# Implication: To recompute any given saved tensor, we need to recompute all of +# the checkpoints wrapping it. +# +# Why is this implied? 
To unpack a saved tensor X during backward we need to +# recompute the inner-most checkpoint (#1), and in order to recompute that +# checkpoint I need to have its inputs, which are managed by that checkpoint's +# parent (#2), which thus also needs to be recomputed first. Continue this line +# of reasoning and we realize that in order to unpack X, all checkpoints that +# were active at the time X was saved need to be recomputed. (unless we have +# already done so in that backward for some other saved tensor). +# +# In practice, we use a noop autograd Function to save inputs as saved tensors. +# During unpack calling ctx.saved_tensor triggers the parent checkpoint to +# recompute. +# +# Rule 3. We should start recomputation as if there are no checkpoints currently +# active. Checkpoints encountered during recomputation are still +# respected. +# +# When we start recomputation, we push the saved variable hook meant for +# recomputation on the stack. See examples in Rule 6 for more context. +# +# * * * * +# +# Beyond the basic semantics specific to nested checkpoint, we impose several +# more constraints that may apply to checkpointing in general. +# +# Rule 4. Lifetime of recomputed tensors +# +# Recomputed tensors are considered specific to particular invocations +# of backward and are always cleared immediately as they are unpacked +# Particularly, we require this to happen even if retain_graph=True. +# +# [ Implementation details of Rule 4 ] +# +# If we were okay with recomputed tensors staying alive after backward is run +# with retain_graph=True, we would store recomputed variables as the values of a +# WeakKeyDictionary and pack strong references to the keys, so that as we +# backward, those packed keys would be cleared as long as retain_graph=False. +# Clearing the packed key clears the corresponding entry in the WKD. +# +# If we wish recomputed variables to be immediately cleared as we unpack them in +# the retain_graph=True case, we cannot rely on the packed keys to be cleared by +# backward automatically. Instead of packing the strong reference to the key +# directly, we pack a container object, which we manually clear as we unpack. +# +# An important detail is that if a second backward happens, the second +# recomputation needs to reset the container with a newly created key. +# +# Rule 5. Stop recomputation as soon as we've recomputed the saved tensors we +# know we need. +# +# [ Implementation details of Rule 5 ] +# +# During recomputation, raise an exception if the number of recomputed tensors +# matches the number of tensors that we expected to recompute. We wrap the +# recomputation call with a try-catch to catch this specific exception. See +# Rule #6 below for some examples. +# +# Rule 6. We support doing backward inside checkpoint context +# +# [ retain_graph is True] +# +# def fn(x): +# y = x.sin() +# z = y.cos() +# gx, = torch.autograd.grad(z, x, retains_grad=True) +# return gx, z +# +# out = checkpoint(fn)(inp) +# out.backward() +# +# Because z is saved by cos while checkpoint is enabled, it would not be +# actually saved, and so the .grad() call inside must trigger a recomputation. +# +# During recomputation the "inner pack hook" has two responsibilities: +# +# 1) As usual, populating the WeakKeyDictionary storing recomputed tensors +# 2) Pack the actual tensor (detached) so that one may perform backward on the +# recomputed graph. 
The tensors saved to this graph will live until the end +# of recomputation, or die earlier if someone performs backward with +# retain_graph=False. +# +# More generally performing backward on the recomputed graph occurs in the +# following cases: +# - If backward is performed inside forward, +# - During the original forward IF early-stop is disabled +# - During the original backward +# - If there are multiple .grad()/.backward() calls, we would perform backward +# on the recomputed graph even if early-stop is enabled (see the example below) +# +# [ retain_graph is False ] +# +# The example below shows what happens if during recomputation we find that some +# of the tensors we are trying to recompute have already been cleared. +# +# Spoiler: we don't do anything special, we just skip over them! +# +# def fn(x): +# y = x.sin() # (1) +# z = y.cos() # (2) +# gx, = torch.autograd.grad(z, x) # (3) +# return x.cos() * gx # (4) +# +# out = checkpoint(fn)(inp) +# out.backward() # (5) +# +# 1, 2. Don't save x and y since we are inside a checkpoint. +# 3. Trigger a recompute of fn since x and y weren't saved. +# And depending on whether early stop is enabled, either stop at (2) or +# continue running the function. +# Because we are running backward with retain_graph=False, we clear x and y's +# holders. +# 4. Don't save x since we are inside a checkpoint. +# 5. Calling backward triggers another recompute of fn. During recompute, we see +# that x and y have already been cleared in the original graph as indicated +# by holder=None. We skip over them. We still save x at (4) (since its holder +# is still alive.) + +_enable_checkpoint_early_stop = True + + +@contextlib.contextmanager +def set_checkpoint_early_stop(enable: bool): + """Context manager that sets whether checkpoint should stop recomputation early. + + By default, non-reentrant checkpoint stops recomputation as soon as it + has computed all needed Tensors. This context manager can be used to disable + that feature if it is problematic for your specific application. + + This context manager only needs to be active when forward is run. It does + not need to be active during backward. + + Example:: + + >>> # xdoctest: +SKIP(failing) + >>> message = "saved tensors default hooks are disabled" + >>> with set_checkpoint_early_stop(False): + ... # Any checkpoint under this context manager will respect this + ... # context manager, even if its backward is performed outside. + ... out = checkpoint(fn, inputs) + ... 
+ >>> out.backward() + """ + global _enable_checkpoint_early_stop + try: + prev = _enable_checkpoint_early_stop + _enable_checkpoint_early_stop = enable + yield + finally: + _enable_checkpoint_early_stop = prev + + +class _Handle: + pass + + +class _Holder: + def __init__(self): + self.handles: Dict[int, Optional[_Handle]] = dict() + + +class _NoopSaveInputs(torch.autograd.Function): + @staticmethod + def forward(*args): + return torch.empty((0,)) + + @staticmethod + def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None: + # Only tensors can be saved with ctx.save_for_backward, everything else + # is captured by get_args, which is saved directly on ctx + tensor_indices, tensors = zip( + *[(i, o) for i, o in enumerate(inputs) if isinstance(o, torch.Tensor)] + ) + idx2saved_idx = {b: a for a, b in enumerate(tensor_indices)} + # args but with tensors replaced with None as placeholders + args = [None if isinstance(o, torch.Tensor) else o for o in inputs] + + def get_args(saved_tensors): + # restore the placeholders with the original tensors grabbed from + # ctx.saved_tensors (which may be saved on a parent checkpoint if + # this checkpoint is nested, and that would trigger a recursive + # unpack!) + ret = [ + saved_tensors[idx2saved_idx[i]] if i in tensor_indices else o + for i, o in enumerate(args) + ] + # grab the tail since we also saved the dummy to avoid having to explicitly + # handle the case where there are no tensor inputs + return ret[1:] + + ctx.get_args = get_args + ctx.save_for_backward(*tensors) + + @staticmethod + def backward(ctx, *grad_outputs): + raise AssertionError("Did not expect to backward on this graph") + + +class _CheckpointFrame: + def __init__(self, recompute_fn, early_stop, unpack_error_cb, metadata_fn): + self.recompute_fn = recompute_fn + self.input_saver = None + self.weak_holders: List[ReferenceType] = [] + # We store this as a weakkeydictionary so that in the case of a partial + # backward, the entries in the dict are cleared alongside the Holder + # which will be removed when the SavedVariable is cleared. + self.recomputed: DefaultDict[ + int, weakref.WeakKeyDictionary[_Handle, torch.Tensor] + ] = defaultdict(weakref.WeakKeyDictionary) + # We need both recomp_counter and recomputed since they can diverge + # https://github.com/pytorch/pytorch/pull/90105#discussion_r1135889885 + self.recomp_counter: DefaultDict[int, int] = defaultdict(int) + self.is_recomputed: DefaultDict[int, bool] = defaultdict(bool) + + # See Rule 5 + self.early_stop = early_stop + + # Debugging + self.metadata_fn = metadata_fn + self.unpack_error_cb = unpack_error_cb + self.x_metadatas = [] + self.forward_completed = False + self.ignore_saved_mismatch = False + + def check_recomputed_tensors_match(self, gid): + if self.ignore_saved_mismatch: + # TODO: we can probably make this check stricter by checking that + # the metadata of the first tensors still match. + return + # NOTE [ Error handling for checkpoint ] + # + # At a high level, we need to check that the tensors saved + # during original forward matches tensors saved during recompute + # This means handling 3 cases: + # + # 1. During recompute, more tensors were saved. + # + # Usually this is hidden due to the StopRecomputationError + # but if early stop is not enabled, or we would have errored + # anyway because there aren't enough weak_holders. But we + # do want to have a nice error. See the _recomputation_hook + # for details. + if not len(self.weak_holders) == self.recomp_counter[gid]: + # 2. 
During recompute, fewer tensors were saved + # + # We know that everytime we save something do original forward + # we append to weak_holder, and every time we save a tensor + # during recompute we increment recompute_counter. + raise CheckpointError( + "torch.utils.checkpoint: A different number of tensors was saved " + "during the original forward and recomputation.\n" + f"Number of tensors saved during forward: {len(self.weak_holders)}\n" + f"Number of tensors saved during recomputation: {self.recomp_counter[gid]}" + ) + + # 3. During recompute, the same tensors were saved, but they + # have different metadata + nb_meta_different = [] + for idx, weak_holder in enumerate(self.weak_holders): + holder = weak_holder() + if holder is None: + continue + # We've seen all holders since we iterate over them in order + # For every holder that is still alive now, it must've been + # alive when we saw it during recompute, therefore, the + # gid must be set. + _internal_assert(gid in holder.handles) + # We know this is the first unpack, so it couldn't have been set + # to None yet. + _internal_assert(holder.handles[gid] is not None) + # We always set these together in the recomputation hook + _internal_assert(holder.handles[gid] in self.recomputed[gid]) + # see pack hook, x_metadata is 1:1 with weak_holders. + x_meta = self.x_metadatas[idx] + recomputed_x = self.recomputed[gid][holder.handles[gid]] + if x_meta != self.metadata_fn(recomputed_x): + nb_meta_different.append((idx, x_meta, self.metadata_fn(recomputed_x))) + + if len(nb_meta_different) > 0: + mismatched_tensors = "" + for idx, x_meta, recomputed_meta in nb_meta_different: + mismatched_tensors += ( + f"tensor at position {idx}:\n" + f"saved metadata: {x_meta}\n" + f"recomputed metadata: {recomputed_meta}\n" + ) + raise CheckpointError( + "torch.utils.checkpoint: Recomputed values for the following tensors " + "have different metadata than during the forward pass.\n" + f"{mismatched_tensors}" + ) + + +_checkpoint_error_template = """ \ +An error happened while unpacking tensors; dumping logs of latest computation +because you passed `debug=True` to `torch.utils.checkpoint.checkpoint()`. +Scroll all the way down for guidance on how to navigate these logs. + ++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ +| 1. Stack traces of the operators that ran in the original forward | ++------------------------------------------------------------------------------+ + +{forward_traces} ++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ +| 2. Stack traces of the operators that ran during recomputation | ++------------------------------------------------------------------------------+ + +{recompute_traces} ++~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+ +| 3. Log of operators in the original forward and recomputation | ++------------------------------------------------------------------------------+ +(Scroll up to correlate stack traces with each operation listed below. This + helps identify their source in the code.) + +IMPORTANT: Differences in "detach" calls between the original forward and the + recomputation are expected. They are introduced by the checkpointing + mechanism and can be ignored. 
+ +Operations executed during the original forward: + +{forward_ops} + +Operations executed during recomputation: + +{recompute_ops} + ++------------------------------------------------------------------------------+ + ERROR: Detected non-determinism while running activation checkpointing + + You are seeing this error because you passed `debug=True` to checkpoint and + the tensors saved during the original forward differ from those saved during + recomputation. This can happen if different operators were run in the + original forward and in the recomputation. + + To identify where the mismatch may be coming from, you can do the following: + + 1) Compare the operators run during the original forward and recomputation to + see where they differ. These operators are printed above in the order they + were executed. + + 2) Review the stack trace for each operator to locate its invocation source. + The stack traces are printed in execution order. + + Note that the logs can be quite long. Here's how they are structured: + (Tip: you can Ctrl-f for these headers) + + 1. Stack traces of the operators that ran in the original forward + 2. Stack traces of the operators that ran during recomputation + 3. Log of operators in the original forward and recomputation + 4. Error message <--- You are here +-------------------------------------------------------------------------------- +""" + +class CheckpointError(RuntimeError): + pass + + +def _get_debug_context_and_cb() -> Tuple[Callable[[], Any], Callable[[CheckpointError], None]]: + # This function returns the context_fn and error_cb to be used by the + # checkpointing mechanism. error_cb is invoked when an error is detected + # during unpack. + + # record_context_cpp is not supported on non-Linux, non-x86_64 platforms + cpp_tb = platform.machine() == 'x86_64' and platform.system() == 'Linux' + + class CaptureLogs: + def __init__(self): + self.logs = None + self.tbs = None + + def get_context_manager(self): + @contextlib.contextmanager + def logging_mode(): + with LoggingTensorMode(), \ + capture_logs(True, python_tb=True, script_tb=True, cpp_tb=cpp_tb) as logs_and_tb: + self.logs, self.tbs = logs_and_tb + yield logs_and_tb + return logging_mode() + + capture_logs_fwd = CaptureLogs() + capture_logs_recompute = CaptureLogs() + + def unpack_error_cb(e: CheckpointError): + def get_str_tb(label, capture_logs): + out = "" + total_len = len(capture_logs.logs) + for i, (log, tb) in enumerate(zip(capture_logs.logs, capture_logs.tbs)): + out += f"{log} ({i + 1} of {total_len} in {label})\n\n" + found_torch_dispatch = False + for line in tb: + # Start printing stack trace only after __torch_dispatch__ is found + is_torch_dispatch = line['name'] == '__torch_dispatch__' + if not found_torch_dispatch and not is_torch_dispatch: + continue + elif is_torch_dispatch: + found_torch_dispatch = True + continue + out += f"{line['filename']}:{line['line']}:{line['name']}\n" + out += "\n\n" + return out + assert capture_logs_fwd.logs is not None + assert capture_logs_recompute.logs is not None + raise CheckpointError( + _checkpoint_error_template.format( + forward_traces=get_str_tb("original", capture_logs_fwd), + recompute_traces=get_str_tb("recompute", capture_logs_recompute), + forward_ops="\n".join(capture_logs_fwd.logs), + recompute_ops="\n".join(capture_logs_recompute.logs) + ) + ) from e + + def context_fn(): + return capture_logs_fwd.get_context_manager(), capture_logs_recompute.get_context_manager() + + return context_fn, unpack_error_cb + +def
_default_meta_extractor(x: torch.Tensor) -> Dict[str, Any]: + # These properties are fast to check, easy to understand + return { + "shape": x.shape, + "dtype": x.dtype, + "device": x.device + } + +_allowed_determinism_checks_to_fns: Dict[str, Callable[[torch.Tensor], Any]] = { + _DEFAULT_DETERMINISM_MODE: _default_meta_extractor, + "none": lambda _: None, +} + +# See Rule 5 +class _StopRecomputationError(Exception): + pass + + +class _recomputation_hook(torch.autograd.graph.saved_tensors_hooks): + def __init__(self, target_frame_ref: ReferenceType, gid: int): + def pack_hook(x): + target_frame = target_frame_ref() + assert target_frame is not None # appease mypy + recomp_idx = target_frame.recomp_counter[gid] + target_frame.recomp_counter[gid] += 1 + + if recomp_idx >= len(target_frame.weak_holders): + assert not target_frame.early_stop + if not target_frame.forward_completed: + # We run into this case when early stop is not enabled and do + # grad within checkpoint. + # We need to set this flag, so we don't error out later when + # we check if the number of tensors saved during forward and + # recomputation match. + target_frame.ignore_saved_mismatch = True + return x.detach() + raise CheckpointError( + "torch.utils.checkpoint: trying to save more tensors during " + "recomputation than during the original forward pass." + ) + + holder = target_frame.weak_holders[recomp_idx]() + + # This holder may have been cleared because someone may have called + # backward within forward. If so, we don't need to save. + if holder is not None: + _internal_assert(holder.handles.get(gid, None) is None) + holder.handles[gid] = _Handle() + target_frame.recomputed[gid][holder.handles[gid]] = x.detach() + + if target_frame.early_stop and target_frame.recomp_counter[gid] == len( + target_frame.weak_holders + ): + raise _StopRecomputationError() + # See Rule 6: [ retain_graph is True ] above + return x.detach() + + def unpack_hook(x): + # See Rule 6: [ retain_graph is True ] above for an example of when + # the graph created during recomputation could be backwarded. + return x + + super().__init__(pack_hook, unpack_hook) + + +class _checkpoint_hook(torch.autograd.graph.saved_tensors_hooks): + def __init__(self, frame): + def pack_hook(x): + # See Rule 4 above + holder = _Holder() + frame.weak_holders.append(weakref.ref(holder)) + # Save metadata to detect non-determinism + if frame.metadata_fn is not None: + with torch.no_grad(): + frame.x_metadatas.append(frame.metadata_fn(x)) + return holder + + def unpack_hook(holder): + gid = torch._C._current_graph_task_id() + if gid == -1: + # generate a temporary id if we trigger unpack outside of a backward call + gid = int(uuid.uuid4()) + + if not frame.is_recomputed[gid]: + ctx = frame.input_saver.grad_fn + args = ctx.get_args(ctx.saved_tensors) + + try: + with _recomputation_hook( + weakref.ref(frame), gid + ), torch.autograd.enable_grad(): + frame.recompute_fn(*args) + except _StopRecomputationError: + pass + frame.is_recomputed[gid] = True + frame.check_recomputed_tensors_match(gid) + + _internal_assert(gid in holder.handles) + + if holder.handles[gid] is None: + raise CheckpointError( + "torch.utils.checkpoint: Unpack is being triggered for a tensor that was already " + "unpacked once. If you are calling ctx.saved_tensors in backward, make sure to do " + "so only once. Otherwise please open an issue with details on your use case." 
+ ) + _internal_assert(holder.handles[gid] in frame.recomputed[gid]) + ret = frame.recomputed[gid][holder.handles[gid]] + holder.handles[gid] = None + return ret + + if frame.unpack_error_cb is not None: + def unpack_hook_with_error_cb(holder): + try: + return unpack_hook(holder) + except CheckpointError as e: + frame.unpack_error_cb(e) + super().__init__(pack_hook, unpack_hook_with_error_cb) + else: + super().__init__(pack_hook, unpack_hook) + + +def _is_compiling(func, args, kwargs): + # Check if we are under AOTAutograd tracing + # There should probably be a better way to do this... + # TODO: unify _is_compiling across all compile stacks + for arg in args: + if isinstance(arg, torch.Tensor) and is_fun(arg): + return True + return False + + +def _detach(x): + if isinstance(x, torch.Tensor): + return x.detach() + return x + + +uid = count(1) + + +# NOTE: torch.utils.checkpoint internal logic will call these two functions unknown number of times +# (i.e. there could be _CachedTorchDispatchMode calls that doesn't map to a _CachingTorchDispatchMode call), +# so we ignore these ops and just always recompute them. +_ignored_ops = { + torch.ops.prim.device.default, + torch.ops.aten.detach.default, +} | set(torch._subclasses.functional_tensor.FunctionalTensor.metadata_fns) + + +class _CachingTorchDispatchMode(TorchDispatchMode): + r""" + A :class:`TorchDispatchMode` to implement selective activation checkpointing + that's compatible with torch.compile. Used together with _CachedTorchDispatchMode. + """ + def __init__(self, policy_fn, storage): + self.policy_fn = policy_fn + self.storage = storage + + def push_into_storage(self, out, func, args, kwargs): + out_detached = tree_map(_detach, out) + self.storage[func].append(out_detached) + + def _handle_compile_in_forward_ctx(self, should_not_recompute, func, args, kwargs): + if func in _ignored_ops: + return func(*args, **kwargs) + if should_not_recompute: + fx_traceback.current_meta["recompute"] = 0 + # NOTE: Here we just store and reuse output of all ops, since in torch.compile mode + # we decide and handle recomputation in the partitioner. + out = func(*args, **kwargs) + self.push_into_storage(out, func, args, kwargs) + return out + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + if kwargs is None: + kwargs = {} + should_not_recompute = self.policy_fn("forward", func, *args, **kwargs) + if _is_compiling(func, args, kwargs): + return self._handle_compile_in_forward_ctx(should_not_recompute, func, args, kwargs) + else: + if should_not_recompute: + out = func(*args, **kwargs) + self.push_into_storage(out, func, args, kwargs) + else: + out = func(*args, **kwargs) + return out + + +class _CachedTorchDispatchMode(TorchDispatchMode): + r""" + A :class:`TorchDispatchMode` to implement selective activation checkpointing + that's compatible with torch.compile. Used together with _CachingTorchDispatchMode. 
+ """ + def __init__(self, policy_fn, storage): + self.policy_fn = policy_fn + self.storage = storage + + def pop_from_storage(self, func, args, kwargs): + assert func in self.storage + out = self.storage[func].pop(0) + return out + + def _handle_compile_in_recompute_ctx(self, should_not_recompute, func, args, kwargs): + if func in _ignored_ops: + return func(*args, **kwargs) + out = self.pop_from_storage(func, args, kwargs) + return out + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + if kwargs is None: + kwargs = {} + should_not_recompute = self.policy_fn("recompute", func, *args, **kwargs) + if _is_compiling(func, args, kwargs): + return self._handle_compile_in_recompute_ctx(should_not_recompute, func, args, kwargs) + else: + if should_not_recompute: + out = self.pop_from_storage(func, args, kwargs) + else: + out = func(*args, **kwargs) + return out + + +def _pt2_selective_checkpoint_context_fn_gen(policy_fn): + """ + A helper function that generates a pair of contexts to be later passed into + `torch.utils.checkpoint` API to implment selective checkpointing. + + .. warning:: + This is context_fn is intended for use with torch.compile only. + + Args: + policy_fn (Callable[[Callable, List[Any], Dict[str, Any]], bool]): Policy function + to decide whether a particular op should be recomputed in backward pass or not. + In eager mode: + If policy_fn(...) returns True, the op is guaranteed to NOT be recomputed. + If policy_fn(...) returns False, the op is guaranteed to be recomputed. + In torch.compile mode: + If policy_fn(...) returns True, the op is guaranteed to NOT be recomputed. + If policy_fn(...) returns False, the op may or may not be recomputed + (it's up to the partitioner to decide). + + Returns: + A pair of generated contexts. + + Example: + >>> # xdoctest: +REQUIRES(LINUX) + >>> + >>> def get_custom_policy(): + >>> no_recompute_list = [ + >>> torch.ops.aten.mm.default, + >>> ] + >>> def custom_policy(mode, func, *args, **kwargs): + >>> return func in no_recompute_list + >>> return custom_policy + >>> + >>> def selective_checkpointing_context_fn(): + >>> return _pt2_selective_checkpoint_context_fn_gen(get_custom_policy()) + >>> + >>> def gn(x, y): + >>> return torch.sigmoid(torch.matmul(torch.matmul(x, y), y)) * y + >>> + >>> def fn(x, y): + >>> return torch.utils.checkpoint.checkpoint( + >>> gn, x, y, + >>> use_reentrant=False, + >>> context_fn=selective_checkpointing_context_fn, + >>> ) + >>> + >>> x = torch.randn(4, 4, requires_grad=True) + >>> y = torch.randn(4, 4, requires_grad=True) + >>> + >>> compiled_fn = torch.compile(fn) + """ + storage: Dict[Any, List[Any]] = defaultdict(list) + return _CachingTorchDispatchMode(policy_fn, storage), _CachedTorchDispatchMode(policy_fn, storage) + + +# NB: this helper wraps fn before calling checkpoint_impl. kwargs and +# saving/restoring of global state is handled here. + +def _checkpoint_without_reentrant_generator( + fn, + preserve_rng_state=True, + context_fn: Callable[[], Tuple[ContextManager, ContextManager]] = noop_context_fn, + determinism_check: str = _DEFAULT_DETERMINISM_MODE, + debug: bool = False, + *args, + **kwargs +): + """Checkpointing without reentrant autograd. + + Args: + function: describes what to run in the forward pass of the model or + part of the model. It should also know how to handle the inputs + passed as the tuple. 
For example, in LSTM, if the user passes + ``(activation, hidden)``, :attr:`function` should correctly use the + first input as ``activation`` and the second input as ``hidden``. + preserve_rng_state(bool, optional): If ``False``, omit stashing and restoring + the RNG state during each checkpoint. + Default: ``True`` + context_fn(Callable, optional): A callable returning a tuple of two + context managers. The function and its recomputation will be run + under the first and second context managers respectively. + determinism_check(str, optional): A string specifying the determinism + check to perform. By default it is set to ``"default"`` which + compares the shapes, dtypes, and devices of the recomputed tensors + against those of the saved tensors. To turn off this check, specify + ``"none"``. Currently these are the only two supported values. + Please open an issue if you would like to see more determinism + checks. + debug(bool, optional): If ``True``, error messages will also include + a trace of the operators run during the original forward computation + as well as during the recomputation. + *args: Arguments to pass to the given ``function``. + **kwargs: Keyword arguments to pass into the given ``function``. + """ + unpack_error_cb = None + + if _checkpoint_debug_enabled if _checkpoint_debug_enabled is not None else debug: + if context_fn != noop_context_fn: + raise ValueError( + "debug=True is incompatible with non-default context_fn" + ) + context_fn, unpack_error_cb = _get_debug_context_and_cb() + + if determinism_check in _allowed_determinism_checks_to_fns: + metadata_fn = _allowed_determinism_checks_to_fns[determinism_check] + else: + raise ValueError( + f"determinism_check should be one of {list(_allowed_determinism_checks_to_fns.keys())}, " + f"but got {determinism_check}" + ) + + device = _infer_device_type(*args) + device_module = _get_device_module(device) + forward_context, recompute_context = context_fn() + if _is_compiling(fn, args, kwargs) and context_fn != noop_context_fn: + assert ( + isinstance(forward_context, TorchDispatchMode) and + isinstance(recompute_context, TorchDispatchMode) + ), \ + "In torch.compile mode, `context_fn` arg passed to `torch.utils.checkpoint` " + \ + "must generate a tuple of two `TorchDispatchMode`s." + # Accommodates the (remote) possibility that autocast is enabled for cpu AND gpu. + device_autocast_kwargs, cpu_autocast_kwargs = _get_autocast_kwargs(device=device) + + if preserve_rng_state: + fwd_cpu_state = torch.get_rng_state() + # Don't eagerly initialize the cuda context by accident. + # (If the user intends that the context is initialized later, within their + # run_function, we SHOULD actually stash the cuda state here. Unfortunately, + # we have no way to anticipate this will happen before we run the function. + # If they do so, we raise an error.) + had_device_in_fwd = False + if getattr(device_module, "_initialized", False): + had_device_in_fwd = True + fwd_devices, fwd_device_states = get_device_states(*args) + + def recompute_fn(*inputs): + kwargs, *args = inputs + # This will be called later during recomputation. This wrapping enables + # the necessary global state to be captured.
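+        # Concretely, the block below: (1) forks the CPU RNG (and the device
+        # RNG, if a device was active during the original forward) and restores
+        # the states stashed above, (2) re-enters the autocast settings captured
+        # above, (3) re-enters the user-provided recompute_context, and
+        # (4) re-runs fn; the _recomputation_hook that is active around this
+        # call (see unpack_hook) then repopulates the saved tensors.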
+ rng_devices = [] + if preserve_rng_state and had_device_in_fwd: + rng_devices = fwd_devices + with torch.random.fork_rng( + devices=rng_devices, enabled=preserve_rng_state, device_type=device + ): + if preserve_rng_state: + torch.set_rng_state(fwd_cpu_state) + if had_device_in_fwd: + set_device_states(fwd_devices, fwd_device_states) + + device_autocast_ctx = device_module.amp.autocast( + **device_autocast_kwargs + ) if _supports_autocast(device) else contextlib.nullcontext() + with device_autocast_ctx, torch.cpu.amp.autocast(**cpu_autocast_kwargs), \ + recompute_context: + fn(*args, **kwargs) + + new_frame = _CheckpointFrame( + recompute_fn, + _enable_checkpoint_early_stop, + unpack_error_cb, + metadata_fn + ) + dummy = torch.empty((0,), requires_grad=True) + new_frame.input_saver = _NoopSaveInputs.apply(dummy, kwargs, *args) + + # When ambient grad_mode is False + if new_frame.input_saver.grad_fn is None: + yield + return + + with _checkpoint_hook(new_frame), forward_context: + yield + new_frame.forward_completed = True + + if getattr(device_module, "_initialized", False) and \ + preserve_rng_state and not had_device_in_fwd: # type: ignore[possibly-undefined] + # Device was not initialized before running the forward, so we didn't + # stash the device state. + raise RuntimeError( + "PyTorch's device state was initialized in the forward pass " + "of a Checkpoint, which is not allowed. Please open an issue " + "if you need this feature." + ) + + return diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/collect_env.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/collect_env.py new file mode 100644 index 0000000000000000000000000000000000000000..6cbf598156b031c1deef8f3d7fed84961e46b4e9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/collect_env.py @@ -0,0 +1,624 @@ + +# Unlike the rest of the PyTorch this file must be python2 compliant. 
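+# (Illustrative note) The output of this script is what the PyTorch bug report
+# template asks users to paste when filing an issue.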
+# This script outputs relevant system environment info +# Run it with `python collect_env.py` or `python -m torch.utils.collect_env` +import datetime +import locale +import re +import subprocess +import sys +import os +from collections import namedtuple + + +try: + import torch + TORCH_AVAILABLE = True +except (ImportError, NameError, AttributeError, OSError): + TORCH_AVAILABLE = False + +# System Environment Information +SystemEnv = namedtuple('SystemEnv', [ + 'torch_version', + 'is_debug_build', + 'cuda_compiled_version', + 'gcc_version', + 'clang_version', + 'cmake_version', + 'os', + 'libc_version', + 'python_version', + 'python_platform', + 'is_cuda_available', + 'cuda_runtime_version', + 'cuda_module_loading', + 'nvidia_driver_version', + 'nvidia_gpu_models', + 'cudnn_version', + 'pip_version', # 'pip' or 'pip3' + 'pip_packages', + 'conda_packages', + 'hip_compiled_version', + 'hip_runtime_version', + 'miopen_runtime_version', + 'caching_allocator_config', + 'is_xnnpack_available', + 'cpu_info', +]) + +DEFAULT_CONDA_PATTERNS = { + "torch", + "numpy", + "cudatoolkit", + "soumith", + "mkl", + "magma", + "triton", + "optree", +} + +DEFAULT_PIP_PATTERNS = { + "torch", + "numpy", + "mypy", + "flake8", + "triton", + "optree", + "onnx", +} + + +def run(command): + """Return (return-code, stdout, stderr).""" + shell = True if type(command) is str else False + p = subprocess.Popen(command, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, shell=shell) + raw_output, raw_err = p.communicate() + rc = p.returncode + if get_platform() == 'win32': + enc = 'oem' + else: + enc = locale.getpreferredencoding() + output = raw_output.decode(enc) + err = raw_err.decode(enc) + return rc, output.strip(), err.strip() + + +def run_and_read_all(run_lambda, command): + """Run command using run_lambda; reads and returns entire output if rc is 0.""" + rc, out, _ = run_lambda(command) + if rc != 0: + return None + return out + + +def run_and_parse_first_match(run_lambda, command, regex): + """Run command using run_lambda, returns the first regex match if it exists.""" + rc, out, _ = run_lambda(command) + if rc != 0: + return None + match = re.search(regex, out) + if match is None: + return None + return match.group(1) + +def run_and_return_first_line(run_lambda, command): + """Run command using run_lambda and returns first line if output is not empty.""" + rc, out, _ = run_lambda(command) + if rc != 0: + return None + return out.split('\n')[0] + + +def get_conda_packages(run_lambda, patterns=None): + if patterns is None: + patterns = DEFAULT_CONDA_PATTERNS + conda = os.environ.get('CONDA_EXE', 'conda') + out = run_and_read_all(run_lambda, "{} list".format(conda)) + if out is None: + return out + + return "\n".join( + line + for line in out.splitlines() + if not line.startswith("#") + and any(name in line for name in patterns) + ) + +def get_gcc_version(run_lambda): + return run_and_parse_first_match(run_lambda, 'gcc --version', r'gcc (.*)') + +def get_clang_version(run_lambda): + return run_and_parse_first_match(run_lambda, 'clang --version', r'clang version (.*)') + + +def get_cmake_version(run_lambda): + return run_and_parse_first_match(run_lambda, 'cmake --version', r'cmake (.*)') + + +def get_nvidia_driver_version(run_lambda): + if get_platform() == 'darwin': + cmd = 'kextstat | grep -i cuda' + return run_and_parse_first_match(run_lambda, cmd, + r'com[.]nvidia[.]CUDA [(](.*?)[)]') + smi = get_nvidia_smi() + return run_and_parse_first_match(run_lambda, smi, r'Driver Version: (.*?) 
') + + +def get_gpu_info(run_lambda): + if get_platform() == 'darwin' or (TORCH_AVAILABLE and hasattr(torch.version, 'hip') and torch.version.hip is not None): + if TORCH_AVAILABLE and torch.cuda.is_available(): + if torch.version.hip is not None: + prop = torch.cuda.get_device_properties(0) + if hasattr(prop, "gcnArchName"): + gcnArch = " ({})".format(prop.gcnArchName) + else: + gcnArch = "NoGCNArchNameOnOldPyTorch" + else: + gcnArch = "" + return torch.cuda.get_device_name(None) + gcnArch + return None + smi = get_nvidia_smi() + uuid_regex = re.compile(r' \(UUID: .+?\)') + rc, out, _ = run_lambda(smi + ' -L') + if rc != 0: + return None + # Anonymize GPUs by removing their UUID + return re.sub(uuid_regex, '', out) + + +def get_running_cuda_version(run_lambda): + return run_and_parse_first_match(run_lambda, 'nvcc --version', r'release .+ V(.*)') + + +def get_cudnn_version(run_lambda): + """Return a list of libcudnn.so; it's hard to tell which one is being used.""" + if get_platform() == 'win32': + system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows') + cuda_path = os.environ.get('CUDA_PATH', "%CUDA_PATH%") + where_cmd = os.path.join(system_root, 'System32', 'where') + cudnn_cmd = '{} /R "{}\\bin" cudnn*.dll'.format(where_cmd, cuda_path) + elif get_platform() == 'darwin': + # CUDA libraries and drivers can be found in /usr/local/cuda/. See + # https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#install + # https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#installmac + # Use CUDNN_LIBRARY when cudnn library is installed elsewhere. + cudnn_cmd = 'ls /usr/local/cuda/lib/libcudnn*' + else: + cudnn_cmd = 'ldconfig -p | grep libcudnn | rev | cut -d" " -f1 | rev' + rc, out, _ = run_lambda(cudnn_cmd) + # find will return 1 if there are permission errors or if not found + if len(out) == 0 or (rc != 1 and rc != 0): + l = os.environ.get('CUDNN_LIBRARY') + if l is not None and os.path.isfile(l): + return os.path.realpath(l) + return None + files_set = set() + for fn in out.split('\n'): + fn = os.path.realpath(fn) # eliminate symbolic links + if os.path.isfile(fn): + files_set.add(fn) + if not files_set: + return None + # Alphabetize the result because the order is non-deterministic otherwise + files = sorted(files_set) + if len(files) == 1: + return files[0] + result = '\n'.join(files) + return 'Probably one of the following:\n{}'.format(result) + + +def get_nvidia_smi(): + # Note: nvidia-smi is currently available only on Windows and Linux + smi = 'nvidia-smi' + if get_platform() == 'win32': + system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows') + program_files_root = os.environ.get('PROGRAMFILES', 'C:\\Program Files') + legacy_path = os.path.join(program_files_root, 'NVIDIA Corporation', 'NVSMI', smi) + new_path = os.path.join(system_root, 'System32', smi) + smis = [new_path, legacy_path] + for candidate_smi in smis: + if os.path.exists(candidate_smi): + smi = '"{}"'.format(candidate_smi) + break + return smi + + +# example outputs of CPU infos +# * linux +# Architecture: x86_64 +# CPU op-mode(s): 32-bit, 64-bit +# Address sizes: 46 bits physical, 48 bits virtual +# Byte Order: Little Endian +# CPU(s): 128 +# On-line CPU(s) list: 0-127 +# Vendor ID: GenuineIntel +# Model name: Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz +# CPU family: 6 +# Model: 106 +# Thread(s) per core: 2 +# Core(s) per socket: 32 +# Socket(s): 2 +# Stepping: 6 +# BogoMIPS: 5799.78 +# Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr +# 
sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl +# xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq monitor ssse3 fma cx16 +# pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand +# hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced +# fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap +# avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1 +# xsaves wbnoinvd ida arat avx512vbmi pku ospke avx512_vbmi2 gfni vaes vpclmulqdq +# avx512_vnni avx512_bitalg tme avx512_vpopcntdq rdpid md_clear flush_l1d arch_capabilities +# Virtualization features: +# Hypervisor vendor: KVM +# Virtualization type: full +# Caches (sum of all): +# L1d: 3 MiB (64 instances) +# L1i: 2 MiB (64 instances) +# L2: 80 MiB (64 instances) +# L3: 108 MiB (2 instances) +# NUMA: +# NUMA node(s): 2 +# NUMA node0 CPU(s): 0-31,64-95 +# NUMA node1 CPU(s): 32-63,96-127 +# Vulnerabilities: +# Itlb multihit: Not affected +# L1tf: Not affected +# Mds: Not affected +# Meltdown: Not affected +# Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown +# Retbleed: Not affected +# Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp +# Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization +# Spectre v2: Mitigation; Enhanced IBRS, IBPB conditional, RSB filling, PBRSB-eIBRS SW sequence +# Srbds: Not affected +# Tsx async abort: Not affected +# * win32 +# Architecture=9 +# CurrentClockSpeed=2900 +# DeviceID=CPU0 +# Family=179 +# L2CacheSize=40960 +# L2CacheSpeed= +# Manufacturer=GenuineIntel +# MaxClockSpeed=2900 +# Name=Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz +# ProcessorType=3 +# Revision=27142 +# +# Architecture=9 +# CurrentClockSpeed=2900 +# DeviceID=CPU1 +# Family=179 +# L2CacheSize=40960 +# L2CacheSpeed= +# Manufacturer=GenuineIntel +# MaxClockSpeed=2900 +# Name=Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz +# ProcessorType=3 +# Revision=27142 + +def get_cpu_info(run_lambda): + rc, out, err = 0, '', '' + if get_platform() == 'linux': + rc, out, err = run_lambda('lscpu') + elif get_platform() == 'win32': + rc, out, err = run_lambda('wmic cpu get Name,Manufacturer,Family,Architecture,ProcessorType,DeviceID, \ + CurrentClockSpeed,MaxClockSpeed,L2CacheSize,L2CacheSpeed,Revision /VALUE') + elif get_platform() == 'darwin': + rc, out, err = run_lambda("sysctl -n machdep.cpu.brand_string") + cpu_info = 'None' + if rc == 0: + cpu_info = out + else: + cpu_info = err + return cpu_info + + +def get_platform(): + if sys.platform.startswith('linux'): + return 'linux' + elif sys.platform.startswith('win32'): + return 'win32' + elif sys.platform.startswith('cygwin'): + return 'cygwin' + elif sys.platform.startswith('darwin'): + return 'darwin' + else: + return sys.platform + + +def get_mac_version(run_lambda): + return run_and_parse_first_match(run_lambda, 'sw_vers -productVersion', r'(.*)') + + +def get_windows_version(run_lambda): + system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows') + wmic_cmd = os.path.join(system_root, 'System32', 'Wbem', 'wmic') + findstr_cmd = os.path.join(system_root, 'System32', 'findstr') + return run_and_read_all(run_lambda, '{} os get Caption | {} /v Caption'.format(wmic_cmd, findstr_cmd)) + + +def get_lsb_version(run_lambda): + return run_and_parse_first_match(run_lambda, 'lsb_release -a', r'Description:\t(.*)') + + +def 
check_release_file(run_lambda): + return run_and_parse_first_match(run_lambda, 'cat /etc/*-release', + r'PRETTY_NAME="(.*)"') + + +def get_os(run_lambda): + from platform import machine + platform = get_platform() + + if platform == 'win32' or platform == 'cygwin': + return get_windows_version(run_lambda) + + if platform == 'darwin': + version = get_mac_version(run_lambda) + if version is None: + return None + return 'macOS {} ({})'.format(version, machine()) + + if platform == 'linux': + # Ubuntu/Debian based + desc = get_lsb_version(run_lambda) + if desc is not None: + return '{} ({})'.format(desc, machine()) + + # Try reading /etc/*-release + desc = check_release_file(run_lambda) + if desc is not None: + return '{} ({})'.format(desc, machine()) + + return '{} ({})'.format(platform, machine()) + + # Unknown platform + return platform + + +def get_python_platform(): + import platform + return platform.platform() + + +def get_libc_version(): + import platform + if get_platform() != 'linux': + return 'N/A' + return '-'.join(platform.libc_ver()) + + +def get_pip_packages(run_lambda, patterns=None): + """Return `pip list` output. Note: will also find conda-installed pytorch and numpy packages.""" + if patterns is None: + patterns = DEFAULT_PIP_PATTERNS + + # People generally have `pip` as `pip` or `pip3` + # But here it is invoked as `python -mpip` + def run_with_pip(pip): + out = run_and_read_all(run_lambda, pip + ["list", "--format=freeze"]) + return "\n".join( + line + for line in out.splitlines() + if any(name in line for name in patterns) + ) + + pip_version = 'pip3' if sys.version[0] == '3' else 'pip' + out = run_with_pip([sys.executable, '-mpip']) + + return pip_version, out + + +def get_cachingallocator_config(): + ca_config = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', '') + return ca_config + + +def get_cuda_module_loading_config(): + if TORCH_AVAILABLE and torch.cuda.is_available(): + torch.cuda.init() + config = os.environ.get('CUDA_MODULE_LOADING', '') + return config + else: + return "N/A" + + +def is_xnnpack_available(): + if TORCH_AVAILABLE: + import torch.backends.xnnpack + return str(torch.backends.xnnpack.enabled) # type: ignore[attr-defined] + else: + return "N/A" + +def get_env_info(): + run_lambda = run + pip_version, pip_list_output = get_pip_packages(run_lambda) + + if TORCH_AVAILABLE: + version_str = torch.__version__ + debug_mode_str = str(torch.version.debug) + cuda_available_str = str(torch.cuda.is_available()) + cuda_version_str = torch.version.cuda + if not hasattr(torch.version, 'hip') or torch.version.hip is None: # cuda version + hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A' + else: # HIP version + def get_version_or_na(cfg, prefix): + _lst = [s.rsplit(None, 1)[-1] for s in cfg if prefix in s] + return _lst[0] if _lst else 'N/A' + + cfg = torch._C._show_config().split('\n') + hip_runtime_version = get_version_or_na(cfg, 'HIP Runtime') + miopen_runtime_version = get_version_or_na(cfg, 'MIOpen') + cuda_version_str = 'N/A' + hip_compiled_version = torch.version.hip + else: + version_str = debug_mode_str = cuda_available_str = cuda_version_str = 'N/A' + hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A' + + sys_version = sys.version.replace("\n", " ") + + conda_packages = get_conda_packages(run_lambda) + + return SystemEnv( + torch_version=version_str, + is_debug_build=debug_mode_str, + python_version='{} ({}-bit runtime)'.format(sys_version, sys.maxsize.bit_length() + 1), + 
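+        # Note: sys.maxsize.bit_length() + 1 evaluates to 64 on a 64-bit
+        # CPython build (sys.maxsize == 2**63 - 1) and to 32 on a 32-bit build.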
python_platform=get_python_platform(), + is_cuda_available=cuda_available_str, + cuda_compiled_version=cuda_version_str, + cuda_runtime_version=get_running_cuda_version(run_lambda), + cuda_module_loading=get_cuda_module_loading_config(), + nvidia_gpu_models=get_gpu_info(run_lambda), + nvidia_driver_version=get_nvidia_driver_version(run_lambda), + cudnn_version=get_cudnn_version(run_lambda), + hip_compiled_version=hip_compiled_version, + hip_runtime_version=hip_runtime_version, + miopen_runtime_version=miopen_runtime_version, + pip_version=pip_version, + pip_packages=pip_list_output, + conda_packages=conda_packages, + os=get_os(run_lambda), + libc_version=get_libc_version(), + gcc_version=get_gcc_version(run_lambda), + clang_version=get_clang_version(run_lambda), + cmake_version=get_cmake_version(run_lambda), + caching_allocator_config=get_cachingallocator_config(), + is_xnnpack_available=is_xnnpack_available(), + cpu_info=get_cpu_info(run_lambda), + ) + +env_info_fmt = """ +PyTorch version: {torch_version} +Is debug build: {is_debug_build} +CUDA used to build PyTorch: {cuda_compiled_version} +ROCM used to build PyTorch: {hip_compiled_version} + +OS: {os} +GCC version: {gcc_version} +Clang version: {clang_version} +CMake version: {cmake_version} +Libc version: {libc_version} + +Python version: {python_version} +Python platform: {python_platform} +Is CUDA available: {is_cuda_available} +CUDA runtime version: {cuda_runtime_version} +CUDA_MODULE_LOADING set to: {cuda_module_loading} +GPU models and configuration: {nvidia_gpu_models} +Nvidia driver version: {nvidia_driver_version} +cuDNN version: {cudnn_version} +HIP runtime version: {hip_runtime_version} +MIOpen runtime version: {miopen_runtime_version} +Is XNNPACK available: {is_xnnpack_available} + +CPU: +{cpu_info} + +Versions of relevant libraries: +{pip_packages} +{conda_packages} +""".strip() + + +def pretty_str(envinfo): + def replace_nones(dct, replacement='Could not collect'): + for key in dct.keys(): + if dct[key] is not None: + continue + dct[key] = replacement + return dct + + def replace_bools(dct, true='Yes', false='No'): + for key in dct.keys(): + if dct[key] is True: + dct[key] = true + elif dct[key] is False: + dct[key] = false + return dct + + def prepend(text, tag='[prepend]'): + lines = text.split('\n') + updated_lines = [tag + line for line in lines] + return '\n'.join(updated_lines) + + def replace_if_empty(text, replacement='No relevant packages'): + if text is not None and len(text) == 0: + return replacement + return text + + def maybe_start_on_next_line(string): + # If `string` is multiline, prepend a \n to it. 
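+        # e.g. (illustrative) "GPU 0: A100\nGPU 1: A100" becomes
+        # "\nGPU 0: A100\nGPU 1: A100\n" so the listing starts on its own line
+        # in the rendered report.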
+ if string is not None and len(string.split('\n')) > 1: + return '\n{}\n'.format(string) + return string + + mutable_dict = envinfo._asdict() + + # If nvidia_gpu_models is multiline, start on the next line + mutable_dict['nvidia_gpu_models'] = \ + maybe_start_on_next_line(envinfo.nvidia_gpu_models) + + # If the machine doesn't have CUDA, report some fields as 'No CUDA' + dynamic_cuda_fields = [ + 'cuda_runtime_version', + 'nvidia_gpu_models', + 'nvidia_driver_version', + ] + all_cuda_fields = dynamic_cuda_fields + ['cudnn_version'] + all_dynamic_cuda_fields_missing = all( + mutable_dict[field] is None for field in dynamic_cuda_fields) + if TORCH_AVAILABLE and not torch.cuda.is_available() and all_dynamic_cuda_fields_missing: + for field in all_cuda_fields: + mutable_dict[field] = 'No CUDA' + if envinfo.cuda_compiled_version is None: + mutable_dict['cuda_compiled_version'] = 'None' + + # Replace True with Yes, False with No + mutable_dict = replace_bools(mutable_dict) + + # Replace all None objects with 'Could not collect' + mutable_dict = replace_nones(mutable_dict) + + # If either of these are '', replace with 'No relevant packages' + mutable_dict['pip_packages'] = replace_if_empty(mutable_dict['pip_packages']) + mutable_dict['conda_packages'] = replace_if_empty(mutable_dict['conda_packages']) + + # Tag conda and pip packages with a prefix + # If they were previously None, they'll show up as ie '[conda] Could not collect' + if mutable_dict['pip_packages']: + mutable_dict['pip_packages'] = prepend(mutable_dict['pip_packages'], + '[{}] '.format(envinfo.pip_version)) + if mutable_dict['conda_packages']: + mutable_dict['conda_packages'] = prepend(mutable_dict['conda_packages'], + '[conda] ') + mutable_dict['cpu_info'] = envinfo.cpu_info + return env_info_fmt.format(**mutable_dict) + + +def get_pretty_env_info(): + return pretty_str(get_env_info()) + + +def main(): + print("Collecting environment information...") + output = get_pretty_env_info() + print(output) + + if TORCH_AVAILABLE and hasattr(torch, 'utils') and hasattr(torch.utils, '_crash_handler'): + minidump_dir = torch.utils._crash_handler.DEFAULT_MINIDUMP_DIR + if sys.platform == "linux" and os.path.exists(minidump_dir): + dumps = [os.path.join(minidump_dir, dump) for dump in os.listdir(minidump_dir)] + latest = max(dumps, key=os.path.getctime) + ctime = os.path.getctime(latest) + creation_time = datetime.datetime.fromtimestamp(ctime).strftime('%Y-%m-%d %H:%M:%S') + msg = "\n*** Detected a minidump at {} created on {}, ".format(latest, creation_time) + \ + "if this is related to your bug please include it when you file a report ***" + print(msg, file=sys.stderr) + + + +if __name__ == '__main__': + main() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/cpp_backtrace.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/cpp_backtrace.py new file mode 100644 index 0000000000000000000000000000000000000000..40dbbb5b913af0ca725ef5c6cab9fee1a3ffec70 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/cpp_backtrace.py @@ -0,0 +1,11 @@ +from torch._C import _get_cpp_backtrace + +def get_cpp_backtrace(frames_to_skip=0, maximum_number_of_frames=64) -> str: + r""" + Return a string containing the C++ stack trace of the current thread. 
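+    (For example, ``print(torch.utils.cpp_backtrace.get_cpp_backtrace())``
+    prints the native frames of the calling thread.)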
+ + Args: + frames_to_skip (int): the number of frames to skip from the top of the stack + maximum_number_of_frames (int): the maximum number of frames to return + """ + return _get_cpp_backtrace(frames_to_skip, maximum_number_of_frames) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/cpp_extension.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/cpp_extension.py new file mode 100644 index 0000000000000000000000000000000000000000..d3d264d61729586aa421a18388a684eb118a0df2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/cpp_extension.py @@ -0,0 +1,2428 @@ +import copy +import glob +import importlib +import importlib.abc +import os +import re +import shlex +import shutil +import setuptools +import subprocess +import sys +import sysconfig +import warnings +import collections +from pathlib import Path +import errno + +import torch +import torch._appdirs +from .file_baton import FileBaton +from ._cpp_extension_versioner import ExtensionVersioner +from .hipify import hipify_python +from .hipify.hipify_python import GeneratedFileCleaner +from typing import Dict, List, Optional, Union, Tuple +from torch.torch_version import TorchVersion, Version + +from setuptools.command.build_ext import build_ext + +IS_WINDOWS = sys.platform == 'win32' +IS_MACOS = sys.platform.startswith('darwin') +IS_LINUX = sys.platform.startswith('linux') +LIB_EXT = '.pyd' if IS_WINDOWS else '.so' +EXEC_EXT = '.exe' if IS_WINDOWS else '' +CLIB_PREFIX = '' if IS_WINDOWS else 'lib' +CLIB_EXT = '.dll' if IS_WINDOWS else '.so' +SHARED_FLAG = '/DLL' if IS_WINDOWS else '-shared' + +_HERE = os.path.abspath(__file__) +_TORCH_PATH = os.path.dirname(os.path.dirname(_HERE)) +TORCH_LIB_PATH = os.path.join(_TORCH_PATH, 'lib') + + +SUBPROCESS_DECODE_ARGS = ('oem',) if IS_WINDOWS else () +MINIMUM_GCC_VERSION = (5, 0, 0) +MINIMUM_MSVC_VERSION = (19, 0, 24215) + +VersionRange = Tuple[Tuple[int, ...], Tuple[int, ...]] +VersionMap = Dict[str, VersionRange] +# The following values were taken from the following GitHub gist that +# summarizes the minimum valid major versions of g++/clang++ for each supported +# CUDA version: https://gist.github.com/ax3l/9489132 +# Or from include/crt/host_config.h in the CUDA SDK +# The second value is the exclusive(!) upper bound, i.e. 
min <= version < max +CUDA_GCC_VERSIONS: VersionMap = { + '11.0': (MINIMUM_GCC_VERSION, (10, 0)), + '11.1': (MINIMUM_GCC_VERSION, (11, 0)), + '11.2': (MINIMUM_GCC_VERSION, (11, 0)), + '11.3': (MINIMUM_GCC_VERSION, (11, 0)), + '11.4': ((6, 0, 0), (12, 0)), + '11.5': ((6, 0, 0), (12, 0)), + '11.6': ((6, 0, 0), (12, 0)), + '11.7': ((6, 0, 0), (12, 0)), +} + +MINIMUM_CLANG_VERSION = (3, 3, 0) +CUDA_CLANG_VERSIONS: VersionMap = { + '11.1': (MINIMUM_CLANG_VERSION, (11, 0)), + '11.2': (MINIMUM_CLANG_VERSION, (12, 0)), + '11.3': (MINIMUM_CLANG_VERSION, (12, 0)), + '11.4': (MINIMUM_CLANG_VERSION, (13, 0)), + '11.5': (MINIMUM_CLANG_VERSION, (13, 0)), + '11.6': (MINIMUM_CLANG_VERSION, (14, 0)), + '11.7': (MINIMUM_CLANG_VERSION, (14, 0)), +} + +__all__ = ["get_default_build_root", "check_compiler_ok_for_platform", "get_compiler_abi_compatibility_and_version", "BuildExtension", + "CppExtension", "CUDAExtension", "include_paths", "library_paths", "load", "load_inline", "is_ninja_available", + "verify_ninja_availability", "remove_extension_h_precompiler_headers", "get_cxx_compiler", "check_compiler_is_gcc"] +# Taken directly from python stdlib < 3.9 +# See https://github.com/pytorch/pytorch/issues/48617 +def _nt_quote_args(args: Optional[List[str]]) -> List[str]: + """Quote command-line arguments for DOS/Windows conventions. + + Just wraps every argument which contains blanks in double quotes, and + returns a new argument list. + """ + # Cover None-type + if not args: + return [] + return [f'"{arg}"' if ' ' in arg else arg for arg in args] + +def _find_cuda_home() -> Optional[str]: + """Find the CUDA install path.""" + # Guess #1 + cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH') + if cuda_home is None: + # Guess #2 + try: + which = 'where' if IS_WINDOWS else 'which' + with open(os.devnull, 'w') as devnull: + nvcc = subprocess.check_output([which, 'nvcc'], + stderr=devnull).decode(*SUBPROCESS_DECODE_ARGS).rstrip('\r\n') + cuda_home = os.path.dirname(os.path.dirname(nvcc)) + except Exception: + # Guess #3 + if IS_WINDOWS: + cuda_homes = glob.glob( + 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v*.*') + if len(cuda_homes) == 0: + cuda_home = '' + else: + cuda_home = cuda_homes[0] + else: + cuda_home = '/usr/local/cuda' + if not os.path.exists(cuda_home): + cuda_home = None + if cuda_home and not torch.cuda.is_available(): + print(f"No CUDA runtime is found, using CUDA_HOME='{cuda_home}'", + file=sys.stderr) + return cuda_home + +def _find_rocm_home() -> Optional[str]: + """Find the ROCm install path.""" + # Guess #1 + rocm_home = os.environ.get('ROCM_HOME') or os.environ.get('ROCM_PATH') + if rocm_home is None: + # Guess #2 + hipcc_path = shutil.which('hipcc') + if hipcc_path is not None: + rocm_home = os.path.dirname(os.path.dirname( + os.path.realpath(hipcc_path))) + # can be either /hip/bin/hipcc or /bin/hipcc + if os.path.basename(rocm_home) == 'hip': + rocm_home = os.path.dirname(rocm_home) + else: + # Guess #3 + fallback_path = '/opt/rocm' + if os.path.exists(fallback_path): + rocm_home = fallback_path + if rocm_home and torch.version.hip is None: + print(f"No ROCm runtime is found, using ROCM_HOME='{rocm_home}'", + file=sys.stderr) + return rocm_home + + +def _join_rocm_home(*paths) -> str: + """ + Join paths with ROCM_HOME, or raises an error if it ROCM_HOME is not set. + + This is basically a lazy way of raising an error for missing $ROCM_HOME + only once we need to get any ROCm-specific path. 
+ """ + if ROCM_HOME is None: + raise OSError('ROCM_HOME environment variable is not set. ' + 'Please set it to your ROCm install root.') + elif IS_WINDOWS: + raise OSError('Building PyTorch extensions using ' + 'ROCm and Windows is not supported.') + return os.path.join(ROCM_HOME, *paths) + + +ABI_INCOMPATIBILITY_WARNING = ''' + + !! WARNING !! + +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +Your compiler ({}) may be ABI-incompatible with PyTorch! +Please use a compiler that is ABI-compatible with GCC 5.0 and above. +See https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html. + +See https://gist.github.com/goldsborough/d466f43e8ffc948ff92de7486c5216d6 +for instructions on how to install GCC 5 or higher. +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + !! WARNING !! +''' +WRONG_COMPILER_WARNING = ''' + + !! WARNING !! + +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +Your compiler ({user_compiler}) is not compatible with the compiler Pytorch was +built with for this platform, which is {pytorch_compiler} on {platform}. Please +use {pytorch_compiler} to to compile your extension. Alternatively, you may +compile PyTorch from source using {user_compiler}, and then you can also use +{user_compiler} to compile your extension. + +See https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md for help +with compiling PyTorch from source. +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + !! WARNING !! +''' +CUDA_MISMATCH_MESSAGE = ''' +The detected CUDA version ({0}) mismatches the version that was used to compile +PyTorch ({1}). Please make sure to use the same CUDA versions. +''' +CUDA_MISMATCH_WARN = "The detected CUDA version ({0}) has a minor version mismatch with the version that was used to compile PyTorch ({1}). Most likely this shouldn't be a problem." +CUDA_NOT_FOUND_MESSAGE = ''' +CUDA was not found on the system, please set the CUDA_HOME or the CUDA_PATH +environment variable or add NVCC to your system PATH. The extension compilation will fail. +''' +ROCM_HOME = _find_rocm_home() +HIP_HOME = _join_rocm_home('hip') if ROCM_HOME else None +IS_HIP_EXTENSION = True if ((ROCM_HOME is not None) and (torch.version.hip is not None)) else False +ROCM_VERSION = None +if torch.version.hip is not None: + ROCM_VERSION = tuple(int(v) for v in torch.version.hip.split('.')[:2]) + +CUDA_HOME = _find_cuda_home() if torch.cuda._is_compiled() else None +CUDNN_HOME = os.environ.get('CUDNN_HOME') or os.environ.get('CUDNN_PATH') +# PyTorch releases have the version pattern major.minor.patch, whereas when +# PyTorch is built from source, we append the git commit hash, which gives +# it the below pattern. 
+BUILT_FROM_SOURCE_VERSION_PATTERN = re.compile(r'\d+\.\d+\.\d+\w+\+\w+') + +COMMON_MSVC_FLAGS = ['/MD', '/wd4819', '/wd4251', '/wd4244', '/wd4267', '/wd4275', '/wd4018', '/wd4190', '/wd4624', '/wd4067', '/wd4068', '/EHsc'] + +MSVC_IGNORE_CUDAFE_WARNINGS = [ + 'base_class_has_different_dll_interface', + 'field_without_dll_interface', + 'dll_interface_conflict_none_assumed', + 'dll_interface_conflict_dllexport_assumed' +] + +COMMON_NVCC_FLAGS = [ + '-D__CUDA_NO_HALF_OPERATORS__', + '-D__CUDA_NO_HALF_CONVERSIONS__', + '-D__CUDA_NO_BFLOAT16_CONVERSIONS__', + '-D__CUDA_NO_HALF2_OPERATORS__', + '--expt-relaxed-constexpr' +] + +COMMON_HIP_FLAGS = [ + '-fPIC', + '-D__HIP_PLATFORM_AMD__=1', + '-DUSE_ROCM=1', +] + +if ROCM_VERSION is not None and ROCM_VERSION >= (6, 0): + COMMON_HIP_FLAGS.append('-DHIPBLAS_V2') + +COMMON_HIPCC_FLAGS = [ + '-DCUDA_HAS_FP16=1', + '-D__HIP_NO_HALF_OPERATORS__=1', + '-D__HIP_NO_HALF_CONVERSIONS__=1', +] + +JIT_EXTENSION_VERSIONER = ExtensionVersioner() + +PLAT_TO_VCVARS = { + 'win32' : 'x86', + 'win-amd64' : 'x86_amd64', +} + +def get_cxx_compiler(): + if IS_WINDOWS: + compiler = os.environ.get('CXX', 'cl') + else: + compiler = os.environ.get('CXX', 'c++') + return compiler + +def _is_binary_build() -> bool: + return not BUILT_FROM_SOURCE_VERSION_PATTERN.match(torch.version.__version__) + + +def _accepted_compilers_for_platform() -> List[str]: + # gnu-c++ and gnu-cc are the conda gcc compilers + return ['clang++', 'clang'] if IS_MACOS else ['g++', 'gcc', 'gnu-c++', 'gnu-cc', 'clang++', 'clang'] + +def _maybe_write(filename, new_content): + r''' + Equivalent to writing the content into the file but will not touch the file + if it already had the right content (to avoid triggering recompile). + ''' + if os.path.exists(filename): + with open(filename) as f: + content = f.read() + + if content == new_content: + # The file already contains the right thing! + return + + with open(filename, 'w') as source_file: + source_file.write(new_content) + +def get_default_build_root() -> str: + """ + Return the path to the root folder under which extensions will built. + + For each extension module built, there will be one folder underneath the + folder returned by this function. For example, if ``p`` is the path + returned by this function and ``ext`` the name of an extension, the build + folder for the extension will be ``p/ext``. + + This directory is **user-specific** so that multiple users on the same + machine won't meet permission issues. + """ + return os.path.realpath(torch._appdirs.user_cache_dir(appname='torch_extensions')) + + +def check_compiler_ok_for_platform(compiler: str) -> bool: + """ + Verify that the compiler is the expected one for the current platform. + + Args: + compiler (str): The compiler executable to check. + + Returns: + True if the compiler is gcc/g++ on Linux or clang/clang++ on macOS, + and always True for Windows. + """ + if IS_WINDOWS: + return True + which = subprocess.check_output(['which', compiler], stderr=subprocess.STDOUT) + # Use os.path.realpath to resolve any symlinks, in particular from 'c++' to e.g. 'g++'. 
+ compiler_path = os.path.realpath(which.decode(*SUBPROCESS_DECODE_ARGS).strip()) + # Check the compiler name + if any(name in compiler_path for name in _accepted_compilers_for_platform()): + return True + # If compiler wrapper is used try to infer the actual compiler by invoking it with -v flag + env = os.environ.copy() + env['LC_ALL'] = 'C' # Don't localize output + version_string = subprocess.check_output([compiler, '-v'], stderr=subprocess.STDOUT, env=env).decode(*SUBPROCESS_DECODE_ARGS) + if IS_LINUX: + # Check for 'gcc' or 'g++' for sccache wrapper + pattern = re.compile("^COLLECT_GCC=(.*)$", re.MULTILINE) + results = re.findall(pattern, version_string) + if len(results) != 1: + # Clang is also a supported compiler on Linux + # Though on Ubuntu it's sometimes called "Ubuntu clang version" + return 'clang version' in version_string + compiler_path = os.path.realpath(results[0].strip()) + # On RHEL/CentOS c++ is a gcc compiler wrapper + if os.path.basename(compiler_path) == 'c++' and 'gcc version' in version_string: + return True + return any(name in compiler_path for name in _accepted_compilers_for_platform()) + if IS_MACOS: + # Check for 'clang' or 'clang++' + return version_string.startswith("Apple clang") + return False + + +def get_compiler_abi_compatibility_and_version(compiler) -> Tuple[bool, TorchVersion]: + """ + Determine if the given compiler is ABI-compatible with PyTorch alongside its version. + + Args: + compiler (str): The compiler executable name to check (e.g. ``g++``). + Must be executable in a shell process. + + Returns: + A tuple that contains a boolean that defines if the compiler is (likely) ABI-incompatible with PyTorch, + followed by a `TorchVersion` string that contains the compiler version separated by dots. + """ + if not _is_binary_build(): + return (True, TorchVersion('0.0.0')) + if os.environ.get('TORCH_DONT_CHECK_COMPILER_ABI') in ['ON', '1', 'YES', 'TRUE', 'Y']: + return (True, TorchVersion('0.0.0')) + + # First check if the compiler is one of the expected ones for the particular platform. + if not check_compiler_ok_for_platform(compiler): + warnings.warn(WRONG_COMPILER_WARNING.format( + user_compiler=compiler, + pytorch_compiler=_accepted_compilers_for_platform()[0], + platform=sys.platform)) + return (False, TorchVersion('0.0.0')) + + if IS_MACOS: + # There is no particular minimum version we need for clang, so we're good here. 
+ return (True, TorchVersion('0.0.0')) + try: + if IS_LINUX: + minimum_required_version = MINIMUM_GCC_VERSION + versionstr = subprocess.check_output([compiler, '-dumpfullversion', '-dumpversion']) + version = versionstr.decode(*SUBPROCESS_DECODE_ARGS).strip().split('.') + else: + minimum_required_version = MINIMUM_MSVC_VERSION + compiler_info = subprocess.check_output(compiler, stderr=subprocess.STDOUT) + match = re.search(r'(\d+)\.(\d+)\.(\d+)', compiler_info.decode(*SUBPROCESS_DECODE_ARGS).strip()) + version = ['0', '0', '0'] if match is None else list(match.groups()) + except Exception: + _, error, _ = sys.exc_info() + warnings.warn(f'Error checking compiler version for {compiler}: {error}') + return (False, TorchVersion('0.0.0')) + + if tuple(map(int, version)) >= minimum_required_version: + return (True, TorchVersion('.'.join(version))) + + compiler = f'{compiler} {".".join(version)}' + warnings.warn(ABI_INCOMPATIBILITY_WARNING.format(compiler)) + + return (False, TorchVersion('.'.join(version))) + + +def _check_cuda_version(compiler_name: str, compiler_version: TorchVersion) -> None: + if not CUDA_HOME: + raise RuntimeError(CUDA_NOT_FOUND_MESSAGE) + + nvcc = os.path.join(CUDA_HOME, 'bin', 'nvcc') + cuda_version_str = subprocess.check_output([nvcc, '--version']).strip().decode(*SUBPROCESS_DECODE_ARGS) + cuda_version = re.search(r'release (\d+[.]\d+)', cuda_version_str) + if cuda_version is None: + return + + cuda_str_version = cuda_version.group(1) + cuda_ver = Version(cuda_str_version) + if torch.version.cuda is None: + return + + torch_cuda_version = Version(torch.version.cuda) + if cuda_ver != torch_cuda_version: + # major/minor attributes are only available in setuptools>=49.4.0 + if getattr(cuda_ver, "major", None) is None: + raise ValueError("setuptools>=49.4.0 is required") + if cuda_ver.major != torch_cuda_version.major: + raise RuntimeError(CUDA_MISMATCH_MESSAGE.format(cuda_str_version, torch.version.cuda)) + warnings.warn(CUDA_MISMATCH_WARN.format(cuda_str_version, torch.version.cuda)) + + if not (sys.platform.startswith('linux') and + os.environ.get('TORCH_DONT_CHECK_COMPILER_ABI') not in ['ON', '1', 'YES', 'TRUE', 'Y'] and + _is_binary_build()): + return + + cuda_compiler_bounds: VersionMap = CUDA_CLANG_VERSIONS if compiler_name.startswith('clang') else CUDA_GCC_VERSIONS + + if cuda_str_version not in cuda_compiler_bounds: + warnings.warn(f'There are no {compiler_name} version bounds defined for CUDA version {cuda_str_version}') + else: + min_compiler_version, max_excl_compiler_version = cuda_compiler_bounds[cuda_str_version] + # Special case for 11.4.0, which has lower compiler bounds than 11.4.1 + if "V11.4.48" in cuda_version_str and cuda_compiler_bounds == CUDA_GCC_VERSIONS: + max_excl_compiler_version = (11, 0) + min_compiler_version_str = '.'.join(map(str, min_compiler_version)) + max_excl_compiler_version_str = '.'.join(map(str, max_excl_compiler_version)) + + version_bound_str = f'>={min_compiler_version_str}, <{max_excl_compiler_version_str}' + + if compiler_version < TorchVersion(min_compiler_version_str): + raise RuntimeError( + f'The current installed version of {compiler_name} ({compiler_version}) is less ' + f'than the minimum required version by CUDA {cuda_str_version} ({min_compiler_version_str}). ' + f'Please make sure to use an adequate version of {compiler_name} ({version_bound_str}).' 
+ ) + if compiler_version >= TorchVersion(max_excl_compiler_version_str): + raise RuntimeError( + f'The current installed version of {compiler_name} ({compiler_version}) is greater ' + f'than the maximum required version by CUDA {cuda_str_version}. ' + f'Please make sure to use an adequate version of {compiler_name} ({version_bound_str}).' + ) + + +class BuildExtension(build_ext): + """ + A custom :mod:`setuptools` build extension . + + This :class:`setuptools.build_ext` subclass takes care of passing the + minimum required compiler flags (e.g. ``-std=c++17``) as well as mixed + C++/CUDA compilation (and support for CUDA files in general). + + When using :class:`BuildExtension`, it is allowed to supply a dictionary + for ``extra_compile_args`` (rather than the usual list) that maps from + languages (``cxx`` or ``nvcc``) to a list of additional compiler flags to + supply to the compiler. This makes it possible to supply different flags to + the C++ and CUDA compiler during mixed compilation. + + ``use_ninja`` (bool): If ``use_ninja`` is ``True`` (default), then we + attempt to build using the Ninja backend. Ninja greatly speeds up + compilation compared to the standard ``setuptools.build_ext``. + Fallbacks to the standard distutils backend if Ninja is not available. + + .. note:: + By default, the Ninja backend uses #CPUS + 2 workers to build the + extension. This may use up too many resources on some systems. One + can control the number of workers by setting the `MAX_JOBS` environment + variable to a non-negative number. + """ + + @classmethod + def with_options(cls, **options): + """Return a subclass with alternative constructor that extends any original keyword arguments to the original constructor with the given options.""" + class cls_with_options(cls): # type: ignore[misc, valid-type] + def __init__(self, *args, **kwargs): + kwargs.update(options) + super().__init__(*args, **kwargs) + + return cls_with_options + + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self.no_python_abi_suffix = kwargs.get("no_python_abi_suffix", False) + + self.use_ninja = kwargs.get('use_ninja', True) + if self.use_ninja: + # Test if we can use ninja. Fallback otherwise. + msg = ('Attempted to use ninja as the BuildExtension backend but ' + '{}. Falling back to using the slow distutils backend.') + if not is_ninja_available(): + warnings.warn(msg.format('we could not find ninja.')) + self.use_ninja = False + + def finalize_options(self) -> None: + super().finalize_options() + if self.use_ninja: + self.force = True + + def build_extensions(self) -> None: + compiler_name, compiler_version = self._check_abi() + + cuda_ext = False + extension_iter = iter(self.extensions) + extension = next(extension_iter, None) + while not cuda_ext and extension: + for source in extension.sources: + _, ext = os.path.splitext(source) + if ext == '.cu': + cuda_ext = True + break + extension = next(extension_iter, None) + + if cuda_ext and not IS_HIP_EXTENSION: + _check_cuda_version(compiler_name, compiler_version) + + for extension in self.extensions: + # Ensure at least an empty list of flags for 'cxx' and 'nvcc' when + # extra_compile_args is a dict. Otherwise, default torch flags do + # not get passed. Necessary when only one of 'cxx' and 'nvcc' is + # passed to extra_compile_args in CUDAExtension, i.e. 
+ # CUDAExtension(..., extra_compile_args={'cxx': [...]}) + # or + # CUDAExtension(..., extra_compile_args={'nvcc': [...]}) + if isinstance(extension.extra_compile_args, dict): + for ext in ['cxx', 'nvcc']: + if ext not in extension.extra_compile_args: + extension.extra_compile_args[ext] = [] + + self._add_compile_flag(extension, '-DTORCH_API_INCLUDE_EXTENSION_H') + # See note [Pybind11 ABI constants] + for name in ["COMPILER_TYPE", "STDLIB", "BUILD_ABI"]: + val = getattr(torch._C, f"_PYBIND11_{name}") + if val is not None and not IS_WINDOWS: + self._add_compile_flag(extension, f'-DPYBIND11_{name}="{val}"') + self._define_torch_extension_name(extension) + self._add_gnu_cpp_abi_flag(extension) + + if 'nvcc_dlink' in extension.extra_compile_args: + assert self.use_ninja, f"With dlink=True, ninja is required to build cuda extension {extension.name}." + + # Register .cu, .cuh, .hip, and .mm as valid source extensions. + self.compiler.src_extensions += ['.cu', '.cuh', '.hip'] + if torch.backends.mps.is_built(): + self.compiler.src_extensions += ['.mm'] + # Save the original _compile method for later. + if self.compiler.compiler_type == 'msvc': + self.compiler._cpp_extensions += ['.cu', '.cuh'] + original_compile = self.compiler.compile + original_spawn = self.compiler.spawn + else: + original_compile = self.compiler._compile + + def append_std17_if_no_std_present(cflags) -> None: + # NVCC does not allow multiple -std to be passed, so we avoid + # overriding the option if the user explicitly passed it. + cpp_format_prefix = '/{}:' if self.compiler.compiler_type == 'msvc' else '-{}=' + cpp_flag_prefix = cpp_format_prefix.format('std') + cpp_flag = cpp_flag_prefix + 'c++17' + if not any(flag.startswith(cpp_flag_prefix) for flag in cflags): + cflags.append(cpp_flag) + + def unix_cuda_flags(cflags): + cflags = (COMMON_NVCC_FLAGS + + ['--compiler-options', "'-fPIC'"] + + cflags + _get_cuda_arch_flags(cflags)) + + # NVCC does not allow multiple -ccbin/--compiler-bindir to be passed, so we avoid + # overriding the option if the user explicitly passed it. + _ccbin = os.getenv("CC") + if ( + _ccbin is not None + and not any(flag.startswith(('-ccbin', '--compiler-bindir')) for flag in cflags) + ): + cflags.extend(['-ccbin', _ccbin]) + + return cflags + + def convert_to_absolute_paths_inplace(paths): + # Helper function. See Note [Absolute include_dirs] + if paths is not None: + for i in range(len(paths)): + if not os.path.isabs(paths[i]): + paths[i] = os.path.abspath(paths[i]) + + def unix_wrap_single_compile(obj, src, ext, cc_args, extra_postargs, pp_opts) -> None: + # Copy before we make any modifications. + cflags = copy.deepcopy(extra_postargs) + try: + original_compiler = self.compiler.compiler_so + if _is_cuda_file(src): + nvcc = [_join_rocm_home('bin', 'hipcc') if IS_HIP_EXTENSION else _join_cuda_home('bin', 'nvcc')] + self.compiler.set_executable('compiler_so', nvcc) + if isinstance(cflags, dict): + cflags = cflags['nvcc'] + if IS_HIP_EXTENSION: + cflags = COMMON_HIPCC_FLAGS + cflags + _get_rocm_arch_flags(cflags) + else: + cflags = unix_cuda_flags(cflags) + elif isinstance(cflags, dict): + cflags = cflags['cxx'] + if IS_HIP_EXTENSION: + cflags = COMMON_HIP_FLAGS + cflags + append_std17_if_no_std_present(cflags) + + original_compile(obj, src, ext, cc_args, cflags, pp_opts) + finally: + # Put the original compiler back in place. 
+ self.compiler.set_executable('compiler_so', original_compiler) + + def unix_wrap_ninja_compile(sources, + output_dir=None, + macros=None, + include_dirs=None, + debug=0, + extra_preargs=None, + extra_postargs=None, + depends=None): + r"""Compiles sources by outputting a ninja file and running it.""" + # NB: I copied some lines from self.compiler (which is an instance + # of distutils.UnixCCompiler). See the following link. + # https://github.com/python/cpython/blob/f03a8f8d5001963ad5b5b28dbd95497e9cc15596/Lib/distutils/ccompiler.py#L564-L567 + # This can be fragile, but a lot of other repos also do this + # (see https://github.com/search?q=_setup_compile&type=Code) + # so it is probably OK; we'll also get CI signal if/when + # we update our python version (which is when distutils can be + # upgraded) + + # Use absolute path for output_dir so that the object file paths + # (`objects`) get generated with absolute paths. + output_dir = os.path.abspath(output_dir) + + # See Note [Absolute include_dirs] + convert_to_absolute_paths_inplace(self.compiler.include_dirs) + + _, objects, extra_postargs, pp_opts, _ = \ + self.compiler._setup_compile(output_dir, macros, + include_dirs, sources, + depends, extra_postargs) + common_cflags = self.compiler._get_cc_args(pp_opts, debug, extra_preargs) + extra_cc_cflags = self.compiler.compiler_so[1:] + with_cuda = any(map(_is_cuda_file, sources)) + + # extra_postargs can be either: + # - a dict mapping cxx/nvcc to extra flags + # - a list of extra flags. + if isinstance(extra_postargs, dict): + post_cflags = extra_postargs['cxx'] + else: + post_cflags = list(extra_postargs) + if IS_HIP_EXTENSION: + post_cflags = COMMON_HIP_FLAGS + post_cflags + append_std17_if_no_std_present(post_cflags) + + cuda_post_cflags = None + cuda_cflags = None + if with_cuda: + cuda_cflags = common_cflags + if isinstance(extra_postargs, dict): + cuda_post_cflags = extra_postargs['nvcc'] + else: + cuda_post_cflags = list(extra_postargs) + if IS_HIP_EXTENSION: + cuda_post_cflags = cuda_post_cflags + _get_rocm_arch_flags(cuda_post_cflags) + cuda_post_cflags = COMMON_HIP_FLAGS + COMMON_HIPCC_FLAGS + cuda_post_cflags + else: + cuda_post_cflags = unix_cuda_flags(cuda_post_cflags) + append_std17_if_no_std_present(cuda_post_cflags) + cuda_cflags = [shlex.quote(f) for f in cuda_cflags] + cuda_post_cflags = [shlex.quote(f) for f in cuda_post_cflags] + + if isinstance(extra_postargs, dict) and 'nvcc_dlink' in extra_postargs: + cuda_dlink_post_cflags = unix_cuda_flags(extra_postargs['nvcc_dlink']) + else: + cuda_dlink_post_cflags = None + _write_ninja_file_and_compile_objects( + sources=sources, + objects=objects, + cflags=[shlex.quote(f) for f in extra_cc_cflags + common_cflags], + post_cflags=[shlex.quote(f) for f in post_cflags], + cuda_cflags=cuda_cflags, + cuda_post_cflags=cuda_post_cflags, + cuda_dlink_post_cflags=cuda_dlink_post_cflags, + build_directory=output_dir, + verbose=True, + with_cuda=with_cuda) + + # Return *all* object filenames, not just the ones we just built. 
+ return objects + + def win_cuda_flags(cflags): + return (COMMON_NVCC_FLAGS + + cflags + _get_cuda_arch_flags(cflags)) + + def win_wrap_single_compile(sources, + output_dir=None, + macros=None, + include_dirs=None, + debug=0, + extra_preargs=None, + extra_postargs=None, + depends=None): + + self.cflags = copy.deepcopy(extra_postargs) + extra_postargs = None + + def spawn(cmd): + # Using regex to match src, obj and include files + src_regex = re.compile('/T(p|c)(.*)') + src_list = [ + m.group(2) for m in (src_regex.match(elem) for elem in cmd) + if m + ] + + obj_regex = re.compile('/Fo(.*)') + obj_list = [ + m.group(1) for m in (obj_regex.match(elem) for elem in cmd) + if m + ] + + include_regex = re.compile(r'((\-|\/)I.*)') + include_list = [ + m.group(1) + for m in (include_regex.match(elem) for elem in cmd) if m + ] + + if len(src_list) >= 1 and len(obj_list) >= 1: + src = src_list[0] + obj = obj_list[0] + if _is_cuda_file(src): + nvcc = _join_cuda_home('bin', 'nvcc') + if isinstance(self.cflags, dict): + cflags = self.cflags['nvcc'] + elif isinstance(self.cflags, list): + cflags = self.cflags + else: + cflags = [] + + cflags = win_cuda_flags(cflags) + ['-std=c++17', '--use-local-env'] + for flag in COMMON_MSVC_FLAGS: + cflags = ['-Xcompiler', flag] + cflags + for ignore_warning in MSVC_IGNORE_CUDAFE_WARNINGS: + cflags = ['-Xcudafe', '--diag_suppress=' + ignore_warning] + cflags + cmd = [nvcc, '-c', src, '-o', obj] + include_list + cflags + elif isinstance(self.cflags, dict): + cflags = COMMON_MSVC_FLAGS + self.cflags['cxx'] + append_std17_if_no_std_present(cflags) + cmd += cflags + elif isinstance(self.cflags, list): + cflags = COMMON_MSVC_FLAGS + self.cflags + append_std17_if_no_std_present(cflags) + cmd += cflags + + return original_spawn(cmd) + + try: + self.compiler.spawn = spawn + return original_compile(sources, output_dir, macros, + include_dirs, debug, extra_preargs, + extra_postargs, depends) + finally: + self.compiler.spawn = original_spawn + + def win_wrap_ninja_compile(sources, + output_dir=None, + macros=None, + include_dirs=None, + debug=0, + extra_preargs=None, + extra_postargs=None, + depends=None): + + if not self.compiler.initialized: + self.compiler.initialize() + output_dir = os.path.abspath(output_dir) + + # Note [Absolute include_dirs] + # Convert relative path in self.compiler.include_dirs to absolute path if any, + # For ninja build, the build location is not local, the build happens + # in a in script created build folder, relative path lost their correctness. + # To be consistent with jit extension, we allow user to enter relative include_dirs + # in setuptools.setup, and we convert the relative path to absolute path here + convert_to_absolute_paths_inplace(self.compiler.include_dirs) + + _, objects, extra_postargs, pp_opts, _ = \ + self.compiler._setup_compile(output_dir, macros, + include_dirs, sources, + depends, extra_postargs) + common_cflags = extra_preargs or [] + cflags = [] + if debug: + cflags.extend(self.compiler.compile_options_debug) + else: + cflags.extend(self.compiler.compile_options) + common_cflags.extend(COMMON_MSVC_FLAGS) + cflags = cflags + common_cflags + pp_opts + with_cuda = any(map(_is_cuda_file, sources)) + + # extra_postargs can be either: + # - a dict mapping cxx/nvcc to extra flags + # - a list of extra flags. 
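+            # For illustration only (hypothetical values, not defaults):
+            #   extra_postargs = {'cxx': ['/O2'], 'nvcc': ['-O3']}
+            #   extra_postargs = ['/O2']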
+ if isinstance(extra_postargs, dict): + post_cflags = extra_postargs['cxx'] + else: + post_cflags = list(extra_postargs) + append_std17_if_no_std_present(post_cflags) + + cuda_post_cflags = None + cuda_cflags = None + if with_cuda: + cuda_cflags = ['-std=c++17', '--use-local-env'] + for common_cflag in common_cflags: + cuda_cflags.append('-Xcompiler') + cuda_cflags.append(common_cflag) + for ignore_warning in MSVC_IGNORE_CUDAFE_WARNINGS: + cuda_cflags.append('-Xcudafe') + cuda_cflags.append('--diag_suppress=' + ignore_warning) + cuda_cflags.extend(pp_opts) + if isinstance(extra_postargs, dict): + cuda_post_cflags = extra_postargs['nvcc'] + else: + cuda_post_cflags = list(extra_postargs) + cuda_post_cflags = win_cuda_flags(cuda_post_cflags) + + cflags = _nt_quote_args(cflags) + post_cflags = _nt_quote_args(post_cflags) + if with_cuda: + cuda_cflags = _nt_quote_args(cuda_cflags) + cuda_post_cflags = _nt_quote_args(cuda_post_cflags) + if isinstance(extra_postargs, dict) and 'nvcc_dlink' in extra_postargs: + cuda_dlink_post_cflags = win_cuda_flags(extra_postargs['nvcc_dlink']) + else: + cuda_dlink_post_cflags = None + + _write_ninja_file_and_compile_objects( + sources=sources, + objects=objects, + cflags=cflags, + post_cflags=post_cflags, + cuda_cflags=cuda_cflags, + cuda_post_cflags=cuda_post_cflags, + cuda_dlink_post_cflags=cuda_dlink_post_cflags, + build_directory=output_dir, + verbose=True, + with_cuda=with_cuda) + + # Return *all* object filenames, not just the ones we just built. + return objects + + # Monkey-patch the _compile or compile method. + # https://github.com/python/cpython/blob/dc0284ee8f7a270b6005467f26d8e5773d76e959/Lib/distutils/ccompiler.py#L511 + if self.compiler.compiler_type == 'msvc': + if self.use_ninja: + self.compiler.compile = win_wrap_ninja_compile + else: + self.compiler.compile = win_wrap_single_compile + else: + if self.use_ninja: + self.compiler.compile = unix_wrap_ninja_compile + else: + self.compiler._compile = unix_wrap_single_compile + + build_ext.build_extensions(self) + + def get_ext_filename(self, ext_name): + # Get the original shared library name. For Python 3, this name will be + # suffixed with ".so", where will be something like + # cpython-37m-x86_64-linux-gnu. + ext_filename = super().get_ext_filename(ext_name) + # If `no_python_abi_suffix` is `True`, we omit the Python 3 ABI + # component. This makes building shared libraries with setuptools that + # aren't Python modules nicer. + if self.no_python_abi_suffix: + # The parts will be e.g. ["my_extension", "cpython-37m-x86_64-linux-gnu", "so"]. + ext_filename_parts = ext_filename.split('.') + # Omit the second to last element. + without_abi = ext_filename_parts[:-2] + ext_filename_parts[-1:] + ext_filename = '.'.join(without_abi) + return ext_filename + + def _check_abi(self) -> Tuple[str, TorchVersion]: + # On some platforms, like Windows, compiler_cxx is not available. + if hasattr(self.compiler, 'compiler_cxx'): + compiler = self.compiler.compiler_cxx[0] + else: + compiler = get_cxx_compiler() + _, version = get_compiler_abi_compatibility_and_version(compiler) + # Warn user if VC env is activated but `DISTUILS_USE_SDK` is not set. + if IS_WINDOWS and 'VSCMD_ARG_TGT_ARCH' in os.environ and 'DISTUTILS_USE_SDK' not in os.environ: + msg = ('It seems that the VC environment is activated but DISTUTILS_USE_SDK is not set.' + 'This may lead to multiple activations of the VC env.' 
+ 'Please set `DISTUTILS_USE_SDK=1` and try again.') + raise UserWarning(msg) + return compiler, version + + def _add_compile_flag(self, extension, flag): + extension.extra_compile_args = copy.deepcopy(extension.extra_compile_args) + if isinstance(extension.extra_compile_args, dict): + for args in extension.extra_compile_args.values(): + args.append(flag) + else: + extension.extra_compile_args.append(flag) + + def _define_torch_extension_name(self, extension): + # pybind11 doesn't support dots in the names + # so in order to support extensions in the packages + # like torch._C, we take the last part of the string + # as the library name + names = extension.name.split('.') + name = names[-1] + define = f'-DTORCH_EXTENSION_NAME={name}' + self._add_compile_flag(extension, define) + + def _add_gnu_cpp_abi_flag(self, extension): + # use the same CXX ABI as what PyTorch was compiled with + self._add_compile_flag(extension, '-D_GLIBCXX_USE_CXX11_ABI=' + str(int(torch._C._GLIBCXX_USE_CXX11_ABI))) + + +def CppExtension(name, sources, *args, **kwargs): + """ + Create a :class:`setuptools.Extension` for C++. + + Convenience method that creates a :class:`setuptools.Extension` with the + bare minimum (but often sufficient) arguments to build a C++ extension. + + All arguments are forwarded to the :class:`setuptools.Extension` + constructor. Full list arguments can be found at + https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#extension-api-reference + + Example: + >>> # xdoctest: +SKIP + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CPP_EXT) + >>> from setuptools import setup + >>> from torch.utils.cpp_extension import BuildExtension, CppExtension + >>> setup( + ... name='extension', + ... ext_modules=[ + ... CppExtension( + ... name='extension', + ... sources=['extension.cpp'], + ... extra_compile_args=['-g'], + ... extra_link_flags=['-Wl,--no-as-needed', '-lm']) + ... ], + ... cmdclass={ + ... 'build_ext': BuildExtension + ... }) + """ + include_dirs = kwargs.get('include_dirs', []) + include_dirs += include_paths() + kwargs['include_dirs'] = include_dirs + + library_dirs = kwargs.get('library_dirs', []) + library_dirs += library_paths() + kwargs['library_dirs'] = library_dirs + + libraries = kwargs.get('libraries', []) + libraries.append('c10') + libraries.append('torch') + libraries.append('torch_cpu') + libraries.append('torch_python') + kwargs['libraries'] = libraries + + kwargs['language'] = 'c++' + return setuptools.Extension(name, sources, *args, **kwargs) + + +def CUDAExtension(name, sources, *args, **kwargs): + """ + Create a :class:`setuptools.Extension` for CUDA/C++. + + Convenience method that creates a :class:`setuptools.Extension` with the + bare minimum (but often sufficient) arguments to build a CUDA/C++ + extension. This includes the CUDA include path, library path and runtime + library. + + All arguments are forwarded to the :class:`setuptools.Extension` + constructor. Full list arguments can be found at + https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#extension-api-reference + + Example: + >>> # xdoctest: +SKIP + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CPP_EXT) + >>> from setuptools import setup + >>> from torch.utils.cpp_extension import BuildExtension, CUDAExtension + >>> setup( + ... name='cuda_extension', + ... ext_modules=[ + ... CUDAExtension( + ... name='cuda_extension', + ... sources=['extension.cpp', 'extension_kernel.cu'], + ... extra_compile_args={'cxx': ['-g'], + ... 'nvcc': ['-O2']}, + ... 
extra_link_flags=['-Wl,--no-as-needed', '-lcuda'])
+        ...     ],
+        ...     cmdclass={
+        ...         'build_ext': BuildExtension
+        ...     })
+
+    Compute capabilities:
+
+    By default the extension will be compiled to run on all archs of the cards visible during the
+    building process of the extension, plus PTX. If down the road a new card is installed the
+    extension may need to be recompiled. If a visible card has a compute capability (CC) that's
+    newer than the newest version for which your nvcc can build fully-compiled binaries, PyTorch
+    will make nvcc fall back to building kernels with the newest version of PTX your nvcc does
+    support (see below for details on PTX).
+
+    You can override the default behavior using `TORCH_CUDA_ARCH_LIST` to explicitly specify which
+    CCs you want the extension to support:
+
+    ``TORCH_CUDA_ARCH_LIST="6.1 8.6" python build_my_extension.py``
+    ``TORCH_CUDA_ARCH_LIST="5.2 6.0 6.1 7.0 7.5 8.0 8.6+PTX" python build_my_extension.py``
+
+    The +PTX option causes extension kernel binaries to include PTX instructions for the specified
+    CC. PTX is an intermediate representation that allows kernels to runtime-compile for any CC >=
+    the specified CC (for example, 8.6+PTX generates PTX that can runtime-compile for any GPU with
+    CC >= 8.6). This improves your binary's forward compatibility. However, relying on older PTX to
+    provide forward compat by runtime-compiling for newer CCs can modestly reduce performance on
+    those newer CCs. If you know exact CC(s) of the GPUs you want to target, you're always better
+    off specifying them individually. For example, if you want your extension to run on 8.0 and 8.6,
+    "8.0+PTX" would work functionally because it includes PTX that can runtime-compile for 8.6, but
+    "8.0 8.6" would be better.
+
+    Note that while it's possible to include all supported archs, the more archs get included the
+    slower the building process will be, as it will build a separate kernel image for each arch.
+
+    Note that CUDA 11.5 nvcc will hit an internal compiler error while parsing torch/extension.h on Windows.
+    To work around the issue, move the Python binding logic to a pure C++ file.
+
+    Example use:
+        #include <ATen/ATen.h>
+        at::Tensor SigmoidAlphaBlendForwardCuda(....)
+
+    Instead of:
+        #include <torch/extension.h>
+        torch::Tensor SigmoidAlphaBlendForwardCuda(...)
+
+    Currently open issue for nvcc bug: https://github.com/pytorch/pytorch/issues/69460
+    Complete workaround code example: https://github.com/facebookresearch/pytorch3d/commit/cb170ac024a949f1f9614ffe6af1c38d972f7d48
+
+    Relocatable device code linking:
+
+    If you want to reference device symbols across compilation units (across object files),
+    the object files need to be built with `relocatable device code` (-rdc=true or -dc).
+    An exception to this rule is "dynamic parallelism" (nested kernel launches) which is not used a lot anymore.
+    `Relocatable device code` is less optimized so it needs to be used only on object files that need it.
+    Using `-dlto` (Device Link Time Optimization) at the device code compilation step and the `dlink` step
+    helps reduce the potential perf degradation of `-rdc`.
+    Note that it needs to be used at both steps to be useful.
+
+    If you have `rdc` objects you need to have an extra `-dlink` (device linking) step before the CPU symbol linking step.
+    There is also a case where `-dlink` is used without `-rdc`:
+    when an extension is linked against a static lib containing rdc-compiled objects
+    like the [NVSHMEM library](https://developer.nvidia.com/nvshmem).
+ + Note: Ninja is required to build a CUDA Extension with RDC linking. + + Example: + >>> # xdoctest: +SKIP + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CPP_EXT) + >>> CUDAExtension( + ... name='cuda_extension', + ... sources=['extension.cpp', 'extension_kernel.cu'], + ... dlink=True, + ... dlink_libraries=["dlink_lib"], + ... extra_compile_args={'cxx': ['-g'], + ... 'nvcc': ['-O2', '-rdc=true']}) + """ + library_dirs = kwargs.get('library_dirs', []) + library_dirs += library_paths(cuda=True) + kwargs['library_dirs'] = library_dirs + + libraries = kwargs.get('libraries', []) + libraries.append('c10') + libraries.append('torch') + libraries.append('torch_cpu') + libraries.append('torch_python') + if IS_HIP_EXTENSION: + assert ROCM_VERSION is not None + libraries.append('amdhip64' if ROCM_VERSION >= (3, 5) else 'hip_hcc') + libraries.append('c10_hip') + libraries.append('torch_hip') + else: + libraries.append('cudart') + libraries.append('c10_cuda') + libraries.append('torch_cuda') + kwargs['libraries'] = libraries + + include_dirs = kwargs.get('include_dirs', []) + + if IS_HIP_EXTENSION: + build_dir = os.getcwd() + hipify_result = hipify_python.hipify( + project_directory=build_dir, + output_directory=build_dir, + header_include_dirs=include_dirs, + includes=[os.path.join(build_dir, '*')], # limit scope to build_dir only + extra_files=[os.path.abspath(s) for s in sources], + show_detailed=True, + is_pytorch_extension=True, + hipify_extra_files_only=True, # don't hipify everything in includes path + ) + + hipified_sources = set() + for source in sources: + s_abs = os.path.abspath(source) + hipified_s_abs = (hipify_result[s_abs].hipified_path if (s_abs in hipify_result and + hipify_result[s_abs].hipified_path is not None) else s_abs) + # setup() arguments must *always* be /-separated paths relative to the setup.py directory, + # *never* absolute paths + hipified_sources.add(os.path.relpath(hipified_s_abs, build_dir)) + + sources = list(hipified_sources) + + include_dirs += include_paths(cuda=True) + kwargs['include_dirs'] = include_dirs + + kwargs['language'] = 'c++' + + dlink_libraries = kwargs.get('dlink_libraries', []) + dlink = kwargs.get('dlink', False) or dlink_libraries + if dlink: + extra_compile_args = kwargs.get('extra_compile_args', {}) + + extra_compile_args_dlink = extra_compile_args.get('nvcc_dlink', []) + extra_compile_args_dlink += ['-dlink'] + extra_compile_args_dlink += [f'-L{x}' for x in library_dirs] + extra_compile_args_dlink += [f'-l{x}' for x in dlink_libraries] + + if (torch.version.cuda is not None) and TorchVersion(torch.version.cuda) >= '11.2': + extra_compile_args_dlink += ['-dlto'] # Device Link Time Optimization started from cuda 11.2 + + extra_compile_args['nvcc_dlink'] = extra_compile_args_dlink + + kwargs['extra_compile_args'] = extra_compile_args + + return setuptools.Extension(name, sources, *args, **kwargs) + + +def include_paths(cuda: bool = False) -> List[str]: + """ + Get the include paths required to build a C++ or CUDA extension. + + Args: + cuda: If `True`, includes CUDA-specific include paths. + + Returns: + A list of include path strings. + """ + lib_include = os.path.join(_TORCH_PATH, 'include') + paths = [ + lib_include, + # Remove this once torch/torch.h is officially no longer supported for C++ extensions. + os.path.join(lib_include, 'torch', 'csrc', 'api', 'include'), + # Some internal (old) Torch headers don't properly prefix their includes, + # so we need to pass -Itorch/lib/include/TH as well. 
+        os.path.join(lib_include, 'TH'),
+        os.path.join(lib_include, 'THC')
+    ]
+    if cuda and IS_HIP_EXTENSION:
+        paths.append(os.path.join(lib_include, 'THH'))
+        paths.append(_join_rocm_home('include'))
+    elif cuda:
+        cuda_home_include = _join_cuda_home('include')
+        # if we have the Debian/Ubuntu packages for cuda, we get /usr as cuda home.
+        # but gcc doesn't like having /usr/include passed explicitly
+        if cuda_home_include != '/usr/include':
+            paths.append(cuda_home_include)
+        if CUDNN_HOME is not None:
+            paths.append(os.path.join(CUDNN_HOME, 'include'))
+    return paths
+
+
+def library_paths(cuda: bool = False) -> List[str]:
+    """
+    Get the library paths required to build a C++ or CUDA extension.
+
+    Args:
+        cuda: If `True`, includes CUDA-specific library paths.
+
+    Returns:
+        A list of library path strings.
+    """
+    # We need to link against libtorch.so
+    paths = [TORCH_LIB_PATH]
+
+    if cuda and IS_HIP_EXTENSION:
+        lib_dir = 'lib'
+        paths.append(_join_rocm_home(lib_dir))
+        if HIP_HOME is not None:
+            paths.append(os.path.join(HIP_HOME, 'lib'))
+    elif cuda:
+        if IS_WINDOWS:
+            lib_dir = os.path.join('lib', 'x64')
+        else:
+            lib_dir = 'lib64'
+            if (not os.path.exists(_join_cuda_home(lib_dir)) and
+                    os.path.exists(_join_cuda_home('lib'))):
+                # 64-bit CUDA may be installed in 'lib' (see e.g. gh-16955)
+                # Note that it's also possible both don't exist (see
+                # _find_cuda_home) - in that case we stay with 'lib64'.
+                lib_dir = 'lib'
+
+        paths.append(_join_cuda_home(lib_dir))
+        if CUDNN_HOME is not None:
+            paths.append(os.path.join(CUDNN_HOME, lib_dir))
+    return paths
+
+
+def load(name,
+         sources: Union[str, List[str]],
+         extra_cflags=None,
+         extra_cuda_cflags=None,
+         extra_ldflags=None,
+         extra_include_paths=None,
+         build_directory=None,
+         verbose=False,
+         with_cuda: Optional[bool] = None,
+         is_python_module=True,
+         is_standalone=False,
+         keep_intermediates=True):
+    """
+    Load a PyTorch C++ extension just-in-time (JIT).
+
+    To load an extension, a Ninja build file is emitted, which is used to
+    compile the given sources into a dynamic library. This library is
+    subsequently loaded into the current Python process as a module and
+    returned from this function, ready for use.
+
+    By default, the directory to which the build file is emitted and the
+    resulting library compiled to is ``<tmp>/torch_extensions/<name>``, where
+    ``<tmp>`` is the temporary folder on the current platform and ``<name>``
+    the name of the extension. This location can be overridden in two ways.
+    First, if the ``TORCH_EXTENSIONS_DIR`` environment variable is set, it
+    replaces ``<tmp>/torch_extensions`` and all extensions will be compiled
+    into subfolders of this directory. Second, if the ``build_directory``
+    argument to this function is supplied, it overrides the entire path, i.e.
+    the library will be compiled into that folder directly.
+
+    To compile the sources, the default system compiler (``c++``) is used,
+    which can be overridden by setting the ``CXX`` environment variable. To pass
+    additional arguments to the compilation process, ``extra_cflags`` or
+    ``extra_ldflags`` can be provided. For example, to compile your extension
+    with optimizations, pass ``extra_cflags=['-O3']``. You can also use
+    ``extra_cflags`` to pass further include directories.
+
+    CUDA support with mixed compilation is provided. Simply pass CUDA source
+    files (``.cu`` or ``.cuh``) along with other sources. Such files will be
+    detected and compiled with nvcc rather than the C++ compiler. This includes
+    passing the CUDA lib64 directory as a library directory, and linking
+    ``cudart``. You can pass additional flags to nvcc via
+    ``extra_cuda_cflags``, just like with ``extra_cflags`` for C++. Various
+    heuristics for finding the CUDA install directory are used, which usually
+    work fine. If not, setting the ``CUDA_HOME`` environment variable is the
+    safest option.
+
+    Args:
+        name: The name of the extension to build. This MUST be the same as the
+            name of the pybind11 module!
+        sources: A list of relative or absolute paths to C++ source files.
+        extra_cflags: optional list of compiler flags to forward to the build.
+        extra_cuda_cflags: optional list of compiler flags to forward to nvcc
+            when building CUDA sources.
+        extra_ldflags: optional list of linker flags to forward to the build.
+        extra_include_paths: optional list of include directories to forward
+            to the build.
+        build_directory: optional path to use as build workspace.
+        verbose: If ``True``, turns on verbose logging of load steps.
+        with_cuda: Determines whether CUDA headers and libraries are added to
+            the build. If set to ``None`` (default), this value is
+            automatically determined based on the existence of ``.cu`` or
+            ``.cuh`` in ``sources``. Set it to ``True`` to force CUDA headers
+            and libraries to be included.
+        is_python_module: If ``True`` (default), imports the produced shared
+            library as a Python module. If ``False``, behavior depends on
+            ``is_standalone``.
+        is_standalone: If ``False`` (default) loads the constructed extension
+            into the process as a plain dynamic library. If ``True``, build a
+            standalone executable.
+
+    Returns:
+        If ``is_python_module`` is ``True``:
+            Returns the loaded PyTorch extension as a Python module.
+
+        If ``is_python_module`` is ``False`` and ``is_standalone`` is ``False``:
+            Returns nothing. (The shared library is loaded into the process as
+            a side effect.)
+
+        If ``is_standalone`` is ``True``:
+            Returns the path to the executable. (On Windows, TORCH_LIB_PATH is
+            added to the PATH environment variable as a side effect.)
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> from torch.utils.cpp_extension import load
+        >>> module = load(
+        ...     name='extension',
+        ...     sources=['extension.cpp', 'extension_kernel.cu'],
+        ...     extra_cflags=['-O2'],
+        ...     verbose=True)
+    """
+    return _jit_compile(
+        name,
+        [sources] if isinstance(sources, str) else sources,
+        extra_cflags,
+        extra_cuda_cflags,
+        extra_ldflags,
+        extra_include_paths,
+        build_directory or _get_build_directory(name, verbose),
+        verbose,
+        with_cuda,
+        is_python_module,
+        is_standalone,
+        keep_intermediates=keep_intermediates)
+
+def _get_pybind11_abi_build_flags():
+    # Note [Pybind11 ABI constants]
+    #
+    # Pybind11 before 2.4 used to build an ABI string using the following pattern:
+    # f"__pybind11_internals_v{PYBIND11_INTERNALS_VERSION}{PYBIND11_INTERNALS_KIND}{PYBIND11_BUILD_TYPE}__"
+    # Since 2.4, compiler type, stdlib and build abi parameters are also encoded like this:
+    # f"__pybind11_internals_v{PYBIND11_INTERNALS_VERSION}{PYBIND11_INTERNALS_KIND}{PYBIND11_COMPILER_TYPE}{PYBIND11_STDLIB}{PYBIND11_BUILD_ABI}{PYBIND11_BUILD_TYPE}__"
+    #
+    # This was done in order to further narrow down the chances of compiler ABI incompatibility
+    # that can cause hard-to-debug segfaults.
+    # For PyTorch extensions we want to relax those restrictions and pass compiler, stdlib and abi properties
+    # captured during PyTorch native library compilation in torch/csrc/Module.cpp
+
+    abi_cflags = []
+    for pname in ["COMPILER_TYPE", "STDLIB", "BUILD_ABI"]:
+        pval = getattr(torch._C, f"_PYBIND11_{pname}")
+        if pval is not None and not IS_WINDOWS:
+            abi_cflags.append(f'-DPYBIND11_{pname}=\\"{pval}\\"')
+    return abi_cflags
+
+def _get_glibcxx_abi_build_flags():
+    glibcxx_abi_cflags = ['-D_GLIBCXX_USE_CXX11_ABI=' + str(int(torch._C._GLIBCXX_USE_CXX11_ABI))]
+    return glibcxx_abi_cflags
+
+def check_compiler_is_gcc(compiler):
+    if not IS_LINUX:
+        return False
+
+    env = os.environ.copy()
+    env['LC_ALL'] = 'C'  # Don't localize output
+    try:
+        version_string = subprocess.check_output([compiler, '-v'], stderr=subprocess.STDOUT, env=env).decode(*SUBPROCESS_DECODE_ARGS)
+    except Exception:
+        try:
+            version_string = subprocess.check_output([compiler, '--version'], stderr=subprocess.STDOUT, env=env).decode(*SUBPROCESS_DECODE_ARGS)
+        except Exception:
+            return False
+    # Check for 'gcc' or 'g++' for sccache wrapper
+    pattern = re.compile("^COLLECT_GCC=(.*)$", re.MULTILINE)
+    results = re.findall(pattern, version_string)
+    if len(results) != 1:
+        return False
+    compiler_path = os.path.realpath(results[0].strip())
+    # On RHEL/CentOS c++ is a gcc compiler wrapper
+    if os.path.basename(compiler_path) == 'c++' and 'gcc version' in version_string:
+        return True
+    return False
+
+def _check_and_build_extension_h_precompiler_headers(
+        extra_cflags,
+        extra_include_paths,
+        is_standalone=False):
+    r'''
+    Precompiled headers (PCH) can pre-build the same headers and reduce build time for PyTorch load_inline modules.
+    GCC official manual: https://gcc.gnu.org/onlinedocs/gcc-4.0.4/gcc/Precompiled-Headers.html
+    PCH only works when the built PCH file (header.h.gch) and the build target share the same build parameters,
+    so we add a signature file to record the PCH file's build parameters. If the build parameters (signature)
+    change, the PCH file is rebuilt.
+
+    Note:
+    1. Windows and macOS have different PCH mechanisms; only Linux is supported currently.
+    2. It only works on GCC/G++.
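+
+    This helper is an internal detail; it is typically reached via
+    ``load_inline(..., use_pch=True)`` rather than called directly, e.g.
+    (illustrative only, ``cpp_src`` and ``sin_add`` are placeholders):
+
+        >>> # xdoctest: +SKIP
+        >>> module = load_inline(name='pch_demo', cpp_sources=[cpp_src],
+        ...                      functions=['sin_add'], use_pch=True)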
+ ''' + if not IS_LINUX: + return + + compiler = get_cxx_compiler() + + b_is_gcc = check_compiler_is_gcc(compiler) + if b_is_gcc is False: + return + + head_file = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h') + head_file_pch = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h.gch') + head_file_signature = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h.sign') + + def listToString(s): + # initialize an empty string + string = "" + if s is None: + return string + + # traverse in the string + for element in s: + string += (element + ' ') + # return string + return string + + def format_precompiler_header_cmd(compiler, head_file, head_file_pch, common_cflags, torch_include_dirs, extra_cflags, extra_include_paths): + return re.sub( + r"[ \n]+", + " ", + f""" + {compiler} -x c++-header {head_file} -o {head_file_pch} {torch_include_dirs} {extra_include_paths} {extra_cflags} {common_cflags} + """, + ).strip() + + def command_to_signature(cmd): + signature = cmd.replace(' ', '_') + return signature + + def check_pch_signature_in_file(file_path, signature): + b_exist = os.path.isfile(file_path) + if b_exist is False: + return False + + with open(file_path) as file: + # read all content of a file + content = file.read() + # check if string present in a file + if signature == content: + return True + else: + return False + + def _create_if_not_exist(path_dir): + if not os.path.exists(path_dir): + try: + Path(path_dir).mkdir(parents=True, exist_ok=True) + except OSError as exc: # Guard against race condition + if exc.errno != errno.EEXIST: + raise RuntimeError(f"Fail to create path {path_dir}") from exc + + def write_pch_signature_to_file(file_path, pch_sign): + _create_if_not_exist(os.path.dirname(file_path)) + with open(file_path, "w") as f: + f.write(pch_sign) + f.close() + + def build_precompile_header(pch_cmd): + try: + subprocess.check_output(pch_cmd, shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + raise RuntimeError(f"Compile PreCompile Header fail, command: {pch_cmd}") from e + + extra_cflags_str = listToString(extra_cflags) + extra_include_paths_str = " ".join( + [f"-I{include}" for include in extra_include_paths] if extra_include_paths else [] + ) + + lib_include = os.path.join(_TORCH_PATH, 'include') + torch_include_dirs = [ + f"-I {lib_include}", + # Python.h + "-I {}".format(sysconfig.get_path("include")), + # torch/all.h + "-I {}".format(os.path.join(lib_include, 'torch', 'csrc', 'api', 'include')), + ] + + torch_include_dirs_str = listToString(torch_include_dirs) + + common_cflags = [] + if not is_standalone: + common_cflags += ['-DTORCH_API_INCLUDE_EXTENSION_H'] + + common_cflags += ['-std=c++17', '-fPIC'] + common_cflags += [f"{x}" for x in _get_pybind11_abi_build_flags()] + common_cflags += [f"{x}" for x in _get_glibcxx_abi_build_flags()] + common_cflags_str = listToString(common_cflags) + + pch_cmd = format_precompiler_header_cmd(compiler, head_file, head_file_pch, common_cflags_str, torch_include_dirs_str, extra_cflags_str, extra_include_paths_str) + pch_sign = command_to_signature(pch_cmd) + + if os.path.isfile(head_file_pch) is not True: + build_precompile_header(pch_cmd) + write_pch_signature_to_file(head_file_signature, pch_sign) + else: + b_same_sign = check_pch_signature_in_file(head_file_signature, pch_sign) + if b_same_sign is False: + build_precompile_header(pch_cmd) + write_pch_signature_to_file(head_file_signature, pch_sign) + +def remove_extension_h_precompiler_headers(): + def 
_remove_if_file_exists(path_file):
+        if os.path.exists(path_file):
+            os.remove(path_file)
+
+    head_file_pch = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h.gch')
+    head_file_signature = os.path.join(_TORCH_PATH, 'include', 'torch', 'extension.h.sign')
+
+    _remove_if_file_exists(head_file_pch)
+    _remove_if_file_exists(head_file_signature)
+
+def load_inline(name,
+                cpp_sources,
+                cuda_sources=None,
+                functions=None,
+                extra_cflags=None,
+                extra_cuda_cflags=None,
+                extra_ldflags=None,
+                extra_include_paths=None,
+                build_directory=None,
+                verbose=False,
+                with_cuda=None,
+                is_python_module=True,
+                with_pytorch_error_handling=True,
+                keep_intermediates=True,
+                use_pch=False):
+    r'''
+    Load a PyTorch C++ extension just-in-time (JIT) from string sources.
+
+    This function behaves exactly like :func:`load`, but takes its sources as
+    strings rather than filenames. These strings are stored to files in the
+    build directory, after which the behavior of :func:`load_inline` is
+    identical to :func:`load`.
+
+    See the tests in ``test/test_cpp_extensions_jit.py`` in the PyTorch
+    repository for good examples of using this function.
+
+    Sources may omit two required parts of a typical non-inline C++ extension:
+    the necessary header includes, as well as the (pybind11) binding code. More
+    precisely, strings passed to ``cpp_sources`` are first concatenated into a
+    single ``.cpp`` file. This file is then prepended with ``#include
+    <torch/extension.h>``.
+
+    Furthermore, if the ``functions`` argument is supplied, bindings will be
+    automatically generated for each function specified. ``functions`` can
+    either be a list of function names, or a dictionary mapping from function
+    names to docstrings. If a list is given, the name of each function is used
+    as its docstring.
+
+    The sources in ``cuda_sources`` are concatenated into a separate ``.cu``
+    file and prepended with ``torch/types.h``, ``cuda.h`` and
+    ``cuda_runtime.h`` includes. The ``.cpp`` and ``.cu`` files are compiled
+    separately, but ultimately linked into a single library. Note that no
+    bindings are generated for functions in ``cuda_sources`` per se. To bind
+    to a CUDA kernel, you must create a C++ function that calls it, and either
+    declare or define this C++ function in one of the ``cpp_sources`` (and
+    include its name in ``functions``).
+
+    See :func:`load` for a description of arguments omitted below.
+
+    Args:
+        cpp_sources: A string, or list of strings, containing C++ source code.
+        cuda_sources: A string, or list of strings, containing CUDA source code.
+        functions: A list of function names for which to generate function
+            bindings. If a dictionary is given, it should map function names to
+            docstrings (which are otherwise just the function names).
+        with_cuda: Determines whether CUDA headers and libraries are added to
+            the build. If set to ``None`` (default), this value is
+            automatically determined based on whether ``cuda_sources`` is
+            provided. Set it to ``True`` to force CUDA headers
+            and libraries to be included.
+        with_pytorch_error_handling: Determines whether PyTorch error and
+            warning macros are handled by PyTorch instead of pybind11. To do
+            this, each function ``foo`` is called via an intermediary ``_safe_foo``
+            function. This redirection might cause issues in obscure cases
+            of C++. This flag should be set to ``False`` when this redirect
+            causes issues.
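+        use_pch: If ``True``, tries to reuse a precompiled ``torch/extension.h``
+            header to speed up repeated builds (only effective on Linux with
+            GCC); if ``False`` (default), any previously built precompiled
+            header is removed.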
+
+    Example:
+        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CPP_EXT)
+        >>> from torch.utils.cpp_extension import load_inline
+        >>> source = """
+        at::Tensor sin_add(at::Tensor x, at::Tensor y) {
+          return x.sin() + y.sin();
+        }
+        """
+        >>> module = load_inline(name='inline_extension',
+        ...                      cpp_sources=[source],
+        ...                      functions=['sin_add'])
+
+    .. note::
+        By default, the Ninja backend uses #CPUS + 2 workers to build the
+        extension. This may use up too many resources on some systems. One
+        can control the number of workers by setting the `MAX_JOBS` environment
+        variable to a non-negative number.
+    '''
+    build_directory = build_directory or _get_build_directory(name, verbose)
+
+    if isinstance(cpp_sources, str):
+        cpp_sources = [cpp_sources]
+    cuda_sources = cuda_sources or []
+    if isinstance(cuda_sources, str):
+        cuda_sources = [cuda_sources]
+
+    cpp_sources.insert(0, '#include <torch/extension.h>')
+
+    if use_pch is True:
+        # Using PreCompile Header('torch/extension.h') to reduce compile time.
+        _check_and_build_extension_h_precompiler_headers(extra_cflags, extra_include_paths)
+    else:
+        remove_extension_h_precompiler_headers()
+
+    # If `functions` is supplied, we create the pybind11 bindings for the user.
+    # Here, `functions` is (or becomes, after some processing) a map from
+    # function names to function docstrings.
+    if functions is not None:
+        module_def = []
+        module_def.append('PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {')
+        if isinstance(functions, str):
+            functions = [functions]
+        if isinstance(functions, list):
+            # Make the function docstring the same as the function name.
+            functions = {f: f for f in functions}
+        elif not isinstance(functions, dict):
+            raise ValueError(f"Expected 'functions' to be a list or dict, but was {type(functions)}")
+        for function_name, docstring in functions.items():
+            if with_pytorch_error_handling:
+                module_def.append(f'm.def("{function_name}", torch::wrap_pybind_function({function_name}), "{docstring}");')
+            else:
+                module_def.append(f'm.def("{function_name}", {function_name}, "{docstring}");')
+        module_def.append('}')
+        cpp_sources += module_def
+
+    cpp_source_path = os.path.join(build_directory, 'main.cpp')
+    _maybe_write(cpp_source_path, "\n".join(cpp_sources))
+
+    sources = [cpp_source_path]
+
+    if cuda_sources:
+        cuda_sources.insert(0, '#include <torch/types.h>')
+        cuda_sources.insert(1, '#include <cuda.h>')
+        cuda_sources.insert(2, '#include <cuda_runtime.h>')
+
+        cuda_source_path = os.path.join(build_directory, 'cuda.cu')
+        _maybe_write(cuda_source_path, "\n".join(cuda_sources))
+
+        sources.append(cuda_source_path)
+
+    return _jit_compile(
+        name,
+        sources,
+        extra_cflags,
+        extra_cuda_cflags,
+        extra_ldflags,
+        extra_include_paths,
+        build_directory,
+        verbose,
+        with_cuda,
+        is_python_module,
+        is_standalone=False,
+        keep_intermediates=keep_intermediates)
+
+
+def _jit_compile(name,
+                 sources,
+                 extra_cflags,
+                 extra_cuda_cflags,
+                 extra_ldflags,
+                 extra_include_paths,
+                 build_directory: str,
+                 verbose: bool,
+                 with_cuda: Optional[bool],
+                 is_python_module,
+                 is_standalone,
+                 keep_intermediates=True) -> None:
+    if is_python_module and is_standalone:
+        raise ValueError("`is_python_module` and `is_standalone` are mutually exclusive.")
+
+    if with_cuda is None:
+        with_cuda = any(map(_is_cuda_file, sources))
+    with_cudnn = any('cudnn' in f for f in extra_ldflags or [])
+    old_version = JIT_EXTENSION_VERSIONER.get_version(name)
+    version = JIT_EXTENSION_VERSIONER.bump_version_if_changed(
+        name,
+        sources,
+        build_arguments=[extra_cflags, extra_cuda_cflags, extra_ldflags, extra_include_paths],
+
build_directory=build_directory, + with_cuda=with_cuda, + is_python_module=is_python_module, + is_standalone=is_standalone, + ) + if version > 0: + if version != old_version and verbose: + print(f'The input conditions for extension module {name} have changed. ' + + f'Bumping to version {version} and re-building as {name}_v{version}...', + file=sys.stderr) + name = f'{name}_v{version}' + + if version != old_version: + baton = FileBaton(os.path.join(build_directory, 'lock')) + if baton.try_acquire(): + try: + with GeneratedFileCleaner(keep_intermediates=keep_intermediates) as clean_ctx: + if IS_HIP_EXTENSION and (with_cuda or with_cudnn): + hipify_result = hipify_python.hipify( + project_directory=build_directory, + output_directory=build_directory, + header_include_dirs=(extra_include_paths if extra_include_paths is not None else []), + extra_files=[os.path.abspath(s) for s in sources], + ignores=[_join_rocm_home('*'), os.path.join(_TORCH_PATH, '*')], # no need to hipify ROCm or PyTorch headers + show_detailed=verbose, + show_progress=verbose, + is_pytorch_extension=True, + clean_ctx=clean_ctx + ) + + hipified_sources = set() + for source in sources: + s_abs = os.path.abspath(source) + hipified_sources.add(hipify_result[s_abs].hipified_path if s_abs in hipify_result else s_abs) + + sources = list(hipified_sources) + + _write_ninja_file_and_build_library( + name=name, + sources=sources, + extra_cflags=extra_cflags or [], + extra_cuda_cflags=extra_cuda_cflags or [], + extra_ldflags=extra_ldflags or [], + extra_include_paths=extra_include_paths or [], + build_directory=build_directory, + verbose=verbose, + with_cuda=with_cuda, + is_standalone=is_standalone) + finally: + baton.release() + else: + baton.wait() + elif verbose: + print('No modifications detected for re-loaded extension ' + f'module {name}, skipping build step...', + file=sys.stderr) + + if verbose: + print(f'Loading extension module {name}...', file=sys.stderr) + + if is_standalone: + return _get_exec_path(name, build_directory) + + return _import_module_from_library(name, build_directory, is_python_module) + + +def _write_ninja_file_and_compile_objects( + sources: List[str], + objects, + cflags, + post_cflags, + cuda_cflags, + cuda_post_cflags, + cuda_dlink_post_cflags, + build_directory: str, + verbose: bool, + with_cuda: Optional[bool]) -> None: + verify_ninja_availability() + + compiler = get_cxx_compiler() + + get_compiler_abi_compatibility_and_version(compiler) + if with_cuda is None: + with_cuda = any(map(_is_cuda_file, sources)) + build_file_path = os.path.join(build_directory, 'build.ninja') + if verbose: + print(f'Emitting ninja build file {build_file_path}...', file=sys.stderr) + _write_ninja_file( + path=build_file_path, + cflags=cflags, + post_cflags=post_cflags, + cuda_cflags=cuda_cflags, + cuda_post_cflags=cuda_post_cflags, + cuda_dlink_post_cflags=cuda_dlink_post_cflags, + sources=sources, + objects=objects, + ldflags=None, + library_target=None, + with_cuda=with_cuda) + if verbose: + print('Compiling objects...', file=sys.stderr) + _run_ninja_build( + build_directory, + verbose, + # It would be better if we could tell users the name of the extension + # that failed to build but there isn't a good way to get it here. 
+ error_prefix='Error compiling objects for extension') + + +def _write_ninja_file_and_build_library( + name, + sources: List[str], + extra_cflags, + extra_cuda_cflags, + extra_ldflags, + extra_include_paths, + build_directory: str, + verbose: bool, + with_cuda: Optional[bool], + is_standalone: bool = False) -> None: + verify_ninja_availability() + + compiler = get_cxx_compiler() + + get_compiler_abi_compatibility_and_version(compiler) + if with_cuda is None: + with_cuda = any(map(_is_cuda_file, sources)) + extra_ldflags = _prepare_ldflags( + extra_ldflags or [], + with_cuda, + verbose, + is_standalone) + build_file_path = os.path.join(build_directory, 'build.ninja') + if verbose: + print(f'Emitting ninja build file {build_file_path}...', file=sys.stderr) + # NOTE: Emitting a new ninja build file does not cause re-compilation if + # the sources did not change, so it's ok to re-emit (and it's fast). + _write_ninja_file_to_build_library( + path=build_file_path, + name=name, + sources=sources, + extra_cflags=extra_cflags or [], + extra_cuda_cflags=extra_cuda_cflags or [], + extra_ldflags=extra_ldflags or [], + extra_include_paths=extra_include_paths or [], + with_cuda=with_cuda, + is_standalone=is_standalone) + + if verbose: + print(f'Building extension module {name}...', file=sys.stderr) + _run_ninja_build( + build_directory, + verbose, + error_prefix=f"Error building extension '{name}'") + + +def is_ninja_available(): + """Return ``True`` if the `ninja `_ build system is available on the system, ``False`` otherwise.""" + try: + subprocess.check_output('ninja --version'.split()) + except Exception: + return False + else: + return True + + +def verify_ninja_availability(): + """Raise ``RuntimeError`` if `ninja `_ build system is not available on the system, does nothing otherwise.""" + if not is_ninja_available(): + raise RuntimeError("Ninja is required to load C++ extensions") + + +def _prepare_ldflags(extra_ldflags, with_cuda, verbose, is_standalone): + if IS_WINDOWS: + python_lib_path = os.path.join(sys.base_exec_prefix, 'libs') + + extra_ldflags.append('c10.lib') + if with_cuda: + extra_ldflags.append('c10_cuda.lib') + extra_ldflags.append('torch_cpu.lib') + if with_cuda: + extra_ldflags.append('torch_cuda.lib') + # /INCLUDE is used to ensure torch_cuda is linked against in a project that relies on it. 
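+            # (The mangled name passed to /INCLUDE below is the MSVC symbol for
+            # at::cuda::warp_size(); forcing a reference to it keeps the linker
+            # from dropping torch_cuda.lib.)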
+ # Related issue: https://github.com/pytorch/pytorch/issues/31611 + extra_ldflags.append('-INCLUDE:?warp_size@cuda@at@@YAHXZ') + extra_ldflags.append('torch.lib') + extra_ldflags.append(f'/LIBPATH:{TORCH_LIB_PATH}') + if not is_standalone: + extra_ldflags.append('torch_python.lib') + extra_ldflags.append(f'/LIBPATH:{python_lib_path}') + + else: + extra_ldflags.append(f'-L{TORCH_LIB_PATH}') + extra_ldflags.append('-lc10') + if with_cuda: + extra_ldflags.append('-lc10_hip' if IS_HIP_EXTENSION else '-lc10_cuda') + extra_ldflags.append('-ltorch_cpu') + if with_cuda: + extra_ldflags.append('-ltorch_hip' if IS_HIP_EXTENSION else '-ltorch_cuda') + extra_ldflags.append('-ltorch') + if not is_standalone: + extra_ldflags.append('-ltorch_python') + + if is_standalone and "TBB" in torch.__config__.parallel_info(): + extra_ldflags.append('-ltbb') + + if is_standalone: + extra_ldflags.append(f"-Wl,-rpath,{TORCH_LIB_PATH}") + + if with_cuda: + if verbose: + print('Detected CUDA files, patching ldflags', file=sys.stderr) + if IS_WINDOWS: + extra_ldflags.append(f'/LIBPATH:{_join_cuda_home("lib", "x64")}') + extra_ldflags.append('cudart.lib') + if CUDNN_HOME is not None: + extra_ldflags.append(f'/LIBPATH:{os.path.join(CUDNN_HOME, "lib", "x64")}') + elif not IS_HIP_EXTENSION: + extra_lib_dir = "lib64" + if (not os.path.exists(_join_cuda_home(extra_lib_dir)) and + os.path.exists(_join_cuda_home("lib"))): + # 64-bit CUDA may be installed in "lib" + # Note that it's also possible both don't exist (see _find_cuda_home) - in that case we stay with "lib64" + extra_lib_dir = "lib" + extra_ldflags.append(f'-L{_join_cuda_home(extra_lib_dir)}') + extra_ldflags.append('-lcudart') + if CUDNN_HOME is not None: + extra_ldflags.append(f'-L{os.path.join(CUDNN_HOME, "lib64")}') + elif IS_HIP_EXTENSION: + assert ROCM_VERSION is not None + extra_ldflags.append(f'-L{_join_rocm_home("lib")}') + extra_ldflags.append('-lamdhip64' if ROCM_VERSION >= (3, 5) else '-lhip_hcc') + return extra_ldflags + + +def _get_cuda_arch_flags(cflags: Optional[List[str]] = None) -> List[str]: + """ + Determine CUDA arch flags to use. + + For an arch, say "6.1", the added compile flag will be + ``-gencode=arch=compute_61,code=sm_61``. + For an added "+PTX", an additional + ``-gencode=arch=compute_xx,code=compute_xx`` is added. + + See select_compute_arch.cmake for corresponding named and supported arches + when building with CMake. + """ + # If cflags is given, there may already be user-provided arch flags in it + # (from `extra_compile_args`) + if cflags is not None: + for flag in cflags: + if 'TORCH_EXTENSION_NAME' in flag: + continue + if 'arch' in flag: + return [] + + # Note: keep combined names ("arch1+arch2") above single names, otherwise + # string replacement may not do the right thing + named_arches = collections.OrderedDict([ + ('Kepler+Tesla', '3.7'), + ('Kepler', '3.5+PTX'), + ('Maxwell+Tegra', '5.3'), + ('Maxwell', '5.0;5.2+PTX'), + ('Pascal', '6.0;6.1+PTX'), + ('Volta+Tegra', '7.2'), + ('Volta', '7.0+PTX'), + ('Turing', '7.5+PTX'), + ('Ampere+Tegra', '8.7'), + ('Ampere', '8.0;8.6+PTX'), + ('Ada', '8.9+PTX'), + ('Hopper', '9.0+PTX'), + ]) + + supported_arches = ['3.5', '3.7', '5.0', '5.2', '5.3', '6.0', '6.1', '6.2', + '7.0', '7.2', '7.5', '8.0', '8.6', '8.7', '8.9', '9.0', '9.0a'] + valid_arch_strings = supported_arches + [s + "+PTX" for s in supported_arches] + + # The default is sm_30 for CUDA 9.x and 10.x + # First check for an env var (same as used by the main setup.py) + # Can be one or more architectures, e.g. 
"6.1" or "3.5;5.2;6.0;6.1;7.0+PTX" + # See cmake/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake + _arch_list = os.environ.get('TORCH_CUDA_ARCH_LIST', None) + + # If not given, determine what's best for the GPU / CUDA version that can be found + if not _arch_list: + warnings.warn( + "TORCH_CUDA_ARCH_LIST is not set, all archs for visible cards are included for compilation. \n" + "If this is not desired, please set os.environ['TORCH_CUDA_ARCH_LIST'].") + arch_list = [] + # the assumption is that the extension should run on any of the currently visible cards, + # which could be of different types - therefore all archs for visible cards should be included + for i in range(torch.cuda.device_count()): + capability = torch.cuda.get_device_capability(i) + supported_sm = [int(arch.split('_')[1]) + for arch in torch.cuda.get_arch_list() if 'sm_' in arch] + max_supported_sm = max((sm // 10, sm % 10) for sm in supported_sm) + # Capability of the device may be higher than what's supported by the user's + # NVCC, causing compilation error. User's NVCC is expected to match the one + # used to build pytorch, so we use the maximum supported capability of pytorch + # to clamp the capability. + capability = min(max_supported_sm, capability) + arch = f'{capability[0]}.{capability[1]}' + if arch not in arch_list: + arch_list.append(arch) + arch_list = sorted(arch_list) + arch_list[-1] += '+PTX' + else: + # Deal with lists that are ' ' separated (only deal with ';' after) + _arch_list = _arch_list.replace(' ', ';') + # Expand named arches + for named_arch, archval in named_arches.items(): + _arch_list = _arch_list.replace(named_arch, archval) + + arch_list = _arch_list.split(';') + + flags = [] + for arch in arch_list: + if arch not in valid_arch_strings: + raise ValueError(f"Unknown CUDA arch ({arch}) or GPU not supported") + else: + num = arch[0] + arch[2:].split("+")[0] + flags.append(f'-gencode=arch=compute_{num},code=sm_{num}') + if arch.endswith('+PTX'): + flags.append(f'-gencode=arch=compute_{num},code=compute_{num}') + + return sorted(set(flags)) + + +def _get_rocm_arch_flags(cflags: Optional[List[str]] = None) -> List[str]: + # If cflags is given, there may already be user-provided arch flags in it + # (from `extra_compile_args`) + if cflags is not None: + for flag in cflags: + if 'amdgpu-target' in flag or 'offload-arch' in flag: + return ['-fno-gpu-rdc'] + # Use same defaults as used for building PyTorch + # Allow env var to override, just like during initial cmake build. 
+ _archs = os.environ.get('PYTORCH_ROCM_ARCH', None) + if not _archs: + archFlags = torch._C._cuda_getArchFlags() + if archFlags: + archs = archFlags.split() + else: + archs = [] + else: + archs = _archs.replace(' ', ';').split(';') + flags = [f'--offload-arch={arch}' for arch in archs] + flags += ['-fno-gpu-rdc'] + return flags + +def _get_build_directory(name: str, verbose: bool) -> str: + root_extensions_directory = os.environ.get('TORCH_EXTENSIONS_DIR') + if root_extensions_directory is None: + root_extensions_directory = get_default_build_root() + cu_str = ('cpu' if torch.version.cuda is None else + f'cu{torch.version.cuda.replace(".", "")}') # type: ignore[attr-defined] + python_version = f'py{sys.version_info.major}{sys.version_info.minor}' + build_folder = f'{python_version}_{cu_str}' + + root_extensions_directory = os.path.join( + root_extensions_directory, build_folder) + + if verbose: + print(f'Using {root_extensions_directory} as PyTorch extensions root...', file=sys.stderr) + + build_directory = os.path.join(root_extensions_directory, name) + if not os.path.exists(build_directory): + if verbose: + print(f'Creating extension directory {build_directory}...', file=sys.stderr) + # This is like mkdir -p, i.e. will also create parent directories. + os.makedirs(build_directory, exist_ok=True) + + return build_directory + + +def _get_num_workers(verbose: bool) -> Optional[int]: + max_jobs = os.environ.get('MAX_JOBS') + if max_jobs is not None and max_jobs.isdigit(): + if verbose: + print(f'Using envvar MAX_JOBS ({max_jobs}) as the number of workers...', + file=sys.stderr) + return int(max_jobs) + if verbose: + print('Allowing ninja to set a default number of workers... ' + '(overridable by setting the environment variable MAX_JOBS=N)', + file=sys.stderr) + return None + + +def _run_ninja_build(build_directory: str, verbose: bool, error_prefix: str) -> None: + command = ['ninja', '-v'] + num_workers = _get_num_workers(verbose) + if num_workers is not None: + command.extend(['-j', str(num_workers)]) + env = os.environ.copy() + # Try to activate the vc env for the users + if IS_WINDOWS and 'VSCMD_ARG_TGT_ARCH' not in env: + from setuptools import distutils + + plat_name = distutils.util.get_platform() + plat_spec = PLAT_TO_VCVARS[plat_name] + + vc_env = distutils._msvccompiler._get_vc_env(plat_spec) + vc_env = {k.upper(): v for k, v in vc_env.items()} + for k, v in env.items(): + uk = k.upper() + if uk not in vc_env: + vc_env[uk] = v + env = vc_env + try: + sys.stdout.flush() + sys.stderr.flush() + # Warning: don't pass stdout=None to subprocess.run to get output. + # subprocess.run assumes that sys.__stdout__ has not been modified and + # attempts to write to it by default. However, when we call _run_ninja_build + # from ahead-of-time cpp extensions, the following happens: + # 1) If the stdout encoding is not utf-8, setuptools detachs __stdout__. + # https://github.com/pypa/setuptools/blob/7e97def47723303fafabe48b22168bbc11bb4821/setuptools/dist.py#L1110 + # (it probably shouldn't do this) + # 2) subprocess.run (on POSIX, with no stdout override) relies on + # __stdout__ not being detached: + # https://github.com/python/cpython/blob/c352e6c7446c894b13643f538db312092b351789/Lib/subprocess.py#L1214 + # To work around this, we pass in the fileno directly and hope that + # it is valid. 
+ stdout_fileno = 1 + subprocess.run( + command, + stdout=stdout_fileno if verbose else subprocess.PIPE, + stderr=subprocess.STDOUT, + cwd=build_directory, + check=True, + env=env) + except subprocess.CalledProcessError as e: + # Python 2 and 3 compatible way of getting the error object. + _, error, _ = sys.exc_info() + # error.output contains the stdout and stderr of the build attempt. + message = error_prefix + # `error` is a CalledProcessError (which has an `output`) attribute, but + # mypy thinks it's Optional[BaseException] and doesn't narrow + if hasattr(error, 'output') and error.output: # type: ignore[union-attr] + message += f": {error.output.decode(*SUBPROCESS_DECODE_ARGS)}" # type: ignore[union-attr] + raise RuntimeError(message) from e + + +def _get_exec_path(module_name, path): + if IS_WINDOWS and TORCH_LIB_PATH not in os.getenv('PATH', '').split(';'): + torch_lib_in_path = any( + os.path.exists(p) and os.path.samefile(p, TORCH_LIB_PATH) + for p in os.getenv('PATH', '').split(';') + ) + if not torch_lib_in_path: + os.environ['PATH'] = f"{TORCH_LIB_PATH};{os.getenv('PATH', '')}" + return os.path.join(path, f'{module_name}{EXEC_EXT}') + + +def _import_module_from_library(module_name, path, is_python_module): + filepath = os.path.join(path, f"{module_name}{LIB_EXT}") + if is_python_module: + # https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path + spec = importlib.util.spec_from_file_location(module_name, filepath) + assert spec is not None + module = importlib.util.module_from_spec(spec) + assert isinstance(spec.loader, importlib.abc.Loader) + spec.loader.exec_module(module) + return module + else: + torch.ops.load_library(filepath) + + +def _write_ninja_file_to_build_library(path, + name, + sources, + extra_cflags, + extra_cuda_cflags, + extra_ldflags, + extra_include_paths, + with_cuda, + is_standalone) -> None: + extra_cflags = [flag.strip() for flag in extra_cflags] + extra_cuda_cflags = [flag.strip() for flag in extra_cuda_cflags] + extra_ldflags = [flag.strip() for flag in extra_ldflags] + extra_include_paths = [flag.strip() for flag in extra_include_paths] + + # Turn into absolute paths so we can emit them into the ninja build + # file wherever it is. + user_includes = [os.path.abspath(file) for file in extra_include_paths] + + # include_paths() gives us the location of torch/extension.h + system_includes = include_paths(with_cuda) + # sysconfig.get_path('include') gives us the location of Python.h + # Explicitly specify 'posix_prefix' scheme on non-Windows platforms to workaround error on some MacOS + # installations where default `get_path` points to non-existing `/Library/Python/M.m/include` folder + python_include_path = sysconfig.get_path('include', scheme='nt' if IS_WINDOWS else 'posix_prefix') + if python_include_path is not None: + system_includes.append(python_include_path) + + # Windows does not understand `-isystem`. 
+ if IS_WINDOWS: + user_includes += system_includes + system_includes.clear() + + common_cflags = [] + if not is_standalone: + common_cflags.append(f'-DTORCH_EXTENSION_NAME={name}') + common_cflags.append('-DTORCH_API_INCLUDE_EXTENSION_H') + + common_cflags += [f"{x}" for x in _get_pybind11_abi_build_flags()] + + common_cflags += [f'-I{include}' for include in user_includes] + common_cflags += [f'-isystem {include}' for include in system_includes] + + common_cflags += [f"{x}" for x in _get_glibcxx_abi_build_flags()] + + if IS_WINDOWS: + cflags = common_cflags + COMMON_MSVC_FLAGS + ['/std:c++17'] + extra_cflags + cflags = _nt_quote_args(cflags) + else: + cflags = common_cflags + ['-fPIC', '-std=c++17'] + extra_cflags + + if with_cuda and IS_HIP_EXTENSION: + cuda_flags = ['-DWITH_HIP'] + cflags + COMMON_HIP_FLAGS + COMMON_HIPCC_FLAGS + cuda_flags += extra_cuda_cflags + cuda_flags += _get_rocm_arch_flags(cuda_flags) + elif with_cuda: + cuda_flags = common_cflags + COMMON_NVCC_FLAGS + _get_cuda_arch_flags() + if IS_WINDOWS: + for flag in COMMON_MSVC_FLAGS: + cuda_flags = ['-Xcompiler', flag] + cuda_flags + for ignore_warning in MSVC_IGNORE_CUDAFE_WARNINGS: + cuda_flags = ['-Xcudafe', '--diag_suppress=' + ignore_warning] + cuda_flags + cuda_flags = cuda_flags + ['-std=c++17'] + cuda_flags = _nt_quote_args(cuda_flags) + cuda_flags += _nt_quote_args(extra_cuda_cflags) + else: + cuda_flags += ['--compiler-options', "'-fPIC'"] + cuda_flags += extra_cuda_cflags + if not any(flag.startswith('-std=') for flag in cuda_flags): + cuda_flags.append('-std=c++17') + cc_env = os.getenv("CC") + if cc_env is not None: + cuda_flags = ['-ccbin', cc_env] + cuda_flags + else: + cuda_flags = None + + def object_file_path(source_file: str) -> str: + # '/path/to/file.cpp' -> 'file' + file_name = os.path.splitext(os.path.basename(source_file))[0] + if _is_cuda_file(source_file) and with_cuda: + # Use a different object filename in case a C++ and CUDA file have + # the same filename but different extension (.cpp vs. .cu). + target = f'{file_name}.cuda.o' + else: + target = f'{file_name}.o' + return target + + objects = [object_file_path(src) for src in sources] + ldflags = ([] if is_standalone else [SHARED_FLAG]) + extra_ldflags + + # The darwin linker needs explicit consent to ignore unresolved symbols. + if IS_MACOS: + ldflags.append('-undefined dynamic_lookup') + elif IS_WINDOWS: + ldflags = _nt_quote_args(ldflags) + + ext = EXEC_EXT if is_standalone else LIB_EXT + library_target = f'{name}{ext}' + + _write_ninja_file( + path=path, + cflags=cflags, + post_cflags=None, + cuda_cflags=cuda_flags, + cuda_post_cflags=None, + cuda_dlink_post_cflags=None, + sources=sources, + objects=objects, + ldflags=ldflags, + library_target=library_target, + with_cuda=with_cuda) + + +def _write_ninja_file(path, + cflags, + post_cflags, + cuda_cflags, + cuda_post_cflags, + cuda_dlink_post_cflags, + sources, + objects, + ldflags, + library_target, + with_cuda) -> None: + r"""Write a ninja file that does the desired compiling and linking. + + `path`: Where to write this file + `cflags`: list of flags to pass to $cxx. Can be None. + `post_cflags`: list of flags to append to the $cxx invocation. Can be None. + `cuda_cflags`: list of flags to pass to $nvcc. Can be None. + `cuda_postflags`: list of flags to append to the $nvcc invocation. Can be None. + `sources`: list of paths to source files + `objects`: list of desired paths to objects, one per source. + `ldflags`: list of flags to pass to linker. Can be None. 
+ `library_target`: Name of the output library. Can be None; in that case, + we do no linking. + `with_cuda`: If we should be compiling with CUDA. + """ + def sanitize_flags(flags): + if flags is None: + return [] + else: + return [flag.strip() for flag in flags] + + cflags = sanitize_flags(cflags) + post_cflags = sanitize_flags(post_cflags) + cuda_cflags = sanitize_flags(cuda_cflags) + cuda_post_cflags = sanitize_flags(cuda_post_cflags) + cuda_dlink_post_cflags = sanitize_flags(cuda_dlink_post_cflags) + ldflags = sanitize_flags(ldflags) + + # Sanity checks... + assert len(sources) == len(objects) + assert len(sources) > 0 + + compiler = get_cxx_compiler() + + # Version 1.3 is required for the `deps` directive. + config = ['ninja_required_version = 1.3'] + config.append(f'cxx = {compiler}') + if with_cuda or cuda_dlink_post_cflags: + if "PYTORCH_NVCC" in os.environ: + nvcc = os.getenv("PYTORCH_NVCC") # user can set nvcc compiler with ccache using the environment variable here + else: + if IS_HIP_EXTENSION: + nvcc = _join_rocm_home('bin', 'hipcc') + else: + nvcc = _join_cuda_home('bin', 'nvcc') + config.append(f'nvcc = {nvcc}') + + if IS_HIP_EXTENSION: + post_cflags = COMMON_HIP_FLAGS + post_cflags + flags = [f'cflags = {" ".join(cflags)}'] + flags.append(f'post_cflags = {" ".join(post_cflags)}') + if with_cuda: + flags.append(f'cuda_cflags = {" ".join(cuda_cflags)}') + flags.append(f'cuda_post_cflags = {" ".join(cuda_post_cflags)}') + flags.append(f'cuda_dlink_post_cflags = {" ".join(cuda_dlink_post_cflags)}') + flags.append(f'ldflags = {" ".join(ldflags)}') + + # Turn into absolute paths so we can emit them into the ninja build + # file wherever it is. + sources = [os.path.abspath(file) for file in sources] + + # See https://ninja-build.org/build.ninja.html for reference. + compile_rule = ['rule compile'] + if IS_WINDOWS: + compile_rule.append( + ' command = cl /showIncludes $cflags -c $in /Fo$out $post_cflags') + compile_rule.append(' deps = msvc') + else: + compile_rule.append( + ' command = $cxx -MMD -MF $out.d $cflags -c $in -o $out $post_cflags') + compile_rule.append(' depfile = $out.d') + compile_rule.append(' deps = gcc') + + if with_cuda: + cuda_compile_rule = ['rule cuda_compile'] + nvcc_gendeps = '' + # --generate-dependencies-with-compile is not supported by ROCm + # Nvcc flag `--generate-dependencies-with-compile` is not supported by sccache, which may increase build time. + if torch.version.cuda is not None and os.getenv('TORCH_EXTENSION_SKIP_NVCC_GEN_DEPENDENCIES', '0') != '1': + cuda_compile_rule.append(' depfile = $out.d') + cuda_compile_rule.append(' deps = gcc') + # Note: non-system deps with nvcc are only supported + # on Linux so use --generate-dependencies-with-compile + # to make this work on Windows too. + nvcc_gendeps = '--generate-dependencies-with-compile --dependency-output $out.d' + cuda_compile_rule.append( + f' command = $nvcc {nvcc_gendeps} $cuda_cflags -c $in -o $out $cuda_post_cflags') + + # Emit one build rule per source to enable incremental build. 
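# Illustrative sketch with hypothetical paths and names (not emitted verbatim by
# this code): the general shape of the build.ninja text assembled below for a
# single C++ source, shown only to make the rule/build/link structure concrete.
EXAMPLE_BUILD_NINJA = """\
ninja_required_version = 1.3
cxx = c++

cflags = -DTORCH_EXTENSION_NAME=my_ext -fPIC -std=c++17
post_cflags =
ldflags = -shared

rule compile
  command = $cxx -MMD -MF $out.d $cflags -c $in -o $out $post_cflags
  depfile = $out.d
  deps = gcc

rule link
  command = $cxx $in $ldflags -o $out

build my_ext.o: compile /abs/path/my_ext.cpp
build my_ext.so: link my_ext.o

default my_ext.so
"""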
+ build = [] + for source_file, object_file in zip(sources, objects): + is_cuda_source = _is_cuda_file(source_file) and with_cuda + rule = 'cuda_compile' if is_cuda_source else 'compile' + if IS_WINDOWS: + source_file = source_file.replace(':', '$:') + object_file = object_file.replace(':', '$:') + source_file = source_file.replace(" ", "$ ") + object_file = object_file.replace(" ", "$ ") + build.append(f'build {object_file}: {rule} {source_file}') + + if cuda_dlink_post_cflags: + devlink_out = os.path.join(os.path.dirname(objects[0]), 'dlink.o') + devlink_rule = ['rule cuda_devlink'] + devlink_rule.append(' command = $nvcc $in -o $out $cuda_dlink_post_cflags') + devlink = [f'build {devlink_out}: cuda_devlink {" ".join(objects)}'] + objects += [devlink_out] + else: + devlink_rule, devlink = [], [] + + if library_target is not None: + link_rule = ['rule link'] + if IS_WINDOWS: + cl_paths = subprocess.check_output(['where', + 'cl']).decode(*SUBPROCESS_DECODE_ARGS).split('\r\n') + if len(cl_paths) >= 1: + cl_path = os.path.dirname(cl_paths[0]).replace(':', '$:') + else: + raise RuntimeError("MSVC is required to load C++ extensions") + link_rule.append(f' command = "{cl_path}/link.exe" $in /nologo $ldflags /out:$out') + else: + link_rule.append(' command = $cxx $in $ldflags -o $out') + + link = [f'build {library_target}: link {" ".join(objects)}'] + + default = [f'default {library_target}'] + else: + link_rule, link, default = [], [], [] + + # 'Blocks' should be separated by newlines, for visual benefit. + blocks = [config, flags, compile_rule] + if with_cuda: + blocks.append(cuda_compile_rule) # type: ignore[possibly-undefined] + blocks += [devlink_rule, link_rule, build, devlink, link, default] + content = "\n\n".join("\n".join(b) for b in blocks) + # Ninja requires a new lines at the end of the .ninja file + content += "\n" + _maybe_write(path, content) + +def _join_cuda_home(*paths) -> str: + """ + Join paths with CUDA_HOME, or raises an error if it CUDA_HOME is not set. + + This is basically a lazy way of raising an error for missing $CUDA_HOME + only once we need to get any CUDA-specific path. + """ + if CUDA_HOME is None: + raise OSError('CUDA_HOME environment variable is not set. 
' + 'Please set it to your CUDA install root.') + return os.path.join(CUDA_HOME, *paths) + + +def _is_cuda_file(path: str) -> bool: + valid_ext = ['.cu', '.cuh'] + if IS_HIP_EXTENSION: + valid_ext.append('.hip') + return os.path.splitext(path)[1] in valid_ext diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c3c5a6fb866dc770cbc7bce8b77e819b02865f11 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__init__.py @@ -0,0 +1,76 @@ +# TODO(VitalyFedyunin): Rearranging this imports leads to crash, +# need to cleanup dependencies and fix it +from torch.utils.data.sampler import ( + BatchSampler, + RandomSampler, + Sampler, + SequentialSampler, + SubsetRandomSampler, + WeightedRandomSampler, +) +from torch.utils.data.dataset import ( + ChainDataset, + ConcatDataset, + Dataset, + IterableDataset, + StackDataset, + Subset, + TensorDataset, + random_split, +) +from torch.utils.data.datapipes.datapipe import ( + DFIterDataPipe, + DataChunk, + IterDataPipe, + MapDataPipe, +) +from torch.utils.data.dataloader import ( + DataLoader, + _DatasetKind, + get_worker_info, + default_collate, + default_convert, +) +from torch.utils.data.distributed import DistributedSampler +from torch.utils.data.datapipes._decorator import ( + argument_validation, + functional_datapipe, + guaranteed_datapipes_determinism, + non_deterministic, + runtime_validation, + runtime_validation_disabled, +) + +__all__ = ['BatchSampler', + 'ChainDataset', + 'ConcatDataset', + 'DFIterDataPipe', + 'DataChunk', + 'DataLoader', + 'Dataset', + 'DistributedSampler', + 'IterDataPipe', + 'IterableDataset', + 'MapDataPipe', + 'RandomSampler', + 'Sampler', + 'SequentialSampler', + 'StackDataset', + 'Subset', + 'SubsetRandomSampler', + 'TensorDataset', + 'WeightedRandomSampler', + '_DatasetKind', + 'argument_validation', + 'default_collate', + 'default_convert', + 'functional_datapipe', + 'get_worker_info', + 'guaranteed_datapipes_determinism', + 'non_deterministic', + 'random_split', + 'runtime_validation', + 'runtime_validation_disabled'] + +# Please keep this list sorted +assert __all__ == sorted(__all__) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70476b3b34bbb3ae8d398753eb300a73c1160c5b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/backward_compatibility.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/backward_compatibility.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b369b4a8710f70e387d978f51a52281c1c76391b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/backward_compatibility.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/dataloader.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/dataloader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9e264dba7f83c5fbf75a055e283a82b84bd83c9 Binary files 
/dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/dataloader.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/dataset.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/dataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5093d9c6a5fe629941d555cd087ed20683a719d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/dataset.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/distributed.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/distributed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba9b69bd02dc8eff6cce3da59e5a2e70f4d5581e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/distributed.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/graph.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26589078aacad77fb47b31a8b0bfbc7d17769cc6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/graph.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/graph_settings.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/graph_settings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63ab848afed5a174e7624156104b193a83f4dae3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/graph_settings.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/sampler.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/sampler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c7c5326499ef1204a63159dc719e6f646db046b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/__pycache__/sampler.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/backward_compatibility.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/backward_compatibility.py new file mode 100644 index 0000000000000000000000000000000000000000..be97f016a0917a771970843a4ba70deb68cdd60d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/backward_compatibility.py @@ -0,0 +1,5 @@ +import warnings + +def worker_init_fn(worker_id): + warnings.warn("Usage of backward_compatibility.worker_init_fn is deprecated" + " as DataLoader automatically applies sharding in every worker") diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/dataloader.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/dataloader.py new file mode 100644 index 0000000000000000000000000000000000000000..f18bb602b50da3a23f9521ac003efbdeade2794d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/dataloader.py @@ -0,0 +1,1479 @@ +r"""Definition of the DataLoader and associated iterators that subclass _BaseDataLoaderIter. 
+ +To support these two classes, in `./_utils` we define many utility methods and +functions to be run in multiprocessing. E.g., the data loading worker loop is +in `./_utils/worker.py`. +""" + +import functools +import itertools +import logging +import os +import queue +import threading +import warnings + +from typing import Any, Callable, Iterable, TypeVar, Generic, List, Optional, Union + +import multiprocessing as python_multiprocessing +import torch +import torch.distributed as dist +import torch.multiprocessing as multiprocessing +import torch.utils.data.graph_settings + +from torch._utils import ExceptionWrapper + +from . import ( + IterDataPipe, + MapDataPipe, + IterableDataset, + Sampler, + SequentialSampler, + RandomSampler, + BatchSampler, + Dataset,) + +from torch.utils.data.datapipes.datapipe import _IterDataPipeSerializationWrapper, _MapDataPipeSerializationWrapper + +from . import _utils + +__all__ = [ + "DataLoader", + "get_worker_info", + "default_collate", + "default_convert", +] + +T_co = TypeVar('T_co', covariant=True) +T = TypeVar('T') +_worker_init_fn_t = Callable[[int], None] + +# Ideally we would parameterize `DataLoader` by the return type of `collate_fn`, but there is currently no way to have that +# type parameter set to a default value if the user doesn't pass in a custom 'collate_fn'. +# See https://github.com/python/mypy/issues/3737. +_collate_fn_t = Callable[[List[T]], Any] + + +# These functions used to be defined in this file. However, it was moved to +# _utils/collate.py. Although it is rather hard to access this from user land +# (one has to explicitly directly `import torch.utils.data.dataloader`), there +# probably is user code out there using it. This aliasing maintains BC in this +# aspect. +default_collate: _collate_fn_t = _utils.collate.default_collate +default_convert = _utils.collate.default_convert + +get_worker_info = _utils.worker.get_worker_info + +logger = logging.getLogger(__name__) + + +class _DatasetKind: + Map = 0 + Iterable = 1 + + @staticmethod + def create_fetcher(kind, dataset, auto_collation, collate_fn, drop_last): + if kind == _DatasetKind.Map: + return _utils.fetch._MapDatasetFetcher(dataset, auto_collation, collate_fn, drop_last) + else: + return _utils.fetch._IterableDatasetFetcher(dataset, auto_collation, collate_fn, drop_last) + + +class _InfiniteConstantSampler(Sampler): + r"""Analogous to ``itertools.repeat(None, None)``. + + Used as sampler for :class:`~torch.utils.data.IterableDataset`. 
+ """ + + def __iter__(self): + while True: + yield None + + +def _get_distributed_settings(): + if dist.is_available() and dist.is_initialized(): + return dist.get_world_size(), dist.get_rank() + else: + return 1, 0 + + +def _sharding_worker_init_fn(worker_init_fn, world_size, rank_id, worker_id): + global_worker_id = worker_id + info = torch.utils.data.get_worker_info() + assert info is not None + total_workers = info.num_workers + datapipe = info.dataset + assert isinstance(datapipe, (IterDataPipe, MapDataPipe)) + # To distribute elements across distributed process evenly, we should shard data on distributed + # processes first then shard on worker processes + total_workers *= world_size + global_worker_id = global_worker_id * world_size + rank_id + # For BC, use default SHARDING_PRIORITIES + torch.utils.data.graph_settings.apply_sharding(datapipe, total_workers, global_worker_id) + if worker_init_fn is not None: + worker_init_fn(worker_id) + + +def _share_dist_seed(generator, pg): + _shared_seed = torch.empty((), dtype=torch.int64).random_(generator=generator) + if isinstance(pg, dist.ProcessGroup): + dist.broadcast(_shared_seed, src=0, group=pg) + return _shared_seed.item() + + +class DataLoader(Generic[T_co]): + r""" + Data loader combines a dataset and a sampler, and provides an iterable over the given dataset. + + The :class:`~torch.utils.data.DataLoader` supports both map-style and + iterable-style datasets with single- or multi-process loading, customizing + loading order and optional automatic batching (collation) and memory pinning. + + See :py:mod:`torch.utils.data` documentation page for more details. + + Args: + dataset (Dataset): dataset from which to load the data. + batch_size (int, optional): how many samples per batch to load + (default: ``1``). + shuffle (bool, optional): set to ``True`` to have the data reshuffled + at every epoch (default: ``False``). + sampler (Sampler or Iterable, optional): defines the strategy to draw + samples from the dataset. Can be any ``Iterable`` with ``__len__`` + implemented. If specified, :attr:`shuffle` must not be specified. + batch_sampler (Sampler or Iterable, optional): like :attr:`sampler`, but + returns a batch of indices at a time. Mutually exclusive with + :attr:`batch_size`, :attr:`shuffle`, :attr:`sampler`, + and :attr:`drop_last`. + num_workers (int, optional): how many subprocesses to use for data + loading. ``0`` means that the data will be loaded in the main process. + (default: ``0``) + collate_fn (Callable, optional): merges a list of samples to form a + mini-batch of Tensor(s). Used when using batched loading from a + map-style dataset. + pin_memory (bool, optional): If ``True``, the data loader will copy Tensors + into device/CUDA pinned memory before returning them. If your data elements + are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type, + see the example below. + drop_last (bool, optional): set to ``True`` to drop the last incomplete batch, + if the dataset size is not divisible by the batch size. If ``False`` and + the size of dataset is not divisible by the batch size, then the last batch + will be smaller. (default: ``False``) + timeout (numeric, optional): if positive, the timeout value for collecting a batch + from workers. Should always be non-negative. 
(default: ``0``) + worker_init_fn (Callable, optional): If not ``None``, this will be called on each + worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as + input, after seeding and before data loading. (default: ``None``) + multiprocessing_context (str or multiprocessing.context.BaseContext, optional): If + ``None``, the default `multiprocessing context`_ of your operating system will + be used. (default: ``None``) + generator (torch.Generator, optional): If not ``None``, this RNG will be used + by RandomSampler to generate random indexes and multiprocessing to generate + ``base_seed`` for workers. (default: ``None``) + prefetch_factor (int, optional, keyword-only arg): Number of batches loaded + in advance by each worker. ``2`` means there will be a total of + 2 * num_workers batches prefetched across all workers. (default value depends + on the set value for num_workers. If value of num_workers=0 default is ``None``. + Otherwise, if value of ``num_workers > 0`` default is ``2``). + persistent_workers (bool, optional): If ``True``, the data loader will not shut down + the worker processes after a dataset has been consumed once. This allows to + maintain the workers `Dataset` instances alive. (default: ``False``) + pin_memory_device (str, optional): the device to :attr:`pin_memory` to if ``pin_memory`` is + ``True``. + + + .. warning:: If the ``spawn`` start method is used, :attr:`worker_init_fn` + cannot be an unpicklable object, e.g., a lambda function. See + :ref:`multiprocessing-best-practices` on more details related + to multiprocessing in PyTorch. + + .. warning:: ``len(dataloader)`` heuristic is based on the length of the sampler used. + When :attr:`dataset` is an :class:`~torch.utils.data.IterableDataset`, + it instead returns an estimate based on ``len(dataset) / batch_size``, with proper + rounding depending on :attr:`drop_last`, regardless of multi-process loading + configurations. This represents the best guess PyTorch can make because PyTorch + trusts user :attr:`dataset` code in correctly handling multi-process + loading to avoid duplicate data. + + However, if sharding results in multiple workers having incomplete last batches, + this estimate can still be inaccurate, because (1) an otherwise complete batch can + be broken into multiple ones and (2) more than one batch worth of samples can be + dropped when :attr:`drop_last` is set. Unfortunately, PyTorch can not detect such + cases in general. + + See `Dataset Types`_ for more details on these two types of datasets and how + :class:`~torch.utils.data.IterableDataset` interacts with + `Multi-process data loading`_. + + .. warning:: See :ref:`reproducibility`, and :ref:`dataloader-workers-random-seed`, and + :ref:`data-loading-randomness` notes for random seed related questions. + + .. 
_multiprocessing context: + https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods + """ + + dataset: Dataset[T_co] + batch_size: Optional[int] + num_workers: int + pin_memory: bool + drop_last: bool + timeout: float + sampler: Union[Sampler, Iterable] + pin_memory_device: str + prefetch_factor: Optional[int] + _iterator : Optional['_BaseDataLoaderIter'] + __initialized = False + + def __init__(self, dataset: Dataset[T_co], batch_size: Optional[int] = 1, + shuffle: Optional[bool] = None, sampler: Union[Sampler, Iterable, None] = None, + batch_sampler: Union[Sampler[List], Iterable[List], None] = None, + num_workers: int = 0, collate_fn: Optional[_collate_fn_t] = None, + pin_memory: bool = False, drop_last: bool = False, + timeout: float = 0, worker_init_fn: Optional[_worker_init_fn_t] = None, + multiprocessing_context=None, generator=None, + *, prefetch_factor: Optional[int] = None, + persistent_workers: bool = False, + pin_memory_device: str = ""): + torch._C._log_api_usage_once("python.data_loader") + + if num_workers < 0: + raise ValueError('num_workers option should be non-negative; ' + 'use num_workers=0 to disable multiprocessing.') + + if timeout < 0: + raise ValueError('timeout option should be non-negative') + + if num_workers == 0 and prefetch_factor is not None: + raise ValueError('prefetch_factor option could only be specified in multiprocessing.' + 'let num_workers > 0 to enable multiprocessing, otherwise set prefetch_factor to None.') + elif num_workers > 0 and prefetch_factor is None: + prefetch_factor = 2 + elif prefetch_factor is not None and prefetch_factor < 0: + raise ValueError('prefetch_factor option should be non-negative') + + if persistent_workers and num_workers == 0: + raise ValueError('persistent_workers option needs num_workers > 0') + + self.dataset = dataset + self.num_workers = num_workers + self.prefetch_factor = prefetch_factor + self.pin_memory = pin_memory + self.pin_memory_device = pin_memory_device + self.timeout = timeout + self.worker_init_fn = worker_init_fn + self.multiprocessing_context = multiprocessing_context + + # Adds forward compatibilities so classic DataLoader can work with DataPipes: + # _DataPipeSerializationWrapper container makes it easier to serialize without redefining pickler + if isinstance(self.dataset, IterDataPipe): + self.dataset = _IterDataPipeSerializationWrapper(self.dataset) + elif isinstance(self.dataset, MapDataPipe): + self.dataset = _MapDataPipeSerializationWrapper(self.dataset) + + # Arg-check dataset related before checking samplers because we want to + # tell users that iterable-style datasets are incompatible with custom + # samplers first, so that they don't learn that this combo doesn't work + # after spending time fixing the custom sampler errors. + if isinstance(dataset, IterableDataset): + self._dataset_kind = _DatasetKind.Iterable + # NOTE [ Custom Samplers and IterableDataset ] + # + # `IterableDataset` does not support custom `batch_sampler` or + # `sampler` since the key is irrelevant (unless we support + # generator-style dataset one day...). + # + # For `sampler`, we always create a dummy sampler. 
This is an + # infinite sampler even when the dataset may have an implemented + # finite `__len__` because in multi-process data loading, naive + # settings will return duplicated data (which may be desired), and + # thus using a sampler with length matching that of dataset will + # cause data lost (you may have duplicates of the first couple + # batches, but never see anything afterwards). Therefore, + # `Iterabledataset` always uses an infinite sampler, an instance of + # `_InfiniteConstantSampler` defined above. + # + # A custom `batch_sampler` essentially only controls the batch size. + # However, it is unclear how useful it would be since an iterable-style + # dataset can handle that within itself. Moreover, it is pointless + # in multi-process data loading as the assignment order of batches + # to workers is an implementation detail so users can not control + # how to batchify each worker's iterable. Thus, we disable this + # option. If this turns out to be useful in future, we can re-enable + # this, and support custom samplers that specify the assignments to + # specific workers. + if isinstance(dataset, IterDataPipe): + if shuffle is not None: + dataset = torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle) + # We cannot check `shuffle is not None` here, since previously `shuffle=False` was the default. + elif shuffle not in {False, None}: + raise ValueError( + f"DataLoader with IterableDataset: expected unspecified shuffle option, but got shuffle={shuffle}") + + if sampler is not None: + # See NOTE [ Custom Samplers and IterableDataset ] + raise ValueError( + f"DataLoader with IterableDataset: expected unspecified sampler option, but got sampler={sampler}") + elif batch_sampler is not None: + # See NOTE [ Custom Samplers and IterableDataset ] + raise ValueError( + "DataLoader with IterableDataset: expected unspecified " + f"batch_sampler option, but got batch_sampler={batch_sampler}") + else: + shuffle = bool(shuffle) + self._dataset_kind = _DatasetKind.Map + + + + if sampler is not None and shuffle: + raise ValueError('sampler option is mutually exclusive with ' + 'shuffle') + + if batch_sampler is not None: + # auto_collation with custom batch_sampler + if batch_size != 1 or shuffle or sampler is not None or drop_last: + raise ValueError('batch_sampler option is mutually exclusive ' + 'with batch_size, shuffle, sampler, and ' + 'drop_last') + batch_size = None + drop_last = False + elif batch_size is None: + # no auto_collation + if drop_last: + raise ValueError('batch_size=None option disables auto-batching ' + 'and is mutually exclusive with drop_last') + + if sampler is None: # give default samplers + if self._dataset_kind == _DatasetKind.Iterable: + # See NOTE [ Custom Samplers and IterableDataset ] + sampler = _InfiniteConstantSampler() + else: # map-style + if shuffle: + sampler = RandomSampler(dataset, generator=generator) # type: ignore[arg-type] + else: + sampler = SequentialSampler(dataset) # type: ignore[arg-type] + + if batch_size is not None and batch_sampler is None: + # auto_collation without custom batch_sampler + batch_sampler = BatchSampler(sampler, batch_size, drop_last) + + self.batch_size = batch_size + self.drop_last = drop_last + self.sampler = sampler + self.batch_sampler = batch_sampler + self.generator = generator + + if collate_fn is None: + if self._auto_collation: + collate_fn = _utils.collate.default_collate + else: + collate_fn = _utils.collate.default_convert + + self.collate_fn = collate_fn + self.persistent_workers = 
persistent_workers + + self.__initialized = True + self._IterableDataset_len_called = None # See NOTE [ IterableDataset and __len__ ] + + self._iterator = None + + self.check_worker_number_rationality() + + torch.set_vital('Dataloader', 'enabled', 'True') # type: ignore[attr-defined] + + def _get_iterator(self) -> '_BaseDataLoaderIter': + if self.num_workers == 0: + return _SingleProcessDataLoaderIter(self) + else: + self.check_worker_number_rationality() + return _MultiProcessingDataLoaderIter(self) + + @property + def multiprocessing_context(self): + return self.__multiprocessing_context + + @multiprocessing_context.setter + def multiprocessing_context(self, multiprocessing_context): + if multiprocessing_context is not None: + if self.num_workers > 0: + if isinstance(multiprocessing_context, str): + valid_start_methods = multiprocessing.get_all_start_methods() + if multiprocessing_context not in valid_start_methods: + raise ValueError( + 'multiprocessing_context option ' + f'should specify a valid start method in {valid_start_methods!r}, but got ' + f'multiprocessing_context={multiprocessing_context!r}') + multiprocessing_context = multiprocessing.get_context(multiprocessing_context) + + if not isinstance(multiprocessing_context, python_multiprocessing.context.BaseContext): + raise TypeError('multiprocessing_context option should be a valid context ' + 'object or a string specifying the start method, but got ' + f'multiprocessing_context={multiprocessing_context}') + else: + raise ValueError('multiprocessing_context can only be used with ' + 'multi-process loading (num_workers > 0), but got ' + f'num_workers={self.num_workers}') + + self.__multiprocessing_context = multiprocessing_context + + def __setattr__(self, attr, val): + if self.__initialized and attr in ( + 'batch_size', 'batch_sampler', 'sampler', 'drop_last', 'dataset', 'persistent_workers'): + raise ValueError(f'{attr} attribute should not be set after {self.__class__.__name__} is initialized') + + super().__setattr__(attr, val) + + # We quote '_BaseDataLoaderIter' since it isn't defined yet and the definition can't be moved up + # since '_BaseDataLoaderIter' references 'DataLoader'. + def __iter__(self) -> '_BaseDataLoaderIter': + # When using a single worker the returned iterator should be + # created everytime to avoid resetting its state + # However, in the case of a multiple workers iterator + # the iterator is only created once in the lifetime of the + # DataLoader object so that workers can be reused + if self.persistent_workers and self.num_workers > 0: + if self._iterator is None: + self._iterator = self._get_iterator() + else: + self._iterator._reset(self) + return self._iterator + else: + return self._get_iterator() + + @property + def _auto_collation(self): + return self.batch_sampler is not None + + @property + def _index_sampler(self): + # The actual sampler used for generating indices for `_DatasetFetcher` + # (see _utils/fetch.py) to read data at each time. This would be + # `.batch_sampler` if in auto-collation mode, and `.sampler` otherwise. + # We can't change `.sampler` and `.batch_sampler` attributes for BC + # reasons. + if self._auto_collation: + return self.batch_sampler + else: + return self.sampler + + def __len__(self) -> int: + if self._dataset_kind == _DatasetKind.Iterable: + # NOTE [ IterableDataset and __len__ ] + # + # For `IterableDataset`, `__len__` could be inaccurate when one naively + # does multi-processing data loading, since the samples will be duplicated. 
+ # However, no real use case should actually be using that behavior, so
+ # it should count as a user error. We should generally trust user
+ # code to do the proper thing (e.g., configure each replica differently
+ # in `__iter__`), and give us the correct `__len__` if they choose to
+ # implement it (this will still throw if the dataset does not implement
+ # a `__len__`).
+ #
+ # To provide a further warning, we track if `__len__` was called on the
+ # `DataLoader`, save the returned value in `self._len_called`, and warn
+ # if the iterator ends up yielding more than this number of samples.
+
+ # Cannot statically verify that dataset is Sized
+ length = self._IterableDataset_len_called = len(self.dataset) # type: ignore[assignment, arg-type]
+ if self.batch_size is not None: # IterableDataset doesn't allow custom sampler or batch_sampler
+ from math import ceil
+ if self.drop_last:
+ length = length // self.batch_size
+ else:
+ length = ceil(length / self.batch_size)
+ return length
+ else:
+ return len(self._index_sampler)
+
+ def check_worker_number_rationality(self):
+ # This function checks whether the DataLoader's worker number is reasonable given the
+ # current system's resources. The current rule is that if the number of workers this
+ # DataLoader will create is bigger than the number of logical CPUs it is allowed to
+ # use, then we emit a warning so the user pays attention.
+ #
+ # e.g. if the current system has 2 physical CPUs with 16 cores each, and each core supports 2
+ # threads, then the total number of logical CPUs is 2 * 16 * 2 = 64. Say the current
+ # DataLoader process may use half of them, i.e. 32; then the reasonable max number of
+ # workers initiated from this process is 32.
+ # Now, if the created DataLoader has num_workers = 40, which is bigger than 32,
+ # the warning is triggered to tell the user to lower the worker number if
+ # necessary.
+ #
+ #
+ # [Note] Please note that this function respects `cpuset` only when os.sched_getaffinity is
+ # available (available on most Linux systems, but not macOS and Windows).
+ # When os.sched_getaffinity is not available, os.cpu_count() is called instead, but
+ # it doesn't respect cpuset.
+ # We don't take threading into account since each worker process is single-threaded
+ # at this time.
+ #
+ # We don't set any threading flags (e.g. OMP_NUM_THREADS, MKL_NUM_THREADS, etc.)
+ # other than calling `torch.set_num_threads(1)` in the worker process; if the passed-in
+ # functions use third-party modules that rely on those threading flags to determine
+ # how many threads to create (e.g. numpy, etc.), then it is the caller's responsibility to
+ # set those flags correctly.
+ def _create_warning_msg(num_worker_suggest, num_worker_created, cpuset_checked):
+
+ suggested_max_worker_msg = ((
+ "Our suggested max number of worker in current system is {}{}, which is smaller "
+ "than what this DataLoader is going to create.").format(
+ num_worker_suggest,
+ ("" if cpuset_checked else " (`cpuset` is not taken into account)"))
+ ) if num_worker_suggest is not None else (
+ "DataLoader is not able to compute a suggested max number of worker in current system.")
+
+ warn_msg = (
+ "This DataLoader will create {} worker processes in total. 
{} " + "Please be aware that excessive worker creation might get DataLoader running slow or even freeze, " + "lower the worker number to avoid potential slowness/freeze if necessary.").format( + num_worker_created, + suggested_max_worker_msg) + return warn_msg + + if not self.num_workers or self.num_workers == 0: + return + + # try to compute a suggested max number of worker based on system's resource + max_num_worker_suggest = None + cpuset_checked = False + if hasattr(os, 'sched_getaffinity'): + try: + max_num_worker_suggest = len(os.sched_getaffinity(0)) + cpuset_checked = True + except Exception: + pass + if max_num_worker_suggest is None: + # os.cpu_count() could return Optional[int] + # get cpu count first and check None in order to satisfy mypy check + cpu_count = os.cpu_count() + if cpu_count is not None: + max_num_worker_suggest = cpu_count + + if max_num_worker_suggest is None: + warnings.warn(_create_warning_msg( + max_num_worker_suggest, + self.num_workers, + cpuset_checked)) + return + + if self.num_workers > max_num_worker_suggest: + warnings.warn(_create_warning_msg( + max_num_worker_suggest, + self.num_workers, + cpuset_checked)) + + +class _BaseDataLoaderIter: + def __init__(self, loader: DataLoader) -> None: + self._dataset = loader.dataset + self._shared_seed = None + self._pg = None + if isinstance(self._dataset, IterDataPipe): + if dist.is_available() and dist.is_initialized(): + self._pg = dist.new_group(backend="gloo") + self._shared_seed = _share_dist_seed(loader.generator, self._pg) + shared_rng = torch.Generator() + shared_rng.manual_seed(self._shared_seed) + self._dataset = torch.utils.data.graph_settings.apply_random_seed(self._dataset, shared_rng) + self._dataset_kind = loader._dataset_kind + self._IterableDataset_len_called = loader._IterableDataset_len_called + self._auto_collation = loader._auto_collation + self._drop_last = loader.drop_last + self._index_sampler = loader._index_sampler + self._num_workers = loader.num_workers + ws, rank = _get_distributed_settings() + self._world_size = ws + self._rank = rank + # for other backends, pin_memory_device need to set. if not set + # default behaviour is CUDA device. if pin_memory_device is selected + # and pin_memory is not set, the default behaviour false. 
+ if (len(loader.pin_memory_device) == 0): + self._pin_memory = loader.pin_memory and torch.cuda.is_available() + self._pin_memory_device = None + else: + if not loader.pin_memory: + warn_msg = ("pin memory device is set and pin_memory flag is not used then device pinned memory won't be used" + "please set pin_memory to true, if you need to use the device pin memory") + warnings.warn(warn_msg) + + self._pin_memory = loader.pin_memory + self._pin_memory_device = loader.pin_memory_device + self._timeout = loader.timeout + self._collate_fn = loader.collate_fn + self._sampler_iter = iter(self._index_sampler) + self._base_seed = torch.empty((), dtype=torch.int64).random_(generator=loader.generator).item() + self._persistent_workers = loader.persistent_workers + self._num_yielded = 0 + self._profile_name = f"enumerate(DataLoader)#{self.__class__.__name__}.__next__" + + def __iter__(self) -> '_BaseDataLoaderIter': + return self + + def _reset(self, loader, first_iter=False): + self._sampler_iter = iter(self._index_sampler) + self._num_yielded = 0 + self._IterableDataset_len_called = loader._IterableDataset_len_called + if isinstance(self._dataset, IterDataPipe): + self._shared_seed = _share_dist_seed(loader.generator, self._pg) + shared_rng = torch.Generator() + shared_rng.manual_seed(self._shared_seed) + self._dataset = torch.utils.data.graph_settings.apply_random_seed(self._dataset, shared_rng) + + def _next_index(self): + return next(self._sampler_iter) # may raise StopIteration + + def _next_data(self): + raise NotImplementedError + + def __next__(self) -> Any: + with torch.autograd.profiler.record_function(self._profile_name): + if self._sampler_iter is None: + # TODO(https://github.com/pytorch/pytorch/issues/76750) + self._reset() # type: ignore[call-arg] + data = self._next_data() + self._num_yielded += 1 + if self._dataset_kind == _DatasetKind.Iterable and \ + self._IterableDataset_len_called is not None and \ + self._num_yielded > self._IterableDataset_len_called: + warn_msg = ("Length of IterableDataset {} was reported to be {} (when accessing len(dataloader)), but {} " + "samples have been fetched. ").format(self._dataset, self._IterableDataset_len_called, + self._num_yielded) + if self._num_workers > 0: + warn_msg += ("For multiprocessing data-loading, this could be caused by not properly configuring the " + "IterableDataset replica at each worker. Please see " + "https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset for examples.") + warnings.warn(warn_msg) + return data + + def __len__(self) -> int: + return len(self._index_sampler) + + def __getstate__(self): + # TODO: add limited pickling support for sharing an iterator + # across multiple threads for HOGWILD. 
+ # Probably the best way to do this is by moving the sample pushing + # to a separate thread and then just sharing the data queue + # but signalling the end is tricky without a non-blocking API + raise NotImplementedError("{} cannot be pickled", self.__class__.__name__) + + +class _SingleProcessDataLoaderIter(_BaseDataLoaderIter): + def __init__(self, loader): + super().__init__(loader) + assert self._timeout == 0 + assert self._num_workers == 0 + + # Adds forward compatibilities so classic DataLoader can work with DataPipes: + # Taking care of distributed sharding + if isinstance(self._dataset, (IterDataPipe, MapDataPipe)): + # For BC, use default SHARDING_PRIORITIES + torch.utils.data.graph_settings.apply_sharding(self._dataset, self._world_size, self._rank) + + self._dataset_fetcher = _DatasetKind.create_fetcher( + self._dataset_kind, self._dataset, self._auto_collation, self._collate_fn, self._drop_last) + + def _next_data(self): + index = self._next_index() # may raise StopIteration + data = self._dataset_fetcher.fetch(index) # may raise StopIteration + if self._pin_memory: + data = _utils.pin_memory.pin_memory(data, self._pin_memory_device) + return data + + +class _MultiProcessingDataLoaderIter(_BaseDataLoaderIter): + r"""Iterates once over the DataLoader's dataset, as specified by the sampler.""" + + # NOTE [ Data Loader Multiprocessing Shutdown Logic ] + # + # Preliminary: + # + # Our data model looks like this (queues are indicated with curly brackets): + # + # main process || + # | || + # {index_queue} || + # | || + # worker processes || DATA + # | || + # {worker_result_queue} || FLOW + # | || + # pin_memory_thread of main process || DIRECTION + # | || + # {data_queue} || + # | || + # data output \/ + # + # P.S. `worker_result_queue` and `pin_memory_thread` part may be omitted if + # `pin_memory=False`. + # + # + # Terminating multiprocessing logic requires very careful design. In + # particular, we need to make sure that + # + # 1. The iterator gracefully exits the workers when its last reference is + # gone or it is depleted. + # + # In this case, the workers should be gracefully exited because the + # main process may still need to continue to run, and we want cleaning + # up code in the workers to be executed (e.g., releasing GPU memory). + # Naturally, we implement the shutdown logic in `__del__` of + # DataLoaderIterator. + # + # We delay the discussion on the logic in this case until later. + # + # 2. The iterator exits the workers when the loader process and/or worker + # processes exits normally or with error. + # + # We set all workers and `pin_memory_thread` to have `daemon=True`. + # + # You may ask, why can't we make the workers non-daemonic, and + # gracefully exit using the same logic as we have in `__del__` when the + # iterator gets deleted (see 1 above)? + # + # First of all, `__del__` is **not** guaranteed to be called when + # interpreter exits. Even if it is called, by the time it executes, + # many Python core library resources may already be freed, and even + # simple things like acquiring an internal lock of a queue may hang. + # Therefore, in this case, we actually need to prevent `__del__` from + # being executed, and rely on the automatic termination of daemonic + # children. + # + # Thus, we register an `atexit` hook that sets a global flag + # `_utils.python_exit_status`. 
Since `atexit` hooks are executed in the
+ # reverse order of registration, we are guaranteed that this flag is
+ # set before library resources we use are freed (which, at least in
+ # CPython, is done via an `atexit` handler defined in
+ # `multiprocessing/util.py`
+ # https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/util.py#L320-L362
+ # registered when an object requiring this mechanism is first
+ # created, e.g., `mp.Queue`
+ # https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/context.py#L100-L103
+ # https://github.com/python/cpython/blob/c606624af8d4cb3b4a052fb263bb983b3f87585b/Lib/multiprocessing/queues.py#L29
+ # )
+ #
+ # So in `__del__`, we check if `_utils.python_exit_status` is set or
+ # `None` (freed), and perform no-op if so.
+ #
+ # However, simply letting library clean-up codes run can also be bad,
+ # because such codes (i.e., `multiprocessing.util._exit_function()`)
+ # include joining the putting threads of `mp.Queue`, which can be blocking.
+ # Hence, `cancel_join_thread` is called at creation on the queues that the
+ # main process puts into. See later section
+ # [ 3b. A process won't hang when putting into a queue; ]
+ # for more details.
+ #
+ # Here are two example cases where library clean-up codes can run
+ # before `__del__` is called:
+ #
+ # 1. If we hold onto a reference to the iterator, it more often
+ # than not tries to do `multiprocessing` library cleaning before
+ # clearing the alive referenced objects (https://github.com/pytorch/pytorch/issues/48666)
+ # and thus prevents our cleaning-up code from running first.
+ #
+ # 2. A similar issue arises when a `DataLoader` is used in a subprocess.
+ # When a process ends, it shuts all its daemonic children
+ # down with a SIGTERM (instead of joining them without a timeout).
+ # Similarly for threads, but by a different mechanism. This fact,
+ # together with a few implementation details of multiprocessing, forces
+ # us to make workers daemonic. All of our problems arise when a
+ # DataLoader is used in a subprocess, and are caused by multiprocessing
+ # code which looks more or less like this:
+ #
+ # try:
+ # your_function_using_a_dataloader()
+ # finally:
+ # multiprocessing.util._exit_function()
+ #
+ # The joining/termination mentioned above happens inside
+ # `_exit_function()`. Now, if `your_function_using_a_dataloader()`
+ # throws, the stack trace stored in the exception will prevent the
+ # frame which uses `DataLoaderIter` from being freed. If the frame has any
+ # reference to the `DataLoaderIter` (e.g., in a method of the iter),
+ # its `__del__`, which starts the shutdown procedure, will not be
+ # called. That, in turn, means that workers aren't notified. Attempting
+ # to join in `_exit_function` will then result in a hang.
+ #
+ # For context, `_exit_function` is also registered as an `atexit` call.
+ # So it is unclear to me (@ssnl) why this is needed in a finally block.
+ # The code dates back to 2008 and there is no comment on the original
+ # PEP 371 or patch https://bugs.python.org/issue3050 (containing both
+ # the finally block and the `atexit` registration) that explains this.
+ #
+ #
+ # Finally, another choice is to just shut down workers with logic in 1
+ # above whenever we see an error in `next`. This isn't ideal because
+ # a. It prevents users from using try-catch to resume data loading.
+ # b. It doesn't prevent hanging if users have references to the
+ # iterator.
+ #
+ # 3. 
All processes exit if any of them die unexpectedly by fatal signals. + # + # As shown above, the workers are set as daemonic children of the main + # process. However, automatic cleaning-up of such child processes only + # happens if the parent process exits gracefully (e.g., not via fatal + # signals like SIGKILL). So we must ensure that each process will exit + # even the process that should send/receive data to/from it were + # killed, i.e., + # + # a. A process won't hang when getting from a queue. + # + # Even with carefully designed data dependencies (i.e., a `put()` + # always corresponding to a `get()`), hanging on `get()` can still + # happen when data in queue is corrupted (e.g., due to + # `cancel_join_thread` or unexpected exit). + # + # For child exit, we set a timeout whenever we try to get data + # from `data_queue`, and check the workers' status on each timeout + # and error. + # See `_DataLoaderiter._get_batch()` and + # `_DataLoaderiter._try_get_data()` for details. + # + # Additionally, for child exit on non-Windows platforms, we also + # register a SIGCHLD handler (which is supported on Windows) on + # the main process, which checks if any of the workers fail in the + # (Python) handler. This is more efficient and faster in detecting + # worker failures, compared to only using the above mechanism. + # See `DataLoader.cpp` and `_utils/signal_handling.py` for details. + # + # For `.get()` calls where the sender(s) is not the workers, we + # guard them with timeouts, and check the status of the sender + # when timeout happens: + # + in the workers, the `_utils.worker.ManagerWatchdog` class + # checks the status of the main process. + # + if `pin_memory=True`, when getting from `pin_memory_thread`, + # check `pin_memory_thread` status periodically until `.get()` + # returns or see that `pin_memory_thread` died. + # + # b. A process won't hang when putting into a queue; + # + # We use `mp.Queue` which has a separate background thread to put + # objects from an unbounded buffer array. The background thread is + # daemonic and usually automatically joined when the process + # *exits*. + # + # In case that the receiver has ended abruptly while + # reading from the pipe, the join will hang forever. The usual + # solution for this in Python is calling `q.cancel_join_thread`, + # which prevents automatically joining it when finalizing + # (exiting). + # + # Nonetheless, `cancel_join_thread` must only be called when the + # queue is **not** going to be read from or write into by another + # process, because it may hold onto a lock or leave corrupted data + # in the queue, leading other readers/writers to hang. + # + # Hence, + # + For worker processes, we only do so (for their output + # queues, i.e., `worker_result_queue`) before exiting. + # + For `pin_memory_thread`, its output queue `data_queue` is a + # `queue.Queue` that does blocking `put` if the queue is full. + # So there is no above problem, but as a result, in + # `_pin_memory_loop`, we do need to wrap the `put` in a loop + # that breaks not only upon success, but also when the main + # process stops reading, i.e., is shutting down. + # + For loader process, we `cancel_join_thread()` for all + # `_index_queues` because the whole purpose of workers and + # `pin_memory_thread` is to serve the loader process. If + # loader process is already exiting, we don't really care if + # the queues are corrupted. 
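# Standalone illustrative sketch (not part of this file): the
# `cancel_join_thread` call discussed in 3b above, applied to a queue that a
# process only ever puts into, so that process exit never blocks on flushing
# unconsumed queue data.
import multiprocessing as mp

def _producer(q) -> None:
    q.cancel_join_thread()      # exit promptly even if unflushed data remains
    for i in range(10_000):
        q.put(i)

if __name__ == "__main__":
    ctx = mp.get_context("spawn")
    q = ctx.Queue()
    p = ctx.Process(target=_producer, args=(q,), daemon=True)
    p.start()
    print(q.get())              # read a little, then stop consuming
    p.join(timeout=5)           # producer can still exit without hanging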
+ #
+ #
+ # Now let's get back to 1:
+ # how we gracefully exit the workers when the last reference to the
+ # iterator is gone.
+ #
+ # To achieve this, we implement the following logic along with the design
+ # choices mentioned above:
+ #
+ # `workers_done_event`:
+ # A `multiprocessing.Event` shared among the main process and all worker
+ # processes. This is used to signal the workers that the iterator is
+ # shutting down. After it is set, they will not send processed data to
+ # queues anymore, and only wait for the final `None` before exiting.
+ # `done_event` isn't strictly needed. I.e., we can just check for `None`
+ # from the input queue, but it allows us to skip wasting resources
+ # processing data if we are already shutting down.
+ #
+ # `pin_memory_thread_done_event`:
+ # A `threading.Event` for a similar purpose to that of
+ # `workers_done_event`, but is for the `pin_memory_thread`. The reason
+ # that separate events are needed is that `pin_memory_thread` reads from
+ # the output queue of the workers. But the workers, upon seeing that
+ # `workers_done_event` is set, only want to see the final `None`, and are
+ # not required to flush all data in the output queue (e.g., it may call
+ # `cancel_join_thread` on that queue if its `IterableDataset` iterator
+ # happens to exhaust coincidentally, which is out of the control of the
+ # main process). Thus, since we will exit `pin_memory_thread` before the
+ # workers (see below), two separate events are used.
+ #
+ # NOTE: In short, the protocol is that the main process will set these
+ # `done_event`s and then send the corresponding processes/threads a `None`,
+ # and that they may exit at any time after receiving the `None`.
+ #
+ # NOTE: Using `None` as the final signal is valid, since normal data will
+ # always be a 2-tuple with the 1st element being the index of the data
+ # transferred (different from dataset index/key), and the 2nd being
+ # either the dataset key or the data sample (depending on which part
+ # of the data model the queue is at).
+ #
+ # [ worker processes ]
+ # While loader process is alive:
+ # Get from `index_queue`.
+ # If get anything else,
+ # Check `workers_done_event`.
+ # If set, continue to next iteration
+ # i.e., keep getting until see the `None`, then exit.
+ # Otherwise, process data:
+ # If is fetching from an `IterableDataset` and the iterator
+ # is exhausted, send an `_IterableDatasetStopIteration`
+ # object to signal iteration end. The main process, upon
+ # receiving such an object, will send `None` to this
+ # worker and not use the corresponding `index_queue`
+ # anymore.
+ # If timed out,
+ # Whether or not `workers_done_event` is set (still need to see `None`),
+ # must continue to next iteration.
+ # (outside loop)
+ # If `workers_done_event` is set, (this can be False with `IterableDataset`)
+ # `data_queue.cancel_join_thread()`. (Everything is ending here:
+ # main process won't read from it;
+ # other workers will also call
+ # `cancel_join_thread`.)
+ #
+ # [ pin_memory_thread ]
+ # # No need to check main thread. If this thread is alive, the main loader
+ # # thread must be alive, because this thread is set as daemonic.
+ # While `pin_memory_thread_done_event` is not set:
+ # Get from `worker_result_queue`.
+ # If timed out, continue to get in the next iteration.
+ # Otherwise, process data.
+ # While `pin_memory_thread_done_event` is not set:
+ # Put processed data to `data_queue` (a `queue.Queue` with blocking put)
+ # If timed out, continue to put in the next iteration.
+ # Otherwise, break, i.e., continuing to the out loop. + # + # NOTE: we don't check the status of the main thread because + # 1. if the process is killed by fatal signal, `pin_memory_thread` + # ends. + # 2. in other cases, either the cleaning-up in __del__ or the + # automatic exit of daemonic thread will take care of it. + # This won't busy-wait either because `.get(timeout)` does not + # busy-wait. + # + # [ main process ] + # In the DataLoader Iter's `__del__` + # b. Exit `pin_memory_thread` + # i. Set `pin_memory_thread_done_event`. + # ii Put `None` in `worker_result_queue`. + # iii. Join the `pin_memory_thread`. + # iv. `worker_result_queue.cancel_join_thread()`. + # + # c. Exit the workers. + # i. Set `workers_done_event`. + # ii. Put `None` in each worker's `index_queue`. + # iii. Join the workers. + # iv. Call `.cancel_join_thread()` on each worker's `index_queue`. + # + # NOTE: (c) is better placed after (b) because it may leave corrupted + # data in `worker_result_queue`, which `pin_memory_thread` + # reads from, in which case the `pin_memory_thread` can only + # happen at timing out, which is slow. Nonetheless, same thing + # happens if a worker is killed by signal at unfortunate times, + # but in other cases, we are better off having a non-corrupted + # `worker_result_queue` for `pin_memory_thread`. + # + # NOTE: If `pin_memory=False`, there is no `pin_memory_thread` and (b) + # can be omitted + # + # NB: `done_event`s isn't strictly needed. E.g., we can just check for + # `None` from `index_queue`, but it allows us to skip wasting resources + # processing indices already in `index_queue` if we are already shutting + # down. + + def __init__(self, loader): + super().__init__(loader) + + self._prefetch_factor = loader.prefetch_factor + + assert self._num_workers > 0 + assert self._prefetch_factor > 0 + + if loader.multiprocessing_context is None: + multiprocessing_context = multiprocessing + else: + multiprocessing_context = loader.multiprocessing_context + + self._worker_init_fn = loader.worker_init_fn + + # Adds forward compatibilities so classic DataLoader can work with DataPipes: + # Additional worker init function will take care of sharding in MP and Distributed + if isinstance(self._dataset, (IterDataPipe, MapDataPipe)): + self._worker_init_fn = functools.partial( + _sharding_worker_init_fn, self._worker_init_fn, self._world_size, self._rank) + + # No certainty which module multiprocessing_context is + self._worker_result_queue = multiprocessing_context.Queue() # type: ignore[var-annotated] + self._worker_pids_set = False + self._shutdown = False + self._workers_done_event = multiprocessing_context.Event() + + self._index_queues = [] + self._workers = [] + for i in range(self._num_workers): + # No certainty which module multiprocessing_context is + index_queue = multiprocessing_context.Queue() # type: ignore[var-annotated] + # Need to `cancel_join_thread` here! + # See sections (2) and (3b) above. + index_queue.cancel_join_thread() + w = multiprocessing_context.Process( + target=_utils.worker._worker_loop, + args=(self._dataset_kind, self._dataset, index_queue, + self._worker_result_queue, self._workers_done_event, + self._auto_collation, self._collate_fn, self._drop_last, + self._base_seed, self._worker_init_fn, i, self._num_workers, + self._persistent_workers, self._shared_seed)) + w.daemon = True + # NB: Process.start() actually take some time as it needs to + # start a process and pass the arguments over via a pipe. 
+ # Therefore, we only add a worker to self._workers list after + # it started, so that we do not call .join() if program dies + # before it starts, and __del__ tries to join but will get: + # AssertionError: can only join a started process. + w.start() + self._index_queues.append(index_queue) + self._workers.append(w) + + if self._pin_memory: + self._pin_memory_thread_done_event = threading.Event() + + # Queue is not type-annotated + self._data_queue = queue.Queue() # type: ignore[var-annotated] + if self._pin_memory_device == "xpu": + current_device = torch.xpu.current_device() # type: ignore[attr-defined] + elif self._pin_memory_device == torch._C._get_privateuse1_backend_name(): + custom_device_mod = getattr(torch, torch._C._get_privateuse1_backend_name()) + current_device = custom_device_mod.current_device() + else: + current_device = torch.cuda.current_device() # choose cuda for default + pin_memory_thread = threading.Thread( + target=_utils.pin_memory._pin_memory_loop, + args=(self._worker_result_queue, self._data_queue, + current_device, + self._pin_memory_thread_done_event, self._pin_memory_device)) + pin_memory_thread.daemon = True + pin_memory_thread.start() + # Similar to workers (see comment above), we only register + # pin_memory_thread once it is started. + self._pin_memory_thread = pin_memory_thread + else: + self._data_queue = self._worker_result_queue # type: ignore[assignment] + + # In some rare cases, persistent workers (daemonic processes) + # would be terminated before `__del__` of iterator is invoked + # when main process exits + # It would cause failure when pin_memory_thread tries to read + # corrupted data from worker_result_queue + # atexit is used to shutdown thread and child processes in the + # right sequence before main process exits + if self._persistent_workers and self._pin_memory: + import atexit + for w in self._workers: + atexit.register(_MultiProcessingDataLoaderIter._clean_up_worker, w) + + # .pid can be None only before process is spawned (not the case, so ignore) + _utils.signal_handling._set_worker_pids(id(self), tuple(w.pid for w in self._workers)) # type: ignore[misc] + _utils.signal_handling._set_SIGCHLD_handler() + self._worker_pids_set = True + self._reset(loader, first_iter=True) + + def _reset(self, loader, first_iter=False): + super()._reset(loader, first_iter) + self._send_idx = 0 # idx of the next task to be sent to workers + self._rcvd_idx = 0 # idx of the next task to be returned in __next__ + # information about data not yet yielded, i.e., tasks w/ indices in range [rcvd_idx, send_idx). + # map: task idx => - (worker_id,) if data isn't fetched (outstanding) + # \ (worker_id, data) if data is already fetched (out-of-order) + self._task_info = {} + self._tasks_outstanding = 0 # always equal to count(v for v in task_info.values() if len(v) == 1) + # A list of booleans representing whether each worker still has work to + # do, i.e., not having exhausted its iterable dataset object. It always + # contains all `True`s if not using an iterable-style dataset + # (i.e., if kind != Iterable). + # Not that this indicates that a worker still has work to do *for this epoch*. + # It does not mean that a worker is dead. In case of `_persistent_workers`, + # the worker will be reset to available in the next epoch. 
+ self._workers_status = [True for i in range(self._num_workers)]
+ # Reset the worker queue cycle so it resumes next epoch at worker 0
+ self._worker_queue_idx_cycle = itertools.cycle(range(self._num_workers))
+ # We resume the prefetching in case it was enabled
+ if not first_iter:
+ for idx in range(self._num_workers):
+ self._index_queues[idx].put(_utils.worker._ResumeIteration(self._shared_seed))
+ resume_iteration_cnt = self._num_workers
+ while resume_iteration_cnt > 0:
+ return_idx, return_data = self._get_data()
+ if isinstance(return_idx, _utils.worker._ResumeIteration):
+ assert return_data is None
+ resume_iteration_cnt -= 1
+ # prime the prefetch loop
+ for _ in range(self._prefetch_factor * self._num_workers):
+ self._try_put_index()
+
+ def _try_get_data(self, timeout=_utils.MP_STATUS_CHECK_INTERVAL):
+ # Tries to fetch data from `self._data_queue` once for a given timeout.
+ # This can also be used as inner loop of fetching without timeout, with
+ # the sender status as the loop condition.
+ #
+ # This raises a `RuntimeError` if any worker died unexpectedly. This error
+ # can come from either the SIGCHLD handler in `_utils/signal_handling.py`
+ # (only for non-Windows platforms), or the manual check below on errors
+ # and timeouts.
+ #
+ # Returns a 2-tuple:
+ # (bool: whether data was successfully fetched, any: data if successful else None)
+ try:
+ data = self._data_queue.get(timeout=timeout)
+ return (True, data)
+ except Exception as e:
+ # At timeout and error, we manually check whether any worker has
+ # failed. Note that this is the only mechanism for Windows to detect
+ # worker failures.
+ failed_workers = []
+ for worker_id, w in enumerate(self._workers):
+ if self._workers_status[worker_id] and not w.is_alive():
+ failed_workers.append(w)
+ self._mark_worker_as_unavailable(worker_id)
+ if len(failed_workers) > 0:
+ pids_str = ', '.join(str(w.pid) for w in failed_workers)
+ raise RuntimeError(f'DataLoader worker (pid(s) {pids_str}) exited unexpectedly') from e
+ if isinstance(e, queue.Empty):
+ return (False, None)
+ import tempfile
+ import errno
+ try:
+ # Raise an exception if we are this close to the FDs limit.
+ # Apparently, trying to open only one file is not a sufficient
+ # test.
+ # See NOTE [ DataLoader on Linux and open files limit ]
+ fds_limit_margin = 10
+ fs = [tempfile.NamedTemporaryFile() for i in range(fds_limit_margin)]
+ except OSError as e:
+ if e.errno == errno.EMFILE:
+ raise RuntimeError(
+ "Too many open files. Communication with the"
+ " workers is no longer possible. Please increase the"
+ " limit using `ulimit -n` in the shell or change the"
+ " sharing strategy by calling"
+ " `torch.multiprocessing.set_sharing_strategy('file_system')`"
+ " at the beginning of your code") from None
+ raise
+
+# NOTE [ DataLoader on Linux and open files limit ]
+#
+# On Linux when DataLoader is used with multiprocessing we pass the data between
+# the root process and the workers through SHM files. We remove those files from
+# the filesystem as soon as they are created and keep them alive by
+# passing around their file descriptors through AF_UNIX sockets. (See
+# docs/source/multiprocessing.rst and `Multiprocessing Technical Notes` in
+# the wiki (https://github.com/pytorch/pytorch/wiki).)
+#
+# This sometimes leads us to exceeding the open files limit.
When that happens, +# and the offending file descriptor is coming over a socket, the `socket` Python +# package silently strips the file descriptor from the message, setting only the +# `MSG_CTRUNC` flag (which might be a bit misleading since the manpage says that +# it _indicates that some control data were discarded due to lack of space in +# the buffer for ancillary data_). This might reflect the C implementation of +# AF_UNIX sockets. +# +# This behaviour can be reproduced with the script and instructions at the +# bottom of this note. +# +# When that happens, the standard Python `multiprocessing` (and not +# `torch.multiprocessing`) raises a `RuntimeError: received 0 items of ancdata` +# +# Sometimes, instead of the FD being stripped, you may get an `OSError: +# Too many open files`, both in the script below and in DataLoader. However, +# this is rare and seems to be nondeterministic. +# +# +# #!/usr/bin/env python3 +# import sys +# import socket +# import os +# import array +# import shutil +# import socket +# +# +# if len(sys.argv) != 4: +# print("Usage: ", sys.argv[0], " tmp_dirname iteration (send|recv)") +# sys.exit(1) +# +# if __name__ == '__main__': +# dirname = sys.argv[1] +# sock_path = dirname + "/sock" +# iterations = int(sys.argv[2]) +# def dummy_path(i): +# return dirname + "/" + str(i) + ".dummy" +# +# +# if sys.argv[3] == 'send': +# while not os.path.exists(sock_path): +# pass +# client = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) +# client.connect(sock_path) +# for i in range(iterations): +# fd = os.open(dummy_path(i), os.O_WRONLY | os.O_CREAT) +# ancdata = array.array('i', [fd]) +# msg = bytes([i % 256]) +# print("Sending fd ", fd, " (iteration #", i, ")") +# client.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, ancdata)]) +# +# +# else: +# assert sys.argv[3] == 'recv' +# +# if os.path.exists(dirname): +# raise Exception("Directory exists") +# +# os.mkdir(dirname) +# +# print("Opening socket...") +# server = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) +# server.bind(sock_path) +# +# print("Listening...") +# for i in range(iterations): +# a = array.array('i') +# msg, ancdata, flags, addr = server.recvmsg(1, socket.CMSG_SPACE(a.itemsize)) +# assert(len(ancdata) == 1) +# cmsg_level, cmsg_type, cmsg_data = ancdata[0] +# a.frombytes(cmsg_data) +# print("Received fd ", a[0], " (iteration #", i, ")") +# +# shutil.rmtree(dirname) +# +# Steps to reproduce: +# +# 1. Run two shells and set lower file descriptor limit in the receiving one: +# (shell1) ulimit -n 1020 +# (shell2) ulimit -n 1022 +# +# 2. Run the script above with the `recv` option in the first shell +# (shell1) ./test_socket.py sock_tmp 1017 recv +# +# 3. Run the script with the `send` option in the second shell: +# (shell2) ./test_socket.py sock_tmp 1017 send + + def _get_data(self): + # Fetches data from `self._data_queue`. + # + # We check workers' status every `MP_STATUS_CHECK_INTERVAL` seconds, + # which we achieve by running `self._try_get_data(timeout=MP_STATUS_CHECK_INTERVAL)` + # in a loop. This is the only mechanism to detect worker failures for + # Windows. For other platforms, a SIGCHLD handler is also used for + # worker failure detection. + # + # If `pin_memory=True`, we also need check if `pin_memory_thread` had + # died at timeouts. 
+ if self._timeout > 0: + success, data = self._try_get_data(self._timeout) + if success: + return data + else: + raise RuntimeError(f'DataLoader timed out after {self._timeout} seconds') + elif self._pin_memory: + while self._pin_memory_thread.is_alive(): + success, data = self._try_get_data() + if success: + return data + else: + # while condition is false, i.e., pin_memory_thread died. + raise RuntimeError('Pin memory thread exited unexpectedly') + # In this case, `self._data_queue` is a `queue.Queue`,. But we don't + # need to call `.task_done()` because we don't use `.join()`. + else: + while True: + success, data = self._try_get_data() + if success: + return data + + def _next_data(self): + while True: + # If the worker responsible for `self._rcvd_idx` has already ended + # and was unable to fulfill this task (due to exhausting an `IterableDataset`), + # we try to advance `self._rcvd_idx` to find the next valid index. + # + # This part needs to run in the loop because both the `self._get_data()` + # call and `_IterableDatasetStopIteration` check below can mark + # extra worker(s) as dead. + while self._rcvd_idx < self._send_idx: + info = self._task_info[self._rcvd_idx] + worker_id = info[0] + if len(info) == 2 or self._workers_status[worker_id]: # has data or is still active + break + del self._task_info[self._rcvd_idx] + self._rcvd_idx += 1 + else: + # no valid `self._rcvd_idx` is found (i.e., didn't break) + if not self._persistent_workers: + self._shutdown_workers() + raise StopIteration + + # Now `self._rcvd_idx` is the batch index we want to fetch + + # Check if the next sample has already been generated + if len(self._task_info[self._rcvd_idx]) == 2: + data = self._task_info.pop(self._rcvd_idx)[1] + return self._process_data(data) + + assert not self._shutdown and self._tasks_outstanding > 0 + idx, data = self._get_data() + self._tasks_outstanding -= 1 + if self._dataset_kind == _DatasetKind.Iterable: + # Check for _IterableDatasetStopIteration + if isinstance(data, _utils.worker._IterableDatasetStopIteration): + if self._persistent_workers: + self._workers_status[data.worker_id] = False + else: + self._mark_worker_as_unavailable(data.worker_id) + self._try_put_index() + continue + + if idx != self._rcvd_idx: + # store out-of-order samples + self._task_info[idx] += (data,) + else: + del self._task_info[idx] + return self._process_data(data) + + def _try_put_index(self): + assert self._tasks_outstanding < self._prefetch_factor * self._num_workers + + try: + index = self._next_index() + except StopIteration: + return + for _ in range(self._num_workers): # find the next active worker, if any + worker_queue_idx = next(self._worker_queue_idx_cycle) + if self._workers_status[worker_queue_idx]: + break + else: + # not found (i.e., didn't break) + return + + self._index_queues[worker_queue_idx].put((self._send_idx, index)) # type: ignore[possibly-undefined] + self._task_info[self._send_idx] = (worker_queue_idx,) + self._tasks_outstanding += 1 + self._send_idx += 1 + + def _process_data(self, data): + self._rcvd_idx += 1 + self._try_put_index() + if isinstance(data, ExceptionWrapper): + data.reraise() + return data + + def _mark_worker_as_unavailable(self, worker_id, shutdown=False): + # Mark a worker as having finished its work e.g., due to + # exhausting an `IterableDataset`. This should be used only when this + # `_MultiProcessingDataLoaderIter` is going to continue running. 
+ + assert self._workers_status[worker_id] or (self._persistent_workers and shutdown) + + # Signal termination to that specific worker. + q = self._index_queues[worker_id] + # Indicate that no more data will be put on this queue by the current + # process. + q.put(None) + + # Note that we don't actually join the worker here, nor do we remove the + # worker's pid from C side struct because (1) joining may be slow, and + # (2) since we don't join, the worker may still raise error, and we + # prefer capturing those, rather than ignoring them, even though they + # are raised after the worker has finished its job. + # Joinning is deferred to `_shutdown_workers`, which it is called when + # all workers finish their jobs (e.g., `IterableDataset` replicas) or + # when this iterator is garbage collected. + + self._workers_status[worker_id] = False + + assert self._workers_done_event.is_set() == shutdown + + def _shutdown_workers(self): + # Called when shutting down this `_MultiProcessingDataLoaderIter`. + # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on + # the logic of this function. + if _utils is None or _utils.python_exit_status is True or _utils.python_exit_status is None: + # See (2) of the note. If Python is shutting down, do no-op. + return + # Normal exit when last reference is gone / iterator is depleted. + # See (1) and the second half of the note. + if not self._shutdown: + self._shutdown = True + try: + # Normal exit when last reference is gone / iterator is depleted. + # See (1) and the second half of the note. + + # Exit `pin_memory_thread` first because exiting workers may leave + # corrupted data in `worker_result_queue` which `pin_memory_thread` + # reads from. + if hasattr(self, '_pin_memory_thread'): + # Use hasattr in case error happens before we set the attribute. + self._pin_memory_thread_done_event.set() + # Send something to pin_memory_thread in case it is waiting + # so that it can wake up and check `pin_memory_thread_done_event` + self._worker_result_queue.put((None, None)) + self._pin_memory_thread.join() + self._worker_result_queue.cancel_join_thread() + self._worker_result_queue.close() + + # Exit workers now. + self._workers_done_event.set() + for worker_id in range(len(self._workers)): + # Get number of workers from `len(self._workers)` instead of + # `self._num_workers` in case we error before starting all + # workers. + # If we are using workers_status with persistent_workers + # we have to shut it down because the worker is paused + if self._persistent_workers or self._workers_status[worker_id]: + self._mark_worker_as_unavailable(worker_id, shutdown=True) + for w in self._workers: + # We should be able to join here, but in case anything went + # wrong, we set a timeout and if the workers fail to join, + # they are killed in the `finally` block. + w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL) + for q in self._index_queues: + q.cancel_join_thread() + q.close() + finally: + # Even though all this function does is putting into queues that + # we have called `cancel_join_thread` on, weird things can + # happen when a worker is killed by a signal, e.g., hanging in + # `Event.set()`. So we need to guard this with SIGCHLD handler, + # and remove pids from the C side data structure only at the + # end. + # + # FIXME: Unfortunately, for Windows, we are missing a worker + # error detection mechanism here in this function, as it + # doesn't provide a SIGCHLD handler. 
+ if self._worker_pids_set: + _utils.signal_handling._remove_worker_pids(id(self)) + self._worker_pids_set = False + for w in self._workers: + if w.is_alive(): + # Existing mechanisms try to make the workers exit + # peacefully, but in case that we unfortunately reach + # here, which we shouldn't, (e.g., pytorch/pytorch#39570), + # we kill the worker. + w.terminate() + + # staticmethod is used to remove reference to `_MultiProcessingDataLoaderIter` + @staticmethod + def _clean_up_worker(w): + try: + w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL) + finally: + if w.is_alive(): + w.terminate() + + def __del__(self): + self._shutdown_workers() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f19389e21bfefff0ea2705680d0c133730cfa228 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__init__.py @@ -0,0 +1,3 @@ +from . import iter +from . import map +from . import dataframe diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/_decorator.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/_decorator.py new file mode 100644 index 0000000000000000000000000000000000000000..b67b73158575eeebaf75ccd7d8f4bb571024fc29 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/_decorator.py @@ -0,0 +1,184 @@ +import inspect +from functools import wraps +from typing import Any, Callable, Optional, Type, Union, get_type_hints +from torch.utils.data.datapipes.datapipe import IterDataPipe, MapDataPipe +from torch.utils.data.datapipes._typing import _DataPipeMeta + + +###################################################### +# Functional API +###################################################### +class functional_datapipe: + name: str + + def __init__(self, name: str, enable_df_api_tracing=False) -> None: + """ + Define a functional datapipe. + + Args: + enable_df_api_tracing - if set, any returned DataPipe would accept + DataFrames API in tracing mode. 
+ """ + self.name = name + self.enable_df_api_tracing = enable_df_api_tracing + + def __call__(self, cls): + if issubclass(cls, IterDataPipe): + if isinstance(cls, Type): # type: ignore[arg-type] + if not isinstance(cls, _DataPipeMeta): + raise TypeError('`functional_datapipe` can only decorate IterDataPipe') + # with non_deterministic decorator + else: + if not isinstance(cls, non_deterministic) and \ + not (hasattr(cls, '__self__') and + isinstance(cls.__self__, non_deterministic)): + raise TypeError('`functional_datapipe` can only decorate IterDataPipe') + IterDataPipe.register_datapipe_as_function(self.name, cls, enable_df_api_tracing=self.enable_df_api_tracing) + elif issubclass(cls, MapDataPipe): + MapDataPipe.register_datapipe_as_function(self.name, cls) + + return cls + + +###################################################### +# Determinism +###################################################### +_determinism: bool = False + + +class guaranteed_datapipes_determinism: + prev: bool + + def __init__(self) -> None: + global _determinism + self.prev = _determinism + _determinism = True + + def __enter__(self) -> None: + pass + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + global _determinism + _determinism = self.prev + + +class non_deterministic: + cls: Optional[Type[IterDataPipe]] = None + # TODO: Lambda for picking + deterministic_fn: Callable[[], bool] + + def __init__(self, arg: Union[Type[IterDataPipe], Callable[[], bool]]) -> None: + # 1. Decorator doesn't have any argument + if isinstance(arg, Type): # type: ignore[arg-type] + if not issubclass(arg, IterDataPipe): # type: ignore[arg-type] + raise TypeError("Only `IterDataPipe` can be decorated with `non_deterministic`" + f", but {arg.__name__} is found") + self.cls = arg # type: ignore[assignment] + # 2. Decorator has an argument of a function + # This class should behave differently given different inputs. Use this + # function to verify the determinism for each instance. + # When the function returns True, the instance is non-deterministic. Otherwise, + # the instance is a deterministic DataPipe. + elif isinstance(arg, Callable): # type:ignore[arg-type] + self.deterministic_fn = arg # type: ignore[assignment, misc] + else: + raise TypeError(f"{arg} can not be decorated by non_deterministic") + + def __call__(self, *args, **kwargs): + global _determinism + # Decorate IterDataPipe + if self.cls is not None: + if _determinism: + raise TypeError("{} is non-deterministic, but you set 'guaranteed_datapipes_determinism'. 
" + "You can turn off determinism for this DataPipe if that is acceptable " + "for your application".format(self.cls.__name__)) + return self.cls(*args, **kwargs) # type: ignore[call-arg] + + # Decorate with a functional argument + if not (isinstance(args[0], Type) and # type: ignore[arg-type] + issubclass(args[0], IterDataPipe)): + raise TypeError(f"Only `IterDataPipe` can be decorated, but {args[0].__name__} is found") + self.cls = args[0] + return self.deterministic_wrapper_fn + + def deterministic_wrapper_fn(self, *args, **kwargs) -> IterDataPipe: + res = self.deterministic_fn(*args, **kwargs) # type: ignore[call-arg, misc] + if not isinstance(res, bool): + raise TypeError("deterministic_fn of `non_deterministic` decorator is required " + f"to return a boolean value, but {type(res)} is found") + global _determinism + if _determinism and res: + raise TypeError(f"{self.cls.__name__} is non-deterministic with the inputs, but you set " # type: ignore[union-attr] + "'guaranteed_datapipes_determinism'. You can turn off determinism " + "for this DataPipe if that is acceptable for your application" + ) + return self.cls(*args, **kwargs) # type: ignore[call-arg, misc] + + +###################################################### +# Type validation +###################################################### +# Validate each argument of DataPipe with hint as a subtype of the hint. +def argument_validation(f): + signature = inspect.signature(f) + hints = get_type_hints(f) + + @wraps(f) + def wrapper(*args, **kwargs): + bound = signature.bind(*args, **kwargs) + for argument_name, value in bound.arguments.items(): + if argument_name in hints and isinstance(hints[argument_name], _DataPipeMeta): + hint = hints[argument_name] + if not isinstance(value, IterDataPipe): + raise TypeError(f"Expected argument '{argument_name}' as a IterDataPipe, but found {type(value)}") + if not value.type.issubtype(hint.type): + raise TypeError(f"Expected type of argument '{argument_name}' as a subtype of " + f"hint {hint.type}, but found {value.type}" + ) + + return f(*args, **kwargs) + + return wrapper + + +# Default value is True +_runtime_validation_enabled: bool = True + + +class runtime_validation_disabled: + prev: bool + + def __init__(self) -> None: + global _runtime_validation_enabled + self.prev = _runtime_validation_enabled + _runtime_validation_enabled = False + + def __enter__(self) -> None: + pass + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + global _runtime_validation_enabled + _runtime_validation_enabled = self.prev + + +# Runtime checking +# Validate output data is subtype of return hint +def runtime_validation(f): + # TODO: + # Can be extended to validate '__getitem__' and nonblocking + if f.__name__ != '__iter__': + raise TypeError(f"Can not decorate function {f.__name__} with 'runtime_validation'") + + @wraps(f) + def wrapper(self): + global _runtime_validation_enabled + if not _runtime_validation_enabled: + yield from f(self) + else: + it = f(self) + for d in it: + if not self.type.issubtype_of_instance(d): + raise RuntimeError(f"Expected an instance as subtype of {self.type}, but found {d}({type(d)})") + yield d + + return wrapper diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/_hook_iterator.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/_hook_iterator.py new file mode 100644 index 0000000000000000000000000000000000000000..7463cc55d27c97aeb0af44433451e581b811b127 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/_hook_iterator.py @@ -0,0 +1,248 @@ +import inspect +import functools +from enum import Enum + +import torch.autograd + + +class _SnapshotState(Enum): + r""" + These are the snapshotting-related states that IterDataPipes can be in. + + `NotStarted` - allows you to restore a snapshot and create an iterator with reset + `Restored` - cannot restore again, allows you to create an iterator without resetting the DataPipe + `Iterating` - can restore, will reset if you create a new iterator + """ + + NotStarted = 0 + Restored = 1 + Iterating = 2 + + +def _simplify_obj_name(obj) -> str: + """Simplify the display strings of objects for the purpose of rendering within DataPipe error messages.""" + if inspect.isfunction(obj): + return obj.__name__ + else: + return repr(obj) + + +def _strip_datapipe_from_name(name: str) -> str: + return name.replace("IterDataPipe", "").replace("MapDataPipe", "") + + +def _generate_input_args_string(obj): + """Generate a string for the input arguments of an object.""" + signature = inspect.signature(obj.__class__) + input_param_names = set() + for param_name in signature.parameters.keys(): + input_param_names.add(param_name) + result = [] + for name, value in inspect.getmembers(obj): + if name in input_param_names: + result.append((name, _simplify_obj_name(value))) + return ', '.join([f'{name}={value}' for name, value in result]) + + +def _generate_iterdatapipe_msg(datapipe, simplify_dp_name: bool = False): + output_string = f"{datapipe.__class__.__name__}({_generate_input_args_string(datapipe)})" + if simplify_dp_name: + output_string = _strip_datapipe_from_name(output_string) + return output_string + + +def _gen_invalid_iterdatapipe_msg(datapipe): + return ("This iterator has been invalidated because another iterator has been created " + f"from the same IterDataPipe: {_generate_iterdatapipe_msg(datapipe)}\n" + "This may be caused multiple references to the same IterDataPipe. We recommend " + "using `.fork()` if that is necessary.") + + +_feedback_msg = ("\nFor feedback regarding this single iterator per IterDataPipe constraint, feel free " + "to comment on this issue: https://github.com/pytorch/data/issues/45.") + + +def _check_iterator_valid(datapipe, iterator_id, next_method_exists=False) -> None: + r""" + Given an instance of a DataPipe and an iterator ID, check if the IDs match, and if not, raises an exception. + + In the case of ChildDataPipe, the ID gets compared to the one stored in `main_datapipe` as well. + """ + if next_method_exists: + # This is the case where `IterDataPipe` has both `__iter__` and `__next__`. + # The `_valid_iterator_id` should either be never set (`None`), or set by at most one + # iterator (`0`). Otherwise, it means there are multiple iterators. + if datapipe._valid_iterator_id is not None and datapipe._valid_iterator_id != 0: + extra_msg = "\nNote that this exception is raised inside your IterDataPipe's a `__next__` method" + raise RuntimeError(_gen_invalid_iterdatapipe_msg(datapipe) + extra_msg + _feedback_msg) + elif hasattr(datapipe, "_is_child_datapipe") and datapipe._is_child_datapipe is True: + if hasattr(datapipe, "_check_valid_iterator_id"): + if not datapipe._check_valid_iterator_id(iterator_id): + raise RuntimeError("This iterator has been invalidated, because a new iterator has been created " + f"from one of the ChildDataPipes of " + f"{_generate_iterdatapipe_msg(datapipe.main_datapipe)}." 
+ _feedback_msg) + else: + raise RuntimeError("ChildDataPipe must have method `_check_valid_iterator_id`.") + elif datapipe._valid_iterator_id != iterator_id: + raise RuntimeError(_gen_invalid_iterdatapipe_msg(datapipe) + _feedback_msg) + + +def _set_datapipe_valid_iterator_id(datapipe): + """Given a DataPipe, updates its valid iterator ID and reset the DataPipe.""" + if hasattr(datapipe, "_is_child_datapipe") and datapipe._is_child_datapipe is True: + if hasattr(datapipe, "_set_main_datapipe_valid_iterator_id"): + datapipe._set_main_datapipe_valid_iterator_id() # reset() is called within this method when appropriate + else: + raise RuntimeError("ChildDataPipe must have method `_set_main_datapipe_valid_iterator_id`.") + else: + if datapipe._valid_iterator_id is None: + datapipe._valid_iterator_id = 0 + else: + datapipe._valid_iterator_id += 1 + datapipe.reset() + return datapipe._valid_iterator_id + + +def hook_iterator(namespace): + r""" + Define a hook that is applied to all `__iter__` of metaclass `_DataPipeMeta`. + + This is done for the purpose of profiling and checking if an iterator is still valid. + """ + + def profiler_record_fn_context(datapipe): + if not hasattr(datapipe, "_profile_name"): + datapipe._profile_name = _generate_iterdatapipe_msg(datapipe, simplify_dp_name=True) + return torch.autograd.profiler.record_function(datapipe._profile_name) + + class IteratorDecorator: + r""" + Wrap the iterator and modifying its `__next__` method. + + This decorator is applied to DataPipes of which `__iter__` method is NOT a generator function. + Those `__iter__` method commonly returns `self` but not necessarily. + """ + + def __init__(self, iterator, datapipe, iterator_id, has_next_method): + self.iterator = iterator + self.datapipe = datapipe + self.iterator_id = iterator_id + self._profiler_enabled = torch.autograd._profiler_enabled() + # Check if `__iter__` returns `self` and `DataPipe` has `__next__` + self.self_and_has_next_method = self.iterator is self.datapipe and has_next_method + + def __iter__(self): + return self + + def _get_next(self): + """Return next with logic related to iterator validity, profiler, and incrementation of samples yielded.""" + _check_iterator_valid(self.datapipe, self.iterator_id) + result = next(self.iterator) + if not self.self_and_has_next_method: + self.datapipe._number_of_samples_yielded += 1 + return result + + def __next__(self): + # TODO: Add try-except to in-place reduce traceback from the Exception + # See: https://github.com/pytorch/data/issues/284 + if self._profiler_enabled: + with profiler_record_fn_context(self.datapipe): + return self._get_next() + else: # Decided against using `contextlib.nullcontext` for performance reasons + return self._get_next() + + def __getattr__(self, name): + return getattr(self.iterator, name) + + func = namespace['__iter__'] + + # ``__iter__`` of IterDataPipe is a generator function + if inspect.isgeneratorfunction(func): + @functools.wraps(func) + def wrap_generator(*args, **kwargs): + gen = func(*args, **kwargs) + datapipe = args[0] + if datapipe._fast_forward_iterator: + it = datapipe._fast_forward_iterator + datapipe._fast_forward_iterator = None + datapipe._snapshot_state = _SnapshotState.Iterating + while True: + try: + yield next(it) + except StopIteration: + return + iterator_id = _set_datapipe_valid_iterator_id(datapipe) # This ID is tied to each created iterator + _profiler_enabled = torch.autograd._profiler_enabled() + try: + if _profiler_enabled: + with profiler_record_fn_context(datapipe): + 
response = gen.send(None) + else: + response = gen.send(None) + + while True: + datapipe._number_of_samples_yielded += 1 + request = yield response + # Pass through here every time `__next__` is called + if _profiler_enabled: + with profiler_record_fn_context(datapipe): + _check_iterator_valid(datapipe, iterator_id) + response = gen.send(request) + else: # Decided against using `contextlib.nullcontext` for performance reasons + _check_iterator_valid(datapipe, iterator_id) + response = gen.send(request) + except StopIteration as e: + return + except Exception as e: + # TODO: Simplify the traceback message to skip over `response = gen.send(None)` + # Part of https://github.com/pytorch/data/issues/284 + datapipe = args[0] + msg = "thrown by __iter__ of" + single_iterator_msg = "single iterator per IterDataPipe constraint" + if hasattr(e.args, '__len__'): + full_msg = f"{msg} {datapipe.__class__.__name__}({_generate_input_args_string(datapipe)})" + if len(e.args) == 0 or not isinstance(e.args[0], str): # If an exception message doesn't exist + e.args = (f'\nThis exception is {full_msg}',) + elif msg not in e.args[0] and single_iterator_msg not in e.args[0]: + e.args = (e.args[0] + f'\nThis exception is {full_msg}',) + e.args[1:] + raise + + namespace['__iter__'] = wrap_generator + else: # ``__iter__`` of IterDataPipe is NOT a generator function + # IterDataPipe is an iterator with both ``__iter__`` and ``__next__`` + # And ``__iter__`` may or may not return `self` + if '__next__' in namespace: # If `__next__` exists, put a wrapper around it + next_func = namespace['__next__'] + + @functools.wraps(next_func) + def wrap_next(*args, **kwargs): + datapipe = args[0] + if torch.autograd._profiler_enabled(): + with profiler_record_fn_context(datapipe): + result = next_func(*args, **kwargs) + else: + result = next_func(*args, **kwargs) + datapipe._number_of_samples_yielded += 1 + return result + + namespace['__next__'] = wrap_next + + # Note that if the `__next__` and `__iter__` do something completely unrelated. It may cause issue but + # the user will be violating the iterator protocol. Potential issue: + # 1. Valid iterator ID may not update or checked properly + # 2. 
The number of samples yielded will be miscounted + + # Regardless if `__next__` exists or not, `__iter__` needs a wrapper to track the number of valid iterators + @functools.wraps(func) + def wrap_iter(*args, **kwargs): + iter_ret = func(*args, **kwargs) + datapipe = args[0] + datapipe._snapshot_state = _SnapshotState.Iterating + if datapipe._fast_forward_iterator: + iter_ret = datapipe._fast_forward_iterator + datapipe._fast_forward_iterator = None + return iter_ret + iterator_id = _set_datapipe_valid_iterator_id(datapipe) # This ID is tied to each created iterator + return IteratorDecorator(iter_ret, datapipe, iterator_id, '__next__' in namespace) + + namespace['__iter__'] = wrap_iter diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/_typing.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..fdf2907abf1051d8eaf96eae89c679ebe9c57fb9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/_typing.py @@ -0,0 +1,430 @@ +# Taking reference from official Python typing +# https://github.com/python/cpython/blob/master/Lib/typing.py + +import collections +import functools +import numbers +import sys + +from torch.utils.data.datapipes._hook_iterator import hook_iterator, _SnapshotState +from typing import (Any, Dict, Iterator, Generic, List, Set, Tuple, TypeVar, Union, + get_type_hints) +from typing import _eval_type, _tp_cache, _type_check, _type_repr # type: ignore[attr-defined] +from typing import ForwardRef + +# TODO: Use TypeAlias when Python 3.6 is deprecated +# Please check [Note: TypeMeta and TypeAlias] +# In case of metaclass conflict due to ABCMeta or _ProtocolMeta +# For Python 3.9, only Protocol in typing uses metaclass +from abc import ABCMeta +from typing import _GenericAlias # type: ignore[attr-defined, no-redef] + +class GenericMeta(ABCMeta): # type: ignore[no-redef] + pass + + +class Integer(numbers.Integral): + pass + + +class Boolean(numbers.Integral): + pass + + +# Python 'type' object is not subscriptable +# Tuple[int, List, dict] -> valid +# tuple[int, list, dict] -> invalid +# Map Python 'type' to abstract base class +TYPE2ABC = { + bool: Boolean, + int: Integer, + float: numbers.Real, + complex: numbers.Complex, + dict: Dict, + list: List, + set: Set, + tuple: Tuple, + None: type(None), +} + + +def issubtype(left, right, recursive=True): + r""" + Check if the left-side type is a subtype of the right-side type. + + If any of type is a composite type like `Union` and `TypeVar` with + bounds, it would be expanded into a list of types and check all + of left-side types are subtypes of either one from right-side types. 
+ """ + left = TYPE2ABC.get(left, left) + right = TYPE2ABC.get(right, right) + + if right is Any or left == right: + return True + + if isinstance(right, _GenericAlias): + if getattr(right, '__origin__', None) is Generic: + return True + + if right == type(None): + return False + + # Right-side type + constraints = _decompose_type(right) + + if len(constraints) == 0 or Any in constraints: + return True + + if left is Any: + return False + + # Left-side type + variants = _decompose_type(left) + + # all() will return True for empty variants + if len(variants) == 0: + return False + + return all(_issubtype_with_constraints(variant, constraints, recursive) for variant in variants) + + +def _decompose_type(t, to_list=True): + if isinstance(t, TypeVar): + if t.__bound__ is not None: + ts = [t.__bound__] + else: + # For T_co, __constraints__ is () + ts = list(t.__constraints__) + elif hasattr(t, '__origin__') and t.__origin__ == Union: + ts = t.__args__ + else: + if not to_list: + return None + ts = [t] + # Ignored: Generator has incompatible item type "object"; expected "Type[Any]" + ts = [TYPE2ABC.get(_t, _t) for _t in ts] # type: ignore[misc] + return ts + + +def _issubtype_with_constraints(variant, constraints, recursive=True): + r""" + Check if the variant is a subtype of either one from constraints. + + For composite types like `Union` and `TypeVar` with bounds, they + would be expanded for testing. + """ + if variant in constraints: + return True + + # [Note: Subtype for Union and TypeVar] + # Python typing is able to flatten Union[Union[...]] or Union[TypeVar]. + # But it couldn't flatten the following scenarios: + # - Union[int, TypeVar[Union[...]]] + # - TypeVar[TypeVar[...]] + # So, variant and each constraint may be a TypeVar or a Union. + # In these cases, all of inner types from the variant are required to be + # extraced and verified as a subtype of any constraint. And, all of + # inner types from any constraint being a TypeVar or a Union are + # also required to be extracted and verified if the variant belongs to + # any of them. + + # Variant + vs = _decompose_type(variant, to_list=False) + + # Variant is TypeVar or Union + if vs is not None: + return all(_issubtype_with_constraints(v, constraints, recursive) for v in vs) + + # Variant is not TypeVar or Union + if hasattr(variant, '__origin__') and variant.__origin__ is not None: + v_origin = variant.__origin__ + # In Python-3.9 typing library untyped generics do not have args + v_args = getattr(variant, "__args__", None) + else: + v_origin = variant + v_args = None + + # Constraints + for constraint in constraints: + cs = _decompose_type(constraint, to_list=False) + + # Constraint is TypeVar or Union + if cs is not None: + if _issubtype_with_constraints(variant, cs, recursive): + return True + # Constraint is not TypeVar or Union + else: + # __origin__ can be None for plain list, tuple, ... 
in Python 3.6 + if hasattr(constraint, '__origin__') and constraint.__origin__ is not None: + c_origin = constraint.__origin__ + if v_origin == c_origin: + if not recursive: + return True + # In Python-3.9 typing library untyped generics do not have args + c_args = getattr(constraint, "__args__", None) + if c_args is None or len(c_args) == 0: + return True + if v_args is not None and len(v_args) == len(c_args) and \ + all(issubtype(v_arg, c_arg) for v_arg, c_arg in zip(v_args, c_args)): + return True + # Tuple[int] -> Tuple + else: + if v_origin == constraint: + return True + + return False + + +def issubinstance(data, data_type): + if not issubtype(type(data), data_type, recursive=False): + return False + + # In Python-3.9 typing library __args__ attribute is not defined for untyped generics + dt_args = getattr(data_type, "__args__", None) + if isinstance(data, tuple): + if dt_args is None or len(dt_args) == 0: + return True + if len(dt_args) != len(data): + return False + return all(issubinstance(d, t) for d, t in zip(data, dt_args)) + elif isinstance(data, (list, set)): + if dt_args is None or len(dt_args) == 0: + return True + t = dt_args[0] + return all(issubinstance(d, t) for d in data) + elif isinstance(data, dict): + if dt_args is None or len(dt_args) == 0: + return True + kt, vt = dt_args + return all(issubinstance(k, kt) and issubinstance(v, vt) for k, v in data.items()) + + return True + + +# [Note: TypeMeta and TypeAlias] +# In order to keep compatibility for Python 3.6, use Meta for the typing. +# TODO: When PyTorch drops the support for Python 3.6, it can be converted +# into the Alias system and using `__class_getitem__` for DataPipe. The +# typing system will gain benefit of performance and resolving metaclass +# conflicts as elaborated in https://www.python.org/dev/peps/pep-0560/ + + +class _DataPipeType: + r"""Save type annotation in `param`.""" + + def __init__(self, param): + self.param = param + + def __repr__(self): + return _type_repr(self.param) + + def __eq__(self, other): + if isinstance(other, _DataPipeType): + return self.param == other.param + return NotImplemented + + def __hash__(self): + return hash(self.param) + + def issubtype(self, other): + if isinstance(other.param, _GenericAlias): + if getattr(other.param, '__origin__', None) is Generic: + return True + if isinstance(other, _DataPipeType): + return issubtype(self.param, other.param) + if isinstance(other, type): + return issubtype(self.param, other) + raise TypeError(f"Expected '_DataPipeType' or 'type', but found {type(other)}") + + def issubtype_of_instance(self, other): + return issubinstance(other, self.param) + + +# Default type for DataPipe without annotation +T_co = TypeVar('T_co', covariant=True) +_DEFAULT_TYPE = _DataPipeType(Generic[T_co]) + + +class _DataPipeMeta(GenericMeta): + r""" + Metaclass for `DataPipe`. + + Add `type` attribute and `__init_subclass__` based on the type, and validate the return hint of `__iter__`. + + Note that there is subclass `_IterDataPipeMeta` specifically for `IterDataPipe`. + """ + + type: _DataPipeType + + def __new__(cls, name, bases, namespace, **kwargs): + return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload] + + # TODO: the statements below are not reachable by design as there is a bug and typing is low priority for now. 
+ cls.__origin__ = None + if 'type' in namespace: + return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload] + + namespace['__type_class__'] = False + # For plain derived class without annotation + for base in bases: + if isinstance(base, _DataPipeMeta): + return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload] + + namespace.update({'type': _DEFAULT_TYPE, + '__init_subclass__': _dp_init_subclass}) + return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload] + + def __init__(self, name, bases, namespace, **kwargs): + super().__init__(name, bases, namespace, **kwargs) # type: ignore[call-overload] + + # TODO: Fix isinstance bug + @_tp_cache + def _getitem_(self, params): + if params is None: + raise TypeError(f'{self.__name__}[t]: t can not be None') + if isinstance(params, str): + params = ForwardRef(params) + if not isinstance(params, tuple): + params = (params, ) + + msg = f"{self.__name__}[t]: t must be a type" + params = tuple(_type_check(p, msg) for p in params) + + if isinstance(self.type.param, _GenericAlias): + orig = getattr(self.type.param, '__origin__', None) + if isinstance(orig, type) and orig is not Generic: + p = self.type.param[params] # type: ignore[index] + t = _DataPipeType(p) + l = len(str(self.type)) + 2 + name = self.__name__[:-l] + name = name + '[' + str(t) + ']' + bases = (self,) + self.__bases__ + return self.__class__(name, bases, + {'__init_subclass__': _dp_init_subclass, + 'type': t, + '__type_class__': True}) + + if len(params) > 1: + raise TypeError(f'Too many parameters for {self} actual {len(params)}, expected 1') + + t = _DataPipeType(params[0]) + + if not t.issubtype(self.type): + raise TypeError(f'Can not subclass a DataPipe[{t}] from DataPipe[{self.type}]') + + # Types are equal, fast path for inheritance + if self.type == t: + return self + + name = self.__name__ + '[' + str(t) + ']' + bases = (self,) + self.__bases__ + + return self.__class__(name, bases, + {'__init_subclass__': _dp_init_subclass, + '__type_class__': True, + 'type': t}) + + # TODO: Fix isinstance bug + def _eq_(self, other): + if not isinstance(other, _DataPipeMeta): + return NotImplemented + if self.__origin__ is None or other.__origin__ is None: # type: ignore[has-type] + return self is other + return (self.__origin__ == other.__origin__ # type: ignore[has-type] + and self.type == other.type) + + # TODO: Fix isinstance bug + def _hash_(self): + return hash((self.__name__, self.type)) + + +class _IterDataPipeMeta(_DataPipeMeta): + r""" + Metaclass for `IterDataPipe` and inherits from `_DataPipeMeta`. + + Add various functions for behaviors specific to `IterDataPipe`. + """ + + def __new__(cls, name, bases, namespace, **kwargs): + + if 'reset' in namespace: + reset_func = namespace['reset'] + + @functools.wraps(reset_func) + def conditional_reset(*args, **kwargs): + r""" + Only execute DataPipe's `reset()` method if `_SnapshotState` is `Iterating` or `NotStarted`. + + This allows recently restored DataPipe to preserve its restored state during the initial `__iter__` call. + """ + datapipe = args[0] + if datapipe._snapshot_state in (_SnapshotState.Iterating, _SnapshotState.NotStarted): + # Reset `NotStarted` is necessary because the `source_datapipe` of a DataPipe might have + # already begun iterating. 
+ datapipe._number_of_samples_yielded = 0 + datapipe._fast_forward_iterator = None + reset_func(*args, **kwargs) + datapipe._snapshot_state = _SnapshotState.Iterating + + namespace['reset'] = conditional_reset + + if '__iter__' in namespace: + hook_iterator(namespace) + return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload] + + +def _dp_init_subclass(sub_cls, *args, **kwargs): + # Add function for datapipe instance to reinforce the type + sub_cls.reinforce_type = reinforce_type + + # TODO: + # - add global switch for type checking at compile-time + + # Ignore internal type class + if getattr(sub_cls, '__type_class__', False): + return + + # Check if the string type is valid + if isinstance(sub_cls.type.param, ForwardRef): + base_globals = sys.modules[sub_cls.__module__].__dict__ + try: + param = _eval_type(sub_cls.type.param, base_globals, locals()) + sub_cls.type.param = param + except TypeError as e: + raise TypeError(f"{sub_cls.type.param.__forward_arg__} is not supported by Python typing") from e + + if '__iter__' in sub_cls.__dict__: + iter_fn = sub_cls.__dict__['__iter__'] + hints = get_type_hints(iter_fn) + if 'return' in hints: + return_hint = hints['return'] + # Plain Return Hint for Python 3.6 + if return_hint == Iterator: + return + if not (hasattr(return_hint, '__origin__') and + (return_hint.__origin__ == Iterator or + return_hint.__origin__ == collections.abc.Iterator)): + raise TypeError("Expected 'Iterator' as the return annotation for `__iter__` of {}" + ", but found {}".format(sub_cls.__name__, _type_repr(hints['return']))) + data_type = return_hint.__args__[0] + if not issubtype(data_type, sub_cls.type.param): + raise TypeError("Expected return type of '__iter__' as a subtype of {}, but found {}" + " for {}".format(sub_cls.type, _type_repr(data_type), sub_cls.__name__)) + + +def reinforce_type(self, expected_type): + r""" + Reinforce the type for DataPipe instance. + + And the 'expected_type' is required to be a subtype of the original type + hint to restrict the type requirement of DataPipe instance. 
+ """ + if isinstance(expected_type, tuple): + expected_type = Tuple[expected_type] + _type_check(expected_type, msg="'expected_type' must be a type") + + if not issubtype(expected_type, self.type.param): + raise TypeError(f"Expected 'expected_type' as subtype of {self.type}, but found {_type_repr(expected_type)}") + + self.type = _DataPipeType(expected_type) + return self diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e7a4892032ea570532d6a262d70501e8827128d4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__init__.py @@ -0,0 +1,11 @@ +from torch.utils.data.datapipes.dataframe.dataframes import ( + CaptureDataFrame, DFIterDataPipe, +) +from torch.utils.data.datapipes.dataframe.datapipes import ( + DataFramesAsTuplesPipe, +) + +__all__ = ['CaptureDataFrame', 'DFIterDataPipe', 'DataFramesAsTuplesPipe'] + +# Please keep this list sorted +assert __all__ == sorted(__all__) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1a4363d1c5a687a36fd5500920a0db5a38fe4e4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/datapipes.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/datapipes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6ca4ce9f0e9c85fb935258790b4d92b79252040 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/datapipes.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/structures.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/structures.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53aece89b1c8d4fda933afe4fcc19eb66d6c2975 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/structures.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframes.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframes.py new file mode 100644 index 0000000000000000000000000000000000000000..69a14e06fcbf7db40fe415fc70cf9c28cec3fc73 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframes.py @@ -0,0 +1,433 @@ +from typing import Any, Dict, List, Optional + +from torch.utils.data.datapipes._decorator import functional_datapipe +from torch.utils.data.datapipes.datapipe import DFIterDataPipe, IterDataPipe + +from torch.utils.data.datapipes.dataframe.structures import DataChunkDF + +# TODO(VitalyFedyunin): Add error when two different traces get combined + +__all__ = [ + "Capture", + "CaptureA", + "CaptureAdd", + "CaptureCall", + 
"CaptureControl", + "CaptureDataFrame", + "CaptureDataFrameWithDataPipeOps", + "CaptureF", + "CaptureGetAttr", + "CaptureGetItem", + "CaptureInitial", + "CaptureLikeMock", + "CaptureMul", + "CaptureSetItem", + "CaptureSub", + "CaptureVariable", + "CaptureVariableAssign", + "DataFrameTracer", + "DataFrameTracedOps", + "disable_capture", + "get_val", +] + + +def disable_capture(): + CaptureControl.disabled = True + + +class CaptureControl: + disabled = False + + +class DataFrameTracedOps(DFIterDataPipe): + def __init__(self, source_datapipe, output_var): + self.source_datapipe = source_datapipe + self.output_var = output_var + + def __iter__(self): + for item in self.source_datapipe: + yield self.output_var.apply_ops(item) + + +# TODO(VitalyFedyunin): Extract this list from the DFIterDataPipe registred functions +DATAPIPES_OPS = ['_dataframes_as_tuples', 'groupby', '_dataframes_filter', 'map', 'to_datapipe', + 'shuffle', 'concat', 'batch', '_dataframes_per_row', '_dataframes_concat', '_dataframes_shuffle'] + +UNIMPLEMENTED_ATTR = ['__deepcopy__', '__setstate__', 'is_shardable', 'apply_sharding'] + + +class Capture: + # TODO: All operations are shared across entire InitialCapture, need to figure out what if we join two captures + + def __init__(self, schema_df=None): + self.ctx = {'operations': [], 'variables': [], 'schema_df': schema_df} + + def __str__(self): + return self._ops_str() + + def _ops_str(self): + res = "" + for op in self.ctx['operations']: + if len(res) > 0: + res += "\n" + res += str(op) + return res + + def __getstate__(self): + # TODO(VitalyFedyunin): Currently can't pickle (why?) + self.ctx['schema_df'] = None + for var in self.ctx['variables']: + var.calculated_value = None + state = {} + for item in self.__dict__: + state[item] = getattr(self, item) + return state + + def __setstate__(self, state): + for k, v in state.items(): + setattr(self, k, v) + + def __getattr__(self, attrname): + if attrname == 'kwarg' or attrname == 'kwargs': + raise Exception('no kwargs!') + if attrname in ['__deepcopy__']: + raise AttributeError() + result = CaptureGetAttr(self, attrname, ctx=self.ctx) + return result + + def __getitem__(self, key): + return CaptureGetItem(self, key, ctx=self.ctx) + + def __setitem__(self, key, value): + self.ctx['operations'].append( + CaptureSetItem(self, key, value, ctx=self.ctx)) + + def __add__(self, add_val): + res = CaptureAdd(self, add_val, ctx=self.ctx) + var = CaptureVariable(res, ctx=self.ctx) + self.ctx['operations'].append( + CaptureVariableAssign(variable=var, value=res, ctx=self.ctx)) + return var + + def __sub__(self, add_val): + res = CaptureSub(self, add_val, ctx=self.ctx) + var = CaptureVariable(res, ctx=self.ctx) + self.ctx['operations'].append( + CaptureVariableAssign(variable=var, value=res, ctx=self.ctx)) + return var + + def __mul__(self, add_val): + res = CaptureMul(self, add_val, ctx=self.ctx) + var = CaptureVariable(res, ctx=self.ctx) + t = CaptureVariableAssign(variable=var, value=res, ctx=self.ctx) + self.ctx['operations'].append(t) + return var + + def _is_context_empty(self): + return len(self.ctx['operations']) == 0 and len(self.ctx['variables']) == 0 + + def apply_ops_2(self, dataframe): + # TODO(VitalyFedyunin): Make this calculation thread safe (as currently it updates pointer) + self.ctx['variables'][0].calculated_value = dataframe + for op in self.ctx['operations']: + op.execute() + + @property + def columns(self): + self.apply_ops_2(self.ctx['schema_df']) + value = self.execute() + return value.columns + + # 
TODO(VitalyFedyunin): Add tests + # TODO(VitalyFedyunin): Need to join context if one of them are empty because we used capture + + def __call__(self, *args, **kwargs): + # TODO: Check if args or kwargs have more than one different context + if self._is_context_empty(): + # TODO: Allow CaptureA to take context from mock + for arg in args: + if isinstance(arg, Capture) and not arg._is_context_empty(): + self.ctx = arg.ctx + break + if self._is_context_empty(): + for k, v in kwargs.items(): + if isinstance(k, Capture) and not k._is_context_empty(): + self.ctx = k.ctx + break + if isinstance(v, Capture) and not v._is_context_empty(): + self.ctx = v.ctx + break + + res = CaptureCall(self, ctx=self.ctx, args=args, kwargs=kwargs) + var = CaptureVariable(None, ctx=self.ctx) + t = CaptureVariableAssign(ctx=self.ctx, variable=var, value=res) + self.ctx['operations'].append(t) + return var + + +class CaptureF(Capture): + def __init__(self, ctx=None, **kwargs): + if ctx is None: + self.ctx = {'operations': [], 'variables': []} + else: + self.ctx = ctx + self.kwargs = kwargs + + +class CaptureA(CaptureF): + def __str__(self): + return f"{self.kwargs['name']}" + + def execute(self): + value = self.kwargs['real_attribute'] + return value + + +class CaptureLikeMock: + def __init__(self, name): + import unittest.mock as mock + # TODO(VitalyFedyunin): Do not use provate function here, copy own implementation instead. + get_target, attribute = mock._get_target(name) # type: ignore[attr-defined] + self.get_target = get_target + self.attribute = attribute + self.name = name + + def __enter__(self): + self.save = getattr(self.get_target(), self.attribute) + capt = CaptureA(name=self.name, real_attribute=self.save) + setattr(self.get_target(), self.attribute, capt) + + def __exit__(self, *exc_info): + setattr(self.get_target(), self.attribute, self.save) + + +class CaptureCall(Capture): + + def __init__(self, callable, ctx=None, **kwargs): + if ctx is None: + self.ctx = {'operations': [], 'variables': []} + else: + self.ctx = ctx + self.kwargs = kwargs + self.callable = callable + + def __str__(self): + return "{callable}({args},{kwargs})".format(callable=self.callable, **self.kwargs) + + def execute(self): + + # TODO: VitalyFedyunin execute kwargs and maybe nested structures + executed_args = [] + for arg in self.kwargs['args']: + if isinstance(arg, Capture): + executed_args.append(arg.execute()) + else: + executed_args.append(arg) + left = get_val(self.callable) + return left(*executed_args, **self.kwargs['kwargs']) + + +class CaptureVariableAssign(CaptureF): + def __str__(self): + variable = self.kwargs['variable'] + value = self.kwargs['value'] + return f"{variable} = {value}" + + def execute(self): + self.kwargs['variable'].calculated_value = self.kwargs['value'].execute() + + +class CaptureVariable(Capture): + # TODO(VitalyFedyunin): This should be atomic and thread safe + names_idx = 0 + + def __init__(self, value, ctx): + if CaptureControl.disabled: + raise Exception('Attempting to create capture variable with capture off') + self.ctx = ctx + self.value = value + self.name = f'var_{CaptureVariable.names_idx}' + CaptureVariable.names_idx += 1 + self.ctx['variables'].append(self) + + def __str__(self): + return self.name + + def execute(self): + return self.calculated_value + + def apply_ops(self, dataframe): + # TODO(VitalyFedyunin): Make this calculation thread safe (as currently it updates pointer) + self.ctx['variables'][0].calculated_value = dataframe + for op in self.ctx['operations']: + 
op.execute() + return self.calculated_value + + +class CaptureGetItem(Capture): + def __init__(self, left, key, ctx): + self.ctx = ctx + self.left = left + self.key = key + + def __str__(self): + return f"{self.left}[{get_val(self.key)}]" + + def execute(self): + left = self.left.execute() + return left[self.key] + + +class CaptureSetItem(Capture): + def __init__(self, left, key, value, ctx): + self.ctx = ctx + self.left = left + self.key = key + self.value = value + + def __str__(self): + return f"{self.left}[{get_val(self.key)}] = {self.value}" + + def execute(self): + left = self.left.execute() + value = self.value.execute() + left[self.key] = value + + +class CaptureAdd(Capture): + def __init__(self, left, right, ctx): + self.ctx = ctx + self.left = left + self.right = right + + def __str__(self): + return f"{self.left} + {self.right}" + + def execute(self): + return get_val(self.left) + get_val(self.right) + + +class CaptureMul(Capture): + def __init__(self, left, right, ctx): + self.ctx = ctx + self.left = left + self.right = right + + def __str__(self): + return f"{self.left} * {self.right}" + + def execute(self): + return get_val(self.left) * get_val(self.right) + + +class CaptureSub(Capture): + def __init__(self, left, right, ctx): + self.ctx = ctx + self.left = left + self.right = right + + def __str__(self): + return f"{self.left} - {self.right}" + + def execute(self): + return get_val(self.left) - get_val(self.right) + + +class CaptureGetAttr(Capture): + def __init__(self, src, name, ctx): + self.ctx = ctx + self.src = src + self.name = name + + def __str__(self): + return f"{self.src}.{self.name}" + + def execute(self): + val = get_val(self.src) + return getattr(val, self.name) + + +def get_val(capture): + if isinstance(capture, Capture): + return capture.execute() + elif isinstance(capture, str): + return f'"{capture}"' + else: + return capture + + +class CaptureInitial(CaptureVariable): + def __init__(self, schema_df=None): + new_ctx: Dict[str, List[Any]] = {'operations': [], 'variables': [], 'schema_df': schema_df} + super().__init__(None, new_ctx) + self.name = f'input_{self.name}' + + +class CaptureDataFrame(CaptureInitial): + pass + + +class CaptureDataFrameWithDataPipeOps(CaptureDataFrame): + def as_datapipe(self): + return DataFrameTracedOps( + self.ctx['variables'][0].source_datapipe, self) + + def raw_iterator(self): + return self.as_datapipe().__iter__() + + def __iter__(self): + return iter(self._dataframes_as_tuples()) + + def batch(self, batch_size=10, drop_last: bool = False, wrapper_class=DataChunkDF): + dp = self._dataframes_per_row()._dataframes_concat(batch_size) + dp = dp.as_datapipe().batch(1, drop_last=drop_last, wrapper_class=wrapper_class) + dp._dp_contains_dataframe = True + return dp + + def groupby(self, + group_key_fn, + *, + buffer_size=10000, + group_size=None, + guaranteed_group_size=None, + drop_remaining=False): + dp = self._dataframes_per_row() + dp = dp.as_datapipe().groupby(group_key_fn, buffer_size=buffer_size, group_size=group_size, + guaranteed_group_size=guaranteed_group_size, drop_remaining=drop_remaining) + return dp + + def shuffle(self, *args, **kwargs): + return self._dataframes_shuffle(*args, **kwargs) + + def filter(self, *args, **kwargs): + return self._dataframes_filter(*args, **kwargs) + + def collate(self, *args, **kwargs): + raise Exception("Can't collate unbatched DataFrames stream") + + def __getattr__(self, attrname): # ? 
+ if attrname in UNIMPLEMENTED_ATTR: + raise AttributeError('Attempting to get ', attrname) + if attrname in DATAPIPES_OPS: + return (self.as_datapipe()).__getattr__(attrname) + return super().__getattr__(attrname) + + +@functional_datapipe('trace_as_dataframe') +class DataFrameTracer(CaptureDataFrameWithDataPipeOps, IterDataPipe): # type: ignore[misc] + source_datapipe: Optional[Any] = None + + # TODO(VitalyFedyunin): Must implement all special functions of datapipes + + def set_shuffle_settings(self, *args, **kwargs): + pass + + def is_shardable(self): + return False + + def __init__(self, source_datapipe, schema_df=None): + self.source_datapipe = source_datapipe + if schema_df is None: + schema_df = next(iter(self.source_datapipe)) + super().__init__(schema_df=schema_df) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/datapipes.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/datapipes.py new file mode 100644 index 0000000000000000000000000000000000000000..a75cc5c7a7c210d67cbc6291dcf892576669eb2a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/datapipes.py @@ -0,0 +1,131 @@ +import random + +from torch.utils.data.datapipes._decorator import functional_datapipe +from torch.utils.data.datapipes.datapipe import DFIterDataPipe, IterDataPipe + +from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper + +__all__ = [ + "ConcatDataFramesPipe", + "DataFramesAsTuplesPipe", + "ExampleAggregateAsDataFrames", + "FilterDataFramesPipe", + "PerRowDataFramesPipe", + "ShuffleDataFramesPipe", +] + + +@functional_datapipe('_dataframes_as_tuples') +class DataFramesAsTuplesPipe(IterDataPipe): + def __init__(self, source_datapipe): + self.source_datapipe = source_datapipe + + def __iter__(self): + for df in self.source_datapipe: + # for record in df.to_records(index=False): + yield from df_wrapper.iterate(df) + + +@functional_datapipe('_dataframes_per_row', enable_df_api_tracing=True) +class PerRowDataFramesPipe(DFIterDataPipe): + def __init__(self, source_datapipe): + self.source_datapipe = source_datapipe + + def __iter__(self): + for df in self.source_datapipe: + # TODO(VitalyFedyunin): Replacing with TorchArrow only API, as we are dropping pandas as followup + for i in range(len(df)): + yield df[i:i + 1] + + +@functional_datapipe('_dataframes_concat', enable_df_api_tracing=True) +class ConcatDataFramesPipe(DFIterDataPipe): + def __init__(self, source_datapipe, batch=3): + self.source_datapipe = source_datapipe + self.n_batch = batch + + def __iter__(self): + buffer = [] + for df in self.source_datapipe: + buffer.append(df) + if len(buffer) == self.n_batch: + yield df_wrapper.concat(buffer) + buffer = [] + if len(buffer): + yield df_wrapper.concat(buffer) + + +@functional_datapipe('_dataframes_shuffle', enable_df_api_tracing=True) +class ShuffleDataFramesPipe(DFIterDataPipe): + def __init__(self, source_datapipe): + self.source_datapipe = source_datapipe + + def __iter__(self): + size = None + all_buffer = [] + for df in self.source_datapipe: + if size is None: + size = df_wrapper.get_len(df) + for i in range(df_wrapper.get_len(df)): + all_buffer.append(df_wrapper.get_item(df, i)) + random.shuffle(all_buffer) + buffer = [] + for df in all_buffer: + buffer.append(df) + if len(buffer) == size: + yield df_wrapper.concat(buffer) + buffer = [] + if len(buffer): + yield df_wrapper.concat(buffer) + + +@functional_datapipe('_dataframes_filter', 
enable_df_api_tracing=True) +class FilterDataFramesPipe(DFIterDataPipe): + def __init__(self, source_datapipe, filter_fn): + self.source_datapipe = source_datapipe + self.filter_fn = filter_fn + + def __iter__(self): + size = None + all_buffer = [] + filter_res = [] + for df in self.source_datapipe: + if size is None: + size = len(df.index) + for i in range(len(df.index)): + all_buffer.append(df[i:i + 1]) + filter_res.append(self.filter_fn(df.iloc[i])) + + buffer = [] + for df, res in zip(all_buffer, filter_res): + if res: + buffer.append(df) + if len(buffer) == size: + yield df_wrapper.concat(buffer) + buffer = [] + if len(buffer): + yield df_wrapper.concat(buffer) + + +@functional_datapipe('_to_dataframes_pipe', enable_df_api_tracing=True) +class ExampleAggregateAsDataFrames(DFIterDataPipe): + def __init__(self, source_datapipe, dataframe_size=10, columns=None): + self.source_datapipe = source_datapipe + self.columns = columns + self.dataframe_size = dataframe_size + + def _as_list(self, item): + try: + return list(item) + except Exception: # TODO(VitalyFedyunin): Replace with better iterable exception + return [item] + + def __iter__(self): + aggregate = [] + for item in self.source_datapipe: + aggregate.append(self._as_list(item)) + if len(aggregate) == self.dataframe_size: + yield df_wrapper.create_dataframe(aggregate, columns=self.columns) + aggregate = [] + if len(aggregate) > 0: + yield df_wrapper.create_dataframe(aggregate, columns=self.columns) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/structures.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/structures.py new file mode 100644 index 0000000000000000000000000000000000000000..507a04e491d30e36b1f4bbec2a676efc53db7194 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/structures.py @@ -0,0 +1,18 @@ +from torch.utils.data.datapipes.datapipe import DataChunk +from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper + +__all__ = ["DataChunkDF", ] + + +class DataChunkDF(DataChunk): + """DataChunkDF iterating over individual items inside of DataFrame containers, to access DataFrames user `raw_iterator`.""" + + def __iter__(self): + for df in self.items: + yield from df_wrapper.iterate(df) + + def __len__(self): + total_len = 0 + for df in self.items: + total_len += df_wrapper.get_len(df) + return total_len diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.py new file mode 100644 index 0000000000000000000000000000000000000000..c6d9baf95ae7dac626dae135305f21454aff96d5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.py @@ -0,0 +1,404 @@ +import functools +import pickle +from typing import Dict, Callable, Optional, TypeVar, Generic, Iterator + +from torch.utils.data.datapipes._typing import _DataPipeMeta, _IterDataPipeMeta +from torch.utils.data.datapipes._hook_iterator import _SnapshotState +from torch.utils.data.datapipes.utils.common import ( + _deprecation_warning, + _iter_deprecated_functional_names, + _map_deprecated_functional_names, +) +from torch.utils.data.dataset import Dataset, IterableDataset +from torch.utils._import_utils import import_dill + +dill = import_dill() +HAS_DILL = dill is not None + +__all__ = [ + "DataChunk", + "DFIterDataPipe", + "IterDataPipe", + "MapDataPipe", +] + +T = TypeVar('T') +T_co = 
TypeVar('T_co', covariant=True) + +UNTRACABLE_DATAFRAME_PIPES = ['batch', # As it returns DataChunks + 'groupby', # As it returns DataChunks + '_dataframes_as_tuples', # As it unpacks DF + 'trace_as_dataframe', # As it used to mark DF for tracing + ] + + +class IterDataPipe(IterableDataset[T_co], metaclass=_IterDataPipeMeta): + r""" + Iterable-style DataPipe. + + All DataPipes that represent an iterable of data samples should subclass this. + This style of DataPipes is particularly useful when data come from a stream, or + when the number of samples is too large to fit them all in memory. ``IterDataPipe`` is lazily initialized and its + elements are computed only when ``next()`` is called on the iterator of an ``IterDataPipe``. + + All subclasses should overwrite :meth:`__iter__`, which would return an + iterator of samples in this DataPipe. Calling ``__iter__`` of an ``IterDataPipe`` automatically invokes its + method ``reset()``, which by default performs no operation. When writing a custom ``IterDataPipe``, users should + override ``reset()`` if necessary. The common usages include resetting buffers, pointers, + and various state variables within the custom ``IterDataPipe``. + + Note: + Only `one` iterator can be valid for each ``IterDataPipe`` at a time, + and the creation a second iterator will invalidate the first one. This constraint is necessary because + some ``IterDataPipe`` have internal buffers, whose states can become invalid if there are multiple iterators. + The code example below presents details on how this constraint looks in practice. + If you have any feedback related to this constraint, please see `GitHub IterDataPipe Single Iterator Issue`_. + + These DataPipes can be invoked in two ways, using the class constructor or applying their + functional form onto an existing ``IterDataPipe`` (recommended, available to most but not all DataPipes). + You can chain multiple `IterDataPipe` together to form a pipeline that will perform multiple + operations in succession. + + .. _GitHub IterDataPipe Single Iterator Issue: + https://github.com/pytorch/data/issues/45 + + Note: + When a subclass is used with :class:`~torch.utils.data.DataLoader`, each + item in the DataPipe will be yielded from the :class:`~torch.utils.data.DataLoader` + iterator. When :attr:`num_workers > 0`, each worker process will have a + different copy of the DataPipe object, so it is often desired to configure + each copy independently to avoid having duplicate data returned from the + workers. :func:`~torch.utils.data.get_worker_info`, when called in a worker + process, returns information about the worker. It can be used in either the + dataset's :meth:`__iter__` method or the :class:`~torch.utils.data.DataLoader` 's + :attr:`worker_init_fn` option to modify each copy's behavior. 
+ + Examples: + General Usage: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper, Mapper + >>> dp = IterableWrapper(range(10)) + >>> map_dp_1 = Mapper(dp, lambda x: x + 1) # Using class constructor + >>> map_dp_2 = dp.map(lambda x: x + 1) # Using functional form (recommended) + >>> list(map_dp_1) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> list(map_dp_2) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> filter_dp = map_dp_1.filter(lambda x: x % 2 == 0) + >>> list(filter_dp) + [2, 4, 6, 8, 10] + Single Iterator Constraint Example: + >>> from torchdata.datapipes.iter import IterableWrapper, Mapper + >>> source_dp = IterableWrapper(range(10)) + >>> it1 = iter(source_dp) + >>> list(it1) + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + >>> it1 = iter(source_dp) + >>> it2 = iter(source_dp) # The creation of a new iterator invalidates `it1` + >>> next(it2) + 0 + >>> next(it1) # Further usage of `it1` will raise a `RunTimeError` + """ + + functions: Dict[str, Callable] = {} + reduce_ex_hook: Optional[Callable] = None + getstate_hook: Optional[Callable] = None + str_hook: Optional[Callable] = None + repr_hook: Optional[Callable] = None + _valid_iterator_id: Optional[int] = None + _number_of_samples_yielded: int = 0 + _snapshot_state: _SnapshotState = _SnapshotState.NotStarted + _fast_forward_iterator: Optional[Iterator] = None + + def __iter__(self) -> Iterator[T_co]: + return self + + def __getattr__(self, attribute_name): + if attribute_name in IterDataPipe.functions: + if attribute_name in _iter_deprecated_functional_names: + kwargs = _iter_deprecated_functional_names[attribute_name] + _deprecation_warning(**kwargs) + f = IterDataPipe.functions[attribute_name] + function = functools.partial(f, self) + functools.update_wrapper(wrapper=function, wrapped=f, assigned=("__doc__",)) + return function + else: + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{attribute_name}") + + @classmethod + def register_function(cls, function_name, function): + cls.functions[function_name] = function + + @classmethod + def register_datapipe_as_function(cls, function_name, cls_to_register, enable_df_api_tracing=False): + if function_name in cls.functions: + raise Exception(f"Unable to add DataPipe function name {function_name} as it is already taken") + + def class_function(cls, enable_df_api_tracing, source_dp, *args, **kwargs): + result_pipe = cls(source_dp, *args, **kwargs) + if isinstance(result_pipe, IterDataPipe): + if enable_df_api_tracing or isinstance(source_dp, DFIterDataPipe): + if function_name not in UNTRACABLE_DATAFRAME_PIPES: + result_pipe = result_pipe.trace_as_dataframe() + + return result_pipe + + function = functools.partial( + class_function, cls_to_register, enable_df_api_tracing + ) + functools.update_wrapper( + wrapper=function, wrapped=cls_to_register, assigned=("__doc__",) + ) + cls.functions[function_name] = function + + def __getstate__(self): + """ + Serialize `lambda` functions when `dill` is available. + + If this doesn't cover your custom DataPipe's use case, consider writing custom methods for + `__getstate__` and `__setstate__`, or use `pickle.dumps` for serialization. 
+ """ + state = self.__dict__ + if IterDataPipe.getstate_hook is not None: + return IterDataPipe.getstate_hook(state) + return state + + def __reduce_ex__(self, *args, **kwargs): + if IterDataPipe.reduce_ex_hook is not None: + try: + return IterDataPipe.reduce_ex_hook(self) + except NotImplementedError: + pass + return super().__reduce_ex__(*args, **kwargs) + + @classmethod + def set_getstate_hook(cls, hook_fn): + if IterDataPipe.getstate_hook is not None and hook_fn is not None: + raise Exception("Attempt to override existing getstate_hook") + IterDataPipe.getstate_hook = hook_fn + + @classmethod + def set_reduce_ex_hook(cls, hook_fn): + if IterDataPipe.reduce_ex_hook is not None and hook_fn is not None: + raise Exception("Attempt to override existing reduce_ex_hook") + IterDataPipe.reduce_ex_hook = hook_fn + + def __repr__(self): + if self.repr_hook is not None: + return self.repr_hook(self) + # Instead of showing , return the class name + return str(self.__class__.__qualname__) + + def __str__(self): + if self.str_hook is not None: + return self.str_hook(self) + # Instead of showing , return the class name + return str(self.__class__.__qualname__) + + def __dir__(self): + # for auto-completion in a REPL (e.g. Jupyter notebook) + return list(super().__dir__()) + list(self.functions.keys()) + + def reset(self) -> None: + r""" + Reset the `IterDataPipe` to the initial state. + + By default, no-op. For subclasses of `IterDataPipe`, depending on their functionalities, + they may want to override this method with implementations that + may clear the buffers and reset pointers of the DataPipe. + The `reset` method is always called when `__iter__` is called as part of `hook_iterator`. + """ + pass + + +class DFIterDataPipe(IterDataPipe): + def _is_dfpipe(self): + return True + + +class MapDataPipe(Dataset[T_co], metaclass=_DataPipeMeta): + r""" + Map-style DataPipe. + + All datasets that represent a map from keys to data samples should subclass this. + Subclasses should overwrite :meth:`__getitem__`, supporting fetching a + data sample for a given, unique key. Subclasses can also optionally overwrite + :meth:`__len__`, which is expected to return the size of the dataset by many + :class:`~torch.utils.data.Sampler` implementations and the default options + of :class:`~torch.utils.data.DataLoader`. + + These DataPipes can be invoked in two ways, using the class constructor or applying their + functional form onto an existing `MapDataPipe` (recommend, available to most but not all DataPipes). + + Note: + :class:`~torch.utils.data.DataLoader` by default constructs an index + sampler that yields integral indices. To make it work with a map-style + DataPipe with non-integral indices/keys, a custom sampler must be provided. 
+ + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper, Mapper + >>> dp = SequenceWrapper(range(10)) + >>> map_dp_1 = dp.map(lambda x: x + 1) # Using functional form (recommended) + >>> list(map_dp_1) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> map_dp_2 = Mapper(dp, lambda x: x + 1) # Using class constructor + >>> list(map_dp_2) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> batch_dp = map_dp_1.batch(batch_size=2) + >>> list(batch_dp) + [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] + """ + + functions: Dict[str, Callable] = {} + reduce_ex_hook: Optional[Callable] = None + getstate_hook: Optional[Callable] = None + str_hook: Optional[Callable] = None + repr_hook: Optional[Callable] = None + + def __getattr__(self, attribute_name): + if attribute_name in MapDataPipe.functions: + if attribute_name in _map_deprecated_functional_names: + kwargs = _map_deprecated_functional_names[attribute_name] + _deprecation_warning(**kwargs) + f = MapDataPipe.functions[attribute_name] + function = functools.partial(f, self) + functools.update_wrapper(wrapper=function, wrapped=f, assigned=("__doc__",)) + return function + else: + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{attribute_name}") + + @classmethod + def register_function(cls, function_name, function): + cls.functions[function_name] = function + + @classmethod + def register_datapipe_as_function(cls, function_name, cls_to_register): + if function_name in cls.functions: + raise Exception(f"Unable to add DataPipe function name {function_name} as it is already taken") + + def class_function(cls, source_dp, *args, **kwargs): + result_pipe = cls(source_dp, *args, **kwargs) + return result_pipe + + function = functools.partial(class_function, cls_to_register) + functools.update_wrapper( + wrapper=function, wrapped=cls_to_register, assigned=("__doc__",) + ) + cls.functions[function_name] = function + + def __getstate__(self): + """ + Serialize `lambda` functions when `dill` is available. + + If this doesn't cover your custom DataPipe's use case, consider writing custom methods for + `__getstate__` and `__setstate__`, or use `pickle.dumps` for serialization. + """ + state = self.__dict__ + if MapDataPipe.getstate_hook is not None: + return MapDataPipe.getstate_hook(state) + return state + + def __reduce_ex__(self, *args, **kwargs): + if MapDataPipe.reduce_ex_hook is not None: + try: + return MapDataPipe.reduce_ex_hook(self) + except NotImplementedError: + pass + return super().__reduce_ex__(*args, **kwargs) + + @classmethod + def set_getstate_hook(cls, hook_fn): + if MapDataPipe.getstate_hook is not None and hook_fn is not None: + raise Exception("Attempt to override existing getstate_hook") + MapDataPipe.getstate_hook = hook_fn + + @classmethod + def set_reduce_ex_hook(cls, hook_fn): + if MapDataPipe.reduce_ex_hook is not None and hook_fn is not None: + raise Exception("Attempt to override existing reduce_ex_hook") + MapDataPipe.reduce_ex_hook = hook_fn + + def __repr__(self): + if self.repr_hook is not None: + return self.repr_hook(self) + # Instead of showing , return the class name + return str(self.__class__.__qualname__) + + def __str__(self): + if self.str_hook is not None: + return self.str_hook(self) + # Instead of showing , return the class name + return str(self.__class__.__qualname__) + + def __dir__(self): + # for auto-completion in a REPL (e.g. 
Jupyter notebook) + return list(super().__dir__()) + list(self.functions.keys()) + + + +class _DataPipeSerializationWrapper: + def __init__(self, datapipe): + self._datapipe = datapipe + + def __getstate__(self): + use_dill = False + try: + value = pickle.dumps(self._datapipe) + except Exception: + if HAS_DILL: + value = dill.dumps(self._datapipe) + use_dill = True + else: + raise + return (value, use_dill) + + def __setstate__(self, state): + value, use_dill = state + if use_dill: + self._datapipe = dill.loads(value) + else: + self._datapipe = pickle.loads(value) + + def __len__(self): + try: + return len(self._datapipe) + except Exception as e: + raise TypeError( + f"{type(self).__name__} instance doesn't have valid length" + ) from e + + +class _IterDataPipeSerializationWrapper(_DataPipeSerializationWrapper, IterDataPipe): + def __init__(self, datapipe: IterDataPipe[T_co]): + super().__init__(datapipe) + self._datapipe_iter: Optional[Iterator[T_co]] = None + + def __iter__(self) -> "_IterDataPipeSerializationWrapper": + self._datapipe_iter = iter(self._datapipe) + return self + + def __next__(self) -> T_co: # type: ignore[type-var] + assert self._datapipe_iter is not None + return next(self._datapipe_iter) + + +class _MapDataPipeSerializationWrapper(_DataPipeSerializationWrapper, MapDataPipe): + def __getitem__(self, idx): + return self._datapipe[idx] + + +class DataChunk(list, Generic[T]): + def __init__(self, items): + super().__init__(items) + self.items = items + + def as_str(self, indent=''): + res = indent + "[" + ", ".join(str(i) for i in iter(self)) + "]" + return res + + def __iter__(self) -> Iterator[T]: + yield from super().__iter__() + + def raw_iterator(self) -> T: # type: ignore[misc] + yield from self.items diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.pyi b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.pyi new file mode 100644 index 0000000000000000000000000000000000000000..34e80bcb95f5e487c8fd7c9e8dcb01db56307bc5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/datapipe.pyi @@ -0,0 +1,689 @@ +# This base template ("datapipe.pyi.in") is generated from mypy stubgen with minimal editing for code injection +# The output file will be "datapipe.pyi". This is executed as part of torch/CMakeLists.txt +# Note that, for mypy, .pyi file takes precedent over .py file, such that we must define the interface for other +# classes/objects here, even though we are not injecting extra code into them at the moment. + +from typing import Any, Callable, Dict, Generic, Iterator, List, Literal, Optional, TypeVar, Union + +from torch.utils.data import Dataset, default_collate, IterableDataset +from torch.utils.data.datapipes._hook_iterator import _SnapshotState +from torch.utils.data.datapipes._typing import _DataPipeMeta, _IterDataPipeMeta + +T_co = TypeVar("T_co", covariant=True) +T = TypeVar("T") +UNTRACABLE_DATAFRAME_PIPES: Any + +class MapDataPipe(Dataset[T_co], metaclass=_DataPipeMeta): + functions: Dict[str, Callable] = ... + reduce_ex_hook: Optional[Callable] = ... + getstate_hook: Optional[Callable] = ... + str_hook: Optional[Callable] = ... + repr_hook: Optional[Callable] = ... + def __getattr__(self, attribute_name: Any): ... + @classmethod + def register_function(cls, function_name: Any, function: Any) -> None: ... + @classmethod + def register_datapipe_as_function( + cls, + function_name: Any, + cls_to_register: Any, + ): ... + def __getstate__(self): ... 
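    # A minimal usage sketch of the serialization hook mechanism implemented in
    # datapipe.py above; `my_reduce_hook` is a hypothetical callable shown only
    # for illustration, not an API defined by this stub:
    #
    #     def my_reduce_hook(dp):
    #         # Raising NotImplementedError makes __reduce_ex__ fall back to the
    #         # default pickling protocol for this datapipe.
    #         raise NotImplementedError
    #
    #     MapDataPipe.set_reduce_ex_hook(my_reduce_hook)  # install the hook
    #     MapDataPipe.set_reduce_ex_hook(None)            # remove it again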
+ def __reduce_ex__(self, *args: Any, **kwargs: Any): ... + @classmethod + def set_getstate_hook(cls, hook_fn: Any) -> None: ... + @classmethod + def set_reduce_ex_hook(cls, hook_fn: Any) -> None: ... + # Functional form of 'BatcherMapDataPipe' + def batch(self, batch_size: int, drop_last: bool = False, wrapper_class=DataChunk) -> MapDataPipe: + r""" + Create mini-batches of data (functional name: ``batch``). + + An outer dimension will be added as ``batch_size`` if ``drop_last`` is set to ``True``, + or ``length % batch_size`` for the last batch if ``drop_last`` is set to ``False``. + + Args: + datapipe: Iterable DataPipe being batched + batch_size: The size of each batch + drop_last: Option to drop the last batch if it's not full + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper + >>> dp = SequenceWrapper(range(10)) + >>> batch_dp = dp.batch(batch_size=2) + >>> list(batch_dp) + [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]] + """ + + # Functional form of 'ConcaterMapDataPipe' + def concat(self, *datapipes: MapDataPipe) -> MapDataPipe: + r""" + Concatenate multiple Map DataPipes (functional name: ``concat``). + + The new index of is the cumulative sum of source DataPipes. + For example, if there are 2 source DataPipes both with length 5, + index 0 to 4 of the resulting `ConcatMapDataPipe` would refer to + elements of the first DataPipe, and 5 to 9 would refer to elements + of the second DataPipe. + + Args: + datapipes: Map DataPipes being concatenated + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper + >>> dp1 = SequenceWrapper(range(3)) + >>> dp2 = SequenceWrapper(range(3)) + >>> concat_dp = dp1.concat(dp2) + >>> list(concat_dp) + [0, 1, 2, 0, 1, 2] + """ + + # Functional form of 'MapperMapDataPipe' + def map(self, fn: Callable= ...) -> MapDataPipe: + r""" + Apply the input function over each item from the source DataPipe (functional name: ``map``). + + The function can be any regular Python function or partial object. Lambda + function is not recommended as it is not supported by pickle. + + Args: + datapipe: Source MapDataPipe + fn: Function being applied to each item + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper, Mapper + >>> def add_one(x): + ... return x + 1 + >>> dp = SequenceWrapper(range(10)) + >>> map_dp_1 = dp.map(add_one) + >>> list(map_dp_1) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> map_dp_2 = Mapper(dp, lambda x: x + 1) + >>> list(map_dp_2) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + """ + + # Functional form of 'ShufflerIterDataPipe' + def shuffle(self, *, indices: Optional[List] = None) -> IterDataPipe: + r""" + Shuffle the input MapDataPipe via its indices (functional name: ``shuffle``). + + When it is used with :class:`~torch.utils.data.DataLoader`, the methods to + set up random seed are different based on :attr:`num_workers`. + + For single-process mode (:attr:`num_workers == 0`), the random seed is set before + the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process + mode (:attr:`num_worker > 0`), ``worker_init_fn`` is used to set up a random seed + for each worker process. + + Args: + datapipe: MapDataPipe being shuffled + indices: a list of indices of the MapDataPipe. 
If not provided, we assume it uses 0-based indexing + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper + >>> dp = SequenceWrapper(range(10)) + >>> shuffle_dp = dp.shuffle().set_seed(0) + >>> list(shuffle_dp) + [7, 8, 1, 5, 3, 4, 2, 0, 9, 6] + >>> list(shuffle_dp) + [6, 1, 9, 5, 2, 4, 7, 3, 8, 0] + >>> # Reset seed for Shuffler + >>> shuffle_dp = shuffle_dp.set_seed(0) + >>> list(shuffle_dp) + [7, 8, 1, 5, 3, 4, 2, 0, 9, 6] + + Note: + Even thought this ``shuffle`` operation takes a ``MapDataPipe`` as the input, it would return an + ``IterDataPipe`` rather than a ``MapDataPipe``, because ``MapDataPipe`` should be non-sensitive to + the order of data order for the sake of random reads, but ``IterDataPipe`` depends on the order + of data during data-processing. + """ + + # Functional form of 'ZipperMapDataPipe' + def zip(self, *datapipes: MapDataPipe[T_co]) -> MapDataPipe: + r""" + Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``). + + This MataPipe is out of bound as soon as the shortest input DataPipe is exhausted. + + Args: + *datapipes: Map DataPipes being aggregated + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.map import SequenceWrapper + >>> dp1 = SequenceWrapper(range(3)) + >>> dp2 = SequenceWrapper(range(10, 13)) + >>> zip_dp = dp1.zip(dp2) + >>> list(zip_dp) + [(0, 10), (1, 11), (2, 12)] + """ + + +class IterDataPipe(IterableDataset[T_co], metaclass=_IterDataPipeMeta): + functions: Dict[str, Callable] = ... + reduce_ex_hook: Optional[Callable] = ... + getstate_hook: Optional[Callable] = ... + str_hook: Optional[Callable] = ... + repr_hook: Optional[Callable] = ... + _number_of_samples_yielded: int = ... + _snapshot_state: _SnapshotState = _SnapshotState.Iterating + _fast_forward_iterator: Optional[Iterator] = ... + def __getattr__(self, attribute_name: Any): ... + @classmethod + def register_function(cls, function_name: Any, function: Any) -> None: ... + @classmethod + def register_datapipe_as_function( + cls, + function_name: Any, + cls_to_register: Any, + enable_df_api_tracing: bool = ..., + ): ... + def __getstate__(self): ... + def __reduce_ex__(self, *args: Any, **kwargs: Any): ... + @classmethod + def set_getstate_hook(cls, hook_fn: Any) -> None: ... + @classmethod + def set_reduce_ex_hook(cls, hook_fn: Any) -> None: ... + # Functional form of 'BatcherIterDataPipe' + def batch(self, batch_size: int, drop_last: bool = False, wrapper_class=DataChunk) -> IterDataPipe: + r""" + Creates mini-batches of data (functional name: ``batch``). + + An outer dimension will be added as ``batch_size`` if ``drop_last`` is set to ``True``, or ``length % batch_size`` for the + last batch if ``drop_last`` is set to ``False``. 
+ + Args: + datapipe: Iterable DataPipe being batched + batch_size: The size of each batch + drop_last: Option to drop the last batch if it's not full + wrapper_class: wrapper to apply onto each batch (type ``List``) before yielding, + defaults to ``DataChunk`` + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp = IterableWrapper(range(10)) + >>> dp = dp.batch(batch_size=3, drop_last=True) + >>> list(dp) + [[0, 1, 2], [3, 4, 5], [6, 7, 8]] + """ + + # Functional form of 'CollatorIterDataPipe' + def collate(self, conversion: Optional[Union[Callable[..., Any],Dict[Union[str, Any], Union[Callable, Any]],]] = default_collate, collate_fn: Optional[Callable] = None) -> IterDataPipe: + r""" + Collates samples from DataPipe to Tensor(s) by a custom collate function (functional name: ``collate``). + + By default, it uses :func:`torch.utils.data.default_collate`. + + .. note:: + While writing a custom collate function, you can import :func:`torch.utils.data.default_collate` for the + default behavior and `functools.partial` to specify any additional arguments. + + Args: + datapipe: Iterable DataPipe being collated + collate_fn: Customized collate function to collect and combine data or a batch of data. + Default function collates to Tensor(s) based on data type. + + Example: + >>> # xdoctest: +SKIP + >>> # Convert integer data to float Tensor + >>> class MyIterDataPipe(torch.utils.data.IterDataPipe): + ... def __init__(self, start, end): + ... super(MyIterDataPipe).__init__() + ... assert end > start, "this example code only works with end >= start" + ... self.start = start + ... self.end = end + ... + ... def __iter__(self): + ... return iter(range(self.start, self.end)) + ... + ... def __len__(self): + ... return self.end - self.start + ... + >>> ds = MyIterDataPipe(start=3, end=7) + >>> print(list(ds)) + [3, 4, 5, 6] + >>> def collate_fn(batch): + ... return torch.tensor(batch, dtype=torch.float) + ... + >>> collated_ds = CollateIterDataPipe(ds, collate_fn=collate_fn) + >>> print(list(collated_ds)) + [tensor(3.), tensor(4.), tensor(5.), tensor(6.)] + """ + + # Functional form of 'ConcaterIterDataPipe' + def concat(self, *datapipes: IterDataPipe) -> IterDataPipe: + r""" + Concatenates multiple Iterable DataPipes (functional name: ``concat``). + + The resulting DataPipe will yield all the elements from the first input DataPipe, before yielding from the subsequent ones. + + Args: + datapipes: Iterable DataPipes being concatenated + + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> import random + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp1 = IterableWrapper(range(3)) + >>> dp2 = IterableWrapper(range(5)) + >>> list(dp1.concat(dp2)) + [0, 1, 2, 0, 1, 2, 3, 4] + """ + + # Functional form of 'DemultiplexerIterDataPipe' + def demux(self, num_instances: int, classifier_fn: Callable[[T_co], Optional[int]], drop_none: bool = False, buffer_size: int = 1000) -> List[IterDataPipe]: + r""" + Splits the input DataPipe into multiple child DataPipes, using the given classification function (functional name: ``demux``). + + A list of the child DataPipes is returned from this operation. 
+ + Args: + datapipe: Iterable DataPipe being filtered + num_instances: number of instances of the DataPipe to create + classifier_fn: a function that maps values to an integer within the range ``[0, num_instances - 1]`` or ``None`` + drop_none: defaults to ``False``, if ``True``, the function will skip over elements classified as ``None`` + buffer_size: this defines the maximum number of inputs that the buffer can hold across all child + DataPipes while waiting for their values to be yielded. + Defaults to ``1000``. Use ``-1`` for the unlimited buffer. + + Examples: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> from torchdata.datapipes.iter import IterableWrapper + >>> def odd_or_even(n): + ... return n % 2 + >>> source_dp = IterableWrapper(range(5)) + >>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even) + >>> list(dp1) + [0, 2, 4] + >>> list(dp2) + [1, 3] + >>> # It can also filter out any element that gets `None` from the `classifier_fn` + >>> def odd_or_even_no_zero(n): + ... return n % 2 if n != 0 else None + >>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even_no_zero, drop_none=True) + >>> list(dp1) + [2, 4] + >>> list(dp2) + [1, 3] + """ + + # Functional form of 'FilterIterDataPipe' + def filter(self, filter_fn: Callable, input_col=None) -> IterDataPipe: + r""" + Filters out elements from the source datapipe according to input ``filter_fn`` (functional name: ``filter``). + + Args: + datapipe: Iterable DataPipe being filtered + filter_fn: Customized function mapping an element to a boolean. + input_col: Index or indices of data which ``filter_fn`` is applied, such as: + + - ``None`` as default to apply ``filter_fn`` to the data directly. + - Integer(s) is used for list/tuple. + - Key(s) is used for dict. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper + >>> def is_even(n): + ... return n % 2 == 0 + >>> dp = IterableWrapper(range(5)) + >>> filter_dp = dp.filter(filter_fn=is_even) + >>> list(filter_dp) + [0, 2, 4] + """ + + # Functional form of 'ForkerIterDataPipe' + def fork(self, num_instances: int, buffer_size: int = 1000, copy: Optional[Literal["shallow", "deep"]] = None) -> List[IterDataPipe]: + r""" + Creates multiple instances of the same Iterable DataPipe (functional name: ``fork``). + + Args: + datapipe: Iterable DataPipe being copied + num_instances: number of instances of the datapipe to create + buffer_size: this restricts how far ahead the leading child DataPipe + can read relative to the slowest child DataPipe. + Defaults to ``1000``. Use ``-1`` for the unlimited buffer. + copy: copy strategy to use for items yielded by each branch. Supported + options are ``None`` for no copying, ``"shallow"`` for shallow object + copies, and ``"deep"`` for deep object copies. Defaults to ``None``. + + Note: + All branches of the forked pipeline return the identical object unless + the copy parameter is supplied. If the object is mutable or contains + mutable objects, changing them in one branch will affect all others. 
+ + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> from torchdata.datapipes.iter import IterableWrapper + >>> source_dp = IterableWrapper(range(5)) + >>> dp1, dp2 = source_dp.fork(num_instances=2) + >>> list(dp1) + [0, 1, 2, 3, 4] + >>> list(dp2) + [0, 1, 2, 3, 4] + """ + + # Functional form of 'GrouperIterDataPipe' + def groupby(self, group_key_fn: Callable[[T_co], Any], *, keep_key: bool = False, buffer_size: int = 10000, group_size: Optional[int] = None, guaranteed_group_size: Optional[int] = None, drop_remaining: bool = False) -> IterDataPipe: + r""" + Groups data from IterDataPipe by keys from ``group_key_fn``, yielding a ``DataChunk`` with batch size up to ``group_size``. + + (functional name: ``groupby``). + + The samples are read sequentially from the source ``datapipe``, and a batch of samples belonging to the same group + will be yielded as soon as the size of the batch reaches ``group_size``. When the buffer is full, + the DataPipe will yield the largest batch with the same key, provided that its size is larger + than ``guaranteed_group_size``. If its size is smaller, it will be dropped if ``drop_remaining=True``. + + After iterating through the entirety of source ``datapipe``, everything not dropped due to the buffer capacity + will be yielded from the buffer, even if the group sizes are smaller than ``guaranteed_group_size``. + + Args: + datapipe: Iterable datapipe to be grouped + group_key_fn: Function used to generate group key from the data of the source datapipe + keep_key: Option to yield the matching key along with the items in a tuple, + resulting in `(key, [items])` otherwise returning [items] + buffer_size: The size of buffer for ungrouped data + group_size: The max size of each group, a batch is yielded as soon as it reaches this size + guaranteed_group_size: The guaranteed minimum group size to be yielded in case the buffer is full + drop_remaining: Specifies if the group smaller than ``guaranteed_group_size`` will be dropped from buffer + when the buffer is full + + Example: + >>> import os + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper + >>> def group_fn(file): + ... return os.path.basename(file).split(".")[0] + >>> source_dp = IterableWrapper(["a.png", "b.png", "a.json", "b.json", "a.jpg", "c.json"]) + >>> dp0 = source_dp.groupby(group_key_fn=group_fn) + >>> list(dp0) + [['a.png', 'a.json', 'a.jpg'], ['b.png', 'b.json'], ['c.json']] + >>> # A group is yielded as soon as its size equals to `group_size` + >>> dp1 = source_dp.groupby(group_key_fn=group_fn, group_size=2) + >>> list(dp1) + [['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']] + >>> # Scenario where `buffer` is full, and group 'a' needs to be yielded since its size > `guaranteed_group_size` + >>> dp2 = source_dp.groupby(group_key_fn=group_fn, buffer_size=3, group_size=3, guaranteed_group_size=2) + >>> list(dp2) + [['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']] + """ + + # Functional form of 'FileListerIterDataPipe' + def list_files(self, masks: Union[str, List[str]] = '', *, recursive: bool = False, abspath: bool = False, non_deterministic: bool = False, length: int = -1) -> IterDataPipe: + r""" + Given path(s) to the root directory, yields file pathname(s) (path + filename) of files within the root directory. + + Multiple root directories can be provided (functional name: ``list_files``). 
+ + Args: + root: Root directory or a sequence of root directories + masks: Unix style filter string or string list for filtering file name(s) + recursive: Whether to return pathname from nested directories or not + abspath: Whether to return relative pathname or absolute pathname + non_deterministic: Whether to return pathname in sorted order or not. + If ``False``, the results yielded from each root directory will be sorted + length: Nominal length of the datapipe + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import FileLister + >>> dp = FileLister(root=".", recursive=True) + >>> list(dp) + ['example.py', './data/data.tar'] + """ + + # Functional form of 'MapperIterDataPipe' + def map(self, fn: Callable, input_col=None, output_col=None) -> IterDataPipe: + r""" + Applies a function over each item from the source DataPipe (functional name: ``map``). + + The function can be any regular Python function or partial object. Lambda + function is not recommended as it is not supported by pickle. + + Args: + datapipe: Source Iterable DataPipe + fn: Function being applied over each item + input_col: Index or indices of data which ``fn`` is applied, such as: + + - ``None`` as default to apply ``fn`` to the data directly. + - Integer(s) is used for list/tuple. + - Key(s) is used for dict. + + output_col: Index of data where result of ``fn`` is placed. ``output_col`` can be specified + only when ``input_col`` is not ``None`` + + - ``None`` as default to replace the index that ``input_col`` specified; For ``input_col`` with + multiple indices, the left-most one is used, and other indices will be removed. + - Integer is used for list/tuple. ``-1`` represents to append result at the end. + - Key is used for dict. New key is acceptable. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper, Mapper + >>> def add_one(x): + ... return x + 1 + >>> dp = IterableWrapper(range(10)) + >>> map_dp_1 = dp.map(add_one) # Invocation via functional form is preferred + >>> list(map_dp_1) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> # We discourage the usage of `lambda` functions as they are not serializable with `pickle` + >>> # Use `functools.partial` or explicitly define the function instead + >>> map_dp_2 = Mapper(dp, lambda x: x + 1) + >>> list(map_dp_2) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + """ + + # Functional form of 'MultiplexerIterDataPipe' + def mux(self, *datapipes) -> IterDataPipe: + r""" + Yields one element at a time from each of the input Iterable DataPipes (functional name: ``mux``). + + As in, one element from the 1st input DataPipe, then one element from the 2nd DataPipe in the next iteration, + and so on. It ends when the shortest input DataPipe is exhausted. + + Args: + datapipes: Iterable DataPipes that will take turn to yield their elements, until the shortest DataPipe is exhausted + + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp1, dp2, dp3 = IterableWrapper(range(3)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25)) + >>> list(dp1.mux(dp2, dp3)) + [0, 10, 20, 1, 11, 21, 2, 12, 22] + """ + + # Functional form of 'FileOpenerIterDataPipe' + def open_files(self, mode: str = 'r', encoding: Optional[str] = None, length: int = -1) -> IterDataPipe: + r""" + Given pathnames, opens files and yield pathname and file stream in a tuple (functional name: ``open_files``). 
+ + Args: + datapipe: Iterable datapipe that provides pathnames + mode: An optional string that specifies the mode in which + the file is opened by ``open()``. It defaults to ``r``, other options are + ``b`` for reading in binary mode and ``t`` for text mode. + encoding: An optional string that specifies the encoding of the + underlying file. It defaults to ``None`` to match the default encoding of ``open``. + length: Nominal length of the datapipe + + Note: + The opened file handles will be closed by Python's GC periodically. Users can choose + to close them explicitly. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import FileLister, FileOpener, StreamReader + >>> dp = FileLister(root=".").filter(lambda fname: fname.endswith('.txt')) + >>> dp = FileOpener(dp) + >>> dp = StreamReader(dp) + >>> list(dp) + [('./abc.txt', 'abc')] + """ + + # Functional form of 'StreamReaderIterDataPipe' + def read_from_stream(self, chunk=None) -> IterDataPipe: + r""" + Given IO streams and their label names, yield bytes with label name as tuple. + + (functional name: ``read_from_stream``). + + Args: + datapipe: Iterable DataPipe provides label/URL and byte stream + chunk: Number of bytes to be read from stream per iteration. + If ``None``, all bytes will be read until the EOF. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper, StreamReader + >>> from io import StringIO + >>> dp = IterableWrapper([("alphabet", StringIO("abcde"))]) + >>> list(StreamReader(dp, chunk=1)) + [('alphabet', 'a'), ('alphabet', 'b'), ('alphabet', 'c'), ('alphabet', 'd'), ('alphabet', 'e')] + """ + + # Functional form of 'RoutedDecoderIterDataPipe' + def routed_decode(self, *handlers: Callable, key_fn: Callable= ...) -> IterDataPipe: + r""" + Decodes binary streams from input DataPipe, yields pathname and decoded data in a tuple. + + (functional name: ``routed_decode``) + + Args: + datapipe: Iterable datapipe that provides pathname and binary stream in tuples + handlers: Optional user defined decoder handlers. If ``None``, basic and image decoder + handlers will be set as default. If multiple handles are provided, the priority + order follows the order of handlers (the first handler has the top priority) + key_fn: Function for decoder to extract key from pathname to dispatch handlers. + Default is set to extract file extension from pathname + + Note: + When ``key_fn`` is specified returning anything other than extension, the default + handler will not work and users need to specify custom handler. Custom handler + could use regex to determine the eligibility to handle data. + """ + + # Functional form of 'ShardingFilterIterDataPipe' + def sharding_filter(self, sharding_group_filter=None) -> IterDataPipe: + r""" + Wrapper that allows DataPipe to be sharded (functional name: ``sharding_filter``). + + After ``apply_sharding`` is called, each instance of the DataPipe (on different workers) will have every `n`-th element of the + original DataPipe, where `n` equals to the number of instances. + + Args: + source_datapipe: Iterable DataPipe that will be sharded + """ + + # Functional form of 'ShufflerIterDataPipe' + def shuffle(self, *, buffer_size: int = 10000, unbatch_level: int = 0) -> IterDataPipe: + r""" + Shuffle the input DataPipe with a buffer (functional name: ``shuffle``). + + The buffer with ``buffer_size`` is filled with elements from the datapipe first. Then, + each item will be yielded from the buffer by reservoir sampling via iterator. 
+ + ``buffer_size`` is required to be larger than ``0``. For ``buffer_size == 1``, the + datapipe is not shuffled. In order to fully shuffle all elements from datapipe, + ``buffer_size`` is required to be greater than or equal to the size of datapipe. + + When it is used with :class:`torch.utils.data.DataLoader`, the methods to + set up random seed are different based on :attr:`num_workers`. + + For single-process mode (:attr:`num_workers == 0`), the random seed is set before + the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process + mode (:attr:`num_worker > 0`), `worker_init_fn` is used to set up a random seed + for each worker process. + + Args: + datapipe: The IterDataPipe being shuffled + buffer_size: The buffer size for shuffling (default to ``10000``) + unbatch_level: Specifies if it is necessary to unbatch source data before + applying the shuffle + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp = IterableWrapper(range(10)) + >>> shuffle_dp = dp.shuffle() + >>> list(shuffle_dp) + [0, 4, 1, 6, 3, 2, 9, 5, 7, 8] + """ + + # Functional form of 'UnBatcherIterDataPipe' + def unbatch(self, unbatch_level: int = 1) -> IterDataPipe: + r""" + Undos batching of data (functional name: ``unbatch``). + + In other words, it flattens the data up to the specified level within a batched DataPipe. + + Args: + datapipe: Iterable DataPipe being un-batched + unbatch_level: Defaults to ``1`` (only flattening the top level). If set to ``2``, + it will flatten the top two levels, and ``-1`` will flatten the entire DataPipe. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper + >>> source_dp = IterableWrapper([[[0, 1], [2]], [[3, 4], [5]], [[6]]]) + >>> dp1 = source_dp.unbatch() + >>> list(dp1) + [[0, 1], [2], [3, 4], [5], [6]] + >>> dp2 = source_dp.unbatch(unbatch_level=2) + >>> list(dp2) + [0, 1, 2, 3, 4, 5, 6] + """ + + # Functional form of 'ZipperIterDataPipe' + def zip(self, *datapipes: IterDataPipe) -> IterDataPipe: + r""" + Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``). + + The output is stopped as soon as the shortest input DataPipe is exhausted. + + Args: + *datapipes: Iterable DataPipes being aggregated + + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp1, dp2, dp3 = IterableWrapper(range(5)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25)) + >>> list(dp1.zip(dp2, dp3)) + [(0, 10, 20), (1, 11, 21), (2, 12, 22), (3, 13, 23), (4, 14, 24)] + """ + + +class DFIterDataPipe(IterDataPipe): + def _is_dfpipe(self): ... + def __iter__(self): ... + +class _DataPipeSerializationWrapper: + def __init__(self, datapipe): ... + def __getstate__(self): ... + def __setstate__(self, state): ... + def __len__(self): ... + +class _IterDataPipeSerializationWrapper(_DataPipeSerializationWrapper, IterDataPipe): + def __iter__(self): ... + +class _MapDataPipeSerializationWrapper(_DataPipeSerializationWrapper, MapDataPipe): + def __getitem__(self, idx): ... 
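# A minimal sketch of how the serialization wrappers above are meant to be used,
# based on their implementation in datapipe.py (try `pickle`, fall back to `dill`
# when it is installed). `IterableWrapper` and the lambda pipeline are assumptions
# used only for illustration:
#
#     import pickle
#     from torchdata.datapipes.iter import IterableWrapper  # hypothetical source pipe
#
#     dp = IterableWrapper(range(3)).map(lambda x: x + 1)
#     wrapped = _IterDataPipeSerializationWrapper(dp)
#     payload = pickle.dumps(wrapped)   # the lambda serializes only when dill is available
#     restored = pickle.loads(payload)
#     list(restored)                    # -> [1, 2, 3]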
+ +class DataChunk(list, Generic[T]): + def __init__(self, items): + super().__init__(items) + self.items = items + def as_str(self, indent: str = "") -> str: + res = indent + "[" + ", ".join(str(i) for i in iter(self)) + "]" + return res + def __iter__(self) -> Iterator[T]: + yield from super().__iter__() + def raw_iterator(self) -> T: # type: ignore[misc] + yield from self.items diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/gen_pyi.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/gen_pyi.py new file mode 100644 index 0000000000000000000000000000000000000000..c0f8a801bd0763d206926d50bb513be6b9d3135d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/gen_pyi.py @@ -0,0 +1,246 @@ +import os +import pathlib +from collections import defaultdict +from typing import Any, Dict, List, Set, Tuple, Union + + +def materialize_lines(lines: List[str], indentation: int) -> str: + output = "" + new_line_with_indent = "\n" + " " * indentation + for i, line in enumerate(lines): + if i != 0: + output += new_line_with_indent + output += line.replace('\n', new_line_with_indent) + return output + + +def gen_from_template(dir: str, template_name: str, output_name: str, replacements: List[Tuple[str, Any, int]]): + + template_path = os.path.join(dir, template_name) + output_path = os.path.join(dir, output_name) + + with open(template_path) as f: + content = f.read() + for placeholder, lines, indentation in replacements: + with open(output_path, "w") as f: + content = content.replace(placeholder, materialize_lines(lines, indentation)) + f.write(content) + + +def find_file_paths(dir_paths: List[str], files_to_exclude: Set[str]) -> Set[str]: + """ + When given a path to a directory, returns the paths to the relevant files within it. + + This function does NOT recursive traverse to subdirectories. 
+ """ + paths: Set[str] = set() + for dir_path in dir_paths: + all_files = os.listdir(dir_path) + python_files = {fname for fname in all_files if ".py" == fname[-3:]} + filter_files = {fname for fname in python_files if fname not in files_to_exclude} + paths.update({os.path.join(dir_path, fname) for fname in filter_files}) + return paths + + +def extract_method_name(line: str) -> str: + """Extract method name from decorator in the form of "@functional_datapipe({method_name})".""" + if "(\"" in line: + start_token, end_token = "(\"", "\")" + elif "(\'" in line: + start_token, end_token = "(\'", "\')" + else: + raise RuntimeError(f"Unable to find appropriate method name within line:\n{line}") + start, end = line.find(start_token) + len(start_token), line.find(end_token) + return line[start:end] + + +def extract_class_name(line: str) -> str: + """Extract class name from class definition in the form of "class {CLASS_NAME}({Type}):".""" + start_token = "class " + end_token = "(" + start, end = line.find(start_token) + len(start_token), line.find(end_token) + return line[start:end] + + +def parse_datapipe_file(file_path: str) -> Tuple[Dict[str, str], Dict[str, str], Set[str], Dict[str, List[str]]]: + """Given a path to file, parses the file and returns a dictionary of method names to function signatures.""" + method_to_signature, method_to_class_name, special_output_type = {}, {}, set() + doc_string_dict = defaultdict(list) + with open(file_path) as f: + open_paren_count = 0 + method_name, class_name, signature = "", "", "" + skip = False + for line in f: + if line.count("\"\"\"") % 2 == 1: + skip = not skip + if skip or "\"\"\"" in line: # Saving docstrings + doc_string_dict[method_name].append(line) + continue + if "@functional_datapipe" in line: + method_name = extract_method_name(line) + doc_string_dict[method_name] = [] + continue + if method_name and "class " in line: + class_name = extract_class_name(line) + continue + if method_name and ("def __init__(" in line or "def __new__(" in line): + if "def __new__(" in line: + special_output_type.add(method_name) + open_paren_count += 1 + start = line.find("(") + len("(") + line = line[start:] + if open_paren_count > 0: + open_paren_count += line.count('(') + open_paren_count -= line.count(')') + if open_paren_count == 0: + end = line.rfind(')') + signature += line[:end] + method_to_signature[method_name] = process_signature(signature) + method_to_class_name[method_name] = class_name + method_name, class_name, signature = "", "", "" + elif open_paren_count < 0: + raise RuntimeError("open parenthesis count < 0. 
This shouldn't be possible.") + else: + signature += line.strip('\n').strip(' ') + return method_to_signature, method_to_class_name, special_output_type, doc_string_dict + + +def parse_datapipe_files(file_paths: Set[str]) -> Tuple[Dict[str, str], Dict[str, str], Set[str], Dict[str, List[str]]]: + methods_and_signatures, methods_and_class_names, methods_with_special_output_types = {}, {}, set() + methods_and_doc_strings = {} + for path in file_paths: + ( + method_to_signature, + method_to_class_name, + methods_needing_special_output_types, + doc_string_dict, + ) = parse_datapipe_file(path) + methods_and_signatures.update(method_to_signature) + methods_and_class_names.update(method_to_class_name) + methods_with_special_output_types.update(methods_needing_special_output_types) + methods_and_doc_strings.update(doc_string_dict) + return methods_and_signatures, methods_and_class_names, methods_with_special_output_types, methods_and_doc_strings + + +def split_outside_bracket(line: str, delimiter: str = ",") -> List[str]: + """Given a line of text, split it on comma unless the comma is within a bracket '[]'.""" + bracket_count = 0 + curr_token = "" + res = [] + for char in line: + if char == "[": + bracket_count += 1 + elif char == "]": + bracket_count -= 1 + elif char == delimiter and bracket_count == 0: + res.append(curr_token) + curr_token = "" + continue + curr_token += char + res.append(curr_token) + return res + + +def process_signature(line: str) -> str: + """ + Clean up a given raw function signature. + + This includes removing the self-referential datapipe argument, default + arguments of input functions, newlines, and spaces. + """ + tokens: List[str] = split_outside_bracket(line) + for i, token in enumerate(tokens): + tokens[i] = token.strip(' ') + if token == "cls": + tokens[i] = "self" + elif i > 0 and ("self" == tokens[i - 1]) and (tokens[i][0] != "*"): + # Remove the datapipe after 'self' or 'cls' unless it has '*' + tokens[i] = "" + elif "Callable =" in token: # Remove default argument if it is a function + head, default_arg = token.rsplit("=", 2) + tokens[i] = head.strip(' ') + "= ..." + tokens = [t for t in tokens if t != ""] + line = ', '.join(tokens) + return line + + +def get_method_definitions(file_path: Union[str, List[str]], + files_to_exclude: Set[str], + deprecated_files: Set[str], + default_output_type: str, + method_to_special_output_type: Dict[str, str], + root: str = "") -> List[str]: + """ + #.pyi generation for functional DataPipes Process. + + # 1. Find files that we want to process (exclude the ones who don't) + # 2. Parse method name and signature + # 3. 
Remove first argument after self (unless it is "*datapipes"), default args, and spaces + """ + if root == "": + root = str(pathlib.Path(__file__).parent.resolve()) + file_path = [file_path] if isinstance(file_path, str) else file_path + file_path = [os.path.join(root, path) for path in file_path] + file_paths = find_file_paths(file_path, + files_to_exclude=files_to_exclude.union(deprecated_files)) + methods_and_signatures, methods_and_class_names, methods_w_special_output_types, methods_and_doc_strings = \ + parse_datapipe_files(file_paths) + + for fn_name in method_to_special_output_type: + if fn_name not in methods_w_special_output_types: + methods_w_special_output_types.add(fn_name) + + method_definitions = [] + for method_name, arguments in methods_and_signatures.items(): + class_name = methods_and_class_names[method_name] + if method_name in methods_w_special_output_types: + output_type = method_to_special_output_type[method_name] + else: + output_type = default_output_type + doc_string = "".join(methods_and_doc_strings[method_name]) + if doc_string == "": + doc_string = " ...\n" + method_definitions.append(f"# Functional form of '{class_name}'\n" + f"def {method_name}({arguments}) -> {output_type}:\n" + f"{doc_string}") + method_definitions.sort(key=lambda s: s.split('\n')[1]) # sorting based on method_name + + return method_definitions + + +# Defined outside of main() so they can be imported by TorchData +iterDP_file_path: str = "iter" +iterDP_files_to_exclude: Set[str] = {"__init__.py", "utils.py"} +iterDP_deprecated_files: Set[str] = set() +iterDP_method_to_special_output_type: Dict[str, str] = {"demux": "List[IterDataPipe]", "fork": "List[IterDataPipe]"} + +mapDP_file_path: str = "map" +mapDP_files_to_exclude: Set[str] = {"__init__.py", "utils.py"} +mapDP_deprecated_files: Set[str] = set() +mapDP_method_to_special_output_type: Dict[str, str] = {"shuffle": "IterDataPipe"} + + +def main() -> None: + """ + # Inject file into template datapipe.pyi.in. + + TODO: The current implementation of this script only generates interfaces for built-in methods. To generate + interface for user-defined DataPipes, consider changing `IterDataPipe.register_datapipe_as_function`. 
+ """ + iter_method_definitions = get_method_definitions(iterDP_file_path, iterDP_files_to_exclude, iterDP_deprecated_files, + "IterDataPipe", iterDP_method_to_special_output_type) + + map_method_definitions = get_method_definitions(mapDP_file_path, mapDP_files_to_exclude, mapDP_deprecated_files, + "MapDataPipe", mapDP_method_to_special_output_type) + + path = pathlib.Path(__file__).parent.resolve() + replacements = [('${IterDataPipeMethods}', iter_method_definitions, 4), + ('${MapDataPipeMethods}', map_method_definitions, 4)] + gen_from_template(dir=str(path), + template_name="datapipe.pyi.in", + output_name="datapipe.pyi", + replacements=replacements) + + +if __name__ == '__main__': + main() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/dataset.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..554bf90d108bdd4e76e1e0e001be960dc9b41255 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/dataset.py @@ -0,0 +1,488 @@ +import bisect +import itertools +import math +import warnings +from typing import ( + cast, + Dict, + Generic, + Iterable, + List, + Optional, + Sequence, + Tuple, + TypeVar, + Union, +) + +# No 'default_generator' in torch/__init__.pyi +from torch import default_generator, randperm + +from ... import Generator, Tensor + +__all__ = [ + "Dataset", + "IterableDataset", + "TensorDataset", + "StackDataset", + "ConcatDataset", + "ChainDataset", + "Subset", + "random_split", +] + +T_co = TypeVar("T_co", covariant=True) +T = TypeVar("T") +T_dict = Dict[str, T_co] +T_tuple = Tuple[T_co, ...] +T_stack = TypeVar("T_stack", T_tuple, T_dict) + + +class Dataset(Generic[T_co]): + r"""An abstract class representing a :class:`Dataset`. + + All datasets that represent a map from keys to data samples should subclass + it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a + data sample for a given key. Subclasses could also optionally overwrite + :meth:`__len__`, which is expected to return the size of the dataset by many + :class:`~torch.utils.data.Sampler` implementations and the default options + of :class:`~torch.utils.data.DataLoader`. Subclasses could also + optionally implement :meth:`__getitems__`, for speedup batched samples + loading. This method accepts list of indices of samples of batch and returns + list of samples. + + .. note:: + :class:`~torch.utils.data.DataLoader` by default constructs an index + sampler that yields integral indices. To make it work with a map-style + dataset with non-integral indices/keys, a custom sampler must be provided. + """ + + def __getitem__(self, index) -> T_co: + raise NotImplementedError("Subclasses of Dataset should implement __getitem__.") + + # def __getitems__(self, indices: List) -> List[T_co]: + # Not implemented to prevent false-positives in fetcher check in + # torch.utils.data._utils.fetch._MapDatasetFetcher + + def __add__(self, other: "Dataset[T_co]") -> "ConcatDataset[T_co]": + return ConcatDataset([self, other]) + + # No `def __len__(self)` default? + # See NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ] + # in pytorch/torch/utils/data/sampler.py + + +class IterableDataset(Dataset[T_co], Iterable[T_co]): + r"""An iterable Dataset. + + All datasets that represent an iterable of data samples should subclass it. + Such form of datasets is particularly useful when data come from a stream. 
+ + All subclasses should overwrite :meth:`__iter__`, which would return an + iterator of samples in this dataset. + + When a subclass is used with :class:`~torch.utils.data.DataLoader`, each + item in the dataset will be yielded from the :class:`~torch.utils.data.DataLoader` + iterator. When :attr:`num_workers > 0`, each worker process will have a + different copy of the dataset object, so it is often desired to configure + each copy independently to avoid having duplicate data returned from the + workers. :func:`~torch.utils.data.get_worker_info`, when called in a worker + process, returns information about the worker. It can be used in either the + dataset's :meth:`__iter__` method or the :class:`~torch.utils.data.DataLoader` 's + :attr:`worker_init_fn` option to modify each copy's behavior. + + Example 1: splitting workload across all workers in :meth:`__iter__`:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_DATALOADER) + >>> # xdoctest: +SKIP("Fails on MacOS12") + >>> class MyIterableDataset(torch.utils.data.IterableDataset): + ... def __init__(self, start, end): + ... super(MyIterableDataset).__init__() + ... assert end > start, "this example code only works with end >= start" + ... self.start = start + ... self.end = end + ... + ... def __iter__(self): + ... worker_info = torch.utils.data.get_worker_info() + ... if worker_info is None: # single-process data loading, return the full iterator + ... iter_start = self.start + ... iter_end = self.end + ... else: # in a worker process + ... # split workload + ... per_worker = int(math.ceil((self.end - self.start) / float(worker_info.num_workers))) + ... worker_id = worker_info.id + ... iter_start = self.start + worker_id * per_worker + ... iter_end = min(iter_start + per_worker, self.end) + ... return iter(range(iter_start, iter_end)) + ... + >>> # should give same set of data as range(3, 7), i.e., [3, 4, 5, 6]. + >>> ds = MyIterableDataset(start=3, end=7) + + >>> # Single-process loading + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=0))) + [tensor([3]), tensor([4]), tensor([5]), tensor([6])] + + >>> # xdoctest: +REQUIRES(POSIX) + >>> # Mult-process loading with two worker processes + >>> # Worker 0 fetched [3, 4]. Worker 1 fetched [5, 6]. + >>> # xdoctest: +IGNORE_WANT("non deterministic") + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2))) + [tensor([3]), tensor([5]), tensor([4]), tensor([6])] + + >>> # With even more workers + >>> # xdoctest: +IGNORE_WANT("non deterministic") + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=12))) + [tensor([3]), tensor([5]), tensor([4]), tensor([6])] + + Example 2: splitting workload across all workers using :attr:`worker_init_fn`:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_DATALOADER) + >>> class MyIterableDataset(torch.utils.data.IterableDataset): + ... def __init__(self, start, end): + ... super(MyIterableDataset).__init__() + ... assert end > start, "this example code only works with end >= start" + ... self.start = start + ... self.end = end + ... + ... def __iter__(self): + ... return iter(range(self.start, self.end)) + ... + >>> # should give same set of data as range(3, 7), i.e., [3, 4, 5, 6]. 
+ >>> ds = MyIterableDataset(start=3, end=7) + + >>> # Single-process loading + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=0))) + [3, 4, 5, 6] + >>> + >>> # Directly doing multi-process loading yields duplicate data + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2))) + [3, 3, 4, 4, 5, 5, 6, 6] + + >>> # Define a `worker_init_fn` that configures each dataset copy differently + >>> def worker_init_fn(worker_id): + ... worker_info = torch.utils.data.get_worker_info() + ... dataset = worker_info.dataset # the dataset copy in this worker process + ... overall_start = dataset.start + ... overall_end = dataset.end + ... # configure the dataset to only process the split workload + ... per_worker = int(math.ceil((overall_end - overall_start) / float(worker_info.num_workers))) + ... worker_id = worker_info.id + ... dataset.start = overall_start + worker_id * per_worker + ... dataset.end = min(dataset.start + per_worker, overall_end) + ... + + >>> # Mult-process loading with the custom `worker_init_fn` + >>> # Worker 0 fetched [3, 4]. Worker 1 fetched [5, 6]. + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2, worker_init_fn=worker_init_fn))) + [3, 5, 4, 6] + + >>> # With even more workers + >>> print(list(torch.utils.data.DataLoader(ds, num_workers=12, worker_init_fn=worker_init_fn))) + [3, 4, 5, 6] + """ + + def __add__(self, other: Dataset[T_co]): + return ChainDataset([self, other]) + + # No `def __len__(self)` default? Subclasses raise `TypeError` when needed. + # See NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ] + + +class TensorDataset(Dataset[Tuple[Tensor, ...]]): + r"""Dataset wrapping tensors. + + Each sample will be retrieved by indexing tensors along the first dimension. + + Args: + *tensors (Tensor): tensors that have the same size of the first dimension. + """ + + tensors: Tuple[Tensor, ...] + + def __init__(self, *tensors: Tensor) -> None: + assert all( + tensors[0].size(0) == tensor.size(0) for tensor in tensors + ), "Size mismatch between tensors" + self.tensors = tensors + + def __getitem__(self, index): + return tuple(tensor[index] for tensor in self.tensors) + + def __len__(self): + return self.tensors[0].size(0) + + +class StackDataset(Dataset[T_stack]): + r"""Dataset as a stacking of multiple datasets. + + This class is useful to assemble different parts of complex input data, given as datasets. + + Example: + >>> # xdoctest: +SKIP + >>> images = ImageDataset() + >>> texts = TextDataset() + >>> tuple_stack = StackDataset(images, texts) + >>> tuple_stack[0] == (images[0], texts[0]) + >>> dict_stack = StackDataset(image=images, text=texts) + >>> dict_stack[0] == {'image': images[0], 'text': texts[0]} + + Args: + *args (Dataset): Datasets for stacking returned as tuple. + **kwargs (Dataset): Datasets for stacking returned as dict. + """ + + datasets: Union[tuple, dict] + + def __init__(self, *args: Dataset[T_co], **kwargs: Dataset[T_co]) -> None: + if args: + if kwargs: + raise ValueError( + "Supported either ``tuple``- (via ``args``) or" + "``dict``- (via ``kwargs``) like input/output, but both types are given." 
+ ) + self._length = len(args[0]) # type: ignore[arg-type] + if any(self._length != len(dataset) for dataset in args): # type: ignore[arg-type] + raise ValueError("Size mismatch between datasets") + self.datasets = args + elif kwargs: + tmp = list(kwargs.values()) + self._length = len(tmp[0]) # type: ignore[arg-type] + if any(self._length != len(dataset) for dataset in tmp): # type: ignore[arg-type] + raise ValueError("Size mismatch between datasets") + self.datasets = kwargs + else: + raise ValueError("At least one dataset should be passed") + + def __getitem__(self, index): + if isinstance(self.datasets, dict): + return {k: dataset[index] for k, dataset in self.datasets.items()} + return tuple(dataset[index] for dataset in self.datasets) + + def __getitems__(self, indices: list): + # add batched sampling support when parent datasets supports it. + if isinstance(self.datasets, dict): + dict_batch: List[T_dict] = [{} for _ in indices] + for k, dataset in self.datasets.items(): + if callable(getattr(dataset, "__getitems__", None)): + items = dataset.__getitems__(indices) # type: ignore[attr-defined] + if len(items) != len(indices): + raise ValueError( + "Nested dataset's output size mismatch." + f" Expected {len(indices)}, got {len(items)}" + ) + for data, d_sample in zip(items, dict_batch): + d_sample[k] = data + else: + for idx, d_sample in zip(indices, dict_batch): + d_sample[k] = dataset[idx] + return dict_batch + + # tuple data + list_batch: List[list] = [[] for _ in indices] + for dataset in self.datasets: + if callable(getattr(dataset, "__getitems__", None)): + items = dataset.__getitems__(indices) # type: ignore[attr-defined] + if len(items) != len(indices): + raise ValueError( + "Nested dataset's output size mismatch." + f" Expected {len(indices)}, got {len(items)}" + ) + for data, t_sample in zip(items, list_batch): + t_sample.append(data) + else: + for idx, t_sample in zip(indices, list_batch): + t_sample.append(dataset[idx]) + tuple_batch: List[T_tuple] = [tuple(sample) for sample in list_batch] + return tuple_batch + + def __len__(self): + return self._length + + +class ConcatDataset(Dataset[T_co]): + r"""Dataset as a concatenation of multiple datasets. + + This class is useful to assemble different existing datasets. 
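+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> # Illustrative sketch: concatenating two small map-style datasets.
+        >>> ds = ConcatDataset([TensorDataset(torch.zeros(3, 2)), TensorDataset(torch.ones(2, 2))])
+        >>> len(ds)
+        5
+        >>> ds[3]  # falls into the second dataset
+        (tensor([1., 1.]),)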
+ + Args: + datasets (sequence): List of datasets to be concatenated + """ + + datasets: List[Dataset[T_co]] + cumulative_sizes: List[int] + + @staticmethod + def cumsum(sequence): + r, s = [], 0 + for e in sequence: + l = len(e) + r.append(l + s) + s += l + return r + + def __init__(self, datasets: Iterable[Dataset]) -> None: + super().__init__() + self.datasets = list(datasets) + assert len(self.datasets) > 0, "datasets should not be an empty iterable" # type: ignore[arg-type] + for d in self.datasets: + assert not isinstance( + d, IterableDataset + ), "ConcatDataset does not support IterableDataset" + self.cumulative_sizes = self.cumsum(self.datasets) + + def __len__(self): + return self.cumulative_sizes[-1] + + def __getitem__(self, idx): + if idx < 0: + if -idx > len(self): + raise ValueError( + "absolute value of index should not exceed dataset length" + ) + idx = len(self) + idx + dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) + if dataset_idx == 0: + sample_idx = idx + else: + sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] + return self.datasets[dataset_idx][sample_idx] + + @property + def cummulative_sizes(self): + warnings.warn( + "cummulative_sizes attribute is renamed to " "cumulative_sizes", + DeprecationWarning, + stacklevel=2, + ) + return self.cumulative_sizes + + +class ChainDataset(IterableDataset): + r"""Dataset for chaining multiple :class:`IterableDataset` s. + + This class is useful to assemble different existing dataset streams. The + chaining operation is done on-the-fly, so concatenating large-scale + datasets with this class will be efficient. + + Args: + datasets (iterable of IterableDataset): datasets to be chained together + """ + + def __init__(self, datasets: Iterable[Dataset]) -> None: + super().__init__() + self.datasets = datasets + + def __iter__(self): + for d in self.datasets: + assert isinstance( + d, IterableDataset + ), "ChainDataset only supports IterableDataset" + yield from d + + def __len__(self): + total = 0 + for d in self.datasets: + assert isinstance( + d, IterableDataset + ), "ChainDataset only supports IterableDataset" + total += len(d) # type: ignore[arg-type] + return total + + +class Subset(Dataset[T_co]): + r""" + Subset of a dataset at specified indices. + + Args: + dataset (Dataset): The whole Dataset + indices (sequence): Indices in the whole set selected for subset + """ + + dataset: Dataset[T_co] + indices: Sequence[int] + + def __init__(self, dataset: Dataset[T_co], indices: Sequence[int]) -> None: + self.dataset = dataset + self.indices = indices + + def __getitem__(self, idx): + if isinstance(idx, list): + return self.dataset[[self.indices[i] for i in idx]] + return self.dataset[self.indices[idx]] + + def __getitems__(self, indices: List[int]) -> List[T_co]: + # add batched sampling support when parent dataset supports it. + # see torch.utils.data._utils.fetch._MapDatasetFetcher + if callable(getattr(self.dataset, "__getitems__", None)): + return self.dataset.__getitems__([self.indices[idx] for idx in indices]) # type: ignore[attr-defined] + else: + return [self.dataset[self.indices[idx]] for idx in indices] + + def __len__(self): + return len(self.indices) + + +def random_split( + dataset: Dataset[T], + lengths: Sequence[Union[int, float]], + generator: Optional[Generator] = default_generator, +) -> List[Subset[T]]: + r""" + Randomly split a dataset into non-overlapping new datasets of given lengths. 
+ + If a list of fractions that sum up to 1 is given, + the lengths will be computed automatically as + floor(frac * len(dataset)) for each fraction provided. + + After computing the lengths, if there are any remainders, 1 count will be + distributed in round-robin fashion to the lengths + until there are no remainders left. + + Optionally fix the generator for reproducible results, e.g.: + + Example: + >>> # xdoctest: +SKIP + >>> generator1 = torch.Generator().manual_seed(42) + >>> generator2 = torch.Generator().manual_seed(42) + >>> random_split(range(10), [3, 7], generator=generator1) + >>> random_split(range(30), [0.3, 0.3, 0.4], generator=generator2) + + Args: + dataset (Dataset): Dataset to be split + lengths (sequence): lengths or fractions of splits to be produced + generator (Generator): Generator used for the random permutation. + """ + if math.isclose(sum(lengths), 1) and sum(lengths) <= 1: + subset_lengths: List[int] = [] + for i, frac in enumerate(lengths): + if frac < 0 or frac > 1: + raise ValueError(f"Fraction at index {i} is not between 0 and 1") + n_items_in_split = int( + math.floor(len(dataset) * frac) # type: ignore[arg-type] + ) + subset_lengths.append(n_items_in_split) + remainder = len(dataset) - sum(subset_lengths) # type: ignore[arg-type] + # add 1 to all the lengths in round-robin fashion until the remainder is 0 + for i in range(remainder): + idx_to_add_at = i % len(subset_lengths) + subset_lengths[idx_to_add_at] += 1 + lengths = subset_lengths + for i, length in enumerate(lengths): + if length == 0: + warnings.warn( + f"Length of split at index {i} is 0. " + f"This might result in an empty dataset." + ) + + # Cannot verify that dataset is Sized + if sum(lengths) != len(dataset): # type: ignore[arg-type] + raise ValueError( + "Sum of input lengths does not equal the length of the input dataset!" + ) + + indices = randperm(sum(lengths), generator=generator).tolist() # type: ignore[arg-type, call-overload] + lengths = cast(Sequence[int], lengths) + return [ + Subset(dataset, indices[offset - length : offset]) + for offset, length in zip(itertools.accumulate(lengths), lengths) + ] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/distributed.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..3d2141b8430fc59a92a0a07466630708b8cb4be7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/distributed.py @@ -0,0 +1,137 @@ +import math +from typing import TypeVar, Optional, Iterator + +import torch +from . import Sampler, Dataset +import torch.distributed as dist + +__all__ = ["DistributedSampler", ] + +T_co = TypeVar('T_co', covariant=True) + + +class DistributedSampler(Sampler[T_co]): + r"""Sampler that restricts data loading to a subset of the dataset. + + It is especially useful in conjunction with + :class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each + process can pass a :class:`~torch.utils.data.DistributedSampler` instance as a + :class:`~torch.utils.data.DataLoader` sampler, and load a subset of the + original dataset that is exclusive to it. + + .. note:: + Dataset is assumed to be of constant size and that any instance of it always + returns the same elements in the same order. + + Args: + dataset: Dataset used for sampling. + num_replicas (int, optional): Number of processes participating in + distributed training. By default, :attr:`world_size` is retrieved from the + current distributed group. 
+ rank (int, optional): Rank of the current process within :attr:`num_replicas`. + By default, :attr:`rank` is retrieved from the current distributed + group. + shuffle (bool, optional): If ``True`` (default), sampler will shuffle the + indices. + seed (int, optional): random seed used to shuffle the sampler if + :attr:`shuffle=True`. This number should be identical across all + processes in the distributed group. Default: ``0``. + drop_last (bool, optional): if ``True``, then the sampler will drop the + tail of the data to make it evenly divisible across the number of + replicas. If ``False``, the sampler will add extra indices to make + the data evenly divisible across the replicas. Default: ``False``. + + .. warning:: + In distributed mode, calling the :meth:`set_epoch` method at + the beginning of each epoch **before** creating the :class:`DataLoader` iterator + is necessary to make shuffling work properly across multiple epochs. Otherwise, + the same ordering will be always used. + + Example:: + + >>> # xdoctest: +SKIP + >>> sampler = DistributedSampler(dataset) if is_distributed else None + >>> loader = DataLoader(dataset, shuffle=(sampler is None), + ... sampler=sampler) + >>> for epoch in range(start_epoch, n_epochs): + ... if is_distributed: + ... sampler.set_epoch(epoch) + ... train(loader) + """ + + def __init__(self, dataset: Dataset, num_replicas: Optional[int] = None, + rank: Optional[int] = None, shuffle: bool = True, + seed: int = 0, drop_last: bool = False) -> None: + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = dist.get_rank() + if rank >= num_replicas or rank < 0: + raise ValueError( + f"Invalid rank {rank}, rank should be in the interval [0, {num_replicas - 1}]") + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.epoch = 0 + self.drop_last = drop_last + # If the dataset length is evenly divisible by # of replicas, then there + # is no need to drop any data, since the dataset will be split equally. + if self.drop_last and len(self.dataset) % self.num_replicas != 0: # type: ignore[arg-type] + # Split to nearest available length that is evenly divisible. + # This is to ensure each rank receives the same amount of data when + # using this Sampler. + self.num_samples = math.ceil( + (len(self.dataset) - self.num_replicas) / self.num_replicas # type: ignore[arg-type] + ) + else: + self.num_samples = math.ceil(len(self.dataset) / self.num_replicas) # type: ignore[arg-type] + self.total_size = self.num_samples * self.num_replicas + self.shuffle = shuffle + self.seed = seed + + def __iter__(self) -> Iterator[T_co]: + if self.shuffle: + # deterministically shuffle based on epoch and seed + g = torch.Generator() + g.manual_seed(self.seed + self.epoch) + indices = torch.randperm(len(self.dataset), generator=g).tolist() # type: ignore[arg-type] + else: + indices = list(range(len(self.dataset))) # type: ignore[arg-type] + + if not self.drop_last: + # add extra samples to make it evenly divisible + padding_size = self.total_size - len(indices) + if padding_size <= len(indices): + indices += indices[:padding_size] + else: + indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size] + else: + # remove tail of data to make it evenly divisible. 
+ indices = indices[:self.total_size] + assert len(indices) == self.total_size + + # subsample + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self) -> int: + return self.num_samples + + def set_epoch(self, epoch: int) -> None: + r""" + Set the epoch for this sampler. + + When :attr:`shuffle=True`, this ensures all replicas + use a different random ordering for each epoch. Otherwise, the next iteration of this + sampler will yield the same ordering. + + Args: + epoch (int): Epoch number. + """ + self.epoch = epoch diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/graph.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/graph.py new file mode 100644 index 0000000000000000000000000000000000000000..cd78db474d5e06187b8b7ec63da6e33b7619f798 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/graph.py @@ -0,0 +1,149 @@ +import io +import pickle +import warnings + +from collections.abc import Collection +from typing import Dict, List, Optional, Set, Tuple, Type, Union + +from torch.utils.data import IterDataPipe, MapDataPipe +from torch.utils._import_utils import dill_available + + +__all__ = ["traverse", "traverse_dps"] + +DataPipe = Union[IterDataPipe, MapDataPipe] +DataPipeGraph = Dict[int, Tuple[DataPipe, "DataPipeGraph"]] # type: ignore[misc] + + +def _stub_unpickler(): + return "STUB" + + +# TODO(VitalyFedyunin): Make sure it works without dill module installed +def _list_connected_datapipes(scan_obj: DataPipe, only_datapipe: bool, cache: Set[int]) -> List[DataPipe]: + f = io.BytesIO() + p = pickle.Pickler(f) # Not going to work for lambdas, but dill infinite loops on typing and can't be used as is + if dill_available(): + from dill import Pickler as dill_Pickler + d = dill_Pickler(f) + else: + d = None + + captured_connections = [] + + def getstate_hook(ori_state): + state = None + if isinstance(ori_state, dict): + state = {} # type: ignore[assignment] + for k, v in ori_state.items(): + if isinstance(v, (IterDataPipe, MapDataPipe, Collection)): + state[k] = v # type: ignore[attr-defined] + elif isinstance(ori_state, (tuple, list)): + state = [] # type: ignore[assignment] + for v in ori_state: + if isinstance(v, (IterDataPipe, MapDataPipe, Collection)): + state.append(v) # type: ignore[attr-defined] + elif isinstance(ori_state, (IterDataPipe, MapDataPipe, Collection)): + state = ori_state # type: ignore[assignment] + return state + + def reduce_hook(obj): + if obj == scan_obj or id(obj) in cache: + raise NotImplementedError + else: + captured_connections.append(obj) + # Adding id to remove duplicate DataPipe serialized at the same level + cache.add(id(obj)) + return _stub_unpickler, () + + datapipe_classes: Tuple[Type[DataPipe]] = (IterDataPipe, MapDataPipe) # type: ignore[assignment] + + try: + for cls in datapipe_classes: + cls.set_reduce_ex_hook(reduce_hook) + if only_datapipe: + cls.set_getstate_hook(getstate_hook) + try: + p.dump(scan_obj) + except (pickle.PickleError, AttributeError, TypeError): + if dill_available(): + d.dump(scan_obj) + else: + raise + finally: + for cls in datapipe_classes: + cls.set_reduce_ex_hook(None) + if only_datapipe: + cls.set_getstate_hook(None) + if dill_available(): + from dill import extend as dill_extend + dill_extend(False) # Undo change to dispatch table + return captured_connections + + +def traverse_dps(datapipe: DataPipe) -> DataPipeGraph: + r""" + Traverse the DataPipes and their attributes 
to extract the DataPipe graph. + + This only looks into the attribute from each DataPipe that is either a + DataPipe and a Python collection object such as ``list``, ``tuple``, + ``set`` and ``dict``. + + Args: + datapipe: the end DataPipe of the graph + Returns: + A graph represented as a nested dictionary, where keys are ids of DataPipe instances + and values are tuples of DataPipe instance and the sub-graph + """ + cache: Set[int] = set() + return _traverse_helper(datapipe, only_datapipe=True, cache=cache) + + +def traverse(datapipe: DataPipe, only_datapipe: Optional[bool] = None) -> DataPipeGraph: + r""" + Traverse the DataPipes and their attributes to extract the DataPipe graph. + + [Deprecated] + When ``only_dataPipe`` is specified as ``True``, it would only look into the + attribute from each DataPipe that is either a DataPipe and a Python collection object + such as ``list``, ``tuple``, ``set`` and ``dict``. + + Note: + This function is deprecated. Please use `traverse_dps` instead. + + Args: + datapipe: the end DataPipe of the graph + only_datapipe: If ``False`` (default), all attributes of each DataPipe are traversed. + This argument is deprecating and will be removed after the next release. + Returns: + A graph represented as a nested dictionary, where keys are ids of DataPipe instances + and values are tuples of DataPipe instance and the sub-graph + """ + msg = "`traverse` function and will be removed after 1.13. " \ + "Please use `traverse_dps` instead." + if not only_datapipe: + msg += " And, the behavior will be changed to the equivalent of `only_datapipe=True`." + warnings.warn(msg, FutureWarning) + if only_datapipe is None: + only_datapipe = False + cache: Set[int] = set() + return _traverse_helper(datapipe, only_datapipe, cache) + + +# Add cache here to prevent infinite recursion on DataPipe +def _traverse_helper(datapipe: DataPipe, only_datapipe: bool, cache: Set[int]) -> DataPipeGraph: + if not isinstance(datapipe, (IterDataPipe, MapDataPipe)): + raise RuntimeError(f"Expected `IterDataPipe` or `MapDataPipe`, but {type(datapipe)} is found") + + dp_id = id(datapipe) + if dp_id in cache: + return {} + cache.add(dp_id) + # Using cache.copy() here is to prevent the same DataPipe pollutes the cache on different paths + items = _list_connected_datapipes(datapipe, only_datapipe, cache.copy()) + d: DataPipeGraph = {dp_id: (datapipe, {})} + for item in items: + # Using cache.copy() here is to prevent recursion on a single path rather than global graph + # Single DataPipe can present multiple times in different paths in graph + d[dp_id][1].update(_traverse_helper(item, only_datapipe, cache.copy())) + return d diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/graph_settings.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/graph_settings.py new file mode 100644 index 0000000000000000000000000000000000000000..4b42cc6065a788e18afd38aea6fe6cdf63214430 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/graph_settings.py @@ -0,0 +1,160 @@ +import inspect +import warnings + +from typing import Any, List, Optional, Set + +import torch + +from torch.utils.data.datapipes.iter.sharding import ( + _ShardingIterDataPipe, + SHARDING_PRIORITIES, +) +from torch.utils.data.graph import DataPipe, DataPipeGraph, traverse_dps + +__all__ = [ + "apply_random_seed", + "apply_sharding", + "apply_shuffle_seed", + "apply_shuffle_settings", + "get_all_graph_pipes", +] + + +def get_all_graph_pipes(graph: DataPipeGraph) -> List[DataPipe]: + return 
_get_all_graph_pipes_helper(graph, set()) + + +def _get_all_graph_pipes_helper(graph: DataPipeGraph, id_cache: Set[int]) -> List[DataPipe]: + results: List[DataPipe] = [] + for dp_id, (datapipe, sub_graph) in graph.items(): + if dp_id in id_cache: + continue + id_cache.add(dp_id) + results.append(datapipe) + results.extend(_get_all_graph_pipes_helper(sub_graph, id_cache)) + return results + + +def _is_sharding_datapipe(datapipe: DataPipe) -> bool: + if isinstance(datapipe, _ShardingIterDataPipe): + return True + if hasattr(datapipe, "apply_sharding") and inspect.ismethod(datapipe.apply_sharding): + return True + return False + + +def apply_sharding(datapipe: DataPipe, + num_of_instances: int, + instance_id: int, + sharding_group=SHARDING_PRIORITIES.DEFAULT) -> DataPipe: + r""" + Apply dynamic sharding over the ``sharding_filter`` DataPipe that has a method ``apply_sharding``. + + RuntimeError will be raised when multiple ``sharding_filter`` are presented in the same branch. + """ + graph = traverse_dps(datapipe) + + def _helper(graph, prev_applied=None): + for (dp, sub_graph) in graph.values(): + applied = None + if _is_sharding_datapipe(dp): + if prev_applied is not None: + raise RuntimeError("Sharding twice on a single pipeline is likely unintended and will cause data loss. " + f"Sharding already applied to {prev_applied} while trying to apply to {dp}") + # For BC, only provide sharding_group if accepted + sig = inspect.signature(dp.apply_sharding) + if len(sig.parameters) < 3: + dp.apply_sharding(num_of_instances, instance_id) + else: + dp.apply_sharding(num_of_instances, instance_id, sharding_group=sharding_group) + applied = dp + if applied is None: + applied = prev_applied + _helper(sub_graph, applied) + + _helper(graph) + + return datapipe + + +def _is_shuffle_datapipe(datapipe: DataPipe) -> bool: + if not hasattr(datapipe, "set_shuffle") or not hasattr(datapipe, "set_seed"): + return False + if not inspect.ismethod(datapipe.set_shuffle) or not inspect.ismethod(datapipe.set_seed): + return False + return True + + +def apply_shuffle_settings(datapipe: DataPipe, shuffle: Optional[bool] = None) -> DataPipe: + r""" + Traverse the graph of ``DataPipes`` to find and set shuffle attribute. + + Apply the method to each `DataPipe` that has APIs of ``set_shuffle`` + and ``set_seed``. + + Args: + datapipe: DataPipe that needs to set shuffle attribute + shuffle: Shuffle option (default: ``None`` and no-op to the graph) + """ + if shuffle is None: + return datapipe + + graph = traverse_dps(datapipe) + all_pipes = get_all_graph_pipes(graph) + shufflers = [pipe for pipe in all_pipes if _is_shuffle_datapipe(pipe)] + if not shufflers and shuffle: + warnings.warn( + "`shuffle=True` was set, but the datapipe does not contain a `Shuffler`. Adding one at the end. " + "Be aware that the default buffer size might not be sufficient for your task." + ) + datapipe = datapipe.shuffle() + shufflers = [datapipe, ] # type: ignore[list-item] + + for shuffler in shufflers: + shuffler.set_shuffle(shuffle) + + return datapipe + + +def apply_shuffle_seed(datapipe: DataPipe, rng: Any) -> DataPipe: + warnings.warn( + "`apply_shuffle_seed` is deprecated since 1.12 and will be removed in the future releases." + "\nPlease use `apply_random_seed` instead." 
+ ) + return apply_random_seed(datapipe, rng) + + +def _is_random_datapipe(datapipe: DataPipe) -> bool: + if hasattr(datapipe, "set_seed") and inspect.ismethod(datapipe.set_seed): + return True + return False + + +def apply_random_seed(datapipe: DataPipe, rng: torch.Generator) -> DataPipe: + r""" + Traverse the graph of ``DataPipes`` to find random ``DataPipe`` with an API of ``set_seed``. + + Then set the random seed based on the provided RNG to those ``DataPipe``. + + Args: + datapipe: DataPipe that needs to set randomness + rng: Random number generator to generate random seeds + """ + graph = traverse_dps(datapipe) + all_pipes = get_all_graph_pipes(graph) + # Using a set to track id of DataPipe to prevent setting randomness per DataPipe more than once. + # And, `id` is used in case of unhashable DataPipe + cache = set() + random_datapipes = [] + for pipe in all_pipes: + if id(pipe) in cache: + continue + if _is_random_datapipe(pipe): + random_datapipes.append(pipe) + cache.add(id(pipe)) + + for pipe in random_datapipes: + random_seed = int(torch.empty((), dtype=torch.int64).random_(generator=rng).item()) + pipe.set_seed(random_seed) + + return datapipe diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/sampler.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..dbd91d0ac1576fad6356062bb1f82deea9cd77ee --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/sampler.py @@ -0,0 +1,305 @@ +import torch +from torch import Tensor + +from typing import Iterator, Iterable, Optional, Sequence, List, TypeVar, Generic, Sized, Union + +__all__ = [ + "BatchSampler", + "RandomSampler", + "Sampler", + "SequentialSampler", + "SubsetRandomSampler", + "WeightedRandomSampler", +] + +T_co = TypeVar('T_co', covariant=True) + + +class Sampler(Generic[T_co]): + r"""Base class for all Samplers. + + Every Sampler subclass has to provide an :meth:`__iter__` method, providing a + way to iterate over indices or lists of indices (batches) of dataset elements, and a :meth:`__len__` method + that returns the length of the returned iterators. + + Args: + data_source (Dataset): This argument is not used and will be removed in 2.2.0. + You may still have custom implementation that utilizes it. + + Example: + >>> # xdoctest: +SKIP + >>> class AccedingSequenceLengthSampler(Sampler[int]): + >>> def __init__(self, data: List[str]) -> None: + >>> self.data = data + >>> + >>> def __len__(self) -> int: + >>> return len(self.data) + >>> + >>> def __iter__(self) -> Iterator[int]: + >>> sizes = torch.tensor([len(x) for x in self.data]) + >>> yield from torch.argsort(sizes).tolist() + >>> + >>> class AccedingSequenceLengthBatchSampler(Sampler[List[int]]): + >>> def __init__(self, data: List[str], batch_size: int) -> None: + >>> self.data = data + >>> self.batch_size = batch_size + >>> + >>> def __len__(self) -> int: + >>> return (len(self.data) + self.batch_size - 1) // self.batch_size + >>> + >>> def __iter__(self) -> Iterator[List[int]]: + >>> sizes = torch.tensor([len(x) for x in self.data]) + >>> for batch in torch.chunk(torch.argsort(sizes), len(self)): + >>> yield batch.tolist() + + .. note:: The :meth:`__len__` method isn't strictly required by + :class:`~torch.utils.data.DataLoader`, but is expected in any + calculation involving the length of a :class:`~torch.utils.data.DataLoader`. 
+ """ + + def __init__(self, data_source: Optional[Sized] = None) -> None: + if data_source is not None: + import warnings + + warnings.warn("`data_source` argument is not used and will be removed in 2.2.0." + "You may still have custom implementation that utilizes it.") + + def __iter__(self) -> Iterator[T_co]: + raise NotImplementedError + + # NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ] + # + # Many times we have an abstract class representing a collection/iterable of + # data, e.g., `torch.utils.data.Sampler`, with its subclasses optionally + # implementing a `__len__` method. In such cases, we must make sure to not + # provide a default implementation, because both straightforward default + # implementations have their issues: + # + # + `return NotImplemented`: + # Calling `len(subclass_instance)` raises: + # TypeError: 'NotImplementedType' object cannot be interpreted as an integer + # + # + `raise NotImplementedError()`: + # This prevents triggering some fallback behavior. E.g., the built-in + # `list(X)` tries to call `len(X)` first, and executes a different code + # path if the method is not found or `NotImplemented` is returned, while + # raising a `NotImplementedError` will propagate and make the call fail + # where it could have used `__iter__` to complete the call. + # + # Thus, the only two sensible things to do are + # + # + **not** provide a default `__len__`. + # + # + raise a `TypeError` instead, which is what Python uses when users call + # a method that is not defined on an object. + # (@ssnl verifies that this works on at least Python 3.7.) + + +class SequentialSampler(Sampler[int]): + r"""Samples elements sequentially, always in the same order. + + Args: + data_source (Dataset): dataset to sample from + """ + + data_source: Sized + + def __init__(self, data_source: Sized) -> None: + self.data_source = data_source + + def __iter__(self) -> Iterator[int]: + return iter(range(len(self.data_source))) + + def __len__(self) -> int: + return len(self.data_source) + + +class RandomSampler(Sampler[int]): + r"""Samples elements randomly. If without replacement, then sample from a shuffled dataset. + + If with replacement, then user can specify :attr:`num_samples` to draw. + + Args: + data_source (Dataset): dataset to sample from + replacement (bool): samples are drawn on-demand with replacement if ``True``, default=``False`` + num_samples (int): number of samples to draw, default=`len(dataset)`. + generator (Generator): Generator used in sampling. 
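+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> # Illustrative sketch: a fixed generator makes the sampling order reproducible.
+        >>> g = torch.Generator().manual_seed(42)
+        >>> indices = list(RandomSampler(range(5), generator=g))
+        >>> sorted(indices)  # a permutation of all dataset indices when replacement=False
+        [0, 1, 2, 3, 4]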
+ """ + + data_source: Sized + replacement: bool + + def __init__(self, data_source: Sized, replacement: bool = False, + num_samples: Optional[int] = None, generator=None) -> None: + self.data_source = data_source + self.replacement = replacement + self._num_samples = num_samples + self.generator = generator + + if not isinstance(self.replacement, bool): + raise TypeError(f"replacement should be a boolean value, but got replacement={self.replacement}") + + if not isinstance(self.num_samples, int) or self.num_samples <= 0: + raise ValueError(f"num_samples should be a positive integer value, but got num_samples={self.num_samples}") + + @property + def num_samples(self) -> int: + # dataset size might change at runtime + if self._num_samples is None: + return len(self.data_source) + return self._num_samples + + def __iter__(self) -> Iterator[int]: + n = len(self.data_source) + if self.generator is None: + seed = int(torch.empty((), dtype=torch.int64).random_().item()) + generator = torch.Generator() + generator.manual_seed(seed) + else: + generator = self.generator + + if self.replacement: + for _ in range(self.num_samples // 32): + yield from torch.randint(high=n, size=(32,), dtype=torch.int64, generator=generator).tolist() + yield from torch.randint(high=n, size=(self.num_samples % 32,), dtype=torch.int64, generator=generator).tolist() + else: + for _ in range(self.num_samples // n): + yield from torch.randperm(n, generator=generator).tolist() + yield from torch.randperm(n, generator=generator).tolist()[:self.num_samples % n] + + def __len__(self) -> int: + return self.num_samples + + +class SubsetRandomSampler(Sampler[int]): + r"""Samples elements randomly from a given list of indices, without replacement. + + Args: + indices (sequence): a sequence of indices + generator (Generator): Generator used in sampling. + """ + + indices: Sequence[int] + + def __init__(self, indices: Sequence[int], generator=None) -> None: + self.indices = indices + self.generator = generator + + def __iter__(self) -> Iterator[int]: + for i in torch.randperm(len(self.indices), generator=self.generator): + yield self.indices[i] + + def __len__(self) -> int: + return len(self.indices) + + +class WeightedRandomSampler(Sampler[int]): + r"""Samples elements from ``[0,..,len(weights)-1]`` with given probabilities (weights). + + Args: + weights (sequence) : a sequence of weights, not necessary summing up to one + num_samples (int): number of samples to draw + replacement (bool): if ``True``, samples are drawn with replacement. + If not, they are drawn without replacement, which means that when a + sample index is drawn for a row, it cannot be drawn again for that row. + generator (Generator): Generator used in sampling. 
+ + Example: + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> list(WeightedRandomSampler([0.1, 0.9, 0.4, 0.7, 3.0, 0.6], 5, replacement=True)) + [4, 4, 1, 4, 5] + >>> list(WeightedRandomSampler([0.9, 0.4, 0.05, 0.2, 0.3, 0.1], 5, replacement=False)) + [0, 1, 4, 3, 2] + """ + + weights: Tensor + num_samples: int + replacement: bool + + def __init__(self, weights: Sequence[float], num_samples: int, + replacement: bool = True, generator=None) -> None: + if not isinstance(num_samples, int) or isinstance(num_samples, bool) or \ + num_samples <= 0: + raise ValueError(f"num_samples should be a positive integer value, but got num_samples={num_samples}") + if not isinstance(replacement, bool): + raise ValueError(f"replacement should be a boolean value, but got replacement={replacement}") + + weights_tensor = torch.as_tensor(weights, dtype=torch.double) + if len(weights_tensor.shape) != 1: + raise ValueError("weights should be a 1d sequence but given " + f"weights have shape {tuple(weights_tensor.shape)}") + + self.weights = weights_tensor + self.num_samples = num_samples + self.replacement = replacement + self.generator = generator + + def __iter__(self) -> Iterator[int]: + rand_tensor = torch.multinomial(self.weights, self.num_samples, self.replacement, generator=self.generator) + yield from iter(rand_tensor.tolist()) + + def __len__(self) -> int: + return self.num_samples + + +class BatchSampler(Sampler[List[int]]): + r"""Wraps another sampler to yield a mini-batch of indices. + + Args: + sampler (Sampler or Iterable): Base sampler. Can be any iterable object + batch_size (int): Size of mini-batch. + drop_last (bool): If ``True``, the sampler will drop the last batch if + its size would be less than ``batch_size`` + + Example: + >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=False)) + [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] + >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=True)) + [[0, 1, 2], [3, 4, 5], [6, 7, 8]] + """ + + def __init__(self, sampler: Union[Sampler[int], Iterable[int]], batch_size: int, drop_last: bool) -> None: + # Since collections.abc.Iterable does not check for `__getitem__`, which + # is one way for an object to be an iterable, we don't do an `isinstance` + # check here. + if not isinstance(batch_size, int) or isinstance(batch_size, bool) or \ + batch_size <= 0: + raise ValueError(f"batch_size should be a positive integer value, but got batch_size={batch_size}") + if not isinstance(drop_last, bool): + raise ValueError(f"drop_last should be a boolean value, but got drop_last={drop_last}") + self.sampler = sampler + self.batch_size = batch_size + self.drop_last = drop_last + + def __iter__(self) -> Iterator[List[int]]: + # Implemented based on the benchmarking in https://github.com/pytorch/pytorch/pull/76951 + if self.drop_last: + sampler_iter = iter(self.sampler) + while True: + try: + batch = [next(sampler_iter) for _ in range(self.batch_size)] + yield batch + except StopIteration: + break + else: + batch = [0] * self.batch_size + idx_in_batch = 0 + for idx in self.sampler: + batch[idx_in_batch] = idx + idx_in_batch += 1 + if idx_in_batch == self.batch_size: + yield batch + idx_in_batch = 0 + batch = [0] * self.batch_size + if idx_in_batch > 0: + yield batch[:idx_in_batch] + + def __len__(self) -> int: + # Can only be called if self.sampler has __len__ implemented + # We cannot enforce this condition, so we turn off typechecking for the + # implementation below. 
+ # Somewhat related: see NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ] + if self.drop_last: + return len(self.sampler) // self.batch_size # type: ignore[arg-type] + else: + return (len(self.sampler) + self.batch_size - 1) // self.batch_size # type: ignore[arg-type] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/dlpack.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/dlpack.py new file mode 100644 index 0000000000000000000000000000000000000000..6bfa4b9f85bd6fc8bb8524926210b1e931e2bd50 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/dlpack.py @@ -0,0 +1,121 @@ +from typing import Any + +import torch +import enum + +from torch._C import _from_dlpack +from torch._C import _to_dlpack as to_dlpack + + +class DLDeviceType(enum.IntEnum): + # Enums as in DLPack specification (aten/src/ATen/dlpack.h) + kDLCPU = 1, + kDLGPU = 2, + kDLCPUPinned = 3, + kDLOpenCL = 4, + kDLVulkan = 7, + kDLMetal = 8, + kDLVPI = 9, + kDLROCM = 10, + kDLExtDev = 12, + kDLOneAPI = 14, + + +torch._C._add_docstr(to_dlpack, r"""to_dlpack(tensor) -> PyCapsule + +Returns an opaque object (a "DLPack capsule") representing the tensor. + +.. note:: + ``to_dlpack`` is a legacy DLPack interface. The capsule it returns + cannot be used for anything in Python other than use it as input to + ``from_dlpack``. The more idiomatic use of DLPack is to call + ``from_dlpack`` directly on the tensor object - this works when that + object has a ``__dlpack__`` method, which PyTorch and most other + libraries indeed have now. + +.. warning:: + Only call ``from_dlpack`` once per capsule produced with ``to_dlpack``. + Behavior when a capsule is consumed multiple times is undefined. + +Args: + tensor: a tensor to be exported + +The DLPack capsule shares the tensor's memory. +""") + + +# TODO: add a typing.Protocol to be able to tell Mypy that only objects with +# __dlpack__ and __dlpack_device__ methods are accepted. +def from_dlpack(ext_tensor: Any) -> 'torch.Tensor': + """from_dlpack(ext_tensor) -> Tensor + + Converts a tensor from an external library into a ``torch.Tensor``. + + The returned PyTorch tensor will share the memory with the input tensor + (which may have come from another library). Note that in-place operations + will therefore also affect the data of the input tensor. This may lead to + unexpected issues (e.g., other libraries may have read-only flags or + immutable data structures), so the user should only do this if they know + for sure that this is fine. + + Args: + ext_tensor (object with ``__dlpack__`` attribute, or a DLPack capsule): + The tensor or DLPack capsule to convert. + + If ``ext_tensor`` is a tensor (or ndarray) object, it must support + the ``__dlpack__`` protocol (i.e., have a ``ext_tensor.__dlpack__`` + method). Otherwise ``ext_tensor`` may be a DLPack capsule, which is + an opaque ``PyCapsule`` instance, typically produced by a + ``to_dlpack`` function or method. 
+ + Examples:: + + >>> import torch.utils.dlpack + >>> t = torch.arange(4) + + # Convert a tensor directly (supported in PyTorch >= 1.10) + >>> t2 = torch.from_dlpack(t) + >>> t2[:2] = -1 # show that memory is shared + >>> t2 + tensor([-1, -1, 2, 3]) + >>> t + tensor([-1, -1, 2, 3]) + + # The old-style DLPack usage, with an intermediate capsule object + >>> capsule = torch.utils.dlpack.to_dlpack(t) + >>> capsule + + >>> t3 = torch.from_dlpack(capsule) + >>> t3 + tensor([-1, -1, 2, 3]) + >>> t3[0] = -9 # now we're sharing memory between 3 tensors + >>> t3 + tensor([-9, -1, 2, 3]) + >>> t2 + tensor([-9, -1, 2, 3]) + >>> t + tensor([-9, -1, 2, 3]) + + """ + if hasattr(ext_tensor, '__dlpack__'): + device = ext_tensor.__dlpack_device__() + # device is either CUDA or ROCm, we need to pass the current + # stream + if device[0] in (DLDeviceType.kDLGPU, DLDeviceType.kDLROCM): + stream = torch.cuda.current_stream(f'cuda:{device[1]}') + # cuda_stream is the pointer to the stream and it is a public + # attribute, but it is not documented + # The array API specify that the default legacy stream must be passed + # with a value of 1 for CUDA + # https://data-apis.org/array-api/latest/API_specification/array_object.html?dlpack-self-stream-none#dlpack-self-stream-none + is_cuda = device[0] == DLDeviceType.kDLGPU + # Since pytorch is not using PTDS by default, lets directly pass + # the legacy stream + stream_ptr = 1 if is_cuda and stream.cuda_stream == 0 else stream.cuda_stream + dlpack = ext_tensor.__dlpack__(stream=stream_ptr) + else: + dlpack = ext_tensor.__dlpack__() + else: + # Old versions just call the converter + dlpack = ext_tensor + return _from_dlpack(dlpack) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/file_baton.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/file_baton.py new file mode 100644 index 0000000000000000000000000000000000000000..b55db82b8532b7a7679e267773156b39ae08b5e8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/file_baton.py @@ -0,0 +1,49 @@ +import os +import time + + +class FileBaton: + """A primitive, file-based synchronization utility.""" + + def __init__(self, lock_file_path, wait_seconds=0.1): + """ + Create a new :class:`FileBaton`. + + Args: + lock_file_path: The path to the file used for locking. + wait_seconds: The seconds to periodically sleep (spin) when + calling ``wait()``. + """ + self.lock_file_path = lock_file_path + self.wait_seconds = wait_seconds + self.fd = None + + def try_acquire(self): + """ + Try to atomically create a file under exclusive access. + + Returns: + True if the file could be created, else False. + """ + try: + self.fd = os.open(self.lock_file_path, os.O_CREAT | os.O_EXCL) + return True + except FileExistsError: + return False + + def wait(self): + """ + Periodically sleeps for a certain amount until the baton is released. + + The amount of time slept depends on the ``wait_seconds`` parameter + passed to the constructor. 
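+
+        A minimal usage sketch (illustrative; the lock-file path below is
+        hypothetical)::
+
+            baton = FileBaton('/tmp/example_build.lock')
+            if baton.try_acquire():
+                try:
+                    ...  # do the work that must happen in exactly one process
+                finally:
+                    baton.release()
+            else:
+                baton.wait()  # spin until the holder calls release()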
+ """ + while os.path.exists(self.lock_file_path): + time.sleep(self.wait_seconds) + + def release(self): + """Release the baton and removes its file.""" + if self.fd is not None: + os.close(self.fd) + + os.remove(self.lock_file_path) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/flop_counter.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/flop_counter.py new file mode 100644 index 0000000000000000000000000000000000000000..c76a9a2432a7853a873b2184dc0fc3d44d6f4034 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/flop_counter.py @@ -0,0 +1,559 @@ +import torch +import torch.nn as nn +from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten +from typing import List, Any, Dict, Optional, Union, NamedTuple +from collections import defaultdict +from torch.utils._python_dispatch import TorchDispatchMode +from torch.utils.hooks import RemovableHandle +from torch._decomp import register_decomposition +from math import prod +from functools import wraps + + + +__all__ = ["FlopCounterMode", "register_flop_formula"] + +aten = torch.ops.aten + +def get_shape(i): + if isinstance(i, torch.Tensor): + return i.shape + return i + +flop_registry: Dict[Any, Any] = {} + +def shape_wrapper(f): + @wraps(f) + def nf(*args, out=None, **kwargs): + args, kwargs, out_shape = tree_map(get_shape, (args, kwargs, out)) + return f(*args, out_shape=out_shape, **kwargs) + return nf + +def register_flop_formula(targets, get_raw=False): + def register_fun(flop_formula): + if not get_raw: + flop_formula = shape_wrapper(flop_formula) + register_decomposition(targets, registry=flop_registry, unsafe=True)(flop_formula) + return flop_formula + + return register_fun + +@register_flop_formula(aten.mm) +def mm_flop(a_shape, b_shape, *args, out_shape=None, **kwargs) -> int: + """Count flops for matmul.""" + # Inputs should be a list of length 2. + # Inputs contains the shapes of two matrices. + m, k = a_shape + k2, n = b_shape + assert k == k2 + # NB(chilli): Should be 2 * k - 1 technically for FLOPs. + return m * n * 2 * k + +@register_flop_formula(aten.addmm) +def addmm_flop(self_shape, a_shape, b_shape, out_shape=None, **kwargs) -> int: + """Count flops for addmm.""" + return mm_flop(a_shape, b_shape) + +@register_flop_formula(aten.bmm) +def bmm_flop(a_shape, b_shape, out_shape=None, **kwargs) -> int: + """Count flops for the bmm operation.""" + # Inputs should be a list of length 2. + # Inputs contains the shapes of two tensor. + b, m, k = a_shape + b2, k2, n = b_shape + assert b == b2 + assert k == k2 + # NB(chilli): Should be 2 * k - 1 technically for FLOPs. + flop = b * m * n * 2 * k + return flop + +@register_flop_formula(aten.baddbmm) +def baddbmm_flop(self_shape, a_shape, b_shape, out_shape=None, **kwargs) -> int: + """Count flops for the baddbmm operation.""" + # Inputs should be a list of length 3. + # Inputs contains the shapes of three tensors. + return bmm_flop(a_shape, b_shape) + + +def conv_flop_count( + x_shape: List[int], + w_shape: List[int], + out_shape: List[int], + transposed: bool = False, +) -> int: + """Count flops for convolution. + + Note only multiplication is + counted. Computation for bias are ignored. + Flops for a transposed convolution are calculated as + flops = (x_shape[2:] * prod(w_shape) * batch_size). + Args: + x_shape (list(int)): The input shape before convolution. + w_shape (list(int)): The filter shape. + out_shape (list(int)): The output shape after convolution. 
+ transposed (bool): is the convolution transposed + Returns: + int: the number of flops + """ + + batch_size = x_shape[0] + conv_shape = (x_shape if transposed else out_shape)[2:] + c_out, c_in, *filter_size = w_shape + + """ + General idea here is that for a regular conv, for each point in the output + spatial dimension we convolve the filter with something (hence + `prod(conv_shape) * prod(filter_size)` ops). Then, this gets multiplied by + 1. batch_size, 2. the cross product of input and weight channels. + + For the transpose, it's not each point in the *output* spatial dimension but + each point in the *input* spatial dimension. + """ + # NB(chilli): I don't think this properly accounts for padding :think: + # NB(chilli): Should be 2 * c_in - 1 technically for FLOPs. + flop = prod(conv_shape) * prod(filter_size) * batch_size * c_out * c_in * 2 + return flop + +@register_flop_formula([aten.convolution, aten._convolution]) +def conv_flop(x_shape, w_shape, _bias, _stride, _padding, _dilation, transposed, *args, out_shape=None, **kwargs) -> int: + """Count flops for convolution.""" + return conv_flop_count(x_shape, w_shape, out_shape, transposed=transposed) + + +@register_flop_formula(aten.convolution_backward) +def conv_backward_flop( + grad_out_shape, + x_shape, + w_shape, + _bias, + _stride, + _padding, + _dilation, + transposed, + _output_padding, + _groups, + output_mask, + out_shape) -> int: + + def t(shape): + return [shape[1], shape[0]] + list(shape[2:]) + flop_count = 0 + + """ + Let's say we have a regular 1D conv + {A, B, C} [inp] + {i, j} [weight] + => (conv) + {Ai + Bj, Bi + Cj} [out] + + And as a reminder, the transposed conv of the above is + => {Ai, Aj + Bi, Bj + Ci, Cj} [transposed conv out] + + For the backwards of conv, we now have + {D, E} [grad_out] + {A, B, C} [inp] + {i, j} [weight] + + # grad_inp as conv_transpose(grad_out, weight) + Let's first compute grad_inp. To do so, we can simply look at all the + multiplications that each element of inp is involved in. For example, A is + only involved in the first element of the output (and thus only depends upon + D in grad_out), and C is only involved in the last element of the output + (and thus only depends upon E in grad_out) + + {Di, Dj + Ei, Ej} [grad_inp] + + Note that this corresponds to the below conv_transpose. This gives us the + output_mask[0] branch, which is grad_inp. + + {D, E} [inp (grad_out)] + {i, j} [weight] + => (conv_transpose) + {Di, Dj + Ei, Ej} [out (grad_inp)] + + I leave the fact that grad_inp for a transposed conv is just conv(grad_out, + weight) as an exercise for the reader. 
+ + # grad_weight as conv(inp, grad_out) + To compute grad_weight, we again look at the terms in the output, which as + a reminder is: + => {Ai + Bj, Bi + Cj} [out] + => {D, E} [grad_out] + If we manually compute the gradient for the weights, we see it's + {AD + BE, BD + CE} [grad_weight] + + This corresponds to the below conv + {A, B, C} [inp] + {D, E} [weight (grad_out)] + => (conv) + {AD + BE, BD + CE} [out (grad_weight)] + + # grad_weight of transposed conv as conv(grad_out, inp) + As a reminder, the terms of the output of a transposed conv are: + => {Ai, Aj + Bi, Bj + Ci, Cj} [transposed conv out] + => {D, E, F, G} [grad_out] + + Manually computing the gradient for the weights, we see it's + {AD + BE + CF, AE + BF + CG} [grad_weight] + + This corresponds to the below conv + {D, E, F, G} [inp (grad_out)] + {A, B, C} [weight (inp)] + => (conv) + {AD + BE + CF, AE + BF + CG} [out (grad_weight)] + + For the full backwards formula, there are also some details involving + transpose of the batch/channel dimensions and groups, but I skip those for + the sake of brevity (and they're pretty similar to matmul backwards) + + Check [conv backwards decomposition as conv forwards] + """ + # grad_inp as conv_transpose(grad_out, weight) + if output_mask[0]: + grad_input_shape = get_shape(out_shape[0]) + flop_count += conv_flop_count(grad_out_shape, w_shape, grad_input_shape, not transposed) + + if output_mask[1]: + grad_weight_shape = get_shape(out_shape[1]) + if transposed: + # grad_weight of transposed conv as conv(grad_out, inp) + flop_count += conv_flop_count(t(grad_out_shape), t(x_shape), t(grad_weight_shape), transposed=False) + else: + # grad_weight as conv(inp, grad_out) + flop_count += conv_flop_count(t(x_shape), t(grad_out_shape), t(grad_weight_shape), transposed=False) + + return flop_count + +def sdpa_flop_count(query_shape, key_shape, value_shape): + """ + Count flops for self-attention. + + NB: We can assume that value_shape == key_shape + """ + b, h, s_q, d_q = query_shape + _b2, _h2, s_k, _d2 = key_shape + _b3, _h3, _s3, d_v = value_shape + assert b == _b2 == _b3 and h == _h2 == _h3 and d_q == _d2 and s_k == _s3 and d_q == _d2 + total_flops = 0 + # q: [b, h, s_q, d_q] @ k: [b, h, d_q, s_k] -> scores: [b, h, s_q, s_k] + total_flops += bmm_flop((b * h, s_q, d_q), (b * h, d_q, s_k)) + # scores: [b, h, s_q, s_k] @ v: [b, h, s_k, d_v] -> out: [b, h, s_q, d_v] + total_flops += bmm_flop((b * h, s_q, s_k), (b * h, s_k, d_v)) + return total_flops + + +@register_flop_formula([aten._scaled_dot_product_efficient_attention, aten._scaled_dot_product_flash_attention]) +def sdpa_flop(query_shape, key_shape, value_shape, *args, out_shape=None, **kwargs) -> int: + """Count flops for self-attention.""" + # NB: We aren't accounting for causal attention here + return sdpa_flop_count(query_shape, key_shape, value_shape) + + +def sdpa_backward_flop_count(grad_out_shape, query_shape, key_shape, value_shape): + total_flops = 0 + b, h, s_q, d_q = query_shape + _b2, _h2, s_k, _d2 = key_shape + _b3, _h3, _s3, d_v = value_shape + _b4, _h4, _s4, _d4 = grad_out_shape + assert b == _b2 == _b3 == _b4 and h == _h2 == _h3 == _h4 and d_q == _d2 + assert d_v == _d4 and s_k == _s3 and s_q == _s4 + total_flops = 0 + # Step 1: We recompute the scores matrix. + # q: [b, h, s_q, d_q] @ k: [b, h, d_q, s_k] -> scores: [b, h, s_q, s_k] + total_flops += bmm_flop((b * h, s_q, d_q), (b * h, d_q, s_k)) + + # Step 2: We propagate the gradients through the score @ v operation. 
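+    # Each gradient term below is a single batched matmul, so it is counted
+    # with bmm_flop on the corresponding [b * h, m, k] x [b * h, k, n] shapes.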
+ # gradOut: [b, h, s_q, d_v] @ v: [b, h, d_v, s_k] -> gradScores: [b, h, s_q, s_k] + total_flops += bmm_flop((b * h, s_q, d_v), (b * h, d_v, s_k)) + # scores: [b, h, s_k, s_q] @ gradOut: [b, h, s_q, d_v] -> gradV: [b, h, s_k, d_v] + total_flops += bmm_flop((b * h, s_k, s_q), (b * h, s_q, d_v)) + + # Step 3: We propagate th gradients through the k @ v operation + # gradScores: [b, h, s_q, s_k] @ k: [b, h, s_k, d_q] -> gradQ: [b, h, s_q, d_q] + total_flops += bmm_flop((b * h, s_q, s_k), (b * h, s_k, d_q)) + # q: [b, h, d_q, s_q] @ gradScores: [b, h, s_q, s_k] -> gradK: [b, h, d_q, s_k] + total_flops += bmm_flop((b * h, d_q, s_q), (b * h, s_q, s_k)) + return total_flops + + +@register_flop_formula([aten._scaled_dot_product_efficient_attention_backward, aten._scaled_dot_product_flash_attention_backward]) +def sdpa_backward_flop(grad_out_shape, query_shape, key_shape, value_shape, *args, out_shape=None, **kwargs) -> int: + """Count flops for self-attention backward.""" + return sdpa_backward_flop_count(grad_out_shape, query_shape, key_shape, value_shape) + +flop_registry = { + aten.mm: mm_flop, + aten.addmm: addmm_flop, + aten.bmm: bmm_flop, + aten.baddbmm: baddbmm_flop, + aten.convolution: conv_flop, + aten._convolution: conv_flop, + aten.convolution_backward: conv_backward_flop, + aten._scaled_dot_product_efficient_attention: sdpa_flop, + aten._scaled_dot_product_flash_attention: sdpa_flop, + aten._scaled_dot_product_efficient_attention_backward: sdpa_backward_flop, + aten._scaled_dot_product_flash_attention_backward: sdpa_backward_flop, +} + +def normalize_tuple(x): + if not isinstance(x, tuple): + return (x,) + return x + + +# Define the suffixes for different orders of magnitude +suffixes = ["", "K", "M", "B", "T"] +# Thanks BingChat! +def get_suffix_str(number): + # Find the index of the appropriate suffix based on the number of digits + # with some additional overflow. + # i.e. 1.01B should be displayed as 1001M, not 1.001B + index = max(0, min(len(suffixes) - 1, (len(str(number)) - 2) // 3)) + return suffixes[index] + +def convert_num_with_suffix(number, suffix): + index = suffixes.index(suffix) + # Divide the number by 1000^index and format it to two decimal places + value = f"{number / 1000 ** index:.3f}" + # Return the value and the suffix as a string + return value + suffixes[index] + +def convert_to_percent_str(num, denom): + if denom == 0: + return "0%" + return f"{num / denom:.2%}" + +def _pytreeify_preserve_structure(f): + @wraps(f) + def nf(args): + flat_args, spec = tree_flatten(args) + out = f(*flat_args) + return tree_unflatten(out, spec) + + return nf + + +class FlopCounterMode(TorchDispatchMode): + """ + ``FlopCounterMode`` is a context manager that counts the number of flops within its context. + + It does this using a ``TorchDispatchMode``. + + It also supports hierarchical output by passing a module (or list of + modules) to FlopCounterMode on construction. If you do not need hierarchical + output, you do not need to use it with a module. + + Example usage + + .. code-block:: python + + mod = ... 
+ flop_counter = FlopCounterMode(mod) + with flop_counter: + mod.sum().backward() + + """ + + def __init__( + self, + mods: Optional[Union[torch.nn.Module, List[torch.nn.Module]]] = None, + depth: int = 2, + display: bool = True, + custom_mapping: Optional[Dict[Any, Any]] = None): + self.flop_counts: Dict[str, Dict[Any, int]] = defaultdict(lambda: defaultdict(int)) + self.depth = depth + self.parents = ["Global"] + self.in_backward = False + self.display = display + if custom_mapping is None: + custom_mapping = {} + if isinstance(mods, torch.nn.Module): + mods = [mods] + self.mods = mods + # Keys will include the modules in `mods` and their submodules + self._module_to_forward_hook_handles: Dict[nn.Module, _ForwardHookHandles] = {} + self.flop_registry = { + **flop_registry, + **{k: v if getattr(v, "_get_raw", False) else shape_wrapper(v) for k, v in custom_mapping.items()} + } + + def _register_forward_hooks(self): + if self.mods is None: + return + for mod in self.mods: + prefix = type(mod).__name__ + for name, module in dict(mod.named_modules()).items(): + if name == "": + name = prefix + else: + name = ".".join([prefix, name]) + + forward_pre_hook_handle = module.register_forward_pre_hook(self._enter_module(name)) + forward_hook_handle = module.register_forward_hook(self._exit_module(name)) + self._module_to_forward_hook_handles[module] = _ForwardHookHandles( + forward_pre_hook_handle, forward_hook_handle + ) + + def _deregister_forward_hooks(self): + for forward_hook_handles in self._module_to_forward_hook_handles.values(): + forward_hook_handles[0].remove() + forward_hook_handles[1].remove() + self._module_to_forward_hook_handles.clear() + + def _enter_module(self, name): + def f(module, inputs): + out = _pytreeify_preserve_structure(self._create_pre_module(name))(inputs) + return out + + return f + + def _exit_module(self, name): + def f(module, inputs, outputs): + outputs = _pytreeify_preserve_structure(self._create_post_module(name))(outputs) + return outputs + return f + + def _create_post_module(self, name): + class PushState(torch.autograd.Function): + @staticmethod + def forward(ctx, *args): + assert self.parents[-1] == name, f"{self.parents[-1]} is not {name}" + self.parents.pop() + args = tree_map(lambda x: x.clone() if isinstance(x, torch.Tensor) else x, args) + return args + + @staticmethod + def backward(ctx, *grad_outs): + self.in_backward = True + self.parents.append(name) + return grad_outs + + return PushState.apply + + def _create_pre_module(self, name): + class PopState(torch.autograd.Function): + @staticmethod + def forward(ctx, *args): + if self.in_backward: + self.parents = ["Global"] + self.in_backward = True + self.parents.append(name) + args = tree_map(lambda x: x.clone() if isinstance(x, torch.Tensor) else x, args) + return args + + @staticmethod + def backward(ctx, *grad_outs): + assert self.parents[-1] == name + self.parents.pop() + return grad_outs + + return PopState.apply + + def get_total_flops(self) -> int: + return sum(self.flop_counts['Global'].values()) + + def get_flop_counts(self) -> Dict[str, Dict[Any, int]]: + """Return the flop counts as a dictionary of dictionaries. + + The outer + dictionary is keyed by module name, and the inner dictionary is keyed by + operation name. + + Returns: + Dict[str, Dict[Any, int]]: The flop counts as a dictionary. 
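+
+        The result has the following shape (module names and counts below are
+        purely illustrative)::
+
+            {"Global": {aten.addmm: 4096, aten.mm: 2048},
+             "MyModel.fc": {aten.addmm: 4096}}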
+ """ + return {k: dict(v) for k, v in self.flop_counts.items()} + + def get_table(self, depth=None): + if depth is None: + depth = self.depth + if depth is None: + depth = 999999 + + import tabulate + tabulate.PRESERVE_WHITESPACE = True + header = ["Module", "FLOP", "% Total"] + values = [] + global_flops = self.get_total_flops() + global_suffix = get_suffix_str(global_flops) + is_global_subsumed = False + + def process_mod(mod_name, depth): + nonlocal is_global_subsumed + + total_flops = sum(self.flop_counts[mod_name].values()) + + is_global_subsumed |= total_flops >= global_flops + + padding = " " * depth + values = [] + values.append([ + padding + mod_name, + convert_num_with_suffix(total_flops, global_suffix), + convert_to_percent_str(total_flops, global_flops) + ]) + for k, v in self.flop_counts[mod_name].items(): + values.append([ + padding + " - " + str(k), + convert_num_with_suffix(v, global_suffix), + convert_to_percent_str(v, global_flops) + ]) + return values + + for mod in self.flop_counts.keys(): + if mod == 'Global': + continue + mod_depth = mod.count(".") + 1 + if mod_depth > depth: + continue + + cur_values = process_mod(mod, mod_depth - 1) + values.extend(cur_values) + + # We do a bit of messing around here to only output the "Global" value + # if there are any FLOPs in there that aren't already fully contained by + # a module. + if 'Global' in self.flop_counts and not is_global_subsumed: + for idx, value in enumerate(values): + values[idx][0] = " " + values[idx][0] + + values = process_mod('Global', 0) + values + + if len(values) == 0: + values = [["Global", "0", "0%"]] + + return tabulate.tabulate(values, headers=header, colalign=("left", "right", "right")) + + def __enter__(self): + self.flop_counts.clear() + self._register_forward_hooks() + super().__enter__() + return self + + def __exit__(self, *args): + if self.display: + print(self.get_table(self.depth)) + self._deregister_forward_hooks() + super().__exit__(*args) + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + kwargs = kwargs if kwargs else {} + out = func(*args, **kwargs) + func_packet = func._overloadpacket + if func_packet in self.flop_registry: + flop_count_func = self.flop_registry[func_packet] + flop_count = flop_count_func(*args, **kwargs, out=out) # type: ignore[operator] + if len(set(self.parents)) != len(self.parents): + print( + "The module hierarchy tracking seems to be messed up." + "Please file a bug or just run the flop counter without" + "tracking the module hierarchy (i.e. `with FlopCounterMode():`)" + ) + for par in set(self.parents): + self.flop_counts[par][func_packet] += flop_count + + return out + +class _ForwardHookHandles(NamedTuple): + forward_pre_hook_handle: RemovableHandle + forward_hook_handle: RemovableHandle diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/hooks.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..790bb498e5d83e05b2b59b58720e0d7ca1ef5e0f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/hooks.py @@ -0,0 +1,252 @@ +import torch +from collections import OrderedDict +import weakref +import warnings +from typing import Any, Tuple + +__all__ = ["RemovableHandle", "unserializable_hook", "warn_if_has_hooks", "BackwardHook"] + +class RemovableHandle: + r""" + A handle which provides the capability to remove a hook. + + Args: + hooks_dict (dict): A dictionary of hooks, indexed by hook ``id``. 
+ extra_dict (Union[dict, List[dict]]): An additional dictionary or list of + dictionaries whose keys will be deleted when the same keys are + removed from ``hooks_dict``. + """ + + id: int + next_id: int = 0 + + def __init__(self, hooks_dict: Any, *, extra_dict: Any = None) -> None: + self.hooks_dict_ref = weakref.ref(hooks_dict) + self.id = RemovableHandle.next_id + RemovableHandle.next_id += 1 + + self.extra_dict_ref: Tuple = () + if isinstance(extra_dict, dict): + self.extra_dict_ref = (weakref.ref(extra_dict),) + elif isinstance(extra_dict, list): + self.extra_dict_ref = tuple(weakref.ref(d) for d in extra_dict) + + def remove(self) -> None: + hooks_dict = self.hooks_dict_ref() + if hooks_dict is not None and self.id in hooks_dict: + del hooks_dict[self.id] + + for ref in self.extra_dict_ref: + extra_dict = ref() + if extra_dict is not None and self.id in extra_dict: + del extra_dict[self.id] + + def __getstate__(self): + if self.extra_dict_ref is None: + return (self.hooks_dict_ref(), self.id) + else: + return (self.hooks_dict_ref(), self.id, tuple(ref() for ref in self.extra_dict_ref)) + + def __setstate__(self, state) -> None: + if state[0] is None: + # create a dead reference + self.hooks_dict_ref = weakref.ref(OrderedDict()) + else: + self.hooks_dict_ref = weakref.ref(state[0]) + self.id = state[1] + RemovableHandle.next_id = max(RemovableHandle.next_id, self.id + 1) + + if len(state) < 3 or state[2] is None: + self.extra_dict_ref = () + else: + self.extra_dict_ref = tuple(weakref.ref(d) for d in state[2]) + + def __enter__(self) -> "RemovableHandle": + return self + + def __exit__(self, type: Any, value: Any, tb: Any) -> None: + self.remove() + + +def unserializable_hook(f): + """ + Mark a function as an unserializable hook with this decorator. + + This suppresses warnings that would otherwise arise if you attempt + to serialize a tensor that has a hook. + """ + f.__torch_unserializable__ = True + return f + + +def warn_if_has_hooks(tensor): + if tensor._backward_hooks: + for k in tensor._backward_hooks: + hook = tensor._backward_hooks[k] + if not hasattr(k, "__torch_unserializable__"): + warnings.warn(f"backward hook {repr(hook)} on tensor will not be " + "serialized. If this is expected, you can " + "decorate the function with @torch.utils.hooks.unserializable_hook " + "to suppress this warning") + +class BackwardHook: + """ + A wrapper class to implement nn.Module backward hooks. 
+ + It handles: + - Ignoring non-Tensor inputs and replacing them by None before calling the user hook + - Generating the proper Node to capture a set of Tensor's gradients + - Linking the gradients captures for the outputs with the gradients captured for the input + - Calling the user hook once both output and input gradients are available + """ + + def __init__(self, module, user_hooks, user_pre_hooks): + self.user_hooks = user_hooks + self.user_pre_hooks = user_pre_hooks + self.module = module + + self.grad_outputs = None + self.n_outputs = -1 + self.output_tensors_index = None + self.n_inputs = -1 + self.input_tensors_index = None + + def _pack_with_none(self, indices, values, size): + res = [None] * size + for idx, val in zip(indices, values): + res[idx] = val + + return tuple(res) + + def _unpack_none(self, indices, values): + res = [] + for idx in indices: + res.append(values[idx]) + + return tuple(res) + + def _set_user_hook(self, grad_fn): + def hook(grad_input, _): + if self.grad_outputs is None: + # This happens because the gradient in your nn.Module flows to + # the Module's input without " passing through the Module's + # output, e.g. when you're doing double backward. + return + res = self._pack_with_none(self.input_tensors_index, grad_input, self.n_inputs) + + for hook in self.user_hooks: + out = hook(self.module, res, self.grad_outputs) + + if out is None: + continue + + if len(out) != len(res): + raise RuntimeError("Backward hook returned an invalid number of grad_input, " + f"got {len(out)}, but expected {len(res)}") + + res = out + + self.grad_outputs = None + + return self._unpack_none(self.input_tensors_index, res) + + grad_fn.register_hook(hook) + + def _apply_on_tensors(self, fn, args): + # Can be used to apply the given function to the tensors contained in the + # args. Will return updated args and the tensors indices + tensors_idx = [] + tensors = [] + + requires_grad = False + for i, arg in enumerate(args): + if isinstance(arg, torch.Tensor): + tensors_idx.append(i) + tensors.append(arg) + requires_grad |= arg.requires_grad + + if not (requires_grad and torch.is_grad_enabled()): + return args, None + + new_tensors = torch.nn.modules._functions.BackwardHookFunction.apply(*tensors) + if len(new_tensors) == 0: + raise RuntimeError("Cannot set Module backward hook for a Module with no input Tensors.") + + grad_fns = [t.grad_fn for t in new_tensors if t.grad_fn is not None and t.grad_fn.name() == "BackwardHookFunctionBackward"] + if len(grad_fns) == 0: + raise RuntimeError("Error while setting up backward hooks. 
Please open " + "an issue with a code sample to reproduce this.") + + fn(grad_fns[0]) + + arg_list = list(args) + for idx, val in zip(tensors_idx, new_tensors): + arg_list[idx] = val + + if type(args) is tuple: + out = tuple(arg_list) + else: + out = type(args)(*arg_list) + return out, tensors_idx + + def setup_input_hook(self, args): + def fn(grad_fn): + self._set_user_hook(grad_fn) + + res, input_idx = self._apply_on_tensors(fn, args) + self.n_inputs = len(args) + self.input_tensors_index = input_idx + return res + + def setup_output_hook(self, args): + def fn(grad_fn): + def hook(_, grad_output): + self.grad_outputs = self._pack_with_none(self.output_tensors_index, + grad_output, + self.n_outputs) + + if self.user_pre_hooks: + expected_len = len(self.grad_outputs) + for user_pre_hook in self.user_pre_hooks: + hook_grad_outputs = user_pre_hook(self.module, self.grad_outputs) + if hook_grad_outputs is None: + continue + + actual_len = len(hook_grad_outputs) + if actual_len != expected_len: + raise RuntimeError("Backward pre hook returned an invalid number of grad_output, " + f"got {actual_len}, but expected {expected_len}") + self.grad_outputs = hook_grad_outputs + + # We need to be able to clear self.grad_outputs but also return it + local_grad_outputs = self.grad_outputs + + # Special case if no input required gradients, this hook should call the user + # hook directly + if self.input_tensors_index is None: + grad_inputs = self._pack_with_none([], [], self.n_inputs) + for user_hook in self.user_hooks: + res = user_hook(self.module, grad_inputs, self.grad_outputs) + if res is not None and not (isinstance(res, tuple) and all(el is None for el in res)): + raise RuntimeError("Backward hook for Modules where no input requires " + "gradient should always return None or None for all gradients.") + self.grad_outputs = None + + if local_grad_outputs is not None: + assert self.output_tensors_index is not None # mypy + return tuple(local_grad_outputs[i] for i in self.output_tensors_index) + + grad_fn.register_hook(hook) + + is_tuple = True + if not isinstance(args, tuple): + args = (args,) + is_tuple = False + + res, output_idx = self._apply_on_tensors(fn, args) + self.n_outputs = len(args) + self.output_tensors_index = output_idx + + if not is_tuple: + res = res[0] + return res diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/mkldnn.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/mkldnn.py new file mode 100644 index 0000000000000000000000000000000000000000..2d1d8cd89ff59d99c821ee3c8ec6b737e44a3b8f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/mkldnn.py @@ -0,0 +1,233 @@ +import torch + + +class MkldnnLinear(torch.jit.ScriptModule): + def __init__(self, dense_module, dtype): + super().__init__() + self.register_buffer('weight', dense_module.weight.to_mkldnn(dtype)) + if dense_module.bias is not None: + # Bias can be fp32 or bf16 for OneDNN bf16 path, but for good accuracy, + # we use fp32 dtype. 
+ self.register_buffer('bias', dense_module.bias.to_mkldnn()) + else: + # TODO: Remove this once ScriptModule supports registering None buffer + self.register_buffer( + 'bias', + torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn()) + + @torch.jit.script_method + def __getstate__(self): + return (self.weight.to_dense(), self.bias.to_dense(), self.training) + + @torch.jit.script_method + def __setstate__(self, state): + self.weight = state[0].to_mkldnn() + self.bias = state[1].to_mkldnn() + self.training = state[2] + + @torch.jit.script_method + def forward(self, x): + x_mkldnn = x if x.is_mkldnn else x.to_mkldnn() + y_mkldnn = torch._C._nn.mkldnn_linear(x_mkldnn, self.weight, self.bias) + y = y_mkldnn if x.is_mkldnn else y_mkldnn.to_dense() + return y + + +class _MkldnnConvNd(torch.jit.ScriptModule): + """Common base of MkldnnConv1d and MkldnnConv2d.""" + + __constants__ = ['stride', 'padding', 'dilation', 'groups'] + + def __init__(self, dense_module): + super().__init__() + + self.stride = dense_module.stride + self.padding = dense_module.padding + self.dilation = dense_module.dilation + self.groups = dense_module.groups + + if dense_module.bias is not None: + self.register_buffer('bias', dense_module.bias.to_mkldnn()) + else: + # Bias can be fp32 or bf16 for OneDNN bf16 path, but for good accuracy, + # we use fp32 dtype. + # TODO: Remove this once ScriptModule supports registering None buffer + self.register_buffer( + 'bias', + torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn()) + + @torch.jit.script_method + def __getstate__(self): + return (self.weight.to_dense(), self.bias.to_dense(), self.training) + + @torch.jit.script_method + def forward(self, x): + return torch.mkldnn_convolution( + x, + self.weight, + self.bias, + self.padding, + self.stride, + self.dilation, + self.groups) + + +class MkldnnConv1d(_MkldnnConvNd): + def __init__(self, dense_module, dtype): + super().__init__(dense_module) + + self.register_buffer('weight', dense_module.weight.to_mkldnn(dtype)) + + @torch.jit.script_method + def __setstate__(self, state): + self.weight = state[0].to_mkldnn() + self.bias = state[1].to_mkldnn() + self.training = state[2] + + +class MkldnnConv2d(_MkldnnConvNd): + def __init__(self, dense_module, dtype): + super().__init__(dense_module) + + self.register_buffer('weight', torch._C._nn.mkldnn_reorder_conv2d_weight( + dense_module.weight.to_mkldnn(dtype), + self.padding, + self.stride, + self.dilation, + self.groups)) + + @torch.jit.script_method + def __setstate__(self, state): + self.weight = torch._C._nn.mkldnn_reorder_conv2d_weight( + state[0].to_mkldnn(), + self.padding, + self.stride, + self.dilation, + self.groups) + self.bias = state[1].to_mkldnn() + self.training = state[2] + +class MkldnnConv3d(_MkldnnConvNd): + def __init__(self, dense_module, dtype): + super().__init__(dense_module) + + self.register_buffer('weight', torch._C._nn.mkldnn_reorder_conv3d_weight( + dense_module.weight.to_mkldnn(dtype), + self.padding, + self.stride, + self.dilation, + self.groups)) + + @torch.jit.script_method + def __setstate__(self, state): + self.weight = torch._C._nn.mkldnn_reorder_conv3d_weight( + state[0].to_mkldnn(), + self.padding, + self.stride, + self.dilation, + self.groups) + self.bias = state[1].to_mkldnn() + self.training = state[2] + + +class MkldnnBatchNorm(torch.jit.ScriptModule): + __constants__ = ['exponential_average_factor', 'eps'] + + def __init__(self, dense_module): + super().__init__() + + assert not dense_module.training + 
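+        # This wrapper is inference-only and additionally requires tracked
+        # running statistics and affine parameters (asserted below), since
+        # forward() calls the kernel with training=False.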
assert dense_module.track_running_stats + assert dense_module.affine + + if dense_module.momentum is None: + self.exponential_average_factor = 0.0 + else: + self.exponential_average_factor = dense_module.momentum + self.eps = dense_module.eps + + self.register_buffer('weight', dense_module.weight.to_mkldnn()) + self.register_buffer('bias', dense_module.bias.to_mkldnn()) + self.register_buffer('running_mean', dense_module.running_mean.to_mkldnn()) + self.register_buffer('running_var', dense_module.running_var.to_mkldnn()) + + @torch.jit.script_method + def __getstate__(self): + weight = self.weight.to_dense() + bias = self.bias.to_dense() + running_mean = self.running_mean.to_dense() + running_var = self.running_var.to_dense() + return (weight, bias, running_mean, running_var, self.training) + + @torch.jit.script_method + def __setstate__(self, state): + self.weight = state[0].to_mkldnn() + self.bias = state[1].to_mkldnn() + self.running_mean = state[2].to_mkldnn() + self.running_var = state[3].to_mkldnn() + self.training = state[4] + + @torch.jit.script_method + def forward(self, x): + return torch.batch_norm( + x, + self.weight, + self.bias, + self.running_mean, + self.running_var, + False, # training + self.exponential_average_factor, + self.eps, + False, # cuda_enabled + ) + +class MkldnnPrelu(torch.jit.ScriptModule): + def __init__(self, dense_module, dtype): + super().__init__() + self.register_buffer('weight', dense_module.weight.to_mkldnn(dtype)) + + @torch.jit.script_method + def __getstate__(self): + return (self.weight.to_dense(), self.training) + + @torch.jit.script_method + def __setstate__(self, state): + self.weight = state[0].to_mkldnn() + self.training = state[1] + + @torch.jit.script_method + def forward(self, x): + x_mkldnn = x if x.is_mkldnn else x.to_mkldnn() + y_mkldnn = torch.prelu(x_mkldnn, self.weight) + y = y_mkldnn if x.is_mkldnn else y_mkldnn.to_dense() + return y + +def to_mkldnn(module, dtype=torch.float): + assert dtype in [torch.float, torch.bfloat16, torch.half], \ + "MKLDNN only support float, bfloat16, and half path now" + + def m_fn(m, d): + if isinstance(m, torch.nn.Linear): + return MkldnnLinear(m, d) + elif isinstance(m, torch.nn.Conv1d): + return MkldnnConv1d(m, d) + elif isinstance(m, torch.nn.Conv2d): + return MkldnnConv2d(m, d) + elif isinstance(m, torch.nn.Conv3d): + return MkldnnConv3d(m, d) + elif isinstance(m, (torch.nn.BatchNorm2d, torch.nn.BatchNorm3d)): + # For batchnorm bf16 path, OneDNN requires weight and bias need fp32 dtype. + # so it doesn't need dtype argument. 
+ return MkldnnBatchNorm(m) + elif isinstance(m, torch.nn.PReLU): + return MkldnnPrelu(m, d) + else: + return m + + def m_fn_rec(m, d): + new_m = m_fn(m, d) + for name, sub_m in m.named_children(): + setattr(new_m, name, m_fn_rec(sub_m, d)) + return new_m + + return m_fn_rec(module, dtype) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/mobile_optimizer.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/mobile_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..e3a801850ef7c225993831c222b846cea3def9ec --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/mobile_optimizer.py @@ -0,0 +1,135 @@ +"""This module contains utility method for mobile model optimization and lint.""" + +import torch +from enum import Enum +from torch._C import _MobileOptimizerType as MobileOptimizerType +from typing import Optional, Set, List, AnyStr + +class LintCode(Enum): + BUNDLED_INPUT = 1 + REQUIRES_GRAD = 2 + DROPOUT = 3 + BATCHNORM = 4 + +def optimize_for_mobile( + script_module: torch.jit.ScriptModule, + optimization_blocklist: Optional[Set[MobileOptimizerType]] = None, + preserved_methods: Optional[List[AnyStr]] = None, + backend: str = 'CPU') -> torch.jit.RecursiveScriptModule: + """ + Optimize a torch script module for mobile deployment. + + Args: + script_module: An instance of torch script module with type of ScriptModule. + optimization_blocklist: A set with type of MobileOptimizerType. When set is not passed, + optimization method will run all the optimizer pass; otherwise, optimizer + method will run the optimization pass that is not included inside optimization_blocklist. + preserved_methods: A list of methods that needed to be preserved when freeze_module pass is invoked + backend: Device type to use for running the result model ('CPU'(default), 'Vulkan' or 'Metal'). + Returns: + A new optimized torch script module + """ + if not isinstance(script_module, torch.jit.ScriptModule): + raise TypeError( + f'Got {type(script_module)}, but ScriptModule is expected.') + + if optimization_blocklist is None: + optimization_blocklist = set() + + if preserved_methods is None: + preserved_methods = [] + + # Convert potential byte arrays into strings (if there is any) to pass type checking + # Here we use a new name as assigning it back to preserved_methods will invoke + # mypy errors (i.e. 
List[AnyStr] = List[str]) + preserved_methods_str: List[str] = [str(method) for method in preserved_methods] + + bundled_inputs_attributes = _get_bundled_inputs_preserved_attributes(script_module, preserved_methods_str) + if all(hasattr(script_module, method) for method in bundled_inputs_attributes): + preserved_methods_str = list(set(preserved_methods_str + bundled_inputs_attributes)) + + non_exist_methods = [] + for method in preserved_methods_str: + if not hasattr(script_module, method): + non_exist_methods.append(method) + if non_exist_methods: + raise AttributeError( + f"The following methods to preserve do not exist in script_module: {', '.join(non_exist_methods)}") + + backend = backend.lower() + if backend == 'cpu': + optimized_cpp_module = torch._C._jit_pass_optimize_for_mobile( + script_module._c, + optimization_blocklist, + preserved_methods_str) + elif backend == 'vulkan': + optimized_cpp_module = torch._C._jit_pass_vulkan_optimize_for_mobile( + script_module._c, + optimization_blocklist, + preserved_methods_str) + elif backend == 'metal': + optimized_cpp_module = torch._C._jit_pass_metal_optimize_for_mobile(script_module._c, preserved_methods_str) + else: + raise TypeError("Unknown backend, must be one of 'CPU', 'Vulkan' or 'Metal'") + + return torch.jit._recursive.wrap_cpp_module(optimized_cpp_module) + + +def generate_mobile_module_lints(script_module: torch.jit.ScriptModule): + """ + Generate a list of lints for a given torch script module. + + Args: + script_module: An instance of torch script module with type of ScriptModule. + + Returns: + lint_map: A list of dictionary that contains modules lints + """ + if not isinstance(script_module, torch.jit.ScriptModule): + raise TypeError( + f'Got {type(script_module)}, but ScriptModule is expected.') + + lint_list = [] + + if not hasattr(script_module, "_generate_bundled_inputs_for_forward"): + lint_list.append({"name": LintCode.BUNDLED_INPUT.name, "message": "No bundled input for forward, please add bundled inputs " + "before saving the module using torch.utils.bundled_inputs.augment_model_with_bundled_inputs."}) + + for name, param in script_module.named_parameters(): + if param.requires_grad: + lint_list.append({"name": LintCode.REQUIRES_GRAD.name, "message": f"Param {name} requires grad, " + "please set torch.no_grad() to reduce memory usage and improve computation speed during " + "inference phase."}) + + op_names = torch.jit.export_opnames(script_module) + for op_name in op_names: + if "dropout" in op_name: + lint_list.append({"name": LintCode.DROPOUT.name, "message": "Operator {} exists, remember to call eval() before " + "saving the module.and call torch.utils.mobile_optimizer.optimize_for_mobile to drop dropout " + "operator.".format(op_name)}) + if "batch_norm" in op_name: + lint_list.append({"name": LintCode.BATCHNORM.name, "message": "Operator {} exists, remember to call eval() before " + "saving the module and call torch.utils.mobile_optimizer.optimize_for_mobile to drop batch_norm " + "operator.".format(op_name)}) + + return lint_list + +def _get_bundled_inputs_preserved_attributes(script_module: torch.jit.ScriptModule, preserved_methods: List[str]) -> List[str]: + + bundled_inputs_attributes = [] + # Has bundled inputs for forward + if hasattr(script_module, 'get_all_bundled_inputs'): + bundled_inputs_attributes.append('get_all_bundled_inputs') + bundled_inputs_attributes.append('get_num_bundled_inputs') + + # Bundled inputs in module after the change that introduced bundled inputs for multiple functions + if 
hasattr(script_module, 'get_bundled_inputs_functions_and_info'): + bundled_inputs_attributes.append('get_bundled_inputs_functions_and_info') + all_info = script_module.get_bundled_inputs_functions_and_info() + for function_name in all_info: + if function_name not in preserved_methods: + bundled_inputs_attributes.append(function_name) + bundled_inputs_attributes.append("get_all_bundled_inputs_for_" + function_name) + bundled_inputs_attributes.append("_bundled_inputs_deflated_" + function_name) + + return bundled_inputs_attributes diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/model_zoo.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/model_zoo.py new file mode 100644 index 0000000000000000000000000000000000000000..e0c6004e23ea806a2c83e12cd2998e0279e0b16f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/model_zoo.py @@ -0,0 +1,2 @@ +# torchvision imports tqdm from here. +from torch.hub import tqdm, load_state_dict_from_url as load_url # noqa: F401 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cca0fb95146039f6dcaedf23485219904b8ee570 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__init__.py @@ -0,0 +1,13 @@ +import tensorboard +from torch._vendor.packaging.version import Version + +if not hasattr(tensorboard, "__version__") or Version( + tensorboard.__version__ +) < Version("1.15"): + raise ImportError("TensorBoard logging requires TensorBoard version 1.15 or above") + +del Version +del tensorboard + +from .writer import FileWriter, SummaryWriter # noqa: F401 +from tensorboard.summary.writer.record_writer import RecordWriter # noqa: F401 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9eb1165d04a2ea5555dc56af75c82b8fd996769 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_caffe2_graph.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_caffe2_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b074bb589b4dcbf5a2114a26a8c036a55aa91443 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_caffe2_graph.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_convert_np.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_convert_np.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fed93f9614111100d23febfffb4031d8be18adf5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_convert_np.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_embedding.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_embedding.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..14859b65b1d0118a7d6461a5a1a61acf9dbd3ba3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_embedding.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_onnx_graph.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_onnx_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ea3247941f1049dc86ec509896b60544eeef4b7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_onnx_graph.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_proto_graph.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_proto_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..915c9583e1b3c0165ea7a5320d826af764cb3d03 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_proto_graph.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_pytorch_graph.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_pytorch_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..165dc51223e3a0a9a68ecb3d6ce5b131adffb48a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_pytorch_graph.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe9f5f34da0dccc4a7125e1554addc68b611a0c4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/summary.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/summary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a27afe3020b71aa8e72027eac89560bc185b088f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/summary.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/writer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/writer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd4cd21b028dadec075cef3389be947abdd80eec Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/__pycache__/writer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_caffe2_graph.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_caffe2_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..536746026052d92438e62b60a2ffcdbb61c386ab --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_caffe2_graph.py @@ -0,0 +1,822 @@ +import copy +import logging +import os +import re + +from tensorboard.compat.proto.graph_pb2 
import GraphDef +from tensorboard.compat.proto.node_def_pb2 import NodeDef +from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto + +from caffe2.proto import caffe2_pb2 +from caffe2.python import core, workspace + +from typing import Set, Dict, Tuple, List + +log = logging.getLogger(__name__) + +def _make_unique_name(seen: Set[str], name: str, min_version: int = 0): + """ + Make the name unique by appending a unique number to the name. Used for SSA. + + Args: + seen (set): Set of names that have already been used (with respect to + some context). + name (str): The name to make unique + min_version (number): Starting index. Is incremented continually until + it can make the resulting name unique relative to 'seen'. + + Returns: + x (str): A version of name that is not in seen. + """ + assert name is not None + i = min_version + x = "%s_%d" % (name, i) if i else name + while x in seen: + i += 1 + x = "%s_%d" % (name, i) + seen.add(x) + return x + + +def _rename_tensorflow_style(shapes, blob_name_tracker, ops): + """ + Convert some of the common names in Caffe2 to tensorflow. + + NOTE: The common names in both Caffe2 and Tensorflow are currently + hardcoded, if either side changes at some point, then this code should + change as well. + + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + blob_name_tracker: Dictionary of all unique blob names (with respect to + some context). + ops: List of Caffe2 operators + + Returns: + None. The _rename_all() call modifies blob_name_tracker and ops in-place. + """ + WEIGHT = re.compile(r"(_w)$") + WEIGHT_ = re.compile(r"(_w_)") + BN = re.compile(r"(_bn)$") + BN_ = re.compile(r"(_bn_)") + BIAS = re.compile(r"(_b)$") + BIAS_ = re.compile(r"(_b_)") + SCALE = re.compile(r"(_s)$") + SCALE_ = re.compile(r"(_s_)") + SUM = re.compile(r"(_sum)$") + SUM_ = re.compile(r"(_sum_)") + BRANCH = re.compile(r"(_branch)") + + def f(name): + inter_name = WEIGHT_.sub("/weight_", WEIGHT.sub("/weight", name)) + inter_name = BN_.sub("/batchnorm_", BN.sub("/batchnorm", inter_name)) + inter_name = BIAS_.sub("/bias_", BIAS.sub("/bias", inter_name)) + inter_name = SCALE_.sub("/scale_", SCALE.sub("/scale", inter_name)) + inter_name = SUM_.sub("/sum_", SUM.sub("/sum", inter_name)) + new_name = BRANCH.sub("/branch", inter_name) + return new_name + + _rename_all(shapes, blob_name_tracker, ops, f) + + +def _convert_to_ssa(shapes, blob_name_tracker, ops): + """ + Convert an operator graph to SSA (i.e. out-of-place). + + i.e. blobs will be renamed so that each blob is produced only once. + + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + blob_name_tracker: Dictionary of all unique blob names (with respect to + some context). + ops: List of Caffe2 operators + + Returns: + None. Modifies blob_name_tracker and ops in-place. + """ + ir = core.IR(ops) + seen: Set[str] = set() + versioned: Dict[Tuple[str, int], int] = {} + new_shapes = {} + new_blob_name_tracker = {} + + def ssa_name(name: str, versions: Dict[str, int]) -> int: + assert name in versions + version = versions[name] + if (name, version) in versioned: + return versioned[(name, version)] + # Always setting name2 = `{name}_{version}` would work, but we also try + # to avoid a trailing `_0`, so we have to be careful not to introduce + # name collisions, such as (foo_1, 0) = foo_1 = (foo, 1). + # Note: operator names (if any) will be handled later. 
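+        # e.g. version 0 of blob "foo" keeps the name "foo" (if unused), while
+        # later versions become "foo_1", "foo_2", ... via _make_unique_name.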
+ new_name = _make_unique_name(seen, name, min_version=version) + versioned[(name, version)] = new_name + # Transfer shape. + if name in shapes: + new_shapes[new_name] = shapes[name] + if blob_name_tracker and name in blob_name_tracker: + new_blob_name_tracker[new_name] = blob_name_tracker[name] + return new_name + + for (op, ssa) in zip(ops, ir.ssa): + assert op is ssa.op + inputs = list(op.input) + outputs = list(op.output) + del op.input[:] + del op.output[:] + op.input.extend(ssa_name(name, ssa.in_versions) for name in inputs) + op.output.extend(ssa_name(name, ssa.out_versions) for name in outputs) + + shapes.clear() + shapes.update(new_shapes) + if blob_name_tracker: + blob_name_tracker.clear() + blob_name_tracker.update(new_blob_name_tracker) + + +def _get_blob_names(ops): + """ + Get all the operator input and output blobs and perform dedup on their names. + + Args: + ops: List of Caffe2 operators to extract inputs and outputs from + + Returns: + set containing distinct inputs and outputs from 'ops' + """ + names = set() + for op in ops: + names.update(op.input) + names.update(op.output) + return {name: name for name in names} + + +def _remap_keys(old_dict, rename_fn): + """ + Rename keys of 'old_dict' according to 'rename_fn'. + + Args: + old_dict: Dictionary (i.e. containing blob_name -> blob_name + relationships.) + rename_fn: Function string -> string for renaming. + + Returns: + None. Modifies old_dict in-place. + """ + new_dict = {rename_fn(key): value for key, value in old_dict.items()} + old_dict.clear() + old_dict.update(new_dict) + + +def _rename_all(shapes, blob_name_tracker, ops, rename_fn): + """ + Rename all the names in the operators. + + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + blob_name_tracker: Dictionary of all unique blob names (with respect to + some context). + ops: List of Caffe2 operators + rename_fn: Function string -> string that specifies how to rename + + Returns: + None. Modifies shapes, blob_name_tracker and ops in-place using the + specified 'rename_fn'. + """ + seen: Set[str] = set() + renamed: Dict[Tuple[str, int], int] = {} + + def g(name): + """Collision-free version of f.""" + if name is None: + return None + if name in renamed: + return renamed[name] + new_name = _make_unique_name(seen, rename_fn(name)) + renamed[name] = new_name + return new_name + + for op in ops: + inputs = list(op.input) + outputs = list(op.output) + del op.input[:] + del op.output[:] + op.input.extend(g(name) for name in inputs) + op.output.extend(g(name) for name in outputs) + + _remap_keys(shapes, g) + if blob_name_tracker: + _remap_keys(blob_name_tracker, g) + # Rename all operator names (if any) independently so that the + # unique-fication happens only once in _fill_missing_operator_names(). + seen.clear() + renamed.clear() + for op in ops: + op.name = g(op.name) + + +def _add_gradient_scope(shapes, blob_name_tracker, ops): + """ + For all operators or blobs with name containing "_grad", add a "GRADIENTS/" scope. + + Note: breaks graph execution since the blob -> gradient mapping is + hardcoded. + + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + blob_name_tracker: Dictionary of all unique blob names (with respect to + some context). + ops: List of Caffe2 operators + + Returns: + None. Modifies shapes, blob_name_tracker and ops in-place by renaming. 
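+
+    For example, a blob or operator named ``conv1_w_grad`` is renamed to
+    ``GRADIENTS/conv1_w_grad``; names that do not contain ``_grad`` are left
+    unchanged.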
+ """ + + def f(name): + if "_grad" in name: + return f"GRADIENTS/{name}" + else: + return name + + _rename_all(shapes, blob_name_tracker, ops, f) + + +def _replace_colons(shapes, blob_name_tracker, ops, repl): + """ + `:i` has a special meaning in Tensorflow. This function replaces all colons with $ to avoid any possible conflicts. + + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + blob_name_tracker: Dictionary of all unique blob names (with respect to + some context). + ops: List of Caffe2 operators + repl: String representing the text to replace ':' with. Usually this is + '$'. + + Returns: + None. Modifies blob_name_tracker in-place. + + """ + + def f(name): + return name.replace(":", repl) + + _rename_all(shapes, blob_name_tracker, ops, f) + + +def _fill_missing_operator_names(ops): + """ + Give missing operators a name. + + We expect C2 operators to be generally unnamed. This gives them a scope + (inferred from their outputs) and a name after their type. Duplicates will + be postfixed by an index. + + Args: + ops: List of Caffe2 operators to assign names to. + + Returns: + None: Modifies 'ops' in-place. + """ + seen = set() + for op in ops: + # Make sure operator names don't collide with blobs. + seen.update(op.input) + seen.update(op.output) + for op in ops: + if op.name: + name = op.name + elif op.output or op.input: + name_list = [os.path.dirname(name) for name in op.output or op.input] + scope = os.path.commonprefix(name_list) + name = os.path.join(scope, op.type) + else: + name = op.type + assert name + op.name = _make_unique_name(seen, name) + + +def _tf_device(device_option): + """ + Handle the devices. + + Args: + device_option (caffe2_pb2.DeviceOption): DeviceOption protobuf, + associated to an operator, that contains information such as + device_type (optional), cuda_gpu_id (optional), node_name (optional, + tells which node the operator should execute on). See caffe2.proto + in caffe2/proto for the full list. + + Returns: + Formatted string representing device information contained in + device_option. + """ + if not device_option.HasField("device_type"): + return "" + if ( + device_option.device_type == caffe2_pb2.CPU + or device_option.device_type == caffe2_pb2.MKLDNN + ): + return "/cpu:*" + if device_option.device_type == caffe2_pb2.CUDA: + return f"/gpu:{device_option.device_id}" + raise Exception("Unhandled device", device_option) + + +def _add_tf_shape(attr_dict, ints): + """ + Convert a list of ints to a TensorShapeProto representing the dimensions of a blob/object. + + Args: + attr_dict: Dictionary to update (usually attributes of a Node) + ints: List of integers representing dimensions of some object. + + Returns: + None. Modifies attr_dict in-place. + """ + shape_proto = TensorShapeProto() + for i in ints: + dim = TensorShapeProto.Dim() + dim.size = i + shape_proto.dim.extend([dim]) + attr_dict["_output_shapes"].list.shape.extend([shape_proto]) + + +def _set_tf_attr(attr_dict, arg): + """ + Add attributes to a node. Key is the arg.name, and values can be shape, floats, strings, ints or an empty list. + + Args: + attr_dict: Dictionary to update (usually attributes of a Node) + arg: Object with name and data fields. + + Returns: + None. Modifies attr_dict in-place. 
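+
+    For example, an arg named ``kernel`` carrying ``ints=[3, 3]`` ends up as
+    ``attr_dict["kernel"].list.i == [3, 3]``, while an arg named ``shape`` with
+    ``ints`` set is routed through ``_add_tf_shape`` and stored as a
+    ``TensorShapeProto``.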
+ """ + k = arg.name + if k == "shape" and arg.ints: + _add_tf_shape(attr_dict, arg.ints) + return + # Float + if arg.HasField("f"): + attr_dict[k].f = arg.f + return + # Integer + if arg.HasField("i"): + attr_dict[k].i = arg.i + return + # String + if arg.HasField("s"): + attr_dict[k].s = ( + arg.s if isinstance(arg.s, bytes) else str(arg.s).encode("utf-8") + ) + return + if arg.floats: + attr_dict[k].list.f.extend(arg.floats) + return + if arg.ints: + attr_dict[k].list.i.extend(arg.ints) + return + if arg.strings: + attr_dict[k].list.s.extend( + s if isinstance(s, bytes) else str(s).encode("utf-8") for s in arg.strings + ) + return + # The value is an empty list. + attr_dict[k].list.s.extend([]) + + +def _operator_to_node(shapes, op): + """ + Convert an operator to a node in a TF graph. + + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + op: The Caffe2 operator to convert to a TF graph node. + + Returns: + n: The TF graph node created from op. + """ + assert op.name, op + n = NodeDef() + n.name = op.name + n.input.extend(op.input) + n.op = op.type + n.device = _tf_device(op.device_option) + if shapes: + # Add shapes in order. + for output in op.output: + if output not in shapes: + break + _add_tf_shape(n.attr, shapes[output]) + for arg in op.arg: + _set_tf_attr(n.attr, arg) + return n + + +def _operator_to_node_simp(op, inter_blobs, seen): + """ + Convert the operators to nodes. + + Args: + op: Caffe2 operator to convert to node + inter_blobs: Set of intermediate blobs + seen: Names that have already been used and are not unique + + Returns: + nodes: Nodes representing 'op' and the outputs of 'op' + """ + assert op + nodes = [] + outputs = [o for o in op.output if o not in inter_blobs] + seen.update(outputs) + len_outputs = len(outputs) + if len_outputs == 1: + n = NodeDef() + n.name = outputs[0] + # Here we are sure the name is unique. + n.input.extend(op.input) + n.op = op.type + n.device = _tf_device(op.device_option) + for arg in op.arg: + _set_tf_attr(n.attr, arg) + nodes.append(n) + elif len_outputs > 1: + # Create a name that is likely unique + if op.name: + name = op.name + else: + name_list = list(outputs) + scope = os.path.commonprefix(name_list) + name = os.path.join(scope, op.type) + assert name + op.name = _make_unique_name(seen, name) + device = _tf_device(op.device_option) + + # Create additional output nodes + for output in outputs: + n = NodeDef() + n.name = output + n.input.extend([op.name]) + n.op = "Blob" + n.device = device + nodes.append(n) + + # Node for the current op + n = NodeDef() + n.name = op.name + n.input.extend(op.input) + n.op = op.type + n.device = device + for arg in op.arg: + _set_tf_attr(n.attr, arg) + nodes.append(n) + + return nodes + + +def _blob_to_node(producing_ops, shapes, name): + """ + Convert a blob (operator input or output) to a node in a TF graph. + + Args: + producing_ops: Dictionary of blob name to list of + (producing_op, blob_index within producing_op.output) mapping. + shapes: Dictionary mapping blob names to their shapes/dimensions. + name: String representing the name of this blob. + + Returns: + n: The TF graph node created from this blob. + """ + assert name + n = NodeDef() + n.name = name + # Get all ops that have the blob corresponding to 'name' as one of their + # outputs. See _operators_to_graph_def. + produced_by = producing_ops.get(name, []) + if len(produced_by) > 0: + n.op = "Blob" + else: + # This blob is not produced but is instead a TF Placeholder where a + # value is passed in. 
+ n.op = "Placeholder" + n.input.extend("%s:%d" % (p_op.name, i) for p_op, i in produced_by) + if produced_by: + device = produced_by[0][0].device_option + if all(producer[0].device_option == device for producer in produced_by): + n.device = _tf_device(device) + if shapes and name in shapes: + _add_tf_shape(n.attr, shapes[name]) + return n + + +def _clear_debug_info(ops, perform_clear): + """ + Remove debug information from operators, they are copious. + + Args: + ops: List of Caffe2 operators + perform_clear: Boolean passed from _operators_to_graph_def specifying + whether to remove the debug information. This boolean is passed into + this function to reduce the complexity of _operators_to_graph_def. + + Returns: + None. Modifies the list of Caffe2 operators in-place and removes the + 'debug_info' field. + + """ + if not perform_clear: + return + + for op in ops: + if op.HasField("debug_info"): + op.ClearField("debug_info") + + +def _check_if_forward(blob): + """ + Blobs with names containing '_m' or 'grad' are part of the backward pass. + + This function references facebookresearch/Detectron/detectron/utils/net.py. + + Args: + blob: The blob to inspect + + Returns: + Boolean representing whether this blob is part of the forward pass + """ + # + return blob.find("__m") < 0 or blob.find("grad") < 0 + + +def _check_if_cpu(blob): + """ + Check if the blob's name starts with '_gpu'. + + Args: + blob: The blob to inspect + + Returns: + Boolean representing whether this blob is associated with a gpu + """ + return not blob.startswith("_gpu") + + +def _compute_in_out(ops): + """ + Find the input, intermediate and output nodes of a set of operators. + + Args: + ops: List of Caffe2 operators to look through + + Returns: + input_blobs: The input nodes of the set of operators + inter_blobs: The intermediate nodes of the set of operators + output_blobs: The output nodes of the set of operators + """ + in_blobs = set() + out_blobs = set() + + for op in ops: + for input_blob in op.input: + in_blobs.add(input_blob) + for output_blob in op.output: + out_blobs.add(output_blob) + + input_blobs = list(in_blobs.difference(out_blobs)) + output_blobs = list(out_blobs.difference(in_blobs)) + inter_blobs = {b for b in output_blobs if b.startswith("_")} + output_blobs = [b for b in output_blobs if b not in inter_blobs] + + return input_blobs, inter_blobs, output_blobs + + +def _filter_ops(ops, filter_fn, perform_filter): + """ + Filter unwanted operators based on criteria in 'filter_fn'. + + Args: + ops: List of Caffe2 operators to filter + filter_fn: Criteria function for whether inputs/outputs in an operator + should be filtered. + perform_filter: Boolean passed from _operators_to_graph_def specifying + whether to filter operators + + Returns: + new_ops: Subset of ops containing a subset of their inputs and outputs. + """ + if not perform_filter: + return ops + + new_ops = [] + for op in ops: + inputs = list(op.input) + outputs = list(op.output) + del op.input[:] + del op.output[:] + new_inputs = [i for i in inputs if filter_fn(i)] + new_outputs = [o for o in outputs if filter_fn(o)] + + # Only add the op if output is not empty + if new_outputs: + op.input.extend(new_inputs) + op.output.extend(new_outputs) + new_ops.append(op) + + return new_ops + + +def _operators_to_graph_def( + shapes, + ops, + colon_replacement="$", + with_ssa=True, + with_gradient_scope=True, + blob_name_tracker=None, + show_simplified=False, + custom_rename=None, +): + """ + Convert a set of operators to a graph using the main function. 
+ + Args: + shapes: Dictionary mapping blob names to their shapes/dimensions. + ops: List of Caffe2 operators, representing some computation graph + ### **kwargs (model_to_graph_def, nets_to_graph_def, protos_to_graph_def) ### + colon_replacement: Symbol to replace ':' with. ':i' in TF has a special + meaning, so we need to replace it with a non-conflicting symbol. + with_ssa: Boolean + with_gradient_scope: Boolean + blob_name_tracker: Dictionary tracking names of blobs (inputs/outputs + from operators) + show_simplified: Whether to show a simplified version of the model graph + Sets all of the following values: + clear_debug_info: Boolean representing whether to silence debug + info (which can be very verbose) + show_forward_only: Boolean representing whether to only show + blobs involved in the forward pass + show_cpu_only: Boolean representing whether to only show blobs + that are not associated with a gpu + use_tensorflow_naming: Boolean representing whether to convert + some common Caffe2 naming conventions to their Tensorflow + counterparts + custom_rename: Function string -> string that defines a custom + renaming function to use. + + Returns: + current_graph: GraphDef representing the computation graph formed by the + set of operators. + """ + if blob_name_tracker is not None: + blob_name_tracker.clear() + else: + blob_name_tracker = {} + + blob_name_tracker.update(_get_blob_names(ops)) + + _clear_debug_info(ops, show_simplified) # clear_debug_info + ops = _filter_ops(ops, _check_if_forward, show_simplified) # show_forward_only + ops = _filter_ops(ops, _check_if_cpu, show_simplified) # show_cpu_only + if custom_rename: + _rename_all(shapes, blob_name_tracker, ops, custom_rename) + if colon_replacement: + _replace_colons(shapes, blob_name_tracker, ops, colon_replacement) + if with_ssa: + _convert_to_ssa(shapes, blob_name_tracker, ops) + if with_gradient_scope: + _add_gradient_scope(shapes, blob_name_tracker, ops) + _fill_missing_operator_names(ops) + if show_simplified: # use_tensorflow_naming + _rename_tensorflow_style(shapes, blob_name_tracker, ops) + producing_ops: Dict[caffe2_pb2.OperatorDef, List] = {} + blobs = set() + input_blobs, inter_blobs, _ = _compute_in_out(ops) + current_graph = GraphDef() + seen = set(input_blobs) + for op in ops: + nodes_from_op = ( + _operator_to_node_simp(op, inter_blobs, seen) + if show_simplified + else [_operator_to_node(shapes, op)] + ) # .extend() expects an iterable + current_graph.node.extend(nodes_from_op) + for input_blob in op.input: + blobs.add(input_blob) + for i, output_blob in enumerate(op.output): + blobs.add(output_blob) + producing_ops.setdefault(output_blob, []).append((op, i)) + + if show_simplified: + # Show a cleaner, easier-to-interpret version of the model graph + blobs = input_blobs + + for blob in sorted(blobs): + current_graph.node.extend([_blob_to_node(producing_ops, {}, blob)]) + + return current_graph + + +def _propagate_device_option(net_def): + """ + Propagate the device options from net to operators. + + Args: + net_def: A caffe2_pb2.NetDef representing a computation graph. The graph + consists of Caffe2 operators. + + Returns: + None. Iterates through all ops contained within the net. For each op, + modifies the op device_option in-place to be the net device_option + if the op has no pre-existing device_option, and leaves the op as-is + if it already has a device_option. 
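+
+        For example (hypothetical net): if the net's device_option specifies
+        CUDA device 0, an op without a device_option inherits CUDA device 0,
+        while an op that already specifies CPU keeps CPU.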
+ """ + if not net_def.HasField("device_option"): + return + for op in net_def.op: + if not op.HasField("device_option"): + op.device_option.CopyFrom(net_def.device_option) + + +def _try_get_shapes(nets): + """ + Get missing shapes for all blobs contained in the nets. + + Args: + nets: List of core.Net to extract blob shape information from. + + Returns: + Dictionary containing blob name to shape/dimensions mapping. The net + is a computation graph that is composed of operators, and the + operators have input and output blobs, each with their own dims. + """ + try: + # Note: this will inspect the workspace for better or worse. + # We don't care about the types, only the shapes + shapes, _ = workspace.InferShapesAndTypes(nets) + return shapes + except Exception as e: + log.warning("Failed to compute shapes: %s", e) + return {} + + +def model_to_graph_def(model, **kwargs): + """ + Convert a Caffe2 model to a Tensorflow graph. + + This function extracts 'param_init_net' and 'net' from the model and passes it to nets_to_graph() + for further processing. + + Args: + model (cnn.CNNModelHelper, model_helper.ModelHelper): The model to + extract the nets (instances of core.Net) from. + + Returns: + Call to nets_to_graph_def() with extracted 'param_init_net', 'net' and + **kwargs. See _operators_to_graph_def for detailed **kwargs. + """ + nets = [model.param_init_net, model.net] + return nets_to_graph_def(nets, **kwargs) + + +def nets_to_graph_def(nets, shapes=None, **kwargs): + """ + Convert a set of Caffe2 nets to a Tensorflow graph. + + Args: + nets: List of core.Nets. core.Net is a wrapper around a NetDef protobuf. + The corresponding protobuf can be extracted using .Proto(). + shapes: Dictionary mapping blob names to their shapes/dimensions. + + Returns: + Call to protos_to_graph_def() with the extracted NetDef protobufs and + **kwargs. See _operators_to_graph_def for detailed **kwargs. + """ + # if shapes is None: + # shapes = _try_get_shapes(nets) + # _try_get_shapes(nets) depends on workspace.InferShapesAndTypes(nets), + # which is currently broken (segfault). We omit the shapes for now. + shapes = {} + nets = [copy.deepcopy(net.Proto()) for net in nets] + shapes = copy.deepcopy(shapes) + return protos_to_graph_def(nets, shapes, **kwargs) + + +def protos_to_graph_def(net_defs, shapes=None, **kwargs): + """ + Convert a set of Caffe2 net definitions to a Tensorflow graph. + + Args: + net_defs: List of caffe2_pb2.NetDef protobufs representing computation + graphs. + shapes: Dictionary mapping blob names to their shapes/dimensions. + + Returns: + Call to _operators_to_graph_def() with the extracted operators from the + NetDefs and **kwargs. See _operators_to_graph_def for detailed + **kwargs. + """ + for net in net_defs: + _propagate_device_option(net) + shapes = copy.deepcopy(shapes or {}) + ops = [op for net_def in net_defs for op in net_def.op] + return _operators_to_graph_def(shapes, ops, **kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_convert_np.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_convert_np.py new file mode 100644 index 0000000000000000000000000000000000000000..3a4eff542e35459d6e4ba5bd27bde94455b62247 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_convert_np.py @@ -0,0 +1,40 @@ +"""This module converts objects into numpy array.""" +import numpy as np +import torch + + +def make_np(x): + """ + Convert an object into numpy array. 
+ + Args: + x: An instance of torch tensor or caffe blob name + + Returns: + numpy.array: Numpy array + """ + if isinstance(x, np.ndarray): + return x + if isinstance(x, str): # Caffe2 will pass name of blob(s) to fetch + return _prepare_caffe2(x) + if np.isscalar(x): + return np.array([x]) + if isinstance(x, torch.Tensor): + return _prepare_pytorch(x) + raise NotImplementedError( + f"Got {type(x)}, but numpy array, torch tensor, or caffe2 blob name are expected." + ) + + +def _prepare_pytorch(x): + if x.dtype == torch.bfloat16: + x = x.to(torch.float16) + x = x.detach().cpu().numpy() + return x + + +def _prepare_caffe2(x): + from caffe2.python import workspace + + x = workspace.FetchBlob(x) + return x diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_embedding.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..afbe68191aa98fcbbbfe1395c7c325e4c6754f9c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_embedding.py @@ -0,0 +1,85 @@ +import math +import numpy as np +from ._convert_np import make_np +from ._utils import make_grid +from tensorboard.compat import tf +from tensorboard.plugins.projector.projector_config_pb2 import EmbeddingInfo + + +_HAS_GFILE_JOIN = hasattr(tf.io.gfile, "join") + + +def _gfile_join(a, b): + # The join API is different between tensorboard's TF stub and TF: + # https://github.com/tensorflow/tensorboard/issues/6080 + # We need to try both because `tf` may point to either the stub or the real TF. + if _HAS_GFILE_JOIN: + return tf.io.gfile.join(a, b) + else: + fs = tf.io.gfile.get_filesystem(a) + return fs.join(a, b) + + +def make_tsv(metadata, save_path, metadata_header=None): + if not metadata_header: + metadata = [str(x) for x in metadata] + else: + assert len(metadata_header) == len( + metadata[0] + ), "len of header must be equal to the number of columns in metadata" + metadata = ["\t".join(str(e) for e in l) for l in [metadata_header] + metadata] + + metadata_bytes = tf.compat.as_bytes("\n".join(metadata) + "\n") + with tf.io.gfile.GFile(_gfile_join(save_path, "metadata.tsv"), "wb") as f: + f.write(metadata_bytes) + + +# https://github.com/tensorflow/tensorboard/issues/44 image label will be squared +def make_sprite(label_img, save_path): + from PIL import Image + from io import BytesIO + + # this ensures the sprite image has correct dimension as described in + # https://www.tensorflow.org/get_started/embedding_viz + nrow = int(math.ceil((label_img.size(0)) ** 0.5)) + arranged_img_CHW = make_grid(make_np(label_img), ncols=nrow) + + # augment images so that #images equals nrow*nrow + arranged_augment_square_HWC = np.zeros( + (arranged_img_CHW.shape[2], arranged_img_CHW.shape[2], 3) + ) + arranged_img_HWC = arranged_img_CHW.transpose(1, 2, 0) # chw -> hwc + arranged_augment_square_HWC[: arranged_img_HWC.shape[0], :, :] = arranged_img_HWC + im = Image.fromarray(np.uint8((arranged_augment_square_HWC * 255).clip(0, 255))) + + with BytesIO() as buf: + im.save(buf, format="PNG") + im_bytes = buf.getvalue() + + with tf.io.gfile.GFile(_gfile_join(save_path, "sprite.png"), "wb") as f: + f.write(im_bytes) + + +def get_embedding_info(metadata, label_img, subdir, global_step, tag): + info = EmbeddingInfo() + info.tensor_name = f"{tag}:{str(global_step).zfill(5)}" + info.tensor_path = _gfile_join(subdir, "tensors.tsv") + if metadata is not None: + info.metadata_path = _gfile_join(subdir, "metadata.tsv") + if 
label_img is not None: + info.sprite.image_path = _gfile_join(subdir, "sprite.png") + info.sprite.single_image_dim.extend([label_img.size(3), label_img.size(2)]) + return info + + +def write_pbtxt(save_path, contents): + config_path = _gfile_join(save_path, "projector_config.pbtxt") + with tf.io.gfile.GFile(config_path, "wb") as f: + f.write(tf.compat.as_bytes(contents)) + + +def make_mat(matlist, save_path): + with tf.io.gfile.GFile(_gfile_join(save_path, "tensors.tsv"), "wb") as f: + for x in matlist: + x = [str(i.item()) for i in x] + f.write(tf.compat.as_bytes("\t".join(x) + "\n")) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_onnx_graph.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_onnx_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..5c923fcb0ee569dacd74db14e448d0a6bf2135af --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_onnx_graph.py @@ -0,0 +1,62 @@ +from tensorboard.compat.proto.graph_pb2 import GraphDef +from tensorboard.compat.proto.node_def_pb2 import NodeDef +from tensorboard.compat.proto.versions_pb2 import VersionDef +from tensorboard.compat.proto.attr_value_pb2 import AttrValue +from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto + + +def load_onnx_graph(fname): + import onnx + + m = onnx.load(fname) # type: ignore[attr-defined] + g = m.graph + return parse(g) + + +def parse(graph): + nodes = [] + import itertools + + nodes_proto = list(itertools.chain(graph.input, graph.output)) + + for node in nodes_proto: + print(node.name) + shapeproto = TensorShapeProto( + dim=[ + TensorShapeProto.Dim(size=d.dim_value) + for d in node.type.tensor_type.shape.dim + ] + ) + nodes.append( + NodeDef( + name=node.name.encode(encoding="utf_8"), + op="Variable", + input=[], + attr={ + "dtype": AttrValue(type=node.type.tensor_type.elem_type), + "shape": AttrValue(shape=shapeproto), + }, + ) + ) + + for node in graph.node: + _attr = [] + for s in node.attribute: + _attr.append(" = ".join([str(f[1]) for f in s.ListFields()])) + attr = ", ".join(_attr).encode(encoding="utf_8") + print(node.output[0]) + nodes.append( + NodeDef( + name=node.output[0].encode(encoding="utf_8"), + op=node.op_type, + input=node.input, + attr={"parameters": AttrValue(s=attr)}, + ) + ) + + # two pass token replacement, appends opname to object id + mapping = {} + for node in nodes: + mapping[node.name] = node.op + "_" + node.name + + return GraphDef(node=nodes, versions=VersionDef(producer=22)) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_proto_graph.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_proto_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..3c0d15723d24586dd04a831d08cc04cbf93ed51d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_proto_graph.py @@ -0,0 +1,53 @@ +from typing import Optional +from tensorboard.compat.proto.node_def_pb2 import NodeDef +from tensorboard.compat.proto.attr_value_pb2 import AttrValue +from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto + + +def attr_value_proto(dtype, shape, s): + """Create a dict of objects matching a NodeDef's attr field. + + Follows https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/attr_value.proto + specifically designed for a NodeDef. The values have been reverse engineered from + standard TensorBoard logged data. 
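+
+    For example (illustrative call), attr_value_proto(None, [1, 3, 224, 224], "stride=2")
+    returns {"attr": AttrValue(s=b"stride=2"), "_output_shapes": AttrValue(...)};
+    note that the dtype argument is not used by the code below.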
+ """ + attr = {} + if s is not None: + attr["attr"] = AttrValue(s=s.encode(encoding="utf_8")) + if shape is not None: + shapeproto = tensor_shape_proto(shape) + attr["_output_shapes"] = AttrValue(list=AttrValue.ListValue(shape=[shapeproto])) + return attr + + +def tensor_shape_proto(outputsize): + """Create an object matching a tensor_shape field. + + Follows https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/tensor_shape.proto . + """ + return TensorShapeProto(dim=[TensorShapeProto.Dim(size=d) for d in outputsize]) + + +def node_proto( + name, + op="UnSpecified", + input=None, + dtype=None, + shape: Optional[tuple] = None, + outputsize=None, + attributes="", +): + """Create an object matching a NodeDef. + + Follows https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/node_def.proto . + """ + if input is None: + input = [] + if not isinstance(input, list): + input = [input] + return NodeDef( + name=name.encode(encoding="utf_8"), + op=op, + input=input, + attr=attr_value_proto(dtype, outputsize, attributes), + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_pytorch_graph.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_pytorch_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..f4274199ffd33ce60792608620973e4b9ca75975 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_pytorch_graph.py @@ -0,0 +1,380 @@ +from collections import OrderedDict +import contextlib +from typing import Dict, Any + +from tensorboard.compat.proto.config_pb2 import RunMetadata +from tensorboard.compat.proto.graph_pb2 import GraphDef +from tensorboard.compat.proto.step_stats_pb2 import StepStats, DeviceStepStats +from tensorboard.compat.proto.versions_pb2 import VersionDef + +import torch +from ._proto_graph import node_proto + +methods_OP = [ + "attributeNames", + "hasMultipleOutputs", + "hasUses", + "inputs", + "kind", + "outputs", + "outputsSize", + "scopeName", +] +# Some additional methods to explure for methods_IO are +# +# 'unique' (type int) +# 'type' (type >) +# +# But the below are sufficient for now. 
+methods_IO = ["node", "offset", "debugName"] + +GETATTR_KIND = "prim::GetAttr" +CLASSTYPE_KIND = "ClassType" + + +class NodeBase: + def __init__( + self, + debugName=None, + inputs=None, + scope=None, + tensor_size=None, + op_type="UnSpecified", + attributes="", + ): + # TODO; Specify a __slots__ for this class or potentially + # used namedtuple instead + self.debugName = debugName + self.inputs = inputs + self.tensor_size = tensor_size + self.kind = op_type + self.attributes = attributes + self.scope = scope + + def __repr__(self): + repr = [] + repr.append(str(type(self))) + for m in dir(self): + if "__" not in m: + repr.append( + m + ": " + str(getattr(self, m)) + str(type(getattr(self, m))) + ) + return "\n".join(repr) + "\n\n" + + +class NodePy(NodeBase): + def __init__(self, node_cpp, valid_methods): + super().__init__(node_cpp) + valid_methods = valid_methods[:] + self.inputs = [] + + for m in valid_methods: + if m == "inputs" or m == "outputs": + list_of_node = list(getattr(node_cpp, m)()) + io_unique_names = [] + io_tensor_sizes = [] + for n in list_of_node: + io_unique_names.append(n.debugName()) + if n.isCompleteTensor(): + io_tensor_sizes.append(n.type().sizes()) + else: + io_tensor_sizes.append(None) + + setattr(self, m, io_unique_names) + setattr(self, m + "tensor_size", io_tensor_sizes) + + else: + setattr(self, m, getattr(node_cpp, m)()) + + +class NodePyIO(NodePy): + def __init__(self, node_cpp, input_or_output=None): + super().__init__(node_cpp, methods_IO) + try: + tensor_size = node_cpp.type().sizes() + except RuntimeError: + tensor_size = [ + 1, + ] # fail when constant model is used. + self.tensor_size = tensor_size + # Kind attribute string is purely descriptive and will be shown + # in detailed information for the node in TensorBoard's graph plugin. + # + # NodePyOP nodes get this from their kind() method. + self.kind = "Parameter" + if input_or_output: + self.input_or_output = input_or_output + self.kind = "IO Node" + + +class NodePyOP(NodePy): + def __init__(self, node_cpp): + super().__init__(node_cpp, methods_OP) + # Replace single quote which causes strange behavior in TensorBoard + # TODO: See if we can remove this in the future + self.attributes = str( + {k: _node_get(node_cpp, k) for k in node_cpp.attributeNames()} + ).replace("'", " ") + self.kind = node_cpp.kind() + + +class GraphPy: + """Helper class to convert torch.nn.Module to GraphDef proto and visualization with TensorBoard. + + GraphDef generation operates in two passes: + + In the first pass, all nodes are read and saved to two lists. + One list is for input/output nodes (nodes_io), which only have inbound + or outbound connections, but not both. Another list is for internal + operator nodes (nodes_op). The first pass also saves all scope name + appeared in the nodes in scope_name_appeared list for later processing. + + In the second pass, scope names are fully applied to all nodes. + debugNameToScopedName is a mapping from a node's ID to its fully qualified + scope name. e.g. Net1/Linear[0]/1. Unfortunately torch.jit doesn't have + totally correct scope output, so this is nontrivial. The function + populate_namespace_from_OP_to_IO and find_common_root are used to + assign scope name to a node based on the connection between nodes + in a heuristic kind of way. Bookkeeping is done with shallowest_scope_name + and scope_name_appeared. 
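+
+    A rough sketch of the intended usage (method names as defined below):
+    append() NodePyOP/NodePyIO objects while walking the JIT graph, call
+    populate_namespace_from_OP_to_IO() to resolve scoped names, then call
+    to_proto() to obtain the list of NodeDef protos.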
+ """ + + def __init__(self): + self.nodes_op = [] + self.nodes_io = OrderedDict() + self.unique_name_to_scoped_name = {} + self.shallowest_scope_name = "default" + self.scope_name_appeared = [] + + def append(self, x): + if isinstance(x, NodePyIO): + self.nodes_io[x.debugName] = x + if isinstance(x, NodePyOP): + self.nodes_op.append(x) + + def printall(self): + print("all nodes") + for node in self.nodes_op: + print(node) + for key in self.nodes_io: + print(self.nodes_io[key]) + + def find_common_root(self): + for fullscope in self.scope_name_appeared: + if fullscope: + self.shallowest_scope_name = fullscope.split("/")[0] + + def populate_namespace_from_OP_to_IO(self): + for node in self.nodes_op: + for node_output, outputSize in zip(node.outputs, node.outputstensor_size): + self.scope_name_appeared.append(node.scopeName) + self.nodes_io[node_output] = NodeBase( + node_output, + node.inputs, + node.scopeName, + outputSize, + op_type=node.kind, + attributes=node.attributes, + ) + + self.find_common_root() + + for node in self.nodes_op: + for input_node_id in node.inputs: + self.unique_name_to_scoped_name[input_node_id] = ( + node.scopeName + "/" + input_node_id + ) + + for key, node in self.nodes_io.items(): + if type(node) == NodeBase: + self.unique_name_to_scoped_name[key] = node.scope + "/" + node.debugName + if hasattr(node, "input_or_output"): + self.unique_name_to_scoped_name[key] = ( + node.input_or_output + "/" + node.debugName + ) + + if hasattr(node, "scope") and node.scope is not None: + self.unique_name_to_scoped_name[key] = node.scope + "/" + node.debugName + if node.scope == "" and self.shallowest_scope_name: + self.unique_name_to_scoped_name[node.debugName] = ( + self.shallowest_scope_name + "/" + node.debugName + ) + + # replace name + for key, node in self.nodes_io.items(): + self.nodes_io[key].inputs = [ + self.unique_name_to_scoped_name[node_input_id] + for node_input_id in node.inputs + ] + if node.debugName in self.unique_name_to_scoped_name: + self.nodes_io[key].debugName = self.unique_name_to_scoped_name[ + node.debugName + ] + + def to_proto(self): + """Convert graph representation of GraphPy object to TensorBoard required format.""" + # TODO: compute correct memory usage and CPU time once + # PyTorch supports it + nodes = [] + for v in self.nodes_io.values(): + nodes.append( + node_proto( + v.debugName, + input=v.inputs, + outputsize=v.tensor_size, + op=v.kind, + attributes=v.attributes, + ) + ) + return nodes + + +def parse(graph, trace, args=None, omit_useless_nodes=True): + """Parse an optimized PyTorch model graph and produces a list of nodes and node stats. + + Useful for eventual conversion to TensorBoard protobuf format. + + Args: + graph (PyTorch module): The model graph to be parsed. + trace (PyTorch JIT TracedModule): The model trace to be parsed. + args (tuple): input tensor[s] for the model. + omit_useless_nodes (boolean): Whether to remove nodes from the graph. 
+ """ + n_inputs = len(args) + + scope = {} + nodes_py = GraphPy() + for node in graph.inputs(): + if omit_useless_nodes: + if ( + len(node.uses()) == 0 + ): # number of user of the node (= number of outputs/ fanout) + continue + + if node.type().kind() != CLASSTYPE_KIND: + nodes_py.append(NodePyIO(node, "input")) + + attr_to_scope: Dict[Any, str] = {} + for node in graph.nodes(): + if node.kind() == GETATTR_KIND: + attr_name = node.s("name") + attr_key = node.output().debugName() + parent = node.input().node() + if ( + parent.kind() == GETATTR_KIND + ): # If the parent node is not the top-level "self" node + parent_attr_name = parent.s("name") + parent_attr_key = parent.output().debugName() + parent_scope = attr_to_scope[parent_attr_key] + attr_scope = parent_scope.split("/")[-1] + attr_to_scope[attr_key] = f"{parent_scope}/{attr_scope}.{attr_name}" + else: + attr_to_scope[attr_key] = f"__module.{attr_name}" + # We don't need classtype nodes; scope will provide this information + if node.output().type().kind() != CLASSTYPE_KIND: + node_py = NodePyOP(node) + node_py.scopeName = attr_to_scope[attr_key] # type: ignore[attr-defined] + nodes_py.append(node_py) + else: + nodes_py.append(NodePyOP(node)) + + for i, node in enumerate(graph.outputs()): # Create sink nodes for output ops + node_pyio = NodePyIO(node, "output") + node_pyio.debugName = f"output.{i + 1}" + node_pyio.inputs = [node.debugName()] + nodes_py.append(node_pyio) + + def parse_traced_name(module): + if isinstance(module, torch.jit.TracedModule): + module_name = module._name + else: + module_name = getattr(module, "original_name", "Module") + return module_name + + alias_to_name = {} + base_name = parse_traced_name(trace) + for name, module in trace.named_modules(prefix="__module"): + mod_name = parse_traced_name(module) + attr_name = name.split(".")[-1] + alias_to_name[name] = f"{mod_name}[{attr_name}]" + + for node in nodes_py.nodes_op: + module_aliases = node.scopeName.split("/") + replacements = [ + alias_to_name[alias] if alias in alias_to_name else alias.split(".")[-1] + for alias in module_aliases + ] + node.scopeName = base_name + if any(replacements): + node.scopeName += "/" + "/".join(replacements) + + nodes_py.populate_namespace_from_OP_to_IO() + return nodes_py.to_proto() + + +def graph(model, args, verbose=False, use_strict_trace=True): + """ + Process a PyTorch model and produces a `GraphDef` proto that can be logged to TensorBoard. + + Args: + model (PyTorch module): The model to be parsed. + args (tuple): input tensor[s] for the model. + verbose (bool): Whether to print out verbose information while + processing. + use_strict_trace (bool): Whether to pass keyword argument `strict` to + `torch.jit.trace`. Pass False when you want the tracer to + record your mutable container types (list, dict) + """ + with _set_model_to_eval(model): + try: + trace = torch.jit.trace(model, args, strict=use_strict_trace) + graph = trace.graph + torch._C._jit_pass_inline(graph) + except RuntimeError as e: + print(e) + print("Error occurs, No graph saved") + raise e + + if verbose: + print(graph) + list_of_nodes = parse(graph, trace, args) + # We are hardcoding that this was run on CPU even though it might have actually + # run on GPU. Note this is what is shown in TensorBoard and has no bearing + # on actual execution. + # TODO: See if we can extract GPU vs CPU information from the PyTorch model + # and pass it correctly to TensorBoard. 
+    #
+    # Definition of StepStats and DeviceStepStats can be found at
+    # https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/graph/tf_graph_common/test/graph-test.ts
+    # and
+    # https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/step_stats.proto
+    stepstats = RunMetadata(
+        step_stats=StepStats(dev_stats=[DeviceStepStats(device="/device:CPU:0")])
+    )
+    return GraphDef(node=list_of_nodes, versions=VersionDef(producer=22)), stepstats
+    # The producer version has been reverse engineered from standard
+    # TensorBoard logged data.
+
+
+@contextlib.contextmanager
+def _set_model_to_eval(model):
+    """Context manager to temporarily set the training mode of ``model`` to eval."""
+    if not isinstance(model, torch.jit.ScriptFunction):
+        originally_training = model.training
+        model.train(False)
+        try:
+            yield
+        finally:
+            model.train(originally_training)
+    else:
+        # Do nothing for ScriptFunction
+        try:
+            yield
+        finally:
+            pass
+
+
+def _node_get(node: torch._C.Node, key: str):
+    """Get attributes of a node which is polymorphic over return type."""
+    sel = node.kindOf(key)
+    return getattr(node, sel)(key)
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f79f59749f534d47ceae2b26900569f9be9d4c79
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/_utils.py
@@ -0,0 +1,125 @@
+import numpy as np
+
+
+# Functions for converting
+def figure_to_image(figures, close=True):
+    """Render matplotlib figure to numpy format.
+
+    Note that this requires the ``matplotlib`` package.
+
+    Args:
+        figures (matplotlib.pyplot.figure or list of figures): figure or a list of figures
+        close (bool): Flag to automatically close the figure
+
+    Returns:
+        numpy.array: image in [CHW] order
+    """
+    import matplotlib.pyplot as plt
+    import matplotlib.backends.backend_agg as plt_backend_agg
+
+    def render_to_rgb(figure):
+        canvas = plt_backend_agg.FigureCanvasAgg(figure)
+        canvas.draw()
+        data: np.ndarray = np.frombuffer(canvas.buffer_rgba(), dtype=np.uint8)
+        w, h = figure.canvas.get_width_height()
+        image_hwc = data.reshape([h, w, 4])[:, :, 0:3]
+        image_chw = np.moveaxis(image_hwc, source=2, destination=0)
+        if close:
+            plt.close(figure)
+        return image_chw
+
+    if isinstance(figures, list):
+        images = [render_to_rgb(figure) for figure in figures]
+        return np.stack(images)
+    else:
+        image = render_to_rgb(figures)
+        return image
+
+
+def _prepare_video(V):
+    """
+    Convert a 5D tensor into a 4D tensor.
+
+    Conversion is done from [batchsize, time(frame), channel(color), height, width] (5D tensor)
+    to [time(frame), new_width, new_height, channel] (4D tensor).
+
+    A batch of images is spread out into a grid, which forms a single frame.
+    e.g. a video with batchsize 16 will have a 4x4 grid.
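+
+    For example (illustrative shapes): an input of shape (16, 30, 3, 28, 28)
+    needs no padding and comes out with shape (30, 112, 112, 3), i.e. 30 frames,
+    each a 4x4 grid of 28x28 images.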
+ """ + b, t, c, h, w = V.shape + + if V.dtype == np.uint8: + V = np.float32(V) / 255.0 + + def is_power2(num): + return num != 0 and ((num & (num - 1)) == 0) + + # pad to nearest power of 2, all at once + if not is_power2(V.shape[0]): + len_addition = int(2 ** V.shape[0].bit_length() - V.shape[0]) + V = np.concatenate((V, np.zeros(shape=(len_addition, t, c, h, w))), axis=0) + + n_rows = 2 ** ((b.bit_length() - 1) // 2) + n_cols = V.shape[0] // n_rows + + V = np.reshape(V, newshape=(n_rows, n_cols, t, c, h, w)) + V = np.transpose(V, axes=(2, 0, 4, 1, 5, 3)) + V = np.reshape(V, newshape=(t, n_rows * h, n_cols * w, c)) + + return V + + +def make_grid(I, ncols=8): + # I: N1HW or N3HW + assert isinstance(I, np.ndarray), "plugin error, should pass numpy array here" + if I.shape[1] == 1: + I = np.concatenate([I, I, I], 1) + assert I.ndim == 4 and I.shape[1] == 3 + nimg = I.shape[0] + H = I.shape[2] + W = I.shape[3] + ncols = min(nimg, ncols) + nrows = int(np.ceil(float(nimg) / ncols)) + canvas = np.zeros((3, H * nrows, W * ncols), dtype=I.dtype) + i = 0 + for y in range(nrows): + for x in range(ncols): + if i >= nimg: + break + canvas[:, y * H : (y + 1) * H, x * W : (x + 1) * W] = I[i] + i = i + 1 + return canvas + + # if modality == 'IMG': + # if x.dtype == np.uint8: + # x = x.astype(np.float32) / 255.0 + + +def convert_to_HWC(tensor, input_format): # tensor: numpy array + assert len(set(input_format)) == len( + input_format + ), f"You can not use the same dimension shordhand twice. input_format: {input_format}" + assert len(tensor.shape) == len( + input_format + ), f"size of input tensor and input format are different. \ + tensor shape: {tensor.shape}, input_format: {input_format}" + input_format = input_format.upper() + + if len(input_format) == 4: + index = [input_format.find(c) for c in "NCHW"] + tensor_NCHW = tensor.transpose(index) + tensor_CHW = make_grid(tensor_NCHW) + return tensor_CHW.transpose(1, 2, 0) + + if len(input_format) == 3: + index = [input_format.find(c) for c in "HWC"] + tensor_HWC = tensor.transpose(index) + if tensor_HWC.shape[2] == 1: + tensor_HWC = np.concatenate([tensor_HWC, tensor_HWC, tensor_HWC], 2) + return tensor_HWC + + if len(input_format) == 2: + index = [input_format.find(c) for c in "HW"] + tensor = tensor.transpose(index) + tensor = np.stack([tensor, tensor, tensor], 2) + return tensor diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/summary.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/summary.py new file mode 100644 index 0000000000000000000000000000000000000000..8211f6e0c80241bebacd19ea5824c5b338018849 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/summary.py @@ -0,0 +1,983 @@ +import json +import logging +import os +import struct + +from typing import Any, List, Optional + +import torch +import numpy as np + +from google.protobuf import struct_pb2 + +from tensorboard.compat.proto.summary_pb2 import ( + HistogramProto, + Summary, + SummaryMetadata, +) +from tensorboard.compat.proto.tensor_pb2 import TensorProto +from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto +from tensorboard.plugins.custom_scalar import layout_pb2 +from tensorboard.plugins.pr_curve.plugin_data_pb2 import PrCurvePluginData +from tensorboard.plugins.text.plugin_data_pb2 import TextPluginData + +from ._convert_np import make_np +from ._utils import _prepare_video, convert_to_HWC + +__all__ = [ + "half_to_int", + "int_to_half", + "hparams", + "scalar", + "histogram_raw", + 
"histogram", + "make_histogram", + "image", + "image_boxes", + "draw_boxes", + "make_image", + "video", + "make_video", + "audio", + "custom_scalars", + "text", + "tensor_proto", + "pr_curve_raw", + "pr_curve", + "compute_curve", + "mesh", +] + +logger = logging.getLogger(__name__) + +def half_to_int(f: float) -> int: + """Casts a half-precision float value into an integer. + + Converts a half precision floating point value, such as `torch.half` or + `torch.bfloat16`, into an integer value which can be written into the + half_val field of a TensorProto for storage. + + To undo the effects of this conversion, use int_to_half(). + + """ + buf = struct.pack("f", f) + return struct.unpack("i", buf)[0] + +def int_to_half(i: int) -> float: + """Casts an integer value to a half-precision float. + + Converts an integer value obtained from half_to_int back into a floating + point value. + + """ + buf = struct.pack("i", i) + return struct.unpack("f", buf)[0] + +def _tensor_to_half_val(t: torch.Tensor) -> List[int]: + return [half_to_int(x) for x in t.flatten().tolist()] + +def _tensor_to_complex_val(t: torch.Tensor) -> List[float]: + return torch.view_as_real(t).flatten().tolist() + +def _tensor_to_list(t: torch.Tensor) -> List[Any]: + return t.flatten().tolist() + +# type maps: torch.Tensor type -> (protobuf type, protobuf val field) +_TENSOR_TYPE_MAP = { + torch.half: ("DT_HALF", "half_val", _tensor_to_half_val), + torch.float16: ("DT_HALF", "half_val", _tensor_to_half_val), + torch.bfloat16: ("DT_BFLOAT16", "half_val", _tensor_to_half_val), + torch.float32: ("DT_FLOAT", "float_val", _tensor_to_list), + torch.float: ("DT_FLOAT", "float_val", _tensor_to_list), + torch.float64: ("DT_DOUBLE", "double_val", _tensor_to_list), + torch.double: ("DT_DOUBLE", "double_val", _tensor_to_list), + torch.int8: ("DT_INT8", "int_val", _tensor_to_list), + torch.uint8: ("DT_UINT8", "int_val", _tensor_to_list), + torch.qint8: ("DT_UINT8", "int_val", _tensor_to_list), + torch.int16: ("DT_INT16", "int_val", _tensor_to_list), + torch.short: ("DT_INT16", "int_val", _tensor_to_list), + torch.int: ("DT_INT32", "int_val", _tensor_to_list), + torch.int32: ("DT_INT32", "int_val", _tensor_to_list), + torch.qint32: ("DT_INT32", "int_val", _tensor_to_list), + torch.int64: ("DT_INT64", "int64_val", _tensor_to_list), + torch.complex32: ("DT_COMPLEX32", "scomplex_val", _tensor_to_complex_val), + torch.chalf: ("DT_COMPLEX32", "scomplex_val", _tensor_to_complex_val), + torch.complex64: ("DT_COMPLEX64", "scomplex_val", _tensor_to_complex_val), + torch.cfloat: ("DT_COMPLEX64", "scomplex_val", _tensor_to_complex_val), + torch.bool: ("DT_BOOL", "bool_val", _tensor_to_list), + torch.complex128: ("DT_COMPLEX128", "dcomplex_val", _tensor_to_complex_val), + torch.cdouble: ("DT_COMPLEX128", "dcomplex_val", _tensor_to_complex_val), + torch.uint8: ("DT_UINT8", "uint32_val", _tensor_to_list), + torch.quint8: ("DT_UINT8", "uint32_val", _tensor_to_list), + torch.quint4x2: ("DT_UINT8", "uint32_val", _tensor_to_list), +} + + +def _calc_scale_factor(tensor): + converted = tensor.numpy() if not isinstance(tensor, np.ndarray) else tensor + return 1 if converted.dtype == np.uint8 else 255 + + +def _draw_single_box( + image, + xmin, + ymin, + xmax, + ymax, + display_str, + color="black", + color_text="black", + thickness=2, +): + from PIL import ImageDraw, ImageFont + + font = ImageFont.load_default() + draw = ImageDraw.Draw(image) + (left, right, top, bottom) = (xmin, xmax, ymin, ymax) + draw.line( + [(left, top), (left, bottom), (right, bottom), 
(right, top), (left, top)], + width=thickness, + fill=color, + ) + if display_str: + text_bottom = bottom + # Reverse list and print from bottom to top. + text_width, text_height = font.getsize(display_str) + margin = np.ceil(0.05 * text_height) + draw.rectangle( + [ + (left, text_bottom - text_height - 2 * margin), + (left + text_width, text_bottom), + ], + fill=color, + ) + draw.text( + (left + margin, text_bottom - text_height - margin), + display_str, + fill=color_text, + font=font, + ) + return image + + +def hparams(hparam_dict=None, metric_dict=None, hparam_domain_discrete=None): + """Output three `Summary` protocol buffers needed by hparams plugin. + + `Experiment` keeps the metadata of an experiment, such as the name of the + hyperparameters and the name of the metrics. + `SessionStartInfo` keeps key-value pairs of the hyperparameters + `SessionEndInfo` describes status of the experiment e.g. STATUS_SUCCESS + + Args: + hparam_dict: A dictionary that contains names of the hyperparameters + and their values. + metric_dict: A dictionary that contains names of the metrics + and their values. + hparam_domain_discrete: (Optional[Dict[str, List[Any]]]) A dictionary that + contains names of the hyperparameters and all discrete values they can hold + + Returns: + The `Summary` protobufs for Experiment, SessionStartInfo and + SessionEndInfo + """ + import torch + from tensorboard.plugins.hparams.api_pb2 import ( + DataType, + Experiment, + HParamInfo, + MetricInfo, + MetricName, + Status, + ) + from tensorboard.plugins.hparams.metadata import ( + EXPERIMENT_TAG, + PLUGIN_DATA_VERSION, + PLUGIN_NAME, + SESSION_END_INFO_TAG, + SESSION_START_INFO_TAG, + ) + from tensorboard.plugins.hparams.plugin_data_pb2 import ( + HParamsPluginData, + SessionEndInfo, + SessionStartInfo, + ) + + # TODO: expose other parameters in the future. + # hp = HParamInfo(name='lr',display_name='learning rate', + # type=DataType.DATA_TYPE_FLOAT64, domain_interval=Interval(min_value=10, + # max_value=100)) + # mt = MetricInfo(name=MetricName(tag='accuracy'), display_name='accuracy', + # description='', dataset_type=DatasetType.DATASET_VALIDATION) + # exp = Experiment(name='123', description='456', time_created_secs=100.0, + # hparam_infos=[hp], metric_infos=[mt], user='tw') + + if not isinstance(hparam_dict, dict): + logger.warning("parameter: hparam_dict should be a dictionary, nothing logged.") + raise TypeError( + "parameter: hparam_dict should be a dictionary, nothing logged." + ) + if not isinstance(metric_dict, dict): + logger.warning("parameter: metric_dict should be a dictionary, nothing logged.") + raise TypeError( + "parameter: metric_dict should be a dictionary, nothing logged." + ) + + hparam_domain_discrete = hparam_domain_discrete or {} + if not isinstance(hparam_domain_discrete, dict): + raise TypeError( + "parameter: hparam_domain_discrete should be a dictionary, nothing logged." + ) + for k, v in hparam_domain_discrete.items(): + if ( + k not in hparam_dict + or not isinstance(v, list) + or not all(isinstance(d, type(hparam_dict[k])) for d in v) + ): + raise TypeError( + f"parameter: hparam_domain_discrete[{k}] should be a list of same type as hparam_dict[{k}]." 
+ ) + hps = [] + + ssi = SessionStartInfo() + for k, v in hparam_dict.items(): + if v is None: + continue + if isinstance(v, (int, float)): + ssi.hparams[k].number_value = v + + if k in hparam_domain_discrete: + domain_discrete: Optional[struct_pb2.ListValue] = struct_pb2.ListValue( + values=[ + struct_pb2.Value(number_value=d) + for d in hparam_domain_discrete[k] + ] + ) + else: + domain_discrete = None + + hps.append( + HParamInfo( + name=k, + type=DataType.Value("DATA_TYPE_FLOAT64"), + domain_discrete=domain_discrete, + ) + ) + continue + + if isinstance(v, str): + ssi.hparams[k].string_value = v + + if k in hparam_domain_discrete: + domain_discrete = struct_pb2.ListValue( + values=[ + struct_pb2.Value(string_value=d) + for d in hparam_domain_discrete[k] + ] + ) + else: + domain_discrete = None + + hps.append( + HParamInfo( + name=k, + type=DataType.Value("DATA_TYPE_STRING"), + domain_discrete=domain_discrete, + ) + ) + continue + + if isinstance(v, bool): + ssi.hparams[k].bool_value = v + + if k in hparam_domain_discrete: + domain_discrete = struct_pb2.ListValue( + values=[ + struct_pb2.Value(bool_value=d) + for d in hparam_domain_discrete[k] + ] + ) + else: + domain_discrete = None + + hps.append( + HParamInfo( + name=k, + type=DataType.Value("DATA_TYPE_BOOL"), + domain_discrete=domain_discrete, + ) + ) + continue + + if isinstance(v, torch.Tensor): + v = make_np(v)[0] + ssi.hparams[k].number_value = v + hps.append(HParamInfo(name=k, type=DataType.Value("DATA_TYPE_FLOAT64"))) + continue + raise ValueError( + "value should be one of int, float, str, bool, or torch.Tensor" + ) + + content = HParamsPluginData(session_start_info=ssi, version=PLUGIN_DATA_VERSION) + smd = SummaryMetadata( + plugin_data=SummaryMetadata.PluginData( + plugin_name=PLUGIN_NAME, content=content.SerializeToString() + ) + ) + ssi = Summary(value=[Summary.Value(tag=SESSION_START_INFO_TAG, metadata=smd)]) + + mts = [MetricInfo(name=MetricName(tag=k)) for k in metric_dict.keys()] + + exp = Experiment(hparam_infos=hps, metric_infos=mts) + + content = HParamsPluginData(experiment=exp, version=PLUGIN_DATA_VERSION) + smd = SummaryMetadata( + plugin_data=SummaryMetadata.PluginData( + plugin_name=PLUGIN_NAME, content=content.SerializeToString() + ) + ) + exp = Summary(value=[Summary.Value(tag=EXPERIMENT_TAG, metadata=smd)]) + + sei = SessionEndInfo(status=Status.Value("STATUS_SUCCESS")) + content = HParamsPluginData(session_end_info=sei, version=PLUGIN_DATA_VERSION) + smd = SummaryMetadata( + plugin_data=SummaryMetadata.PluginData( + plugin_name=PLUGIN_NAME, content=content.SerializeToString() + ) + ) + sei = Summary(value=[Summary.Value(tag=SESSION_END_INFO_TAG, metadata=smd)]) + + return exp, ssi, sei + + +def scalar(name, tensor, collections=None, new_style=False, double_precision=False): + """Output a `Summary` protocol buffer containing a single scalar value. + + The generated Summary has a Tensor.proto containing the input Tensor. + Args: + name: A name for the generated node. Will also serve as the series name in + TensorBoard. + tensor: A real numeric Tensor containing a single value. + collections: Optional list of graph collections keys. The new summary op is + added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. + new_style: Whether to use new style (tensor field) or old style (simple_value + field). New style could lead to faster data loading. + Returns: + A scalar `Tensor` of type `string`. Which contains a `Summary` protobuf. + Raises: + ValueError: If tensor has the wrong shape or type. 
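+
+    Example (illustrative): scalar("loss", 0.5) yields a Summary with one value
+    tagged "loss" carrying simple_value=0.5; with new_style=True the value is
+    stored in the tensor field instead, optionally as DT_DOUBLE when
+    double_precision=True.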
+ """ + tensor = make_np(tensor).squeeze() + assert ( + tensor.ndim == 0 + ), f"Tensor should contain one element (0 dimensions). Was given size: {tensor.size} and {tensor.ndim} dimensions." + # python float is double precision in numpy + scalar = float(tensor) + if new_style: + tensor_proto = TensorProto(float_val=[scalar], dtype="DT_FLOAT") + if double_precision: + tensor_proto = TensorProto(double_val=[scalar], dtype="DT_DOUBLE") + + plugin_data = SummaryMetadata.PluginData(plugin_name="scalars") + smd = SummaryMetadata(plugin_data=plugin_data) + return Summary( + value=[ + Summary.Value( + tag=name, + tensor=tensor_proto, + metadata=smd, + ) + ] + ) + else: + return Summary(value=[Summary.Value(tag=name, simple_value=scalar)]) + + +def tensor_proto(tag, tensor): + """Outputs a `Summary` protocol buffer containing the full tensor. + The generated Summary has a Tensor.proto containing the input Tensor. + Args: + name: A name for the generated node. Will also serve as the series name in + TensorBoard. + tensor: Tensor to be converted to protobuf + Returns: + A tensor protobuf in a `Summary` protobuf. + Raises: + ValueError: If tensor is too big to be converted to protobuf, or + tensor data type is not supported + """ + if tensor.numel() * tensor.itemsize >= (1 << 31): + raise ValueError( + "tensor is bigger than protocol buffer's hard limit of 2GB in size" + ) + + if tensor.dtype in _TENSOR_TYPE_MAP: + dtype, field_name, conversion_fn = _TENSOR_TYPE_MAP[tensor.dtype] + tensor_proto = TensorProto( + **{ + "dtype": dtype, + "tensor_shape": TensorShapeProto( + dim=[TensorShapeProto.Dim(size=x) for x in tensor.shape] + ), + field_name: conversion_fn(tensor), + }, + ) + else: + raise ValueError(f"{tag} has unsupported tensor dtype {tensor.dtype}") + + plugin_data = SummaryMetadata.PluginData(plugin_name="tensor") + smd = SummaryMetadata(plugin_data=plugin_data) + return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor_proto)]) + + +def histogram_raw(name, min, max, num, sum, sum_squares, bucket_limits, bucket_counts): + # pylint: disable=line-too-long + """Output a `Summary` protocol buffer with a histogram. + + The generated + [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + has one summary value containing a histogram for `values`. + Args: + name: A name for the generated node. Will also serve as a series name in + TensorBoard. + min: A float or int min value + max: A float or int max value + num: Int number of values + sum: Float or int sum of all values + sum_squares: Float or int sum of squares for all values + bucket_limits: A numeric `Tensor` with upper value per bucket + bucket_counts: A numeric `Tensor` with number of values per bucket + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer. + """ + hist = HistogramProto( + min=min, + max=max, + num=num, + sum=sum, + sum_squares=sum_squares, + bucket_limit=bucket_limits, + bucket=bucket_counts, + ) + return Summary(value=[Summary.Value(tag=name, histo=hist)]) + + +def histogram(name, values, bins, max_bins=None): + # pylint: disable=line-too-long + """Output a `Summary` protocol buffer with a histogram. + + The generated + [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + has one summary value containing a histogram for `values`. + This op reports an `InvalidArgument` error if any value is not finite. + Args: + name: A name for the generated node. Will also serve as a series name in + TensorBoard. 
+ values: A real numeric `Tensor`. Any shape. Values to use to + build the histogram. + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer. + """ + values = make_np(values) + hist = make_histogram(values.astype(float), bins, max_bins) + return Summary(value=[Summary.Value(tag=name, histo=hist)]) + + +def make_histogram(values, bins, max_bins=None): + """Convert values into a histogram proto using logic from histogram.cc.""" + if values.size == 0: + raise ValueError("The input has no element.") + values = values.reshape(-1) + counts, limits = np.histogram(values, bins=bins) + num_bins = len(counts) + if max_bins is not None and num_bins > max_bins: + subsampling = num_bins // max_bins + subsampling_remainder = num_bins % subsampling + if subsampling_remainder != 0: + counts = np.pad( + counts, + pad_width=[[0, subsampling - subsampling_remainder]], + mode="constant", + constant_values=0, + ) + counts = counts.reshape(-1, subsampling).sum(axis=-1) + new_limits = np.empty((counts.size + 1,), limits.dtype) + new_limits[:-1] = limits[:-1:subsampling] + new_limits[-1] = limits[-1] + limits = new_limits + + # Find the first and the last bin defining the support of the histogram: + + cum_counts = np.cumsum(np.greater(counts, 0)) + start, end = np.searchsorted(cum_counts, [0, cum_counts[-1] - 1], side="right") + start = int(start) + end = int(end) + 1 + del cum_counts + + # TensorBoard only includes the right bin limits. To still have the leftmost limit + # included, we include an empty bin left. + # If start == 0, we need to add an empty one left, otherwise we can just include the bin left to the + # first nonzero-count bin: + counts = ( + counts[start - 1 : end] if start > 0 else np.concatenate([[0], counts[:end]]) + ) + limits = limits[start : end + 1] + + if counts.size == 0 or limits.size == 0: + raise ValueError("The histogram is empty, please file a bug report.") + + sum_sq = values.dot(values) + return HistogramProto( + min=values.min(), + max=values.max(), + num=len(values), + sum=values.sum(), + sum_squares=sum_sq, + bucket_limit=limits.tolist(), + bucket=counts.tolist(), + ) + + +def image(tag, tensor, rescale=1, dataformats="NCHW"): + """Output a `Summary` protocol buffer with images. + + The summary has up to `max_images` summary values containing images. The + images are built from `tensor` which must be 3-D with shape `[height, width, + channels]` and where `channels` can be: + * 1: `tensor` is interpreted as Grayscale. + * 3: `tensor` is interpreted as RGB. + * 4: `tensor` is interpreted as RGBA. + The `name` in the outputted Summary.Value protobufs is generated based on the + name, with a suffix depending on the max_outputs setting: + * If `max_outputs` is 1, the summary value tag is '*name*/image'. + * If `max_outputs` is greater than 1, the summary value tags are + generated sequentially as '*name*/image/0', '*name*/image/1', etc. + Args: + tag: A name for the generated node. Will also serve as a series name in + TensorBoard. + tensor: A 3-D `uint8` or `float32` `Tensor` of shape `[height, width, + channels]` where `channels` is 1, 3, or 4. + 'tensor' can either have values in [0, 1] (float32) or [0, 255] (uint8). + The image() function will scale the image values to [0, 255] by applying + a scale factor of either 1 (uint8) or 255 (float32). Out-of-range values + will be clipped. + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer. 
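+
+    The rescale argument scales the encoded image's height and width, and
+    dataformats names the layout of the input dimensions (e.g. "NCHW" or "HWC");
+    the tensor is converted to HWC before being PNG-encoded.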
+ """ + tensor = make_np(tensor) + tensor = convert_to_HWC(tensor, dataformats) + # Do not assume that user passes in values in [0, 255], use data type to detect + scale_factor = _calc_scale_factor(tensor) + tensor = tensor.astype(np.float32) + tensor = (tensor * scale_factor).clip(0, 255).astype(np.uint8) + image = make_image(tensor, rescale=rescale) + return Summary(value=[Summary.Value(tag=tag, image=image)]) + + +def image_boxes( + tag, tensor_image, tensor_boxes, rescale=1, dataformats="CHW", labels=None +): + """Output a `Summary` protocol buffer with images.""" + tensor_image = make_np(tensor_image) + tensor_image = convert_to_HWC(tensor_image, dataformats) + tensor_boxes = make_np(tensor_boxes) + tensor_image = tensor_image.astype(np.float32) * _calc_scale_factor(tensor_image) + image = make_image( + tensor_image.clip(0, 255).astype(np.uint8), + rescale=rescale, + rois=tensor_boxes, + labels=labels, + ) + return Summary(value=[Summary.Value(tag=tag, image=image)]) + + +def draw_boxes(disp_image, boxes, labels=None): + # xyxy format + num_boxes = boxes.shape[0] + list_gt = range(num_boxes) + for i in list_gt: + disp_image = _draw_single_box( + disp_image, + boxes[i, 0], + boxes[i, 1], + boxes[i, 2], + boxes[i, 3], + display_str=None if labels is None else labels[i], + color="Red", + ) + return disp_image + + +def make_image(tensor, rescale=1, rois=None, labels=None): + """Convert a numpy representation of an image to Image protobuf.""" + from PIL import Image + + height, width, channel = tensor.shape + scaled_height = int(height * rescale) + scaled_width = int(width * rescale) + image = Image.fromarray(tensor) + if rois is not None: + image = draw_boxes(image, rois, labels=labels) + try: + ANTIALIAS = Image.Resampling.LANCZOS + except AttributeError: + ANTIALIAS = Image.ANTIALIAS + image = image.resize((scaled_width, scaled_height), ANTIALIAS) + import io + + output = io.BytesIO() + image.save(output, format="PNG") + image_string = output.getvalue() + output.close() + return Summary.Image( + height=height, + width=width, + colorspace=channel, + encoded_image_string=image_string, + ) + + +def video(tag, tensor, fps=4): + tensor = make_np(tensor) + tensor = _prepare_video(tensor) + # If user passes in uint8, then we don't need to rescale by 255 + scale_factor = _calc_scale_factor(tensor) + tensor = tensor.astype(np.float32) + tensor = (tensor * scale_factor).clip(0, 255).astype(np.uint8) + video = make_video(tensor, fps) + return Summary(value=[Summary.Value(tag=tag, image=video)]) + + +def make_video(tensor, fps): + try: + import moviepy # noqa: F401 + except ImportError: + print("add_video needs package moviepy") + return + try: + from moviepy import editor as mpy + except ImportError: + print( + "moviepy is installed, but can't import moviepy.editor.", + "Some packages could be missing [imageio, requests]", + ) + return + import tempfile + + t, h, w, c = tensor.shape + + # encode sequence of images into gif string + clip = mpy.ImageSequenceClip(list(tensor), fps=fps) + + filename = tempfile.NamedTemporaryFile(suffix=".gif", delete=False).name + try: # newer version of moviepy use logger instead of progress_bar argument. + clip.write_gif(filename, verbose=False, logger=None) + except TypeError: + try: # older version of moviepy does not support progress_bar argument. 
+ clip.write_gif(filename, verbose=False, progress_bar=False) + except TypeError: + clip.write_gif(filename, verbose=False) + + with open(filename, "rb") as f: + tensor_string = f.read() + + try: + os.remove(filename) + except OSError: + logger.warning("The temporary file used by moviepy cannot be deleted.") + + return Summary.Image( + height=h, width=w, colorspace=c, encoded_image_string=tensor_string + ) + + +def audio(tag, tensor, sample_rate=44100): + array = make_np(tensor) + array = array.squeeze() + if abs(array).max() > 1: + print("warning: audio amplitude out of range, auto clipped.") + array = array.clip(-1, 1) + assert array.ndim == 1, "input tensor should be 1 dimensional." + array = (array * np.iinfo(np.int16).max).astype(" 127: # weird, value > 127 breaks protobuf + num_thresholds = 127 + data = np.stack((tp, fp, tn, fn, precision, recall)) + pr_curve_plugin_data = PrCurvePluginData( + version=0, num_thresholds=num_thresholds + ).SerializeToString() + plugin_data = SummaryMetadata.PluginData( + plugin_name="pr_curves", content=pr_curve_plugin_data + ) + smd = SummaryMetadata(plugin_data=plugin_data) + tensor = TensorProto( + dtype="DT_FLOAT", + float_val=data.reshape(-1).tolist(), + tensor_shape=TensorShapeProto( + dim=[ + TensorShapeProto.Dim(size=data.shape[0]), + TensorShapeProto.Dim(size=data.shape[1]), + ] + ), + ) + return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)]) + + +def pr_curve(tag, labels, predictions, num_thresholds=127, weights=None): + # weird, value > 127 breaks protobuf + num_thresholds = min(num_thresholds, 127) + data = compute_curve( + labels, predictions, num_thresholds=num_thresholds, weights=weights + ) + pr_curve_plugin_data = PrCurvePluginData( + version=0, num_thresholds=num_thresholds + ).SerializeToString() + plugin_data = SummaryMetadata.PluginData( + plugin_name="pr_curves", content=pr_curve_plugin_data + ) + smd = SummaryMetadata(plugin_data=plugin_data) + tensor = TensorProto( + dtype="DT_FLOAT", + float_val=data.reshape(-1).tolist(), + tensor_shape=TensorShapeProto( + dim=[ + TensorShapeProto.Dim(size=data.shape[0]), + TensorShapeProto.Dim(size=data.shape[1]), + ] + ), + ) + return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)]) + + +# https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/summary.py +def compute_curve(labels, predictions, num_thresholds=None, weights=None): + _MINIMUM_COUNT = 1e-7 + + if weights is None: + weights = 1.0 + + # Compute bins of true positives and false positives. + bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1))) + float_labels = labels.astype(np.float64) + histogram_range = (0, num_thresholds - 1) + tp_buckets, _ = np.histogram( + bucket_indices, + bins=num_thresholds, + range=histogram_range, + weights=float_labels * weights, + ) + fp_buckets, _ = np.histogram( + bucket_indices, + bins=num_thresholds, + range=histogram_range, + weights=(1.0 - float_labels) * weights, + ) + + # Obtain the reverse cumulative sum. + tp = np.cumsum(tp_buckets[::-1])[::-1] + fp = np.cumsum(fp_buckets[::-1])[::-1] + tn = fp[0] - fp + fn = tp[0] - tp + precision = tp / np.maximum(_MINIMUM_COUNT, tp + fp) + recall = tp / np.maximum(_MINIMUM_COUNT, tp + fn) + return np.stack((tp, fp, tn, fn, precision, recall)) + + +def _get_tensor_summary( + name, display_name, description, tensor, content_type, components, json_config +): + """Create a tensor summary with summary metadata. + + Args: + name: Uniquely identifiable name of the summary op. 
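To make the bucketing and reverse-cumulative-sum logic of `compute_curve` above concrete, here is a small hand-check on hypothetical labels/predictions that repeats the same arithmetic with plain NumPy (no extra API involved)::

    import numpy as np

    labels = np.array([1, 0, 1, 1, 0], dtype=np.float64)
    predictions = np.array([0.9, 0.8, 0.4, 0.6, 0.1])
    num_thresholds = 5

    bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1)))  # [3, 3, 1, 2, 0]
    tp_buckets, _ = np.histogram(bucket_indices, bins=num_thresholds,
                                 range=(0, num_thresholds - 1), weights=labels)
    fp_buckets, _ = np.histogram(bucket_indices, bins=num_thresholds,
                                 range=(0, num_thresholds - 1), weights=1.0 - labels)
    # tp[i] = number of positive examples predicted at or above threshold i
    tp = np.cumsum(tp_buckets[::-1])[::-1]        # [3, 3, 2, 1, 0]
    fp = np.cumsum(fp_buckets[::-1])[::-1]        # [2, 1, 1, 1, 0]
    precision = tp / np.maximum(1e-7, tp + fp)
    recall = tp / np.maximum(1e-7, tp[0])         # tp + fn equals tp[0] at every threshold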
Could be replaced by + combination of name and type to make it unique even outside of this + summary. + display_name: Will be used as the display name in TensorBoard. + Defaults to `name`. + description: A longform readable description of the summary data. Markdown + is supported. + tensor: Tensor to display in summary. + content_type: Type of content inside the Tensor. + components: Bitmask representing present parts (vertices, colors, etc.) that + belong to the summary. + json_config: A string, JSON-serialized dictionary of ThreeJS classes + configuration. + + Returns: + Tensor summary with metadata. + """ + import torch + from tensorboard.plugins.mesh import metadata + + tensor = torch.as_tensor(tensor) + + tensor_metadata = metadata.create_summary_metadata( + name, + display_name, + content_type, + components, + tensor.shape, + description, + json_config=json_config, + ) + + tensor = TensorProto( + dtype="DT_FLOAT", + float_val=tensor.reshape(-1).tolist(), + tensor_shape=TensorShapeProto( + dim=[ + TensorShapeProto.Dim(size=tensor.shape[0]), + TensorShapeProto.Dim(size=tensor.shape[1]), + TensorShapeProto.Dim(size=tensor.shape[2]), + ] + ), + ) + + tensor_summary = Summary.Value( + tag=metadata.get_instance_name(name, content_type), + tensor=tensor, + metadata=tensor_metadata, + ) + + return tensor_summary + + +def _get_json_config(config_dict): + """Parse and returns JSON string from python dictionary.""" + json_config = "{}" + if config_dict is not None: + json_config = json.dumps(config_dict, sort_keys=True) + return json_config + + +# https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/mesh/summary.py +def mesh( + tag, vertices, colors, faces, config_dict, display_name=None, description=None +): + """Output a merged `Summary` protocol buffer with a mesh/point cloud. + + Args: + tag: A name for this summary operation. + vertices: Tensor of shape `[dim_1, ..., dim_n, 3]` representing the 3D + coordinates of vertices. + faces: Tensor of shape `[dim_1, ..., dim_n, 3]` containing indices of + vertices within each triangle. + colors: Tensor of shape `[dim_1, ..., dim_n, 3]` containing colors for each + vertex. + display_name: If set, will be used as the display name in TensorBoard. + Defaults to `name`. + description: A longform readable description of the summary data. Markdown + is supported. + config_dict: Dictionary with ThreeJS classes names and configuration. + + Returns: + Merged summary for mesh/point cloud representation. 
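Because the colors and faces arguments may be omitted (the implementation that follows drops tensors that are `None`), a plain point cloud can be logged from vertices alone. A small hedged sketch calling the module-level function directly; the resulting `Summary` would normally be handed to a writer's `add_summary`::

    import numpy as np
    from torch.utils.tensorboard import summary as tb_summary

    points = np.random.rand(1, 500, 3).astype(np.float32)   # (B, N, 3) point-cloud vertices
    s = tb_summary.mesh("point_cloud", vertices=points, colors=None, faces=None,
                        config_dict=None)
    # `s` holds a single VERTEX tensor value tagged for the mesh plugin.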
+ """ + from tensorboard.plugins.mesh import metadata + from tensorboard.plugins.mesh.plugin_data_pb2 import MeshPluginData + + json_config = _get_json_config(config_dict) + + summaries = [] + tensors = [ + (vertices, MeshPluginData.VERTEX), + (faces, MeshPluginData.FACE), + (colors, MeshPluginData.COLOR), + ] + tensors = [tensor for tensor in tensors if tensor[0] is not None] + components = metadata.get_components_bitmask( + [content_type for (tensor, content_type) in tensors] + ) + + for tensor, content_type in tensors: + summaries.append( + _get_tensor_summary( + tag, + display_name, + description, + tensor, + content_type, + components, + json_config, + ) + ) + + return Summary(value=summaries) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/writer.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/writer.py new file mode 100644 index 0000000000000000000000000000000000000000..e9a1e039040f3e1c50c8709632052206db87565b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/tensorboard/writer.py @@ -0,0 +1,1270 @@ +"""Provide an API for writing protocol buffers to event files to be consumed by TensorBoard for visualization.""" + +import os +import time +from typing import List, Optional, Union, TYPE_CHECKING + +import torch + +if TYPE_CHECKING: + from matplotlib.figure import Figure +from tensorboard.compat import tf +from tensorboard.compat.proto import event_pb2 +from tensorboard.compat.proto.event_pb2 import Event, SessionLog +from tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig +from tensorboard.summary.writer.event_file_writer import EventFileWriter + +from ._convert_np import make_np +from ._embedding import get_embedding_info, make_mat, make_sprite, make_tsv, write_pbtxt +from ._onnx_graph import load_onnx_graph +from ._pytorch_graph import graph +from ._utils import figure_to_image +from .summary import ( + audio, + custom_scalars, + histogram, + histogram_raw, + hparams, + image, + image_boxes, + mesh, + pr_curve, + pr_curve_raw, + scalar, + tensor_proto, + text, + video, +) + +__all__ = ["FileWriter", "SummaryWriter"] + + +class FileWriter: + """Writes protocol buffers to event files to be consumed by TensorBoard. + + The `FileWriter` class provides a mechanism to create an event file in a + given directory and add summaries and events to it. The class updates the + file contents asynchronously. This allows a training program to call methods + to add data to the file directly from the training loop, without slowing down + training. + """ + + def __init__(self, log_dir, max_queue=10, flush_secs=120, filename_suffix=""): + """Create a `FileWriter` and an event file. + + On construction the writer creates a new event file in `log_dir`. + The other arguments to the constructor control the asynchronous writes to + the event file. + + Args: + log_dir: A string. Directory where event file will be written. + max_queue: Integer. Size of the queue for pending events and + summaries before one of the 'add' calls forces a flush to disk. + Default is ten items. + flush_secs: Number. How often, in seconds, to flush the + pending events and summaries to disk. Default is every two minutes. + filename_suffix: A string. Suffix added to all event filenames + in the log_dir directory. More details on filename construction in + tensorboard.summary.writer.event_file_writer.EventFileWriter. 
+ """ + # Sometimes PosixPath is passed in and we need to coerce it to + # a string in all cases + # TODO: See if we can remove this in the future if we are + # actually the ones passing in a PosixPath + log_dir = str(log_dir) + self.event_writer = EventFileWriter( + log_dir, max_queue, flush_secs, filename_suffix + ) + + def get_logdir(self): + """Return the directory where event file will be written.""" + return self.event_writer.get_logdir() + + def add_event(self, event, step=None, walltime=None): + """Add an event to the event file. + + Args: + event: An `Event` protocol buffer. + step: Number. Optional global step value for training process + to record with the event. + walltime: float. Optional walltime to override the default (current) + walltime (from time.time()) seconds after epoch + """ + event.wall_time = time.time() if walltime is None else walltime + if step is not None: + # Make sure step is converted from numpy or other formats + # since protobuf might not convert depending on version + event.step = int(step) + self.event_writer.add_event(event) + + def add_summary(self, summary, global_step=None, walltime=None): + """Add a `Summary` protocol buffer to the event file. + + This method wraps the provided summary in an `Event` protocol buffer + and adds it to the event file. + + Args: + summary: A `Summary` protocol buffer. + global_step: Number. Optional global step value for training process + to record with the summary. + walltime: float. Optional walltime to override the default (current) + walltime (from time.time()) seconds after epoch + """ + event = event_pb2.Event(summary=summary) + self.add_event(event, global_step, walltime) + + def add_graph(self, graph_profile, walltime=None): + """Add a `Graph` and step stats protocol buffer to the event file. + + Args: + graph_profile: A `Graph` and step stats protocol buffer. + walltime: float. Optional walltime to override the default (current) + walltime (from time.time()) seconds after epoch + """ + graph = graph_profile[0] + stepstats = graph_profile[1] + event = event_pb2.Event(graph_def=graph.SerializeToString()) + self.add_event(event, None, walltime) + + trm = event_pb2.TaggedRunMetadata( + tag="step1", run_metadata=stepstats.SerializeToString() + ) + event = event_pb2.Event(tagged_run_metadata=trm) + self.add_event(event, None, walltime) + + def add_onnx_graph(self, graph, walltime=None): + """Add a `Graph` protocol buffer to the event file. + + Args: + graph: A `Graph` protocol buffer. + walltime: float. Optional walltime to override the default (current) + _get_file_writerfrom time.time()) + """ + event = event_pb2.Event(graph_def=graph.SerializeToString()) + self.add_event(event, None, walltime) + + def flush(self): + """Flushes the event file to disk. + + Call this method to make sure that all pending events have been written to + disk. + """ + self.event_writer.flush() + + def close(self): + """Flushes the event file to disk and close the file. + + Call this method when you do not need the summary writer anymore. + """ + self.event_writer.close() + + def reopen(self): + """Reopens the EventFileWriter. + + Can be called after `close()` to add more events in the same directory. + The events will go into a new events file. + Does nothing if the EventFileWriter was not closed. + """ + self.event_writer.reopen() + + +class SummaryWriter: + """Writes entries directly to event files in the log_dir to be consumed by TensorBoard. 
+ + The `SummaryWriter` class provides a high-level API to create an event file + in a given directory and add summaries and events to it. The class updates the + file contents asynchronously. This allows a training program to call methods + to add data to the file directly from the training loop, without slowing down + training. + """ + + def __init__( + self, + log_dir=None, + comment="", + purge_step=None, + max_queue=10, + flush_secs=120, + filename_suffix="", + ): + """Create a `SummaryWriter` that will write out events and summaries to the event file. + + Args: + log_dir (str): Save directory location. Default is + runs/**CURRENT_DATETIME_HOSTNAME**, which changes after each run. + Use hierarchical folder structure to compare + between runs easily. e.g. pass in 'runs/exp1', 'runs/exp2', etc. + for each new experiment to compare across them. + comment (str): Comment log_dir suffix appended to the default + ``log_dir``. If ``log_dir`` is assigned, this argument has no effect. + purge_step (int): + When logging crashes at step :math:`T+X` and restarts at step :math:`T`, + any events whose global_step larger or equal to :math:`T` will be + purged and hidden from TensorBoard. + Note that crashed and resumed experiments should have the same ``log_dir``. + max_queue (int): Size of the queue for pending events and + summaries before one of the 'add' calls forces a flush to disk. + Default is ten items. + flush_secs (int): How often, in seconds, to flush the + pending events and summaries to disk. Default is every two minutes. + filename_suffix (str): Suffix added to all event filenames in + the log_dir directory. More details on filename construction in + tensorboard.summary.writer.event_file_writer.EventFileWriter. + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + + # create a summary writer with automatically generated folder name. + writer = SummaryWriter() + # folder location: runs/May04_22-14-54_s-MacBook-Pro.local/ + + # create a summary writer using the specified folder name. + writer = SummaryWriter("my_experiment") + # folder location: my_experiment + + # create a summary writer with comment appended. + writer = SummaryWriter(comment="LR_0.1_BATCH_16") + # folder location: runs/May04_22-14-54_s-MacBook-Pro.localLR_0.1_BATCH_16/ + + """ + torch._C._log_api_usage_once("tensorboard.create.summarywriter") + if not log_dir: + import socket + from datetime import datetime + + current_time = datetime.now().strftime("%b%d_%H-%M-%S") + log_dir = os.path.join( + "runs", current_time + "_" + socket.gethostname() + comment + ) + self.log_dir = log_dir + self.purge_step = purge_step + self.max_queue = max_queue + self.flush_secs = flush_secs + self.filename_suffix = filename_suffix + + # Initialize the file writers, but they can be cleared out on close + # and recreated later as needed. + self.file_writer = self.all_writers = None + self._get_file_writer() + + # Create default bins for histograms, see generate_testdata.py in tensorflow/tensorboard + v = 1e-12 + buckets = [] + neg_buckets = [] + while v < 1e20: + buckets.append(v) + neg_buckets.append(-v) + v *= 1.1 + self.default_bins = neg_buckets[::-1] + [0] + buckets + + def _check_caffe2_blob(self, item): + """ + Check if the input is a string representing a Caffe2 blob name. + + Caffe2 users have the option of passing a string representing the name of a blob + in the workspace instead of passing the actual Tensor/array containing the numeric values. 
+ Thus, we need to check if we received a string as input + instead of an actual Tensor/array, and if so, we need to fetch the Blob + from the workspace corresponding to that name. Fetching can be done with the + following: + + from caffe2.python import workspace (if not already imported) + workspace.FetchBlob(blob_name) + workspace.FetchBlobs([blob_name1, blob_name2, ...]) + """ + return isinstance(item, str) + + def _get_file_writer(self): + """Return the default FileWriter instance. Recreates it if closed.""" + if self.all_writers is None or self.file_writer is None: + self.file_writer = FileWriter( + self.log_dir, self.max_queue, self.flush_secs, self.filename_suffix + ) + self.all_writers = {self.file_writer.get_logdir(): self.file_writer} + if self.purge_step is not None: + most_recent_step = self.purge_step + self.file_writer.add_event( + Event(step=most_recent_step, file_version="brain.Event:2") + ) + self.file_writer.add_event( + Event( + step=most_recent_step, + session_log=SessionLog(status=SessionLog.START), + ) + ) + self.purge_step = None + return self.file_writer + + def get_logdir(self): + """Return the directory where event files will be written.""" + return self.log_dir + + def add_hparams( + self, hparam_dict, metric_dict, hparam_domain_discrete=None, run_name=None, global_step=None + ): + """Add a set of hyperparameters to be compared in TensorBoard. + + Args: + hparam_dict (dict): Each key-value pair in the dictionary is the + name of the hyper parameter and it's corresponding value. + The type of the value can be one of `bool`, `string`, `float`, + `int`, or `None`. + metric_dict (dict): Each key-value pair in the dictionary is the + name of the metric and it's corresponding value. Note that the key used + here should be unique in the tensorboard record. Otherwise the value + you added by ``add_scalar`` will be displayed in hparam plugin. In most + cases, this is unwanted. + hparam_domain_discrete: (Optional[Dict[str, List[Any]]]) A dictionary that + contains names of the hyperparameters and all discrete values they can hold + run_name (str): Name of the run, to be included as part of the logdir. + If unspecified, will use current timestamp. + global_step (int): Global step value to record + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + with SummaryWriter() as w: + for i in range(5): + w.add_hparams({'lr': 0.1*i, 'bsize': i}, + {'hparam/accuracy': 10*i, 'hparam/loss': 10*i}) + + Expected result: + + .. image:: _static/img/tensorboard/add_hparam.png + :scale: 50 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_hparams") + if type(hparam_dict) is not dict or type(metric_dict) is not dict: + raise TypeError("hparam_dict and metric_dict should be dictionary.") + exp, ssi, sei = hparams(hparam_dict, metric_dict, hparam_domain_discrete) + + if not run_name: + run_name = str(time.time()) + logdir = os.path.join(self._get_file_writer().get_logdir(), run_name) + with SummaryWriter(log_dir=logdir) as w_hp: + w_hp.file_writer.add_summary(exp, global_step) + w_hp.file_writer.add_summary(ssi, global_step) + w_hp.file_writer.add_summary(sei, global_step) + for k, v in metric_dict.items(): + w_hp.add_scalar(k, v, global_step) + + def add_scalar( + self, + tag, + scalar_value, + global_step=None, + walltime=None, + new_style=False, + double_precision=False, + ): + """Add scalar data to summary. 
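The `purge_step` handling in `_get_file_writer` above (the `SessionLog.START` event) is what lets a resumed run hide stale points. A hedged sketch, with `runs/exp1` and the step numbers purely illustrative::

    from torch.utils.tensorboard import SummaryWriter

    # A previous run in runs/exp1 crashed somewhere after step 100.
    writer = SummaryWriter("runs/exp1", purge_step=100)
    for step in range(100, 200):
        writer.add_scalar("loss", 1.0 / (step + 1), step)  # stale events at step >= 100 are purged
    writer.close()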
+ + Args: + tag (str): Data identifier + scalar_value (float or string/blobname): Value to save + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + with seconds after epoch of event + new_style (boolean): Whether to use new style (tensor field) or old + style (simple_value field). New style could lead to faster data loading. + Examples:: + + from torch.utils.tensorboard import SummaryWriter + writer = SummaryWriter() + x = range(100) + for i in x: + writer.add_scalar('y=2x', i * 2, i) + writer.close() + + Expected result: + + .. image:: _static/img/tensorboard/add_scalar.png + :scale: 50 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_scalar") + if self._check_caffe2_blob(scalar_value): + from caffe2.python import workspace + + scalar_value = workspace.FetchBlob(scalar_value) + + summary = scalar( + tag, scalar_value, new_style=new_style, double_precision=double_precision + ) + self._get_file_writer().add_summary(summary, global_step, walltime) + + def add_scalars(self, main_tag, tag_scalar_dict, global_step=None, walltime=None): + """Add many scalar data to summary. + + Args: + main_tag (str): The parent name for the tags + tag_scalar_dict (dict): Key-value pair storing the tag and corresponding values + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + writer = SummaryWriter() + r = 5 + for i in range(100): + writer.add_scalars('run_14h', {'xsinx':i*np.sin(i/r), + 'xcosx':i*np.cos(i/r), + 'tanx': np.tan(i/r)}, i) + writer.close() + # This call adds three values to the same scalar plot with the tag + # 'run_14h' in TensorBoard's scalar section. + + Expected result: + + .. image:: _static/img/tensorboard/add_scalars.png + :scale: 50 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_scalars") + walltime = time.time() if walltime is None else walltime + fw_logdir = self._get_file_writer().get_logdir() + for tag, scalar_value in tag_scalar_dict.items(): + fw_tag = fw_logdir + "/" + main_tag.replace("/", "_") + "_" + tag + assert self.all_writers is not None + if fw_tag in self.all_writers.keys(): + fw = self.all_writers[fw_tag] + else: + fw = FileWriter( + fw_tag, self.max_queue, self.flush_secs, self.filename_suffix + ) + self.all_writers[fw_tag] = fw + if self._check_caffe2_blob(scalar_value): + from caffe2.python import workspace + + scalar_value = workspace.FetchBlob(scalar_value) + fw.add_summary(scalar(main_tag, scalar_value), global_step, walltime) + + def add_tensor( + self, + tag, + tensor, + global_step=None, + walltime=None, + ): + """Add tensor data to summary. 
+ + Args: + tag (str): Data identifier + tensor (torch.Tensor): tensor to save + global_step (int): Global step value to record + Examples:: + + from torch.utils.tensorboard import SummaryWriter + writer = SummaryWriter() + x = torch.tensor([1,2,3]) + writer.add_scalar('x', x) + writer.close() + + Expected result: + Summary::tensor::float_val [1,2,3] + ::tensor::shape [3] + ::tag 'x' + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_tensor") + if self._check_caffe2_blob(tensor): + from caffe2.python import workspace + + tensor = torch.tensor(workspace.FetchBlob(tensor)) + + summary = tensor_proto(tag, tensor) + self._get_file_writer().add_summary(summary, global_step, walltime) + + def add_histogram( + self, + tag, + values, + global_step=None, + bins="tensorflow", + walltime=None, + max_bins=None, + ): + """Add histogram to summary. + + Args: + tag (str): Data identifier + values (torch.Tensor, numpy.ndarray, or string/blobname): Values to build histogram + global_step (int): Global step value to record + bins (str): One of {'tensorflow','auto', 'fd', ...}. This determines how the bins are made. You can find + other options in: https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + writer = SummaryWriter() + for i in range(10): + x = np.random.random(1000) + writer.add_histogram('distribution centers', x + i, i) + writer.close() + + Expected result: + + .. image:: _static/img/tensorboard/add_histogram.png + :scale: 50 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_histogram") + if self._check_caffe2_blob(values): + from caffe2.python import workspace + + values = workspace.FetchBlob(values) + if isinstance(bins, str) and bins == "tensorflow": + bins = self.default_bins + self._get_file_writer().add_summary( + histogram(tag, values, bins, max_bins=max_bins), global_step, walltime + ) + + def add_histogram_raw( + self, + tag, + min, + max, + num, + sum, + sum_squares, + bucket_limits, + bucket_counts, + global_step=None, + walltime=None, + ): + """Add histogram with raw data. + + Args: + tag (str): Data identifier + min (float or int): Min value + max (float or int): Max value + num (int): Number of values + sum (float or int): Sum of all values + sum_squares (float or int): Sum of squares for all values + bucket_limits (torch.Tensor, numpy.ndarray): Upper value per bucket. + The number of elements of it should be the same as `bucket_counts`. 
+ bucket_counts (torch.Tensor, numpy.ndarray): Number of values per bucket + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + see: https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/histogram/README.md + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + writer = SummaryWriter() + dummy_data = [] + for idx, value in enumerate(range(50)): + dummy_data += [idx + 0.001] * value + + bins = list(range(50+2)) + bins = np.array(bins) + values = np.array(dummy_data).astype(float).reshape(-1) + counts, limits = np.histogram(values, bins=bins) + sum_sq = values.dot(values) + writer.add_histogram_raw( + tag='histogram_with_raw_data', + min=values.min(), + max=values.max(), + num=len(values), + sum=values.sum(), + sum_squares=sum_sq, + bucket_limits=limits[1:].tolist(), + bucket_counts=counts.tolist(), + global_step=0) + writer.close() + + Expected result: + + .. image:: _static/img/tensorboard/add_histogram_raw.png + :scale: 50 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_histogram_raw") + if len(bucket_limits) != len(bucket_counts): + raise ValueError( + "len(bucket_limits) != len(bucket_counts), see the document." + ) + self._get_file_writer().add_summary( + histogram_raw( + tag, min, max, num, sum, sum_squares, bucket_limits, bucket_counts + ), + global_step, + walltime, + ) + + def add_image( + self, tag, img_tensor, global_step=None, walltime=None, dataformats="CHW" + ): + """Add image data to summary. + + Note that this requires the ``pillow`` package. + + Args: + tag (str): Data identifier + img_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Image data + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + dataformats (str): Image data format specification of the form + CHW, HWC, HW, WH, etc. + Shape: + img_tensor: Default is :math:`(3, H, W)`. You can use ``torchvision.utils.make_grid()`` to + convert a batch of tensor into 3xHxW format or call ``add_images`` and let us do the job. + Tensor with :math:`(1, H, W)`, :math:`(H, W)`, :math:`(H, W, 3)` is also suitable as long as + corresponding ``dataformats`` argument is passed, e.g. ``CHW``, ``HWC``, ``HW``. + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + img = np.zeros((3, 100, 100)) + img[0] = np.arange(0, 10000).reshape(100, 100) / 10000 + img[1] = 1 - np.arange(0, 10000).reshape(100, 100) / 10000 + + img_HWC = np.zeros((100, 100, 3)) + img_HWC[:, :, 0] = np.arange(0, 10000).reshape(100, 100) / 10000 + img_HWC[:, :, 1] = 1 - np.arange(0, 10000).reshape(100, 100) / 10000 + + writer = SummaryWriter() + writer.add_image('my_image', img, 0) + + # If you have non-default dimension setting, set the dataformats argument. + writer.add_image('my_image_HWC', img_HWC, 0, dataformats='HWC') + writer.close() + + Expected result: + + .. 
image:: _static/img/tensorboard/add_image.png + :scale: 50 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_image") + if self._check_caffe2_blob(img_tensor): + from caffe2.python import workspace + + img_tensor = workspace.FetchBlob(img_tensor) + self._get_file_writer().add_summary( + image(tag, img_tensor, dataformats=dataformats), global_step, walltime + ) + + def add_images( + self, tag, img_tensor, global_step=None, walltime=None, dataformats="NCHW" + ): + """Add batched image data to summary. + + Note that this requires the ``pillow`` package. + + Args: + tag (str): Data identifier + img_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Image data + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + dataformats (str): Image data format specification of the form + NCHW, NHWC, CHW, HWC, HW, WH, etc. + Shape: + img_tensor: Default is :math:`(N, 3, H, W)`. If ``dataformats`` is specified, other shape will be + accepted. e.g. NCHW or NHWC. + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + + img_batch = np.zeros((16, 3, 100, 100)) + for i in range(16): + img_batch[i, 0] = np.arange(0, 10000).reshape(100, 100) / 10000 / 16 * i + img_batch[i, 1] = (1 - np.arange(0, 10000).reshape(100, 100) / 10000) / 16 * i + + writer = SummaryWriter() + writer.add_images('my_image_batch', img_batch, 0) + writer.close() + + Expected result: + + .. image:: _static/img/tensorboard/add_images.png + :scale: 30 % + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_images") + if self._check_caffe2_blob(img_tensor): + from caffe2.python import workspace + + img_tensor = workspace.FetchBlob(img_tensor) + self._get_file_writer().add_summary( + image(tag, img_tensor, dataformats=dataformats), global_step, walltime + ) + + def add_image_with_boxes( + self, + tag, + img_tensor, + box_tensor, + global_step=None, + walltime=None, + rescale=1, + dataformats="CHW", + labels=None, + ): + """Add image and draw bounding boxes on the image. + + Args: + tag (str): Data identifier + img_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Image data + box_tensor (torch.Tensor, numpy.ndarray, or string/blobname): Box data (for detected objects) + box should be represented as [x1, y1, x2, y2]. + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + rescale (float): Optional scale override + dataformats (str): Image data format specification of the form + NCHW, NHWC, CHW, HWC, HW, WH, etc. + labels (list of string): The label to be shown for each bounding box. + Shape: + img_tensor: Default is :math:`(3, H, W)`. It can be specified with ``dataformats`` argument. + e.g. CHW or HWC + + box_tensor: (torch.Tensor, numpy.ndarray, or string/blobname): NX4, where N is the number of + boxes and each 4 elements in a row represents (xmin, ymin, xmax, ymax). 
+ """ + torch._C._log_api_usage_once("tensorboard.logging.add_image_with_boxes") + if self._check_caffe2_blob(img_tensor): + from caffe2.python import workspace + + img_tensor = workspace.FetchBlob(img_tensor) + if self._check_caffe2_blob(box_tensor): + from caffe2.python import workspace + + box_tensor = workspace.FetchBlob(box_tensor) + if labels is not None: + if isinstance(labels, str): + labels = [labels] + if len(labels) != box_tensor.shape[0]: + labels = None + self._get_file_writer().add_summary( + image_boxes( + tag, + img_tensor, + box_tensor, + rescale=rescale, + dataformats=dataformats, + labels=labels, + ), + global_step, + walltime, + ) + + def add_figure( + self, + tag: str, + figure: Union["Figure", List["Figure"]], + global_step: Optional[int] = None, + close: bool = True, + walltime: Optional[float] = None + ) -> None: + """Render matplotlib figure into an image and add it to summary. + + Note that this requires the ``matplotlib`` package. + + Args: + tag: Data identifier + figure: Figure or a list of figures + global_step: Global step value to record + close: Flag to automatically close the figure + walltime: Optional override default walltime (time.time()) + seconds after epoch of event + """ + torch._C._log_api_usage_once("tensorboard.logging.add_figure") + if isinstance(figure, list): + self.add_image( + tag, + figure_to_image(figure, close), + global_step, + walltime, + dataformats="NCHW", + ) + else: + self.add_image( + tag, + figure_to_image(figure, close), + global_step, + walltime, + dataformats="CHW", + ) + + def add_video(self, tag, vid_tensor, global_step=None, fps=4, walltime=None): + """Add video data to summary. + + Note that this requires the ``moviepy`` package. + + Args: + tag (str): Data identifier + vid_tensor (torch.Tensor): Video data + global_step (int): Global step value to record + fps (float or int): Frames per second + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + Shape: + vid_tensor: :math:`(N, T, C, H, W)`. The values should lie in [0, 255] for type `uint8` or [0, 1] for type `float`. + """ + torch._C._log_api_usage_once("tensorboard.logging.add_video") + self._get_file_writer().add_summary( + video(tag, vid_tensor, fps), global_step, walltime + ) + + def add_audio( + self, tag, snd_tensor, global_step=None, sample_rate=44100, walltime=None + ): + """Add audio data to summary. + + Args: + tag (str): Data identifier + snd_tensor (torch.Tensor): Sound data + global_step (int): Global step value to record + sample_rate (int): sample rate in Hz + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + Shape: + snd_tensor: :math:`(1, L)`. The values should lie between [-1, 1]. + """ + torch._C._log_api_usage_once("tensorboard.logging.add_audio") + if self._check_caffe2_blob(snd_tensor): + from caffe2.python import workspace + + snd_tensor = workspace.FetchBlob(snd_tensor) + self._get_file_writer().add_summary( + audio(tag, snd_tensor, sample_rate=sample_rate), global_step, walltime + ) + + def add_text(self, tag, text_string, global_step=None, walltime=None): + """Add text data to summary. 
+ + Args: + tag (str): Data identifier + text_string (str): String to save + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + Examples:: + + writer.add_text('lstm', 'This is an lstm', 0) + writer.add_text('rnn', 'This is an rnn', 10) + """ + torch._C._log_api_usage_once("tensorboard.logging.add_text") + self._get_file_writer().add_summary( + text(tag, text_string), global_step, walltime + ) + + def add_onnx_graph(self, prototxt): + torch._C._log_api_usage_once("tensorboard.logging.add_onnx_graph") + self._get_file_writer().add_onnx_graph(load_onnx_graph(prototxt)) + + def add_graph( + self, model, input_to_model=None, verbose=False, use_strict_trace=True + ): + """Add graph data to summary. + + Args: + model (torch.nn.Module): Model to draw. + input_to_model (torch.Tensor or list of torch.Tensor): A variable or a tuple of + variables to be fed. + verbose (bool): Whether to print graph structure in console. + use_strict_trace (bool): Whether to pass keyword argument `strict` to + `torch.jit.trace`. Pass False when you want the tracer to + record your mutable container types (list, dict) + """ + torch._C._log_api_usage_once("tensorboard.logging.add_graph") + if hasattr(model, "forward"): + # A valid PyTorch model should have a 'forward' method + self._get_file_writer().add_graph( + graph(model, input_to_model, verbose, use_strict_trace) + ) + else: + # Caffe2 models do not have the 'forward' method + from caffe2.proto import caffe2_pb2 + from caffe2.python import core + + from ._caffe2_graph import ( + model_to_graph_def, + nets_to_graph_def, + protos_to_graph_def, + ) + + if isinstance(model, list): + if isinstance(model[0], core.Net): + current_graph = nets_to_graph_def(model) + elif isinstance(model[0], caffe2_pb2.NetDef): + current_graph = protos_to_graph_def(model) + else: + # Handles cnn.CNNModelHelper, model_helper.ModelHelper + current_graph = model_to_graph_def(model) + event = event_pb2.Event(graph_def=current_graph.SerializeToString()) # type: ignore[possibly-undefined] + self._get_file_writer().add_event(event) + + @staticmethod + def _encode(rawstr): + # I'd use urllib but, I'm unsure about the differences from python3 to python2, etc. + retval = rawstr + retval = retval.replace("%", f"%{ord('%'):02x}") + retval = retval.replace("/", f"%{ord('/'):02x}") + retval = retval.replace("\\", "%%%02x" % (ord("\\"))) + return retval + + def add_embedding( + self, + mat, + metadata=None, + label_img=None, + global_step=None, + tag="default", + metadata_header=None, + ): + """Add embedding projector data to summary. 
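`add_graph` above has no inline example; a minimal hedged sketch with a tiny hypothetical module (the graph is obtained by tracing `forward` with `torch.jit.trace`)::

    import torch
    import torch.nn as nn
    from torch.utils.tensorboard import SummaryWriter

    class TinyNet(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(4, 2)

        def forward(self, x):
            return torch.relu(self.fc(x))

    writer = SummaryWriter()
    writer.add_graph(TinyNet(), input_to_model=torch.rand(1, 4))
    writer.close()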
+ + Args: + mat (torch.Tensor or numpy.ndarray): A matrix which each row is the feature vector of the data point + metadata (list): A list of labels, each element will be convert to string + label_img (torch.Tensor): Images correspond to each data point + global_step (int): Global step value to record + tag (str): Name for the embedding + Shape: + mat: :math:`(N, D)`, where N is number of data and D is feature dimension + + label_img: :math:`(N, C, H, W)` + + Examples:: + + import keyword + import torch + meta = [] + while len(meta)<100: + meta = meta+keyword.kwlist # get some strings + meta = meta[:100] + + for i, v in enumerate(meta): + meta[i] = v+str(i) + + label_img = torch.rand(100, 3, 10, 32) + for i in range(100): + label_img[i]*=i/100.0 + + writer.add_embedding(torch.randn(100, 5), metadata=meta, label_img=label_img) + writer.add_embedding(torch.randn(100, 5), label_img=label_img) + writer.add_embedding(torch.randn(100, 5), metadata=meta) + """ + torch._C._log_api_usage_once("tensorboard.logging.add_embedding") + mat = make_np(mat) + if global_step is None: + global_step = 0 + # clear pbtxt? + + # Maybe we should encode the tag so slashes don't trip us up? + # I don't think this will mess us up, but better safe than sorry. + subdir = f"{str(global_step).zfill(5)}/{self._encode(tag)}" + save_path = os.path.join(self._get_file_writer().get_logdir(), subdir) + + fs = tf.io.gfile + if fs.exists(save_path): + if fs.isdir(save_path): + print( + "warning: Embedding dir exists, did you set global_step for add_embedding()?" + ) + else: + raise Exception( + f"Path: `{save_path}` exists, but is a file. Cannot proceed." + ) + else: + fs.makedirs(save_path) + + if metadata is not None: + assert mat.shape[0] == len( + metadata + ), "#labels should equal with #data points" + make_tsv(metadata, save_path, metadata_header=metadata_header) + + if label_img is not None: + assert ( + mat.shape[0] == label_img.shape[0] + ), "#images should equal with #data points" + make_sprite(label_img, save_path) + + assert ( + mat.ndim == 2 + ), "mat should be 2D, where mat.size(0) is the number of data points" + make_mat(mat, save_path) + + # Filesystem doesn't necessarily have append semantics, so we store an + # internal buffer to append to and re-write whole file after each + # embedding is added + if not hasattr(self, "_projector_config"): + self._projector_config = ProjectorConfig() + embedding_info = get_embedding_info( + metadata, label_img, subdir, global_step, tag + ) + self._projector_config.embeddings.extend([embedding_info]) + + from google.protobuf import text_format + + config_pbtxt = text_format.MessageToString(self._projector_config) + write_pbtxt(self._get_file_writer().get_logdir(), config_pbtxt) + + def add_pr_curve( + self, + tag, + labels, + predictions, + global_step=None, + num_thresholds=127, + weights=None, + walltime=None, + ): + """Add precision recall curve. + + Plotting a precision-recall curve lets you understand your model's + performance under different threshold settings. With this function, + you provide the ground truth labeling (T/F) and prediction confidence + (usually the output of your model) for each target. The TensorBoard UI + will let you choose the threshold interactively. + + Args: + tag (str): Data identifier + labels (torch.Tensor, numpy.ndarray, or string/blobname): + Ground truth data. Binary label for each element. + predictions (torch.Tensor, numpy.ndarray, or string/blobname): + The probability that an element be classified as true. 
+ Value should be in [0, 1] + global_step (int): Global step value to record + num_thresholds (int): Number of thresholds used to draw the curve. + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + + Examples:: + + from torch.utils.tensorboard import SummaryWriter + import numpy as np + labels = np.random.randint(2, size=100) # binary label + predictions = np.random.rand(100) + writer = SummaryWriter() + writer.add_pr_curve('pr_curve', labels, predictions, 0) + writer.close() + + """ + torch._C._log_api_usage_once("tensorboard.logging.add_pr_curve") + labels, predictions = make_np(labels), make_np(predictions) + self._get_file_writer().add_summary( + pr_curve(tag, labels, predictions, num_thresholds, weights), + global_step, + walltime, + ) + + def add_pr_curve_raw( + self, + tag, + true_positive_counts, + false_positive_counts, + true_negative_counts, + false_negative_counts, + precision, + recall, + global_step=None, + num_thresholds=127, + weights=None, + walltime=None, + ): + """Add precision recall curve with raw data. + + Args: + tag (str): Data identifier + true_positive_counts (torch.Tensor, numpy.ndarray, or string/blobname): true positive counts + false_positive_counts (torch.Tensor, numpy.ndarray, or string/blobname): false positive counts + true_negative_counts (torch.Tensor, numpy.ndarray, or string/blobname): true negative counts + false_negative_counts (torch.Tensor, numpy.ndarray, or string/blobname): false negative counts + precision (torch.Tensor, numpy.ndarray, or string/blobname): precision + recall (torch.Tensor, numpy.ndarray, or string/blobname): recall + global_step (int): Global step value to record + num_thresholds (int): Number of thresholds used to draw the curve. + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + see: https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/README.md + """ + torch._C._log_api_usage_once("tensorboard.logging.add_pr_curve_raw") + self._get_file_writer().add_summary( + pr_curve_raw( + tag, + true_positive_counts, + false_positive_counts, + true_negative_counts, + false_negative_counts, + precision, + recall, + num_thresholds, + weights, + ), + global_step, + walltime, + ) + + def add_custom_scalars_multilinechart( + self, tags, category="default", title="untitled" + ): + """Shorthand for creating multilinechart. Similar to ``add_custom_scalars()``, but the only necessary argument is *tags*. + + Args: + tags (list): list of tags that have been used in ``add_scalar()`` + + Examples:: + + writer.add_custom_scalars_multilinechart(['twse/0050', 'twse/2330']) + """ + torch._C._log_api_usage_once( + "tensorboard.logging.add_custom_scalars_multilinechart" + ) + layout = {category: {title: ["Multiline", tags]}} + self._get_file_writer().add_summary(custom_scalars(layout)) + + def add_custom_scalars_marginchart( + self, tags, category="default", title="untitled" + ): + """Shorthand for creating marginchart. + + Similar to ``add_custom_scalars()``, but the only necessary argument is *tags*, + which should have exactly 3 elements. 
+ + Args: + tags (list): list of tags that have been used in ``add_scalar()`` + + Examples:: + + writer.add_custom_scalars_marginchart(['twse/0050', 'twse/2330', 'twse/2006']) + """ + torch._C._log_api_usage_once( + "tensorboard.logging.add_custom_scalars_marginchart" + ) + assert len(tags) == 3 + layout = {category: {title: ["Margin", tags]}} + self._get_file_writer().add_summary(custom_scalars(layout)) + + def add_custom_scalars(self, layout): + """Create special chart by collecting charts tags in 'scalars'. + + NOTE: This function can only be called once for each SummaryWriter() object. + + Because it only provides metadata to tensorboard, the function can be called before or after the training loop. + + Args: + layout (dict): {categoryName: *charts*}, where *charts* is also a dictionary + {chartName: *ListOfProperties*}. The first element in *ListOfProperties* is the chart's type + (one of **Multiline** or **Margin**) and the second element should be a list containing the tags + you have used in add_scalar function, which will be collected into the new chart. + + Examples:: + + layout = {'Taiwan':{'twse':['Multiline',['twse/0050', 'twse/2330']]}, + 'USA':{ 'dow':['Margin', ['dow/aaa', 'dow/bbb', 'dow/ccc']], + 'nasdaq':['Margin', ['nasdaq/aaa', 'nasdaq/bbb', 'nasdaq/ccc']]}} + + writer.add_custom_scalars(layout) + """ + torch._C._log_api_usage_once("tensorboard.logging.add_custom_scalars") + self._get_file_writer().add_summary(custom_scalars(layout)) + + def add_mesh( + self, + tag, + vertices, + colors=None, + faces=None, + config_dict=None, + global_step=None, + walltime=None, + ): + """Add meshes or 3D point clouds to TensorBoard. + + The visualization is based on Three.js, + so it allows users to interact with the rendered object. Besides the basic definitions + such as vertices, faces, users can further provide camera parameter, lighting condition, etc. + Please check https://threejs.org/docs/index.html#manual/en/introduction/Creating-a-scene for + advanced usage. + + Args: + tag (str): Data identifier + vertices (torch.Tensor): List of the 3D coordinates of vertices. + colors (torch.Tensor): Colors for each vertex + faces (torch.Tensor): Indices of vertices within each triangle. (Optional) + config_dict: Dictionary with ThreeJS classes names and configuration. + global_step (int): Global step value to record + walltime (float): Optional override default walltime (time.time()) + seconds after epoch of event + + Shape: + vertices: :math:`(B, N, 3)`. (batch, number_of_vertices, channels) + + colors: :math:`(B, N, 3)`. The values should lie in [0, 255] for type `uint8` or [0, 1] for type `float`. + + faces: :math:`(B, N, 3)`. The values should lie in [0, number_of_vertices] for type `uint8`. 
+ + Examples:: + + from torch.utils.tensorboard import SummaryWriter + vertices_tensor = torch.as_tensor([ + [1, 1, 1], + [-1, -1, 1], + [1, -1, -1], + [-1, 1, -1], + ], dtype=torch.float).unsqueeze(0) + colors_tensor = torch.as_tensor([ + [255, 0, 0], + [0, 255, 0], + [0, 0, 255], + [255, 0, 255], + ], dtype=torch.int).unsqueeze(0) + faces_tensor = torch.as_tensor([ + [0, 2, 3], + [0, 3, 1], + [0, 1, 2], + [1, 3, 2], + ], dtype=torch.int).unsqueeze(0) + + writer = SummaryWriter() + writer.add_mesh('my_mesh', vertices=vertices_tensor, colors=colors_tensor, faces=faces_tensor) + + writer.close() + """ + torch._C._log_api_usage_once("tensorboard.logging.add_mesh") + self._get_file_writer().add_summary( + mesh(tag, vertices, colors, faces, config_dict), global_step, walltime + ) + + def flush(self): + """Flushes the event file to disk. + + Call this method to make sure that all pending events have been written to + disk. + """ + if self.all_writers is None: + return + for writer in self.all_writers.values(): + writer.flush() + + def close(self): + if self.all_writers is None: + return # ignore double close + for writer in self.all_writers.values(): + writer.flush() + writer.close() + self.file_writer = self.all_writers = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/viz/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/viz/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/viz/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/viz/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2db08c2d7f3173f4c7026454f5a5f6e58526c76d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/viz/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/viz/__pycache__/_cycles.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/viz/__pycache__/_cycles.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbdf0ffa89fd2f21265104c253e81504ac7c92a9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/viz/__pycache__/_cycles.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/viz/_cycles.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/viz/_cycles.py new file mode 100644 index 0000000000000000000000000000000000000000..f17348e401c34a74d60337eb21009aebea278dd4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/viz/_cycles.py @@ -0,0 +1,447 @@ +import gc +import sys +from typing import Any, Dict, List, NamedTuple, Optional, Tuple +import types +import weakref +import json +from tempfile import NamedTemporaryFile +import torch +from torch.cuda._memory_viz import _frames_fmt, _block_extra +import atexit +import logging +logger = logging.getLogger(__name__) + +def observe_garbage(observer): + enabled = True + + def disable(): + # when GC runs during exit, things like `sys` will already be unloaded + # so we have to disable the callback to avoid hitting errors. 
+ nonlocal enabled + enabled = False + atexit.register(disable) + + def gc_callback(phase, info): + nonlocal enabled + if not enabled: + return + if phase == "start": + gc.set_debug(gc.DEBUG_SAVEALL) + elif phase == "stop": + orig_trace = sys.getprofile() + self_return = [False] + + def do_collect(*args, **kwargs): + nonlocal enabled + if not self_return[0]: + self_return[0] = True + else: + sys.setprofile(orig_trace) + enabled = False + try: + # things in gc.garbage have survived a collection + # so to free them we have to collect a generation greater than them + # but that might _also_ free other stuff and we don't want to miss + # that stuff. So we have to now force gc at the highest level here, + # report all of what we found, _then_ we can free it up. + if info['generation'] != 2: + gc.collect() + observer(gc.garbage) + gc.garbage.clear() + # we have to re-run GC to clean up the cycles + # we saved from before. + gc.set_debug(0) + before = torch.cuda.memory_allocated() + gc.collect() + after = torch.cuda.memory_allocated() + if before != after: + logger.warning("CUDA Memory changed during GC, %d bytes freed.", before - after) + finally: + enabled = True + if orig_trace is not None: + return orig_trace(*args, **kwargs) + sys.setprofile(do_collect) + + gc.callbacks.append(gc_callback) + + # provide a way to disarm the callback + def remove(): + gc.callbacks.remove(gc_callback) + return remove + +# Function to visualize cycles adapated from refcycle: +# Copyright 2013 Mark Dickinson +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +def _get_cell_type(): + def f(x=None): + return lambda: x + return type(f().__closure__[0]) + +CellType = _get_cell_type() + +def annotated_references(obj): + """ + Return known information about references held by the given object. + + Returns a mapping from referents to lists of descriptions. Note that there + may be more than one edge leading to any particular referent; hence the + need for a list. Descriptions are currently strings. 
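A hedged sketch of wiring up the `observe_garbage` hook completed above: the observer receives the objects the cycle collector found, and the returned function disarms the callback again. The callback names and the toy cycle are illustrative only::

    import gc
    from torch.utils.viz._cycles import observe_garbage

    def report(garbage):
        print(f"cycle collector found {len(garbage)} objects")

    remove = observe_garbage(report)   # installs the gc callback described above

    a, b = [], []
    a.append(b)
    b.append(a)                        # a <-> b reference cycle
    del a, b
    gc.collect()                       # the observer runs shortly after the collection finishes
    remove()                           # disarm the callback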
+ + """ + references: Dict[int, List[str]] = {} + + def add_reference(name, obj): + references.setdefault(id(obj), []).append(name) + + def add_attrs(*attrs): + for attr in attrs: + if hasattr(obj, attr): + add_reference(attr, getattr(obj, attr)) + + def add_cell_references(): + try: + add_attrs("cell_contents") + except ValueError: + # if cell_contents is empty, + # accessing it raises ValueError + # in this case there is no object to + # annotate + pass + + def add_function_references(): + add_attrs("__defaults__", + "__closure__", + "__globals__", + "__code__", + "__name__", + "__module__", + "__doc__" + "__qualname__", + "__annotations__", + "__kwdefaults__") + + + def add_sequence_references(): + for position, item in enumerate(obj): + add_reference(f"[{position}]", item) + + def add_dict_references(): + for key, value in obj.items(): + add_reference("key", key) + add_reference(f"[{repr(key)}]", value) + + def add_set_references(): + for elt in obj: + add_reference("element", elt) + + def add_bound_method_references(): + add_attrs("__self__", "__func__", "im_class") + + def add_weakref_references(): + # For subclasses of weakref, we can't reliably distinguish the + # callback (if any) from other attributes. + if type(obj) is weakref.ref: + referents = gc.get_referents(obj) + if len(referents) == 1: + target = referents[0] + add_reference("__callback__", target) + + + def add_frame_references(): + f_locals = obj.f_locals + add_attrs("f_back", "f_code", "f_builtins", "f_globals", "f_trace", "f_locals") + # Some badly-behaved code replaces the f_locals dict with + # something that doesn't support the full dict interface. So we + # only continue with the annotation if f_locals is a Python dict. + if type(f_locals) is dict: + for name, local in obj.f_locals.items(): + add_reference(f"local {name}", local) + + def add_getset_descriptor_references(): + add_attrs("__objclass__", "__name__", "__doc__") + + type_based_references = { + tuple: add_sequence_references, + list: add_sequence_references, + dict: add_dict_references, + set: add_set_references, + frozenset: add_set_references, + types.FunctionType: add_function_references, + types.FrameType: add_frame_references, + CellType: add_cell_references, + types.MethodType: add_bound_method_references, + weakref.ref: add_weakref_references, + types.GetSetDescriptorType: add_getset_descriptor_references, + } + + for type_ in type(obj).__mro__: + if type_ in type_based_references: + type_based_references[type_]() + + add_attrs("__dict__", "__class__") + if isinstance(obj, type): + add_attrs("__mro__") + + return references + +############################################################################### +# Object annotations. + + +BASE_TYPES = (int, float, complex, type(None), str, bytes) +FRAME_FILENAME_LIMIT = 32 + +def object_annotation(obj): + """ + Return a string to be used for Graphviz nodes. + + The string should be short but as informative as possible. + """ + + def format_sequence(obj): + body = ','.join(repr(x) if isinstance(x, BASE_TYPES) else type(x).__name__ for i, x in zip(range(8), obj)) + if len(obj) > 8: + body = f'{body}, ...{len(obj) - 8}' + return body + + # For basic types, use the repr. 
+ if isinstance(obj, BASE_TYPES): + return repr(obj) + if type(obj).__name__ == 'function': + return f"function\n{obj.__name__}" + elif isinstance(obj, types.MethodType): + try: + func_name = obj.__func__.__qualname__ + except AttributeError: + func_name = "" + return f"instancemethod\n{func_name}" + elif isinstance(obj, list): + return f"[{format_sequence(obj)}]" + elif isinstance(obj, tuple): + return f"({format_sequence(obj)})" + elif isinstance(obj, dict): + return f"dict[{len(obj)}]" + elif isinstance(obj, types.ModuleType): + return f"module\n{obj.__name__}" + elif isinstance(obj, type): + return f"type\n{obj.__name__}" + elif isinstance(obj, weakref.ref): + referent = obj() + if referent is None: + return "weakref (dead referent)" + else: + return f"weakref to id 0x{id(referent):x}" + elif isinstance(obj, types.FrameType): + filename = obj.f_code.co_filename + if len(filename) > FRAME_FILENAME_LIMIT: + filename = "..." + filename[-(FRAME_FILENAME_LIMIT - 3):] + return f"frame\n{filename}:{obj.f_lineno}" + else: + return f"object\n{type(obj).__module__}.{type(obj).__name__}" + + + +class Node(NamedTuple): + label: str + context: Optional[str] + root: bool + referrents: List[Tuple[str, int]] + +def create_graph(objects, *, context=None, filter=None): + if context is None: + context = cuda_allocation_context() + if filter is None: + filter = is_cuda_tensor + + nodes = [Node(object_annotation(obj), context(obj), filter(obj), []) for obj in objects] + node_referrers: List[List[int]] = [[] for obj in objects] + + id_to_node = {id(obj): i for i, obj in enumerate(objects)} + for obj in objects: + fidx = id_to_node[id(obj)] + f = nodes[fidx] + references = annotated_references(obj) + for referrent in gc.get_referents(obj): + rid = id(referrent) + tidx = id_to_node.get(rid, None) + if tidx is None: + continue + t = nodes[tidx] + labels = references.get(rid, ["?"]) + node_referrers[tidx].append(fidx) + for label in labels: + f.referrents.append((label, tidx)) + + to_search = [i for i, n in enumerate(nodes) if n.root] + to_keep = set() + while to_search: + idx = to_search.pop() + if idx in to_keep: + continue + to_keep.add(idx) + referrers = node_referrers[idx] + to_search.extend(referrers) + id_to_filtered_id: Dict[int, int] = {} + filtered: List[Any] = [] + for i, n in enumerate(nodes): + if i in to_keep: + id_to_filtered_id[i] = len(id_to_filtered_id) + filtered.append(n) + for n in filtered: + n.referrents[:] = [(label, id_to_filtered_id[idx]) + for (label, idx) in n.referrents + if idx in id_to_filtered_id] + return filtered + +def escape(n): + return json.dumps(n) + + +def is_cuda_tensor(obj): + return isinstance(obj, torch.Tensor) and obj.is_cuda and not isinstance(obj, torch._subclasses.FakeTensor) + +def cuda_allocation_context(): + snapshot = torch.cuda.memory._snapshot() + addr_to_frame = {} + for seg in snapshot['segments']: + addr = seg['address'] + for blk in seg['blocks']: + if blk['state'] == 'active_allocated': + frames, real_size = _block_extra(blk) + addr_to_frame[addr] = frames + addr += blk['size'] + + def object_context(obj): + if is_cuda_tensor(obj): + addr = obj.untyped_storage().data_ptr() + frames = addr_to_frame.get(addr) + if frames is not None: + return '\n'.join(_frames_fmt(frames, full_filename=True)) + return None + return object_context + +def to_dot(nodes): + lines = ["digraph GraphName {", "node [shape=rect];", 'rankdir=LR;'] + for i, n in enumerate(nodes): + lines.append(f'{i} [label={escape(n.label)}, color={ "red" if n.root else "black"}];') + + for i, f in 
enumerate(nodes):
+        for label, j in f.referrents:
+            lines.append(f'{i} -> {j} [label = {escape(label)}]')
+    lines.append("}\n")
+    return '\n'.join(lines)
+
+_template = """
+<!-- [HTML page markup elided in this excerpt. The full template embeds the rendered
+     DOT graph via the $DOT placeholder, the per-node hover handlers via $LISTENERS,
+     and a "stacktrace" element whose initial text is the line below.] -->
+Mouse over tensor objects to see where they were allocated.
+"""
+_listener_template = """
+document.getElementById('node{id}').addEventListener('mouseover', function(event) {{
+  document.getElementById("stacktrace").textContent = {stack}
+}})
+"""
+def to_html(nodes):
+    listeners = []
+    for i, n in enumerate(nodes):
+        if n.context is None:
+            continue
+        s = _listener_template.format(id=str(i + 1), stack=escape(f'{n.label}:\n{n.context}'))
+        listeners.append(s)
+    dot = to_dot(nodes)
+    return _template.replace('$DOT', repr(dot)).replace('$LISTENERS', '\n'.join(listeners))
+
+def observe_tensor_cycles(callback):
+    torch.cuda.memory._record_memory_history(max_entries=100000)
+
+    def observer(garbage):
+        if garbage:
+            if not any(is_cuda_tensor(obj) for obj in garbage):
+                logger.info("No CUDA Tensors found in garbage")
+                return
+            callback(to_html(create_graph(garbage)))
+    return observe_garbage(observer)
+
+
+def warn_tensor_cycles():
+    """
+    Install a warning that reports whenever a cycle that is holding CUDA memory is observed.
+
+    The warning produces an .html file that visualizes the cycle,
+    and links it to the stack frame that allocated the CUDA tensor.
+
+    Reference cycles are freed by the cycle collector rather than being cleaned up
+    when the objects in the cycle first become unreachable. If a cycle points to a tensor,
+    the CUDA memory for that tensor will not be freed until garbage collection runs.
+    Accumulation of CUDA allocations can lead to out of memory errors (OOMs), as well as
+    non-deterministic allocation behavior which is harder to debug.
+    """
+    logger.info("Watching Python reference cycles for CUDA Tensors.")
+
+    def write_and_log(html):
+        with NamedTemporaryFile('w', suffix='.html', delete=False) as f:
+            f.write(html)
+            logger.warning('Reference cycle includes a CUDA Tensor; see visualization of cycle %s', f.name)
+    return observe_tensor_cycles(write_and_log)
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/weak.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/weak.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5e33a34d7aac3489480b3383e6c76dbedf47c19
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/weak.py
@@ -0,0 +1,321 @@
+from __future__ import annotations
+
+import weakref
+from weakref import ref
+from _weakrefset import _IterationGuard  # type: ignore[attr-defined]
+from collections.abc import MutableMapping, Mapping
+from torch import Tensor
+import collections.abc as _collections_abc
+
+
+WeakRef = ref
+
+
+__all__ = ['TensorWeakRef', 'WeakIdRef', 'WeakIdKeyDictionary', 'WeakTensorKeyDictionary']
+
+
+# This file defines a variant of WeakKeyDictionary that overrides the hashing
+# behavior of the key to use object identity, rather than the builtin
+# __eq__/__hash__ functions. This is useful for Tensor weak keys, as their
+# __eq__ implementation returns a Tensor (elementwise equality), which means
+# you can't use them directly with the WeakKeyDictionary in the standard library.
+#
+# Our implementation strategy is to create a wrapper weak key object, which we
+# use as a key in a stock Python dictionary. This is similar to how weakref
+# implements WeakKeyDictionary, but instead of using weakref.ref as the
+# wrapper, we use a custom wrapper that has different __eq__ and __hash__
+# behavior. Note that we subsequently store this weak key directly in an
+# ORDINARY dictionary, since the newly constructed WeakIdKey's only use would
+# be as a dictionary key, so it would have no strong references.
Ensuring that +# only live WeakIdKeys are in the map is handled by putting finalizers on the +# original key object. + + +# It is simpler to implement this with composition, but if we want to +# directly reuse the callback mechanism on weakref, we need the weakref +# and the key to be exactly the same object. Reusing the callback mechanism +# minimizes the divergence between our implementation and Lib/weakref.py +# +# NB: Prefer using this when working with weakrefs of Tensors; e.g., do +# WeakIdRef(tensor) rather than weakref.ref(tensor); it handles a number of +# easy to get wrong cases transparently for you. +class WeakIdRef(weakref.ref): + __slots__ = ['_id'] + + def __init__(self, key, callback=None): + # Unlike stock weakref, which preserves hash semantics of the + # original object but lazily defers hash calls until the first + # time the user attempts to hash the weakref, we can eagerly + # cache the id of the key as we know this is definitely the hash + # method + self._id = id(key) + super().__init__(key, callback) # type: ignore[call-arg] + + def __call__(self): + r = super().__call__() + # Special logic for Tensor PyObject resurrection + if hasattr(r, '_fix_weakref'): + r._fix_weakref() # type: ignore[union-attr] + return r + + def __hash__(self): + return self._id + + def __eq__(self, other): + # An attractive but wrong alternate implementation is to only test if + # the stored _ids match. This can lead to an ABA problem if you have: + # + # a1 = A() + # w1 = WeakIdRef(a1) + # del a1 + # a2 = A() # suppose it gets the same ID as a1 + # w2 = WeakIdRef(a2) + # print(w1 == w2) + # + # This should be False, as a1 and a2 are unrelated (and a1 is + # dead anyway) + a = self() + b = other() + if a is not None and b is not None: + return a is b + return self is other + +# This is the same as WeakIdRef but equality is checked using hash() rather than id. +# This will be equivalent to the one above except for classes where hash is not their id. +class _WeakHashRef(weakref.ref): + __slots__ = ['_id'] + + def __init__(self, key, callback=None): + # Unlike stock weakref, which preserves hash semantics of the + # original object but lazily defers hash calls until the first + # time the user attempts to hash the weakref, we can eagerly + # cache the id of the key as we know this is definitely the hash + # method + self._id = hash(key) + super().__init__(key, callback) # type: ignore[call-arg] + + def __call__(self): + r = super().__call__() + # Special logic for Tensor PyObject resurrection + if hasattr(r, '_fix_weakref'): + r._fix_weakref() # type: ignore[union-attr] + return r + + def __hash__(self): + return self._id + + def __eq__(self, other): + # Use hash equality to determine ref equality. + # ScriptObject implements __hash__ to return the wrapped IValue's id, so + # this is equivalent to doing an identity comparison. 
+ a = self() + b = other() + if a is not None and b is not None: + return hash(a) == hash(b) + return self is other + +# This is directly adapted from cpython/Lib/weakref.py +class WeakIdKeyDictionary(MutableMapping): + def __init__(self, dict=None, ref_type=WeakIdRef): # CHANGED + self.data = {} + + self.ref_type = ref_type # CHANGED + + def remove(k, selfref=ref(self)): + self = selfref() + if self is not None: + if self._iterating: + self._pending_removals.append(k) + else: + try: + del self.data[k] + except KeyError: + pass + self._remove = remove + # A list of dead weakrefs (keys to be removed) + self._pending_removals = [] + self._iterating = set() + self._dirty_len = False + if dict is not None: + self.update(dict) + + def _commit_removals(self): + # NOTE: We don't need to call this method before mutating the dict, + # because a dead weakref never compares equal to a live weakref, + # even if they happened to refer to equal objects. + # However, it means keys may already have been removed. + pop = self._pending_removals.pop + d = self.data + while True: + try: + key = pop() + except IndexError: + return + + try: + del d[key] + except KeyError: + pass + + def _scrub_removals(self): + d = self.data + self._pending_removals = [k for k in self._pending_removals if k in d] + self._dirty_len = False + + def __delitem__(self, key): + self._dirty_len = True + del self.data[self.ref_type(key)] # CHANGED + + def __getitem__(self, key): + return self.data[self.ref_type(key)] # CHANGED + + def __len__(self): + if self._dirty_len and self._pending_removals: + # self._pending_removals may still contain keys which were + # explicitly removed, we have to scrub them (see issue #21173). + self._scrub_removals() + return len(self.data) - len(self._pending_removals) + + def __repr__(self): + return f"<{self.__class__.__name__} at {id(self):#x}>" + + def __setitem__(self, key, value): + self.data[self.ref_type(key, self._remove)] = value # CHANGED + + def copy(self): + new = WeakIdKeyDictionary() + with _IterationGuard(self): + for key, value in self.data.items(): + o = key() + if o is not None: + new[o] = value + return new + + __copy__ = copy + + def __deepcopy__(self, memo): + from copy import deepcopy + new = self.__class__() + with _IterationGuard(self): + for key, value in self.data.items(): + o = key() + if o is not None: + new[o] = deepcopy(value, memo) + return new + + def get(self, key, default=None): + return self.data.get(self.ref_type(key), default) # CHANGED + + def __contains__(self, key): + try: + wr = self.ref_type(key) # CHANGED + except TypeError: + return False + return wr in self.data + + def items(self): + with _IterationGuard(self): + for wr, value in self.data.items(): + key = wr() + if key is not None: + yield key, value + + def keys(self): + with _IterationGuard(self): + for wr in self.data: + obj = wr() + if obj is not None: + yield obj + + __iter__ = keys + + def values(self): + with _IterationGuard(self): + for wr, value in self.data.items(): + if wr() is not None: + yield value + + def keyrefs(self): + """Return a list of weak references to the keys. + + The references are not guaranteed to be 'live' at the time + they are used, so the result of calling the references needs + to be checked before being used. This can be used to avoid + creating references that will cause the garbage collector to + keep the keys around longer than needed. 
+ + """ + return list(self.data) + + def popitem(self): + self._dirty_len = True + while True: + key, value = self.data.popitem() + o = key() + if o is not None: + return o, value + + def pop(self, key, *args): + self._dirty_len = True + return self.data.pop(self.ref_type(key), *args) # CHANGED + + def setdefault(self, key, default=None): + return self.data.setdefault(self.ref_type(key, self._remove), default) # CHANGED + + def update(self, dict=None, **kwargs): + d = self.data + if dict is not None: + if not hasattr(dict, "items"): + dict = type({})(dict) + for key, value in dict.items(): + d[self.ref_type(key, self._remove)] = value # CHANGED + if len(kwargs): + self.update(kwargs) + + def __ior__(self, other): + self.update(other) + return self + + def __or__(self, other): + if isinstance(other, _collections_abc.Mapping): + c = self.copy() + c.update(other) + return c + return NotImplemented + + def __ror__(self, other): + if isinstance(other, _collections_abc.Mapping): + c = self.__class__() + c.update(other) + c.update(self) + return c + return NotImplemented + + # Default Mapping equality will tests keys for equality, but + # we want to test ids for equality + def __eq__(self, other): + if not isinstance(other, Mapping): + return NotImplemented + return {id(k): v for k, v in self.items()} == {id(k): v for k, v in other.items()} + +# Convenience alias +WeakTensorKeyDictionary = WeakIdKeyDictionary + + +class TensorWeakRef: + """Wrapper around a weak ref of a Tensor that handles the _fix_weakref() call required when unwrapping a Tensor weakref.""" + + ref: WeakRef[Tensor] + + def __init__(self, tensor: Tensor): + assert isinstance(tensor, Tensor) + self.ref = weakref.ref(tensor) + + def __call__(self): + out = self.ref() + if out is None: + return out + assert isinstance(out, Tensor) + # TODO, add _fix_weakref type binding + out._fix_weakref() # type: ignore[attr-defined] + return out
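# ---------------------------------------------------------------------------
# Minimal usage sketch for torch/utils/weak.py (illustrative only, not part of
# the diff above). It relies only on the names exported by the new module and
# on CPython's refcount-based collection; the variable names are made up.
# ---------------------------------------------------------------------------
import torch
from torch.utils.weak import WeakTensorKeyDictionary, TensorWeakRef

# Key a side table by tensor identity. A stock weakref.WeakKeyDictionary cannot
# be used here because Tensor.__eq__ returns a Tensor (elementwise comparison),
# not a bool, so ordinary key-equality checks break.
metadata = WeakTensorKeyDictionary()

t = torch.randn(4)
metadata[t] = "produced by the data loader"   # stored under id(t), held weakly
assert t in metadata

wr = TensorWeakRef(t)   # weak handle; __call__ re-fixes the Tensor weakref on unwrap
del t                   # drop the last strong reference to the tensor

# With CPython's immediate refcounting, the finalizer installed by the
# dictionary has already removed the entry, and the weak handle is dead.
assert wr() is None
assert len(metadata) == 0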
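# ---------------------------------------------------------------------------
# Illustrative sketch for the cycle-visualization helpers added earlier in this
# diff (warn_tensor_cycles / observe_tensor_cycles). The torch.utils.viz._cycles
# module path is an assumption (that hunk's file header is outside this excerpt),
# and a CUDA device is required for anything to be reported.
# ---------------------------------------------------------------------------
import gc
import torch
from torch.utils.viz._cycles import warn_tensor_cycles

warn_tensor_cycles()   # start recording allocation stacks and watching the GC

class Holder:
    pass

# Create a reference cycle that pins a CUDA tensor. Refcounting alone cannot
# reclaim it; only the cycle collector can, which is what the watcher hooks.
a, b = Holder(), Holder()
a.partner, b.partner = b, a
a.payload = torch.ones(1 << 20, device="cuda")
del a, b

# When the collector frees the cycle, the observer writes an .html visualization
# of the cycle to a temporary file and logs a warning pointing at it.
gc.collect()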