diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/closure.py b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/closure.py new file mode 100644 index 0000000000000000000000000000000000000000..07f1055ee82783643bf5e57c8713d90aa1d15df6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/closure.py @@ -0,0 +1,134 @@ +import os +import threading +from queue import Empty as EmptyQueue, Queue + +from torch._lazy.device_context import get_device_context + + +class ClosureHandler: + def __init__(self): + pass + + def run(self, closure): + """Run closure function + + Args: + closure: callable function to run + """ + closure() + + def __call__(self, closures): + for closure in closures: + self.run(closure) + + +class AsyncClosureHandler(ClosureHandler): + """Handler for Asynchronous Step Closures + Args: + max_queue_size: The maximum length of the closure queue after which + the training loop will block until closures are evaluated. + By default, a reasonable limit of a maximum of 100 on the queue. + This value can be set using the `XLA_MAX_ASYNC_QUEUE` environment + variable. + """ + + def __init__(self, max_queue_size=100): + super().__init__() + self._closure_queue: Queue = Queue( + int(os.environ.get("LTC_MAX_ASYNC_QUEUE", max_queue_size)) + ) + self._closure_exception: Queue = Queue() + self._closure_lock = threading.Lock() + self._closure_event_loop_finished = threading.Event() + self._closure_event_loop = None + + def start_event_loop(self): + """Start closure event loop if not started""" + if self._closure_event_loop is None: + + def event_loop(): + # Run loop until closure event is set and closure queue is empty + while True: + try: + closure = self._closure_queue.get(block=True, timeout=3) + closure() + self._closure_queue.task_done() + except EmptyQueue: + with self._closure_lock: + if self._closure_queue.empty(): + self._closure_event_loop_finished.set() + return + except Exception as e: + self._closure_exception.put(e) + return + + self._closure_event_loop = threading.Thread(target=event_loop) + self._closure_event_loop.start() + + def run(self, closure): + with self._closure_lock: + self._closure_queue.put(closure, block=True) + if ( + self._closure_event_loop is None + or not self._closure_event_loop.is_alive() + ): + try: + e = self._closure_exception.get(block=False) + raise RuntimeError( + "Cannot run asynchronous closure due to previously raised exception" + ) from e + except EmptyQueue: + self._closure_event_loop = None + self.start_event_loop() + + +def add_step_closure(closure, args=(), run_async=False): + """Adds a closure to the list of the ones to be run at the end of the step. + Many times during model training there is the need to print/report (print to + console, post to tensorboard, etc...) information which require the content of + intermediary tensors to be inspected. + Inspecting different tensors content in different points of the model code + requires many executions and typically causes performance issues. + Adding a step closure will ensure that it will be run after the barrier, when + all the live tensors will be already materialized to device data. + Live tensors which will include the ones captured by the closure arguments. + So using `add_step_closure()` will ensure a single execution will be + performed, even when multiple closures are queued, requiring multiple tensors + to be inspected. + Step closures will be run sequentially in the order they have been queued. 
+ Note that even though using this API the execution will be optimized, it is + advised to throttle the printing/reporting events once every N steps. + Args: + closure (callable): The function to be called. + args (tuple): The arguments to be passed to the closure. + run_async: If True, run the closure asynchronously. + """ + devctx = get_device_context() + closures_type = "async_step_closures" if run_async else "step_closures" + step_closures = getattr(devctx, closures_type, None) + if step_closures is None: + step_closures = [] + setattr(devctx, closures_type, step_closures) + step_closures.append(lambda a=args: closure(*a)) + + +def run_step_closures(): + devctx = get_device_context() + async_step_closures = getattr(devctx, "async_step_closures", None) + if async_step_closures is not None: + devctx.async_step_closures = [] + async_closure_handler = getattr(devctx, "async_closure_handler", None) + if async_closure_handler is None: + async_closure_handler = AsyncClosureHandler() + devctx.async_closure_handler = async_closure_handler + async_closure_handler(async_step_closures) + + step_closures = getattr(devctx, "step_closures", None) + if step_closures is not None: + devctx.step_closures = [] + closure_handler = getattr(devctx, "closure_handler", None) + if closure_handler is None: + closure_handler = ClosureHandler() + devctx.closure_handler = closure_handler + closure_handler(step_closures) + return devctx diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/device_context.py b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/device_context.py new file mode 100644 index 0000000000000000000000000000000000000000..840c7f8e50d039c9b72f31b16e8d69f706920534 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/device_context.py @@ -0,0 +1,25 @@ +import threading +from typing import Any, Dict + +import torch._C._lazy + + +class DeviceContext: + _CONTEXTS: Dict[str, Any] = dict() + _CONTEXTS_LOCK = threading.Lock() + + def __init__(self, device): + self.device = device + + +def get_device_context(device=None): + if device is None: + device = torch._C._lazy._get_default_device_type() + else: + device = str(device) + with DeviceContext._CONTEXTS_LOCK: + devctx = DeviceContext._CONTEXTS.get(device, None) + if devctx is None: + devctx = DeviceContext(device) + DeviceContext._CONTEXTS[device] = devctx + return devctx diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/ir_cache.py b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/ir_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..4270684d29434747f53177e48a58fd8dc9c7c44b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/ir_cache.py @@ -0,0 +1,13 @@ +import torch._C._lazy + + +def dump(dot_file_name: str): + """Dump TrieCache in the dot format""" + return torch._C._lazy._dump_ir_cache(dot_file_name) + + +def reset(): + """Clear TrieCache. This is needed in testing to avoid + node reusing between different tests. 
+ """ + return torch._C._lazy._clear_ir_cache() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/metrics.py b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..2d7db730556779a353a1bb9f4b2529464d4bfc95 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/metrics.py @@ -0,0 +1,21 @@ +import torch._C._lazy + + +def reset(): + """Resets all metric counters.""" + torch._C._lazy._reset_metrics() + + +def counter_names(): + """Retrieves all the currently active counter names.""" + return torch._C._lazy._counter_names() + + +def counter_value(name: str): + """Return the value of the counter with the speficied name""" + return torch._C._lazy._counter_value(name) + + +def metrics_report(): + """Return the combined (lazy core and backend) metric report""" + return torch._C._lazy._metrics_report() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_lazy/ts_backend.py b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/ts_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..184223771932d80274e479a39c829300c9c872a7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_lazy/ts_backend.py @@ -0,0 +1,6 @@ +import torch._C._lazy_ts_backend + + +def init(): + """Initializes the lazy Torchscript backend""" + torch._C._lazy_ts_backend._init() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cpu/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cpu/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1356c1d8eb4fb123c0e7d5aa039790fdf93a8605 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cpu/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e4fe09f55632e42d0e8cd448305440b7fc2dc602 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/__init__.py @@ -0,0 +1 @@ +from .autocast_mode import autocast diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..110ea1f0425b1e33dd8719d24012154db99fca62 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/autocast_mode.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/autocast_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f59488b819fe6a31dbfde5265d99eb6e33cebb1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/autocast_mode.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/autocast_mode.py b/env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/autocast_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..a29a968917229d832d9a6789d9f73cb974bd011a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/autocast_mode.py @@ -0,0 +1,43 @@ +from typing import Any + +import torch + +__all__ 
= ["autocast"] + + +class autocast(torch.amp.autocast_mode.autocast): + r""" + See :class:`torch.autocast`. + ``torch.cpu.amp.autocast(args...)`` is equivalent to ``torch.autocast("cpu", args...)`` + """ + + def __init__( + self, + enabled: bool = True, + dtype: torch.dtype = torch.bfloat16, + cache_enabled: bool = True, + ): + if torch._jit_internal.is_scripting(): + self._enabled = enabled + self.device = "cpu" + self.fast_dtype = dtype + return + super().__init__( + "cpu", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled + ) + + def __enter__(self): + if torch._jit_internal.is_scripting(): + return self + return super().__enter__() + + # TODO: discuss a unified TorchScript-friendly API for autocast + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any): # type: ignore[override] + if torch._jit_internal.is_scripting(): + return + return super().__exit__(exc_type, exc_val, exc_tb) + + def __call__(self, func): + if torch._jit_internal.is_scripting(): + return func + return super().__call__(func) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/optim/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..79fe92f5f0ce9add148bd7d05a0a6ecd64199432 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/__init__.py @@ -0,0 +1,39 @@ +""" +:mod:`torch.optim` is a package implementing various optimization algorithms. + +Most commonly used methods are already supported, and the interface is general +enough, so that more sophisticated ones can also be easily integrated in the +future. +""" + +from .adadelta import Adadelta +from .adagrad import Adagrad +from .adam import Adam +from .adamw import AdamW +from .sparse_adam import SparseAdam +from .adamax import Adamax +from .asgd import ASGD +from .sgd import SGD +from .radam import RAdam +from .rprop import Rprop +from .rmsprop import RMSprop +from .optimizer import Optimizer +from .nadam import NAdam +from .lbfgs import LBFGS +from . import lr_scheduler +from . import swa_utils + +del adadelta +del adagrad +del adam +del adamw +del sparse_adam +del adamax +del asgd +del sgd +del radam +del rprop +del rmsprop +del optimizer +del nadam +del lbfgs diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/__init__.pyi b/env-llmeval/lib/python3.10/site-packages/torch/optim/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8d35bab14c207391d7a56573fba3a9861f37d242 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/__init__.pyi @@ -0,0 +1,15 @@ +from . 
import lr_scheduler as lr_scheduler, swa_utils as swa_utils +from .adadelta import Adadelta as Adadelta +from .adagrad import Adagrad as Adagrad +from .adam import Adam as Adam +from .adamax import Adamax as Adamax +from .adamw import AdamW as AdamW +from .asgd import ASGD as ASGD +from .lbfgs import LBFGS as LBFGS +from .nadam import NAdam as NAdam +from .optimizer import Optimizer as Optimizer +from .radam import RAdam as RAdam +from .rmsprop import RMSprop as RMSprop +from .rprop import Rprop as Rprop +from .sgd import SGD as SGD +from .sparse_adam import SparseAdam as SparseAdam diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2646cdb28afe101c74283eaa8200800db56c4149 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/_functional.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/_functional.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6bb1094bee74e6c2d02a18a11bbd0033fb97659 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/_functional.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adadelta.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adadelta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5d03952c1a886a086cd650d4c0b95c697c7a5a0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adadelta.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adagrad.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adagrad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8afdbe923d9e576af1329c3694c1cf785cb8e2af Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adagrad.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adam.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8dcf6d966691bce80ef0bfef6374d9959b2a317 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adam.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adamax.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adamax.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed53d542ab3ece0cdc5dc0414e5867e173b95ffc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adamax.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adamw.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adamw.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3f5f63002d5df2d47fed9ff8f73cd77ccbdc2b3 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adamw.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/asgd.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/asgd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29750676216dec21d27f6931712b3881f813f103 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/asgd.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/lbfgs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/lbfgs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a228cdf58442adc460d8d900c935d9bac58fe1f5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/lbfgs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/lr_scheduler.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/lr_scheduler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d57e05a077461ad483b95653f18b7f5fd75cc328 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/lr_scheduler.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/nadam.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/nadam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5fca2aa0a4442218af665380e3e798fece94b148 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/nadam.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/optimizer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba43a0fe191dd47dfb275855fb2bfc994060e5a0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/optimizer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/radam.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/radam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1b3462d70dbb788410b849ec5c5a0c1b845eec1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/radam.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/rmsprop.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/rmsprop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc381252046db72153f5268a8d14a67ea7c3d4bf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/rmsprop.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/rprop.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/rprop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22dce55fd730b9d7d7a1dadb81ef60c3a8e8673e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/rprop.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/sgd.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/sgd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e04487484bf1f275cc82ff247faafc193779ce56 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/sgd.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/sparse_adam.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/sparse_adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3fb2d11f66e1e15cbf6b1fc33d49a71768fc3bfd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/sparse_adam.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/swa_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/swa_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..384fb15a61f8a79b5aa792d430099645ae9c471a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/swa_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/_functional.py b/env-llmeval/lib/python3.10/site-packages/torch/optim/_functional.py new file mode 100644 index 0000000000000000000000000000000000000000..7ea361d8efba10c59e3ecf3b7353035ccb995f4e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/_functional.py @@ -0,0 +1,79 @@ +r"""Functional interface.""" +import math +from torch import Tensor +from typing import List + +from .adadelta import adadelta # type: ignore[attr-defined] # noqa: F401 +from .adagrad import adagrad, _make_sparse # type: ignore[attr-defined] # noqa: F401 +from .adam import adam # type: ignore[attr-defined] # noqa: F401 +from .adamw import adamw # type: ignore[attr-defined] # noqa: F401 +from .adamax import adamax # type: ignore[attr-defined] # noqa: F401 +from .asgd import asgd # type: ignore[attr-defined] # noqa: F401 +from .nadam import nadam # type: ignore[attr-defined] # noqa: F401 +from .radam import radam # type: ignore[attr-defined] # noqa: F401 +from .rmsprop import rmsprop # type: ignore[attr-defined] # noqa: F401 +from .rprop import rprop # type: ignore[attr-defined] # noqa: F401 +from .sgd import sgd # type: ignore[attr-defined] # noqa: F401 + + +# TODO: use foreach API in optim._functional to do all the computation + + +def sparse_adam(params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + state_steps: List[int], + *, + eps: float, + beta1: float, + beta2: float, + lr: float, + maximize: bool): + r"""Functional API that performs Sparse Adam algorithm computation. + + See :class:`~torch.optim.SparseAdam` for details. 
+ """ + for i, param in enumerate(params): + grad = grads[i] + grad = grad if not maximize else -grad + grad = grad.coalesce() # the update is non-linear so indices must be unique + grad_indices = grad._indices() + grad_values = grad._values() + if grad_values.numel() == 0: + # Skip update for empty grad + continue + size = grad.size() + + exp_avg = exp_avgs[i] + exp_avg_sq = exp_avg_sqs[i] + step = state_steps[i] + + + def make_sparse(values): + constructor = grad.new + if grad_indices.dim() == 0 or values.dim() == 0: + return constructor().resize_as_(grad) + return constructor(grad_indices, values, size) + + # Decay the first and second moment running average coefficient + # old <- b * old + (1 - b) * new + # <==> old += (1 - b) * (new - old) + old_exp_avg_values = exp_avg.sparse_mask(grad)._values() + exp_avg_update_values = grad_values.sub(old_exp_avg_values).mul_(1 - beta1) + exp_avg.add_(make_sparse(exp_avg_update_values)) + old_exp_avg_sq_values = exp_avg_sq.sparse_mask(grad)._values() + exp_avg_sq_update_values = grad_values.pow(2).sub_(old_exp_avg_sq_values).mul_(1 - beta2) + exp_avg_sq.add_(make_sparse(exp_avg_sq_update_values)) + + # Dense addition again is intended, avoiding another sparse_mask + numer = exp_avg_update_values.add_(old_exp_avg_values) + exp_avg_sq_update_values.add_(old_exp_avg_sq_values) + denom = exp_avg_sq_update_values.sqrt_().add_(eps) + del exp_avg_update_values, exp_avg_sq_update_values + + bias_correction1 = 1 - beta1 ** step + bias_correction2 = 1 - beta2 ** step + step_size = lr * math.sqrt(bias_correction2) / bias_correction1 + + param.add_(make_sparse(-step_size * numer.div_(denom))) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/_multi_tensor/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/optim/_multi_tensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..32ea419566044ef8cdddcd9f1ccb370b4ece2e6a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/_multi_tensor/__init__.py @@ -0,0 +1,28 @@ +""" +:mod:`torch.optim._multi_tensor` is a package implementing various optimization algorithms. +Most commonly used methods are already supported, and the interface is general +enough, so that more sophisticated ones can be also easily integrated in the +future. 
+""" +from functools import partialmethod +from torch import optim + +def partialclass(cls, *args, **kwargs): + + class NewCls(cls): + __init__ = partialmethod(cls.__init__, *args, **kwargs) + + return NewCls + + +Adam = partialclass(optim.Adam, foreach=True) +AdamW = partialclass(optim.AdamW, foreach=True) +NAdam = partialclass(optim.NAdam, foreach=True) +SGD = partialclass(optim.SGD, foreach=True) +RAdam = partialclass(optim.RAdam, foreach=True) +RMSprop = partialclass(optim.RMSprop, foreach=True) +Rprop = partialclass(optim.Rprop, foreach=True) +ASGD = partialclass(optim.ASGD, foreach=True) +Adamax = partialclass(optim.Adamax, foreach=True) +Adadelta = partialclass(optim.Adadelta, foreach=True) +Adagrad = partialclass(optim.Adagrad, foreach=True) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/_multi_tensor/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/optim/_multi_tensor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d563b2a7b3f4ac933aa3cb2465c7956d49b269b4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/optim/_multi_tensor/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/adadelta.pyi b/env-llmeval/lib/python3.10/site-packages/torch/optim/adadelta.pyi new file mode 100644 index 0000000000000000000000000000000000000000..0f475331c168677de6ff760350e797565957eb51 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/adadelta.pyi @@ -0,0 +1,11 @@ +from .optimizer import Optimizer, ParamsT + +class Adadelta(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = ..., + rho: float = ..., + eps: float = ..., + weight_decay: float = ..., + ) -> None: ... 
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/adagrad.py b/env-llmeval/lib/python3.10/site-packages/torch/optim/adagrad.py new file mode 100644 index 0000000000000000000000000000000000000000..2634333863f9962139a066e0eb44fcc04ac2a45f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/adagrad.py @@ -0,0 +1,383 @@ +import torch +from torch import Tensor + +from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _view_as_real, + _default_to_fused_or_foreach, _differentiable_doc, _foreach_doc, _maximize_doc) +from typing import List, Optional + +__all__ = ["Adagrad", "adagrad"] + + +class Adagrad(Optimizer): + def __init__( + self, + params, + lr=1e-2, + lr_decay=0, + weight_decay=0, + initial_accumulator_value=0, + eps=1e-10, + foreach: Optional[bool] = None, + *, + maximize: bool = False, + differentiable: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= lr_decay: + raise ValueError(f"Invalid lr_decay value: {lr_decay}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + if not 0.0 <= initial_accumulator_value: + raise ValueError( + f"Invalid initial_accumulator_value value: {initial_accumulator_value}" + ) + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + + defaults = dict( + lr=lr, + lr_decay=lr_decay, + eps=eps, + weight_decay=weight_decay, + initial_accumulator_value=initial_accumulator_value, + foreach=foreach, + maximize=maximize, + differentiable=differentiable, + ) + super().__init__(params, defaults) + + for group in self.param_groups: + for p in group["params"]: + state = self.state[p] + state["step"] = torch.tensor(0.0, dtype=torch.float32) + init_value = ( + complex(initial_accumulator_value, initial_accumulator_value) + if torch.is_complex(p) + else initial_accumulator_value + ) + state["sum"] = torch.full_like( + p, init_value, memory_format=torch.preserve_format + ) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("foreach", None) + group.setdefault("maximize", False) + group.setdefault("differentiable", False) + + state_values = list(self.state.values()) + step_is_tensor = (len(state_values) != 0) and torch.is_tensor( + state_values[0]["step"] + ) + if not step_is_tensor: + for s in state_values: + s["step"] = torch.tensor(float(s["step"]), dtype=torch.float32) + + def share_memory(self): + for group in self.param_groups: + for p in group["params"]: + state = self.state[p] + state["sum"].share_memory_() + + def _init_group(self, group, params_with_grad, grads, state_sums, state_steps): + has_sparse_grad, has_complex = False, False + for p in group["params"]: + if p.grad is not None: + has_sparse_grad |= p.grad.is_sparse + has_complex |= torch.is_complex(p) + params_with_grad.append(p) + grads.append(p.grad) + state = self.state[p] + state_sums.append(state["sum"]) + state_steps.append(state["step"]) + + return has_sparse_grad, has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + state_sums = [] + state_steps = [] + + has_sparse_grad, has_complex = self._init_group(group, params_with_grad, grads, state_sums, state_steps) + + adagrad( + params_with_grad, + grads, + state_sums, + state_steps, + lr=group["lr"], + weight_decay=group["weight_decay"], + lr_decay=group["lr_decay"], + eps=group["eps"], + has_sparse_grad=has_sparse_grad, + foreach=group["foreach"], + maximize=group["maximize"], + differentiable=group["differentiable"], + has_complex=has_complex, + ) + + return loss + + +Adagrad.__doc__ = r"""Implements Adagrad algorithm. + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta) + \text{ (objective)}, \: \lambda \text{ (weight decay)}, \\ + &\hspace{12mm} \tau \text{ (initial accumulator value)}, \: \eta\text{ (lr decay)}\\ + &\textbf{initialize} : state\_sum_0 \leftarrow 0 \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm} \tilde{\gamma} \leftarrow \gamma / (1 +(t-1) \eta) \\ + &\hspace{5mm} \textbf{if} \: \lambda \neq 0 \\ + &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm}state\_sum_t \leftarrow state\_sum_{t-1} + g^2_t \\ + &\hspace{5mm}\theta_t \leftarrow + \theta_{t-1}- \tilde{\gamma} \frac{g_t}{\sqrt{state\_sum_t}+\epsilon} \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `Adaptive Subgradient Methods for Online Learning + and Stochastic Optimization`_. + """ + fr""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-2) + lr_decay (float, optional): learning rate decay (default: 0) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-10) + {_foreach_doc} + {_maximize_doc} + {_differentiable_doc} + + .. _Adaptive Subgradient Methods for Online Learning and Stochastic + Optimization: http://jmlr.org/papers/v12/duchi11a.html + + """ + + +def adagrad( + params: List[Tensor], + grads: List[Tensor], + state_sums: List[Tensor], + state_steps: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting these as kwargs for now as functional API is compiled by torch/distributed/optim + has_sparse_grad: bool = None, + foreach: Optional[bool] = None, + differentiable: bool = False, + has_complex: bool = False, + *, + lr: float, + weight_decay: float, + lr_decay: float, + eps: float, + maximize: bool, +): + r"""Functional API that performs Adagrad algorithm computation. + + See :class:`~torch.optim.Adagrad` for details. 
+ """ + if not all(isinstance(t, torch.Tensor) for t in state_steps): + raise RuntimeError( + "API has changed, `state_steps` argument must contain a list of singleton tensors" + ) + + if foreach is None: + _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False) + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + + if foreach and not torch.jit.is_scripting(): + func = _multi_tensor_adagrad + else: + func = _single_tensor_adagrad + + func( + params, + grads, + state_sums, + state_steps, + lr=lr, + weight_decay=weight_decay, + lr_decay=lr_decay, + eps=eps, + has_sparse_grad=has_sparse_grad, + maximize=maximize, + differentiable=differentiable, + has_complex=has_complex, + ) + + +def _make_sparse(grad, grad_indices, values): + size = grad.size() + if grad_indices.numel() == 0 or values.numel() == 0: + return torch.empty_like(grad) + return torch.sparse_coo_tensor(grad_indices, values, size) + + +def _single_tensor_adagrad( + params: List[Tensor], + grads: List[Tensor], + state_sums: List[Tensor], + state_steps: List[Tensor], + *, + lr: float, + weight_decay: float, + lr_decay: float, + eps: float, + has_sparse_grad: bool, + maximize: bool, + differentiable: bool, + has_complex: bool, +): + + for (param, grad, state_sum, step_t) in zip(params, grads, state_sums, state_steps): + # update step + step_t += 1 + step = _get_value(step_t) + grad = grad if not maximize else -grad + + if weight_decay != 0: + if grad.is_sparse: + raise RuntimeError( + "weight_decay option is not compatible with sparse gradients" + ) + grad = grad.add(param, alpha=weight_decay) + + clr = lr / (1 + (step - 1) * lr_decay) + + if grad.is_sparse: + grad = grad.coalesce() # the update is non-linear so indices must be unique + grad_indices = grad._indices() + grad_values = grad._values() + + state_sum.add_(_make_sparse(grad, grad_indices, grad_values.pow(2))) + std = state_sum.sparse_mask(grad) + std_values = std._values().sqrt_().add_(eps) + param.add_( + _make_sparse(grad, grad_indices, grad_values / std_values), alpha=-clr + ) + else: + is_complex = torch.is_complex(param) + if is_complex: + grad = torch.view_as_real(grad) + state_sum = torch.view_as_real(state_sum) + param = torch.view_as_real(param) + state_sum.addcmul_(grad, grad, value=1) + if differentiable: + std = state_sum.sqrt() + eps + else: + std = state_sum.sqrt().add_(eps) + param.addcdiv_(grad, std, value=-clr) + if is_complex: + param = torch.view_as_complex(param) + state_sum = torch.view_as_complex(state_sum) + + +def _multi_tensor_adagrad( + params: List[Tensor], + grads: List[Tensor], + state_sums: List[Tensor], + state_steps: List[Tensor], + *, + lr: float, + weight_decay: float, + lr_decay: float, + eps: float, + has_sparse_grad: bool, + maximize: bool, + differentiable: bool, + has_complex: bool, +): + + assert not differentiable, "_foreach ops don't support autograd" + + # Foreach functions will throw errors if given empty lists + if len(params) == 0: + return + + grouped_tensorlists = Optimizer._group_tensors_by_device_and_dtype([params, grads, state_sums, state_steps]) + for ((device_params, device_grads, device_state_sums, device_state_steps), _) in grouped_tensorlists.values(): + device_has_sparse_grad = has_sparse_grad and any(grad.is_sparse for grad in device_grads) + + if device_has_sparse_grad: + _single_tensor_adagrad( + device_params, + device_grads, + device_state_sums, + device_state_steps, + lr=lr, + weight_decay=weight_decay, + 
lr_decay=lr_decay, + eps=eps, + has_sparse_grad=True, + maximize=False, + differentiable=differentiable, + has_complex=has_complex, + ) + continue + + if maximize: + device_grads = torch._foreach_neg(device_grads) + + # Handle complex parameters + if has_complex: + _view_as_real(device_params, device_grads, device_state_sums) + + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. + if device_state_steps[0].is_cpu: + torch._foreach_add_(device_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0) + else: + torch._foreach_add_(device_state_steps, 1) + + if weight_decay != 0: + # Re-use the intermediate memory (device_grads) already allocated for maximize + if maximize: + torch._foreach_add_(device_grads, device_params, alpha=weight_decay) + else: + device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay) + + minus_clr = [-lr / (1 + (_get_value(step) - 1) * lr_decay) for step in device_state_steps] + + torch._foreach_addcmul_(device_state_sums, device_grads, device_grads, value=1) + + std = torch._foreach_sqrt(device_state_sums) + torch._foreach_add_(std, eps) + + if weight_decay != 0 or maximize: + # Again, re-use the intermediate memory (device_grads) already allocated + torch._foreach_mul_(device_grads, minus_clr) + numerator = device_grads + else: + numerator = torch._foreach_mul(device_grads, minus_clr) + + torch._foreach_addcdiv_(device_params, numerator, std) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/adagrad.pyi b/env-llmeval/lib/python3.10/site-packages/torch/optim/adagrad.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4557ece1417f9bf3d3c56497355e9147cc4dedbd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/adagrad.pyi @@ -0,0 +1,12 @@ +from .optimizer import Optimizer, ParamsT + +class Adagrad(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = ..., + lr_decay: float = ..., + weight_decay: float = ..., + initial_accumulator_value: float = ..., + eps: float = ..., + ) -> None: ... 
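Editorial aside (not part of the diff): a minimal usage sketch of the Adagrad implementation added above, using a toy quadratic objective; the parameter `w` and the loop are illustrative assumptions, not code from the patch.

    import torch
    from torch.optim import Adagrad

    # Toy parameter and objective for illustration only.
    w = torch.randn(3, requires_grad=True)
    opt = Adagrad([w], lr=0.1)

    for _ in range(5):
        opt.zero_grad()
        loss = (w ** 2).sum()
        loss.backward()
        opt.step()  # dispatches to the adagrad() functional update defined above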
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/adam.py b/env-llmeval/lib/python3.10/site-packages/torch/optim/adam.py new file mode 100644 index 0000000000000000000000000000000000000000..fade018c883472dd9d9156466c083b0817d1a5e1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/adam.py @@ -0,0 +1,658 @@ +from typing import List, Optional, Union, Tuple + +import torch +from torch import Tensor +from .optimizer import (Optimizer, ParamsT, _use_grad_for_differentiable, _get_value, + _stack_if_compiling, _dispatch_sqrt, _default_to_fused_or_foreach, + _capturable_doc, _differentiable_doc, _foreach_doc, _fused_doc, + _maximize_doc, _view_as_real) +from torch.utils._foreach_utils import _get_fused_kernels_supported_devices + +__all__ = ['Adam', 'adam'] + + +class Adam(Optimizer): + def __init__(self, + params: ParamsT, + lr: Union[float, Tensor] = 1e-3, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-8, + weight_decay: float = 0, + amsgrad: bool = False, + *, + foreach: Optional[bool] = None, + maximize: bool = False, + capturable: bool = False, + differentiable: bool = False, + fused: Optional[bool] = None): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if isinstance(lr, Tensor) and foreach and not capturable: + raise ValueError("lr as a Tensor is not supported for capturable=False and foreach=True") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, amsgrad=amsgrad, + maximize=maximize, foreach=foreach, capturable=capturable, + differentiable=differentiable, fused=fused) + super().__init__(params, defaults) + + if fused: + if differentiable: + raise RuntimeError("`fused` does not support `differentiable`") + self._step_supports_amp_scaling = True + # TODO(crcrpar): [low prec params & their higher prec copy] + # Support AMP with FP16/BF16 model params which would need + # higher prec copy of params to do update math in higher prec to + # alleviate the loss of information. 
+ fused_supported_devices = _get_fused_kernels_supported_devices() + if not all( + p.device.type in fused_supported_devices and + torch.is_floating_point(p) for pg in self.param_groups for p in pg['params'] + ): + raise RuntimeError("`fused=True` requires all the params to be floating point Tensors of " + f"supported devices: {fused_supported_devices}.") + if foreach: + raise RuntimeError("`fused` and `foreach` cannot be `True` together.") + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + group.setdefault('maximize', False) + group.setdefault('foreach', None) + group.setdefault('capturable', False) + group.setdefault('differentiable', False) + group.setdefault('fused', None) + state_values = list(self.state.values()) + step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step']) + if not step_is_tensor: + for s in state_values: + s['step'] = torch.tensor(float(s['step']), dtype=torch.float32) + + def _init_group( + self, + group, + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps + ): + has_complex = False + for p in group['params']: + if p.grad is not None: + has_complex |= torch.is_complex(p) + params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') + grads.append(p.grad) + + state = self.state[p] + # Lazy state initialization + if len(state) == 0: + # note(crcrpar): [special device hosting for step] + # Deliberately host `step` on CPU if both capturable and fused are off. + # This is because kernel launches are costly on CUDA and XLA. + state['step'] = ( + torch.zeros((), dtype=torch.float32, device=p.device) + if group['capturable'] or group['fused'] + else torch.tensor(0.0, dtype=torch.float32) + ) + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) + if group['amsgrad']: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) + + exp_avgs.append(state['exp_avg']) + exp_avg_sqs.append(state['exp_avg_sq']) + + if group['amsgrad']: + max_exp_avg_sqs.append(state['max_exp_avg_sq']) + if group['differentiable'] and state['step'].requires_grad: + raise RuntimeError('`requires_grad` is not supported for `step` in differentiable mode') + + # Foreach without capturable does not support a tensor lr + if group['foreach'] and torch.is_tensor(group['lr']) and not group['capturable']: + raise RuntimeError('lr as a Tensor is not supported for capturable=False and foreach=True') + + state_steps.append(state['step']) + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + self._cuda_graph_capture_health_check() + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + max_exp_avg_sqs = [] + state_steps = [] + beta1, beta2 = group['betas'] + + has_complex = self._init_group( + group, + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps) + + adam( + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=group['amsgrad'], + has_complex=has_complex, + beta1=beta1, + beta2=beta2, + lr=group['lr'], + weight_decay=group['weight_decay'], + eps=group['eps'], + maximize=group['maximize'], + foreach=group['foreach'], + capturable=group['capturable'], + differentiable=group['differentiable'], + fused=group['fused'], + grad_scale=getattr(self, "grad_scale", None), + found_inf=getattr(self, "found_inf", None), + ) + + return loss + + +Adam.__doc__ = r"""Implements Adam algorithm. + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \beta_1, \beta_2 + \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)} \\ + &\hspace{13mm} \lambda \text{ (weight decay)}, \: \textit{amsgrad}, + \:\textit{maximize} \\ + &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)}, + v_0\leftarrow 0 \text{ (second moment)},\: \widehat{v_0}^{max}\leftarrow 0\\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + + &\hspace{5mm}\textbf{if} \: \textit{maximize}: \\ + &\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}\textbf{if} \: \lambda \neq 0 \\ + &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ + &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\ + &\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\ + &\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\ + &\hspace{5mm}\textbf{if} \: amsgrad \\ + &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max}, + \widehat{v_t}) \\ + &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/ + \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big) \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/ + \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_. + """ + fr""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR + is not yet supported for all our implementations. Please use a float + LR if you are not also specifying fused=True or capturable=True. 
+ betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + amsgrad (bool, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + {_foreach_doc} + {_maximize_doc} + {_capturable_doc} + {_differentiable_doc} + {_fused_doc} + .. _Adam\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + + """ + + +def adam(params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + foreach: Optional[bool] = None, + capturable: bool = False, + differentiable: bool = False, + fused: Optional[bool] = None, + grad_scale: Optional[Tensor] = None, + found_inf: Optional[Tensor] = None, + has_complex: bool = False, + *, + amsgrad: bool, + beta1: float, + beta2: float, + lr: Union[float, Tensor], + weight_decay: float, + eps: float, + maximize: bool): + r"""Functional API that performs Adam algorithm computation. + + See :class:`~torch.optim.Adam` for details. + """ + # Respect when the user inputs False/True for foreach or fused. We only want to change + # the default when neither have been user-specified. Note that we default to foreach + # and pass False to use_fused. This is not a mistake--we want to give the fused impl + # bake-in time before making it the default, even if it is typically faster. + if fused is None and foreach is None: + _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False) + # Do not flip on foreach for the unsupported case where lr is a Tensor and capturable=False. 
+ if foreach and isinstance(lr, Tensor) and not capturable: + foreach = False + if fused is None: + fused = False + if foreach is None: + foreach = False + + # this check is slow during compilation, so we skip it + # if it's strictly needed we can add this check back in dynamo + if not torch._utils.is_compiling() and not all(isinstance(t, torch.Tensor) for t in state_steps): + raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors") + + if foreach and torch.jit.is_scripting(): + raise RuntimeError('torch.jit.script not supported with foreach optimizers') + if fused and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with fused optimizers") + + if fused and not torch.jit.is_scripting(): + func = _fused_adam + elif foreach and not torch.jit.is_scripting(): + func = _multi_tensor_adam + else: + func = _single_tensor_adam + + func(params, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=amsgrad, + has_complex=has_complex, + beta1=beta1, + beta2=beta2, + lr=lr, + weight_decay=weight_decay, + eps=eps, + maximize=maximize, + capturable=capturable, + differentiable=differentiable, + grad_scale=grad_scale, + found_inf=found_inf) + + +def _single_tensor_adam(params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + amsgrad: bool, + has_complex: bool, + beta1: float, + beta2: float, + lr: Union[float, Tensor], + weight_decay: float, + eps: float, + maximize: bool, + capturable: bool, + differentiable: bool): + + assert grad_scale is None and found_inf is None + + if torch.jit.is_scripting(): + # this assert is due to JIT being dumb and not realizing that the ops below + # have overloads to handle both float and Tensor lrs, so we just assert it's + # a float since most people using JIT are using floats + assert isinstance(lr, float) + + for i, param in enumerate(params): + grad = grads[i] if not maximize else -grads[i] + exp_avg = exp_avgs[i] + exp_avg_sq = exp_avg_sqs[i] + step_t = state_steps[i] + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + assert ( + (param.is_cuda and step_t.is_cuda) or (param.is_xla and step_t.is_xla) + ), "If capturable=True, params and state_steps must be CUDA or XLA tensors." + + # update step + step_t += 1 + + if weight_decay != 0: + grad = grad.add(param, alpha=weight_decay) + + if torch.is_complex(param): + grad = torch.view_as_real(grad) + exp_avg = torch.view_as_real(exp_avg) + exp_avg_sq = torch.view_as_real(exp_avg_sq) + if amsgrad: + max_exp_avg_sqs[i] = torch.view_as_real(max_exp_avg_sqs[i]) + param = torch.view_as_real(param) + + # Decay the first and second moment running average coefficient + exp_avg.lerp_(grad, 1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2) + + if capturable or differentiable: + step = step_t + + bias_correction1 = 1 - beta1 ** step + bias_correction2 = 1 - beta2 ** step + + step_size = lr / bias_correction1 + step_size_neg = step_size.neg() + + bias_correction2_sqrt = bias_correction2.sqrt() + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. 
till now + if differentiable: + max_exp_avg_sq = max_exp_avg_sqs[i].clone() + else: + max_exp_avg_sq = max_exp_avg_sqs[i] + + max_exp_avg_sqs[i].copy_(torch.maximum(max_exp_avg_sq, exp_avg_sq)) + + # Uses the max. for normalizing running avg. of gradient + # Folds in (admittedly ugly) 1-elem step_size math here to avoid extra param-set-sized read+write + # (can't fold it into addcdiv_ below because addcdiv_ requires value is a Number, not a Tensor) + denom = (max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg) + else: + denom = (exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg) + + param.addcdiv_(exp_avg, denom) + else: + step = _get_value(step_t) + + bias_correction1 = 1 - beta1 ** step + bias_correction2 = 1 - beta2 ** step + + step_size = lr / bias_correction1 + + bias_correction2_sqrt = _dispatch_sqrt(bias_correction2) + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i]) + + # Use the max. for normalizing running avg. of gradient + denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps) + else: + denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps) + + param.addcdiv_(exp_avg, denom, value=-step_size) + + # Lastly, switch back to complex view + if amsgrad and torch.is_complex(params[i]): + max_exp_avg_sqs[i] = torch.view_as_complex(max_exp_avg_sqs[i]) + + +def _multi_tensor_adam(params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + amsgrad: bool, + has_complex: bool, + beta1: float, + beta2: float, + lr: Union[float, Tensor], + weight_decay: float, + eps: float, + maximize: bool, + capturable: bool, + differentiable: bool): + if len(params) == 0: + return + + if isinstance(lr, Tensor) and not capturable: + raise RuntimeError("lr as a Tensor is not supported for capturable=False and foreach=True") + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + assert all(p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)), \ + "If capturable=True, params and state_steps must be CUDA tensors." + + assert grad_scale is None and found_inf is None + + assert not differentiable, "_foreach ops don't support autograd" + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( + [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]) + for (( + device_params, + device_grads, + device_exp_avgs, + device_exp_avg_sqs, + device_max_exp_avg_sqs, + device_state_steps, + ), _) in grouped_tensors.values(): + + if maximize: + device_grads = torch._foreach_neg(device_grads) + + # Handle complex parameters + if has_complex: + if amsgrad: + _view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs, device_max_exp_avg_sqs) + else: + _view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs) + + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. 
+ if device_state_steps[0].is_cpu: + torch._foreach_add_(device_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0) + else: + torch._foreach_add_(device_state_steps, 1) + + if weight_decay != 0: + # Re-use the intermediate memory (device_grads) already allocated for maximize + if maximize: + torch._foreach_add_(device_grads, device_params, alpha=weight_decay) + else: + device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay) + + # Decay the first and second moment running average coefficient + torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - beta1) + + torch._foreach_mul_(device_exp_avg_sqs, beta2) + torch._foreach_addcmul_(device_exp_avg_sqs, device_grads, device_grads, 1 - beta2) + + # Delete the local intermediate since it won't be used anymore to save on peak memory + del device_grads + + if capturable: + bias_correction1 = torch._foreach_pow(beta1, device_state_steps) + bias_correction2 = torch._foreach_pow(beta2, device_state_steps) + # foreach_sub doesn't allow a scalar as the first arg + torch._foreach_sub_(bias_correction1, 1) + torch._foreach_sub_(bias_correction2, 1) + # we do not negate bias_correction1 as it'll need to be negated later anyway + torch._foreach_neg_(bias_correction2) + + # foreach_div doesn't allow a scalar as the first arg + torch._foreach_div_(bias_correction1, lr) + torch._foreach_reciprocal_(bias_correction1) + + torch._foreach_sqrt_(bias_correction2) + + # Re-assign for clarity as we maintain minimal intermediates: we'll have + # step_size = - lr / (1 - beta1 ^ t) where t = num_steps + # bias_correction2_sqrt = sqrt(1 - beta2 ^ t) + step_size = bias_correction1 + bias_correction2_sqrt = bias_correction2 + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs) # type: ignore[assignment] + + # Set intermediate to the max. for normalizing running avg. of gradient when amsgrad + exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs) + else: + exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs) + + torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt) + torch._foreach_add_(exp_avg_sq_sqrt, eps) + torch._foreach_div_(exp_avg_sq_sqrt, step_size) + + # at this point, exp_avg_sq_sqrt = - (1 - beta^t) * [sqrt(exp_avg_sq / (1 - beta2^t)) + eps] / lr + torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt) + else: + bias_correction1 = [1 - beta1 ** _get_value(step) for step in device_state_steps] + bias_correction2 = [1 - beta2 ** _get_value(step) for step in device_state_steps] + + step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1]) + + bias_correction2_sqrt = [_dispatch_sqrt(bc) for bc in bias_correction2] + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs) + + # Use the max. for normalizing running avg. 
of gradient + exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs) + else: + exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs) + + torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt) + torch._foreach_add_(exp_avg_sq_sqrt, eps) + torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size) + + +def _fused_adam( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + amsgrad: bool, + has_complex: bool, # Needed for consistency. + beta1: float, + beta2: float, + lr: Union[float, Tensor], + weight_decay: float, + eps: float, + maximize: bool, + capturable: bool, # Needed for consistency. + differentiable: bool, +) -> None: + if not params: + return + if differentiable: + raise RuntimeError("Adam with fused=True does not support differentiable=True") + + grad_scale_dict = {grad_scale.device: grad_scale} if grad_scale is not None else None + found_inf_dict = {found_inf.device: found_inf} if found_inf is not None else None + + # We only shuffle around the lr when it is a Tensor and on CUDA, otherwise, we prefer + # treating it as a scalar. + lr_dict = {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( + [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]) + for (device, _), ((device_params, + device_grads, + device_exp_avgs, + device_exp_avg_sqs, + device_max_exp_avg_sqs, + device_state_steps,), _) in grouped_tensors.items(): + device_grad_scale, device_found_inf = None, None + if grad_scale is not None: + if device not in grad_scale_dict: + grad_scale_dict[device] = grad_scale.to(device, non_blocking=True) + device_grad_scale = grad_scale_dict[device] + if found_inf is not None: + if found_inf not in found_inf_dict: + found_inf_dict[device] = found_inf.to(device, non_blocking=True) + device_found_inf = found_inf_dict[device] + if lr_dict is not None and device not in lr_dict: + lr_dict[device] = lr.to(device=device, non_blocking=True) + lr = lr_dict[device] + torch._foreach_add_(device_state_steps, 1) + torch._fused_adam_( + device_params, + device_grads, + device_exp_avgs, + device_exp_avg_sqs, + device_max_exp_avg_sqs, + device_state_steps, + amsgrad=amsgrad, + lr=lr, + beta1=beta1, + beta2=beta2, + weight_decay=weight_decay, + eps=eps, + maximize=maximize, + grad_scale=device_grad_scale, + found_inf=device_found_inf, + ) + if device_found_inf is not None: + torch._foreach_sub_(device_state_steps, [device_found_inf] * len(device_state_steps)) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/adam.pyi b/env-llmeval/lib/python3.10/site-packages/torch/optim/adam.pyi new file mode 100644 index 0000000000000000000000000000000000000000..aef8ed69a9c99497d10c9eb4b0be659602e18d56 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/adam.pyi @@ -0,0 +1,22 @@ +from typing import Optional, Tuple, Union + +from torch import Tensor + +from .optimizer import Optimizer, ParamsT + +class Adam(Optimizer): + def __init__( + self, + params: ParamsT, + lr: Union[float, Tensor] = 1e-3, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-8, + weight_decay: float = 0, + amsgrad: bool = False, + *, + foreach: Optional[bool] = None, + maximize: bool = False, + capturable: bool = False, + differentiable: bool = False, + fused: 
Optional[bool] = None, + ) -> None: ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/adamw.pyi b/env-llmeval/lib/python3.10/site-packages/torch/optim/adamw.pyi new file mode 100644 index 0000000000000000000000000000000000000000..17c35ebec8a6a5920b49a9e6793928d8a789994e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/adamw.pyi @@ -0,0 +1,22 @@ +from typing import Optional, Tuple, Union + +from torch import Tensor + +from .optimizer import Optimizer, ParamsT + +class AdamW(Optimizer): + def __init__( + self, + params: ParamsT, + lr: Union[float, Tensor] = 1e-3, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-8, + weight_decay: float = 1e-2, + amsgrad: bool = False, + *, + maximize: bool = False, + foreach: Optional[bool] = None, + capturable: bool = False, + differentiable: bool = False, + fused: Optional[bool] = None, + ) -> None: ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/lbfgs.py b/env-llmeval/lib/python3.10/site-packages/torch/optim/lbfgs.py new file mode 100644 index 0000000000000000000000000000000000000000..704df121b7a517f8ba90f29febe85a7d8bc7ef0a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/lbfgs.py @@ -0,0 +1,478 @@ +import torch +from functools import reduce +from .optimizer import Optimizer + +__all__ = ['LBFGS'] + +def _cubic_interpolate(x1, f1, g1, x2, f2, g2, bounds=None): + # ported from https://github.com/torch/optim/blob/master/polyinterp.lua + # Compute bounds of interpolation area + if bounds is not None: + xmin_bound, xmax_bound = bounds + else: + xmin_bound, xmax_bound = (x1, x2) if x1 <= x2 else (x2, x1) + + # Code for most common case: cubic interpolation of 2 points + # w/ function and derivative values for both + # Solution in this case (where x2 is the farthest point): + # d1 = g1 + g2 - 3*(f1-f2)/(x1-x2); + # d2 = sqrt(d1^2 - g1*g2); + # min_pos = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2)); + # t_new = min(max(min_pos,xmin_bound),xmax_bound); + d1 = g1 + g2 - 3 * (f1 - f2) / (x1 - x2) + d2_square = d1**2 - g1 * g2 + if d2_square >= 0: + d2 = d2_square.sqrt() + if x1 <= x2: + min_pos = x2 - (x2 - x1) * ((g2 + d2 - d1) / (g2 - g1 + 2 * d2)) + else: + min_pos = x1 - (x1 - x2) * ((g1 + d2 - d1) / (g1 - g2 + 2 * d2)) + return min(max(min_pos, xmin_bound), xmax_bound) + else: + return (xmin_bound + xmax_bound) / 2. 
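As a quick sanity check of the interpolation helper above: when the two endpoints are sampled from a simple quadratic, the fitted cubic degenerates to that quadratic and the returned point is its exact minimizer. A minimal sketch, assuming `_cubic_interpolate` from the hunk above is in scope; the derivatives are passed as 0-dim tensors (an assumption mirroring how LBFGS passes `gtd` values) so the `.sqrt()` call on the discriminant works:

    import torch

    # f(x) = (x - 2)**2 sampled at x1 = 0 and x2 = 3, with exact derivatives
    x1, f1, g1 = 0.0, 4.0, torch.tensor(-4.0)   # f(0) = 4,  f'(0) = -4
    x2, f2, g2 = 3.0, 1.0, torch.tensor(2.0)    # f(3) = 1,  f'(3) = 2

    t = _cubic_interpolate(x1, f1, g1, x2, f2, g2)
    print(float(t))  # 2.0 -- the true minimizer, clamped to the [x1, x2] bracket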
+ + +def _strong_wolfe(obj_func, + x, + t, + d, + f, + g, + gtd, + c1=1e-4, + c2=0.9, + tolerance_change=1e-9, + max_ls=25): + # ported from https://github.com/torch/optim/blob/master/lswolfe.lua + d_norm = d.abs().max() + g = g.clone(memory_format=torch.contiguous_format) + # evaluate objective and gradient using initial step + f_new, g_new = obj_func(x, t, d) + ls_func_evals = 1 + gtd_new = g_new.dot(d) + + # bracket an interval containing a point satisfying the Wolfe criteria + t_prev, f_prev, g_prev, gtd_prev = 0, f, g, gtd + done = False + ls_iter = 0 + while ls_iter < max_ls: + # check conditions + if f_new > (f + c1 * t * gtd) or (ls_iter > 1 and f_new >= f_prev): + bracket = [t_prev, t] + bracket_f = [f_prev, f_new] + bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)] + bracket_gtd = [gtd_prev, gtd_new] + break + + if abs(gtd_new) <= -c2 * gtd: + bracket = [t] + bracket_f = [f_new] + bracket_g = [g_new] + done = True + break + + if gtd_new >= 0: + bracket = [t_prev, t] + bracket_f = [f_prev, f_new] + bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)] + bracket_gtd = [gtd_prev, gtd_new] + break + + # interpolate + min_step = t + 0.01 * (t - t_prev) + max_step = t * 10 + tmp = t + t = _cubic_interpolate( + t_prev, + f_prev, + gtd_prev, + t, + f_new, + gtd_new, + bounds=(min_step, max_step)) + + # next step + t_prev = tmp + f_prev = f_new + g_prev = g_new.clone(memory_format=torch.contiguous_format) + gtd_prev = gtd_new + f_new, g_new = obj_func(x, t, d) + ls_func_evals += 1 + gtd_new = g_new.dot(d) + ls_iter += 1 + + # reached max number of iterations? + if ls_iter == max_ls: + bracket = [0, t] + bracket_f = [f, f_new] + bracket_g = [g, g_new] + + # zoom phase: we now have a point satisfying the criteria, or + # a bracket around it. We refine the bracket until we find the + # exact point satisfying the criteria + insuf_progress = False + # find high and low points in bracket + low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[-1] else (1, 0) + while not done and ls_iter < max_ls: + # line-search bracket is so small + if abs(bracket[1] - bracket[0]) * d_norm < tolerance_change: + break + + # compute new trial value + t = _cubic_interpolate(bracket[0], bracket_f[0], bracket_gtd[0], + bracket[1], bracket_f[1], bracket_gtd[1]) + + # test that we are making sufficient progress: + # in case `t` is so close to boundary, we mark that we are making + # insufficient progress, and if + # + we have made insufficient progress in the last step, or + # + `t` is at one of the boundary, + # we will move `t` to a position which is `0.1 * len(bracket)` + # away from the nearest boundary point. 
+ eps = 0.1 * (max(bracket) - min(bracket)) + if min(max(bracket) - t, t - min(bracket)) < eps: + # interpolation close to boundary + if insuf_progress or t >= max(bracket) or t <= min(bracket): + # evaluate at 0.1 away from boundary + if abs(t - max(bracket)) < abs(t - min(bracket)): + t = max(bracket) - eps + else: + t = min(bracket) + eps + insuf_progress = False + else: + insuf_progress = True + else: + insuf_progress = False + + # Evaluate new point + f_new, g_new = obj_func(x, t, d) + ls_func_evals += 1 + gtd_new = g_new.dot(d) + ls_iter += 1 + + if f_new > (f + c1 * t * gtd) or f_new >= bracket_f[low_pos]: + # Armijo condition not satisfied or not lower than lowest point + bracket[high_pos] = t + bracket_f[high_pos] = f_new + bracket_g[high_pos] = g_new.clone(memory_format=torch.contiguous_format) + bracket_gtd[high_pos] = gtd_new + low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[1] else (1, 0) + else: + if abs(gtd_new) <= -c2 * gtd: + # Wolfe conditions satisfied + done = True + elif gtd_new * (bracket[high_pos] - bracket[low_pos]) >= 0: + # old high becomes new low + bracket[high_pos] = bracket[low_pos] + bracket_f[high_pos] = bracket_f[low_pos] + bracket_g[high_pos] = bracket_g[low_pos] + bracket_gtd[high_pos] = bracket_gtd[low_pos] + + # new point becomes new low + bracket[low_pos] = t + bracket_f[low_pos] = f_new + bracket_g[low_pos] = g_new.clone(memory_format=torch.contiguous_format) + bracket_gtd[low_pos] = gtd_new + + # return stuff + t = bracket[low_pos] + f_new = bracket_f[low_pos] + g_new = bracket_g[low_pos] + return f_new, g_new, t, ls_func_evals + + +class LBFGS(Optimizer): + """Implements L-BFGS algorithm. + + Heavily inspired by `minFunc + `_. + + .. warning:: + This optimizer doesn't support per-parameter options and parameter + groups (there can be only one). + + .. warning:: + Right now all parameters have to be on a single device. This will be + improved in the future. + + .. note:: + This is a very memory intensive optimizer (it requires additional + ``param_bytes * (history_size + 1)`` bytes). If it doesn't fit in memory + try reducing the history size, or use a different algorithm. + + Args: + lr (float): learning rate (default: 1) + max_iter (int): maximal number of iterations per optimization step + (default: 20) + max_eval (int): maximal number of function evaluations per optimization + step (default: max_iter * 1.25). + tolerance_grad (float): termination tolerance on first order optimality + (default: 1e-7). + tolerance_change (float): termination tolerance on function + value/parameter changes (default: 1e-9). + history_size (int): update history size (default: 100). + line_search_fn (str): either 'strong_wolfe' or None (default: None). 
+ """ + + def __init__(self, + params, + lr=1, + max_iter=20, + max_eval=None, + tolerance_grad=1e-7, + tolerance_change=1e-9, + history_size=100, + line_search_fn=None): + if max_eval is None: + max_eval = max_iter * 5 // 4 + defaults = dict( + lr=lr, + max_iter=max_iter, + max_eval=max_eval, + tolerance_grad=tolerance_grad, + tolerance_change=tolerance_change, + history_size=history_size, + line_search_fn=line_search_fn) + super().__init__(params, defaults) + + if len(self.param_groups) != 1: + raise ValueError("LBFGS doesn't support per-parameter options " + "(parameter groups)") + + self._params = self.param_groups[0]['params'] + self._numel_cache = None + + def _numel(self): + if self._numel_cache is None: + self._numel_cache = reduce(lambda total, p: total + p.numel(), self._params, 0) + return self._numel_cache + + def _gather_flat_grad(self): + views = [] + for p in self._params: + if p.grad is None: + view = p.new(p.numel()).zero_() + elif p.grad.is_sparse: + view = p.grad.to_dense().view(-1) + else: + view = p.grad.view(-1) + views.append(view) + return torch.cat(views, 0) + + def _add_grad(self, step_size, update): + offset = 0 + for p in self._params: + numel = p.numel() + # view as to avoid deprecated pointwise semantics + p.add_(update[offset:offset + numel].view_as(p), alpha=step_size) + offset += numel + assert offset == self._numel() + + def _clone_param(self): + return [p.clone(memory_format=torch.contiguous_format) for p in self._params] + + def _set_param(self, params_data): + for p, pdata in zip(self._params, params_data): + p.copy_(pdata) + + def _directional_evaluate(self, closure, x, t, d): + self._add_grad(t, d) + loss = float(closure()) + flat_grad = self._gather_flat_grad() + self._set_param(x) + return loss, flat_grad + + @torch.no_grad() + def step(self, closure): + """Perform a single optimization step. + + Args: + closure (Callable): A closure that reevaluates the model + and returns the loss. 
+ """ + assert len(self.param_groups) == 1 + + # Make sure the closure is always called with grad enabled + closure = torch.enable_grad()(closure) + + group = self.param_groups[0] + lr = group['lr'] + max_iter = group['max_iter'] + max_eval = group['max_eval'] + tolerance_grad = group['tolerance_grad'] + tolerance_change = group['tolerance_change'] + line_search_fn = group['line_search_fn'] + history_size = group['history_size'] + + # NOTE: LBFGS has only global state, but we register it as state for + # the first param, because this helps with casting in load_state_dict + state = self.state[self._params[0]] + state.setdefault('func_evals', 0) + state.setdefault('n_iter', 0) + + # evaluate initial f(x) and df/dx + orig_loss = closure() + loss = float(orig_loss) + current_evals = 1 + state['func_evals'] += 1 + + flat_grad = self._gather_flat_grad() + opt_cond = flat_grad.abs().max() <= tolerance_grad + + # optimal condition + if opt_cond: + return orig_loss + + # tensors cached in state (for tracing) + d = state.get('d') + t = state.get('t') + old_dirs = state.get('old_dirs') + old_stps = state.get('old_stps') + ro = state.get('ro') + H_diag = state.get('H_diag') + prev_flat_grad = state.get('prev_flat_grad') + prev_loss = state.get('prev_loss') + + n_iter = 0 + # optimize for a max of max_iter iterations + while n_iter < max_iter: + # keep track of nb of iterations + n_iter += 1 + state['n_iter'] += 1 + + ############################################################ + # compute gradient descent direction + ############################################################ + if state['n_iter'] == 1: + d = flat_grad.neg() + old_dirs = [] + old_stps = [] + ro = [] + H_diag = 1 + else: + # do lbfgs update (update memory) + y = flat_grad.sub(prev_flat_grad) + s = d.mul(t) + ys = y.dot(s) # y*s + if ys > 1e-10: + # updating memory + if len(old_dirs) == history_size: + # shift history by one (limited-memory) + old_dirs.pop(0) + old_stps.pop(0) + ro.pop(0) + + # store new direction/step + old_dirs.append(y) + old_stps.append(s) + ro.append(1. / ys) + + # update scale of initial Hessian approximation + H_diag = ys / y.dot(y) # (y*y) + + # compute the approximate (L-BFGS) inverse Hessian + # multiplied by the gradient + num_old = len(old_dirs) + + if 'al' not in state: + state['al'] = [None] * history_size + al = state['al'] + + # iteration in L-BFGS loop collapsed to use just one buffer + q = flat_grad.neg() + for i in range(num_old - 1, -1, -1): + al[i] = old_stps[i].dot(q) * ro[i] + q.add_(old_dirs[i], alpha=-al[i]) + + # multiply by initial Hessian + # r/d is the final direction + d = r = torch.mul(q, H_diag) + for i in range(num_old): + be_i = old_dirs[i].dot(r) * ro[i] + r.add_(old_stps[i], alpha=al[i] - be_i) + + if prev_flat_grad is None: + prev_flat_grad = flat_grad.clone(memory_format=torch.contiguous_format) + else: + prev_flat_grad.copy_(flat_grad) + prev_loss = loss + + ############################################################ + # compute step length + ############################################################ + # reset initial guess for step size + if state['n_iter'] == 1: + t = min(1., 1. 
/ flat_grad.abs().sum()) * lr + else: + t = lr + + # directional derivative + gtd = flat_grad.dot(d) # g * d + + # directional derivative is below tolerance + if gtd > -tolerance_change: + break + + # optional line search: user function + ls_func_evals = 0 + if line_search_fn is not None: + # perform line search, using user function + if line_search_fn != "strong_wolfe": + raise RuntimeError("only 'strong_wolfe' is supported") + else: + x_init = self._clone_param() + + def obj_func(x, t, d): + return self._directional_evaluate(closure, x, t, d) + + loss, flat_grad, t, ls_func_evals = _strong_wolfe( + obj_func, x_init, t, d, loss, flat_grad, gtd) + self._add_grad(t, d) + opt_cond = flat_grad.abs().max() <= tolerance_grad + else: + # no line search, simply move with fixed-step + self._add_grad(t, d) + if n_iter != max_iter: + # re-evaluate function only if not in last iteration + # the reason we do this: in a stochastic setting, + # no use to re-evaluate that function here + with torch.enable_grad(): + loss = float(closure()) + flat_grad = self._gather_flat_grad() + opt_cond = flat_grad.abs().max() <= tolerance_grad + ls_func_evals = 1 + + # update func eval + current_evals += ls_func_evals + state['func_evals'] += ls_func_evals + + ############################################################ + # check conditions + ############################################################ + if n_iter == max_iter: + break + + if current_evals >= max_eval: + break + + # optimal condition + if opt_cond: + break + + # lack of progress + if d.mul(t).abs().max() <= tolerance_change: + break + + if abs(loss - prev_loss) < tolerance_change: + break + + state['d'] = d + state['t'] = t + state['old_dirs'] = old_dirs + state['old_stps'] = old_stps + state['ro'] = ro + state['H_diag'] = H_diag + state['prev_flat_grad'] = prev_flat_grad + state['prev_loss'] = prev_loss + + return orig_loss diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/nadam.py b/env-llmeval/lib/python3.10/site-packages/torch/optim/nadam.py new file mode 100644 index 0000000000000000000000000000000000000000..d1e0abbefbbfbe8bbecceadd7133a328429397c1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/nadam.py @@ -0,0 +1,473 @@ +import torch +from torch import Tensor +from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _dispatch_sqrt, _stack_if_compiling, + _capturable_doc, _differentiable_doc, _foreach_doc, _default_to_fused_or_foreach, _view_as_real) +from typing import List, Optional + +__all__ = ['NAdam', 'nadam'] + +class NAdam(Optimizer): + def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8, + weight_decay=0, momentum_decay=4e-3, decoupled_weight_decay: bool = False, + *, foreach: Optional[bool] = None, capturable: bool = False, + differentiable: bool = False): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + if not 0.0 <= momentum_decay: + raise ValueError(f"Invalid momentum_decay value: {momentum_decay}") + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, momentum_decay=momentum_decay, + decoupled_weight_decay=decoupled_weight_decay, + foreach=foreach, 
capturable=capturable, differentiable=differentiable) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault('foreach', None) + group.setdefault('capturable', False) + group.setdefault('differentiable', False) + group.setdefault('decoupled_weight_decay', False) + state_values = list(self.state.values()) + step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step']) + if not step_is_tensor: + for s in state_values: + s['step'] = torch.tensor(float(s['step']), dtype=torch.float32) + mu_product_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['mu_product']) + if not mu_product_is_tensor: + for s in state_values: + s['mu_product'] = torch.tensor(s['mu_product'], dtype=torch.float32) + + def _init_group(self, group, params_with_grad, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps): + has_complex = False + for p in group['params']: + if p.grad is not None: + has_complex |= torch.is_complex(p) + params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError('NAdam does not support sparse gradients') + grads.append(p.grad) + + state = self.state[p] + # Lazy state initialization + if len(state) == 0: + # note(crcrpar): [special device hosting for step] + # Deliberately host `step` and `mu_product` on CPU if capturable is False. + # This is because kernel launches are costly on CUDA and XLA. + state['step'] = ( + torch.zeros((), dtype=torch.float32, device=p.device) + if group['capturable'] else torch.tensor(0.0, dtype=torch.float32) + ) + state['mu_product'] = ( + torch.ones((), dtype=torch.float32, device=p.device) + if group['capturable'] else torch.tensor(1.0, dtype=torch.float32) + ) + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) + + exp_avgs.append(state['exp_avg']) + exp_avg_sqs.append(state['exp_avg_sq']) + mu_products.append(state['mu_product']) + state_steps.append(state['step']) + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Performs a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + """ + self._cuda_graph_capture_health_check() + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + mu_products = [] + state_steps = [] + beta1, beta2 = group['betas'] + + has_complex = self._init_group(group, params_with_grad, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps) + + nadam(params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + mu_products, + state_steps, + beta1=beta1, + beta2=beta2, + lr=group['lr'], + weight_decay=group['weight_decay'], + momentum_decay=group['momentum_decay'], + eps=group['eps'], + decoupled_weight_decay=group['decoupled_weight_decay'], + foreach=group['foreach'], + capturable=group['capturable'], + differentiable=group['differentiable'], + has_complex=has_complex) + + return loss + +NAdam.__doc__ = r"""Implements NAdam algorithm. + + .. 
math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma_t \text{ (lr)}, \: \beta_1,\beta_2 \text{ (betas)}, + \: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)} \\ + &\hspace{13mm} \: \lambda \text{ (weight decay)}, \:\psi \text{ (momentum decay)} \\ + &\hspace{13mm} \: \textit{decoupled\_weight\_decay} \\ + &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)}, + v_0 \leftarrow 0 \text{ ( second moment)} \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} \\ + &\hspace{5mm} \textbf{if} \: \lambda \neq 0 \\ + &\hspace{10mm}\textbf{if} \: \textit{decoupled\_weight\_decay} \\ + &\hspace{15mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\ + &\hspace{10mm}\textbf{else} \\ + &\hspace{15mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm} \mu_t \leftarrow \beta_1 \big(1 - \frac{1}{2} 0.96^{t \psi} \big) \\ + &\hspace{5mm} \mu_{t+1} \leftarrow \beta_1 \big(1 - \frac{1}{2} 0.96^{(t+1)\psi}\big)\\ + &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ + &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\ + &\hspace{5mm}\widehat{m_t} \leftarrow \mu_{t+1} m_t/(1-\prod_{i=1}^{t+1}\mu_i)\\[-1.ex] + & \hspace{11mm} + (1-\mu_t) g_t /(1-\prod_{i=1}^{t} \mu_{i}) \\ + &\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\ + &\hspace{5mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/ + \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `Incorporating Nesterov Momentum into Adam`_. + """ + fr""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 2e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + momentum_decay (float, optional): momentum momentum_decay (default: 4e-3) + decoupled_weight_decay (bool, optional): whether to use decoupled weight + decay as in AdamW to obtain NAdamW (default: False) + {_foreach_doc} + {_capturable_doc} + {_differentiable_doc} + + .. _Incorporating Nesterov Momentum into Adam: + https://openreview.net/forum?id=OM0jvwB8jIp57ZJjtNEZ + .. _Decoupled Weight Decay Regularization: + https://arxiv.org/abs/1711.05101 + + """ + + +def nadam(params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + mu_products: List[Tensor], + state_steps: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + decoupled_weight_decay: bool = False, + foreach: Optional[bool] = None, + capturable: bool = False, + differentiable: bool = False, + has_complex: bool = False, + *, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, + momentum_decay: float, + eps: float): + r"""Functional API that performs NAdam algorithm computation. + + See :class:`~torch.optim.NAdam` for details. 
+ """ + + + if not all(isinstance(t, torch.Tensor) for t in state_steps): + raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors") + + if not all(isinstance(t, torch.Tensor) for t in mu_products): + raise RuntimeError("API has changed, `mu_products` argument must contain a list of singleton tensors") + + if foreach is None: + _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False) + + if foreach and torch.jit.is_scripting(): + raise RuntimeError('torch.jit.script not supported with foreach optimizers') + + if foreach and not torch.jit.is_scripting(): + func = _multi_tensor_nadam + else: + func = _single_tensor_nadam + + func(params, + grads, + exp_avgs, + exp_avg_sqs, + mu_products, + state_steps, + beta1=beta1, + beta2=beta2, + lr=lr, + weight_decay=weight_decay, + momentum_decay=momentum_decay, + decoupled_weight_decay=decoupled_weight_decay, + eps=eps, + capturable=capturable, + differentiable=differentiable, + has_complex=has_complex) + + +def _single_tensor_nadam(params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + mu_products: List[Tensor], + state_steps: List[Tensor], + *, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, + momentum_decay: float, + eps: float, + decoupled_weight_decay: bool, + capturable: bool, + differentiable: bool, + has_complex: bool): + + for i, param in enumerate(params): + grad = grads[i] + exp_avg = exp_avgs[i] + exp_avg_sq = exp_avg_sqs[i] + mu_product = mu_products[i] + step_t = state_steps[i] + + if torch.is_complex(param): + param = torch.view_as_real(param) + grad = torch.view_as_real(grad) + exp_avg = torch.view_as_real(exp_avg) + exp_avg_sq = torch.view_as_real(exp_avg_sq) + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + assert ( + (param.is_cuda and mu_product.is_cuda and step_t.is_cuda) or (param.is_xla and mu_product.is_xla and step_t.is_xla) + ), "If capturable=True, params, mu_products, and state_steps must be CUDA or XLA tensors." + + # update step + step_t += 1 + + if capturable: + step = step_t + else: + step = _get_value(step_t) + + bias_correction2 = 1 - beta2 ** step + + if weight_decay != 0: + if decoupled_weight_decay: + # Perform stepweight decay + param.mul_(1 - lr * weight_decay) + else: + grad = grad.add(param, alpha=weight_decay) + + # calculate the momentum cache \mu^{t} and \mu^{t+1} + mu = beta1 * (1. - 0.5 * (0.96 ** (step * momentum_decay))) + mu_next = beta1 * (1. - 0.5 * (0.96 ** ((step + 1) * momentum_decay))) + + # update mu_product + mu_product *= mu + + # decay the first and second moment running average coefficient + exp_avg.lerp_(grad, 1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + denom = exp_avg_sq.div(bias_correction2).sqrt() + + if differentiable or capturable: + denom = denom.add(eps) + # Make autograd track the operations + # by updating the grad and exp_avg directly and not using the + # scalar "value" argument of addcdiv. + mu_product_next = mu_product * mu_next + grad = grad * (-lr * (1. - mu) / (1. - mu_product)) + exp_avg = exp_avg * (-lr * mu_next / (1. - mu_product_next)) + param.addcdiv_(grad, denom) + param.addcdiv_(exp_avg, denom) + else: + mu_product_next = _get_value(mu_product) * mu_next + denom.add_(eps) + param.addcdiv_(grad, denom, value=(-lr * (1. - mu) / (1. 
- _get_value(mu_product)))) + param.addcdiv_(exp_avg, denom, value=(-lr * mu_next) / (1. - mu_product_next)) + + +def _multi_tensor_nadam(params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + mu_products: List[Tensor], + state_steps: List[Tensor], + *, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, + momentum_decay: float, + eps: float, + decoupled_weight_decay: bool, + capturable: bool, + differentiable: bool, + has_complex: bool): + + if len(params) == 0: + return + + assert not differentiable, "_foreach ops don't support autograd" + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + assert all(p.is_cuda and mp.is_cuda and step.is_cuda + for p, mp, step in zip(params, mu_products, state_steps)), \ + "If capturable=True, params, mu_products, and state_steps must be CUDA tensors." + + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps]) + for ((grouped_params, grouped_grads, grouped_exp_avgs, + grouped_exp_avg_sqs, grouped_mu_products, grouped_state_steps), _) in grouped_tensors.values(): + + # handle complex + if has_complex: + _view_as_real(grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_avg_sqs) + + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. + if grouped_state_steps[0].is_cpu: + torch._foreach_add_(grouped_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0) + else: + torch._foreach_add_(grouped_state_steps, 1) + + if weight_decay != 0: + if decoupled_weight_decay: + # Perform stepweight decay + torch._foreach_mul_(grouped_params, 1 - lr * weight_decay) + else: + grouped_grads = torch._foreach_add(grouped_grads, grouped_params, alpha=weight_decay) + + # Decay the first and second moment running average coefficient + torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1) + + torch._foreach_mul_(grouped_exp_avg_sqs, beta2) + torch._foreach_addcmul_(grouped_exp_avg_sqs, grouped_grads, grouped_grads, 1 - beta2) + + exp_avg_sq_sqrt = torch._foreach_sqrt(grouped_exp_avg_sqs) + + if capturable: + # mus will be beta1 * (1 - 0.5 * 0.96 ** (step * momentum_decay)) + exponent = torch._foreach_mul(grouped_state_steps, momentum_decay) + mus = torch._foreach_pow(0.96, exponent) + torch._foreach_mul_(mus, -0.5) + torch._foreach_add_(mus, 1.0) + torch._foreach_mul_(mus, beta1) + + # mu_nexts will be beta1 * (1 - 0.5 * 0.96 ** ((step + 1) * momentum_decay)) + torch._foreach_add_(exponent, momentum_decay) + mu_nexts = torch._foreach_pow(0.96, exponent) + torch._foreach_mul_(mu_nexts, -0.5) + torch._foreach_add_(mu_nexts, 1.0) + torch._foreach_mul_(mu_nexts, beta1) + + # save peak memory as we don't need exponent anymore + del exponent + + bias_correction_sqrt = torch._foreach_pow(beta2, grouped_state_steps) + # foreach_sub doesn't allow a scalar as the first arg + torch._foreach_sub_(bias_correction_sqrt, 1.0) + torch._foreach_neg_(bias_correction_sqrt) + torch._foreach_sqrt_(bias_correction_sqrt) + else: + bias_correction_sqrt = [_dispatch_sqrt(1 - beta2 ** _get_value(step)) for step in grouped_state_steps] + mus = [beta1 * (1. 
- 0.5 * (0.96 ** (_get_value(step) * momentum_decay))) for step in grouped_state_steps] + mu_nexts = [beta1 * (1. - 0.5 * (0.96 ** ((_get_value(step) + 1) * momentum_decay))) + for step in grouped_state_steps] + + # update mu_products + torch._foreach_mul_(grouped_mu_products, mus) + + torch._foreach_div_(exp_avg_sq_sqrt, bias_correction_sqrt) + torch._foreach_add_(exp_avg_sq_sqrt, eps) + + # explicitly delete bias_correction refs to save memory + del bias_correction_sqrt + + if capturable: + # Build up the step_size multiplier for grad, reusing mus' memory + torch._foreach_sub_(mus, 1.0) + torch._foreach_mul_(mus, lr) + # foreach_sub doesn't allow a scalar as the first arg + denom = torch._foreach_sub(grouped_mu_products, 1.0) + torch._foreach_neg_(denom) + torch._foreach_div_(mus, denom) + # - lr * (1 - mu) / (1 - mu_product) + step_size_grads = mus + # explicitly delete denom to save memory + del denom + + # Build up the step_size multiplier for exp_avg, reusing mu_nexts' memory + denom = torch._foreach_mul(grouped_mu_products, mu_nexts) + torch._foreach_mul_(mu_nexts, lr) + # foreach_sub doesn't allow a scalar as the first arg, but it's okay because + # we need a negative here anyway + torch._foreach_sub_(denom, 1.0) + torch._foreach_div_(mu_nexts, denom) + # - lr * mu_next / (1 - mu_product * mu_next) + step_size_expavg = mu_nexts + # explicitly delete denom to save memory + del denom + + # we cannot inplace into step_size_grads cuz it is a list of ScalarTensors + # and mul'ing with grouped_grads will result in a list of bigger Tensors + numerator = torch._foreach_mul(step_size_grads, grouped_grads) + torch._foreach_addcmul_(numerator, step_size_expavg, grouped_exp_avgs) + + # finally, update params + torch._foreach_addcdiv_(grouped_params, numerator, exp_avg_sq_sqrt) + else: + step_size_grads = _stack_if_compiling([(lr * (1. - mu) / (1. - _get_value(mu_product))) * -1 + for mu_product, mu in zip(grouped_mu_products, mus)]) + step_size_expavg = _stack_if_compiling([(lr * mu_next / (1. - _get_value(mu_product) * mu_next)) * -1 + for mu_product, mu_next in zip(grouped_mu_products, mu_nexts)]) + + torch._foreach_addcdiv_(grouped_params, grouped_grads, exp_avg_sq_sqrt, step_size_grads) + torch._foreach_addcdiv_(grouped_params, grouped_exp_avgs, exp_avg_sq_sqrt, step_size_expavg) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/nadam.pyi b/env-llmeval/lib/python3.10/site-packages/torch/optim/nadam.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f62e188b3d72b1d9021e8f0f6619b8bde4349cb1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/nadam.pyi @@ -0,0 +1,15 @@ +from typing import Tuple + +from .optimizer import Optimizer, ParamsT + +class NAdam(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = ..., + betas: Tuple[float, float] = ..., + eps: float = ..., + weight_decay: float = ..., + momentum_decay: float = ..., + decoupled_weight_decay: bool = ..., + ) -> None: ... 
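A minimal usage sketch of the NAdam class added above (the toy model, data, and hyperparameters are illustrative assumptions; `decoupled_weight_decay=True` switches to the AdamW-style decay described in the docstring):

    import torch
    from torch.optim import NAdam

    model = torch.nn.Linear(10, 1)
    opt = NAdam(model.parameters(), lr=2e-3, betas=(0.9, 0.999),
                weight_decay=1e-2, momentum_decay=4e-3,
                decoupled_weight_decay=True)  # NAdamW-style weight decay

    x, y = torch.randn(32, 10), torch.randn(32, 1)
    for _ in range(5):
        opt.zero_grad()
        loss = torch.nn.functional.mse_loss(model(x), y)
        loss.backward()
        opt.step()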
diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/radam.pyi b/env-llmeval/lib/python3.10/site-packages/torch/optim/radam.pyi new file mode 100644 index 0000000000000000000000000000000000000000..b001376b05ef4881630124874cbbc5df255c1624 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/radam.pyi @@ -0,0 +1,14 @@ +from typing import Tuple + +from .optimizer import Optimizer, ParamsT + +class RAdam(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = ..., + betas: Tuple[float, float] = ..., + eps: float = ..., + weight_decay: float = ..., + decoupled_weight_decay: bool = ..., + ) -> None: ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/rmsprop.pyi b/env-llmeval/lib/python3.10/site-packages/torch/optim/rmsprop.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f206d542dcecb8cf0424dfaa05de2e1f5c46ae14 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/rmsprop.pyi @@ -0,0 +1,13 @@ +from .optimizer import Optimizer, ParamsT + +class RMSprop(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = ..., + alpha: float = ..., + eps: float = ..., + weight_decay: float = ..., + momentum: float = ..., + centered: bool = ..., + ) -> None: ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/rprop.py b/env-llmeval/lib/python3.10/site-packages/torch/optim/rprop.py new file mode 100644 index 0000000000000000000000000000000000000000..2a769a86391bd02811c76074d288bd35aec238f9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/rprop.py @@ -0,0 +1,335 @@ +import torch +from torch import Tensor +from .optimizer import (Optimizer, _use_grad_for_differentiable, _default_to_fused_or_foreach, + _differentiable_doc, _foreach_doc, _maximize_doc, _view_as_real) +from typing import List, Optional + +__all__ = ["Rprop", "rprop"] + + +class Rprop(Optimizer): + def __init__( + self, + params, + lr=1e-2, + etas=(0.5, 1.2), + step_sizes=(1e-6, 50), + *, + foreach: Optional[bool] = None, + maximize: bool = False, + differentiable: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 < etas[0] < 1.0 < etas[1]: + raise ValueError(f"Invalid eta values: {etas[0]}, {etas[1]}") + + defaults = dict( + lr=lr, + etas=etas, + step_sizes=step_sizes, + foreach=foreach, + maximize=maximize, + differentiable=differentiable, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("foreach", None) + group.setdefault("maximize", False) + group.setdefault("differentiable", False) + + def _init_group(self, group, params, grads, prevs, step_sizes): + has_complex = False + for p in group["params"]: + if p.grad is None: + continue + has_complex |= torch.is_complex(p) + params.append(p) + grad = p.grad + if grad.is_sparse: + raise RuntimeError("Rprop does not support sparse gradients") + + grads.append(grad) + state = self.state[p] + + # State initialization + if len(state) == 0: + state["step"] = 0 + state["prev"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + if p.dtype.is_complex: + # Complex Number should be as if they are two independent real numbers. + # Hence the step_size shouldn't be zero for imaginary part. 
+ state["step_size"] = ( + grad.new() + .resize_as_(grad) + .fill_(complex(group["lr"], group["lr"])) + ) + else: + state["step_size"] = ( + grad.new().resize_as_(grad).fill_(group["lr"]) + ) + + prevs.append(state["prev"]) + step_sizes.append(state["step_size"]) + + state["step"] += 1 + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Performs a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params = [] + grads = [] + prevs = [] + step_sizes = [] + etaminus, etaplus = group["etas"] + step_size_min, step_size_max = group["step_sizes"] + foreach = group["foreach"] + maximize = group["maximize"] + + has_complex = self._init_group(group, params, grads, prevs, step_sizes) + + rprop( + params, + grads, + prevs, + step_sizes, + step_size_min=step_size_min, + step_size_max=step_size_max, + etaminus=etaminus, + etaplus=etaplus, + foreach=foreach, + maximize=maximize, + differentiable=group["differentiable"], + has_complex=has_complex, + ) + + return loss + + +Rprop.__doc__ = r"""Implements the resilient backpropagation algorithm. + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \theta_0 \in \mathbf{R}^d \text{ (params)},f(\theta) + \text{ (objective)}, \\ + &\hspace{13mm} \eta_{+/-} \text{ (etaplus, etaminus)}, \Gamma_{max/min} + \text{ (step sizes)} \\ + &\textbf{initialize} : g^0_{prev} \leftarrow 0, + \: \eta_0 \leftarrow \text{lr (learning rate)} \\ + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm} \textbf{for} \text{ } i = 0, 1, \ldots, d-1 \: \mathbf{do} \\ + &\hspace{10mm} \textbf{if} \: g^i_{prev} g^i_t > 0 \\ + &\hspace{15mm} \eta^i_t \leftarrow \mathrm{min}(\eta^i_{t-1} \eta_{+}, + \Gamma_{max}) \\ + &\hspace{10mm} \textbf{else if} \: g^i_{prev} g^i_t < 0 \\ + &\hspace{15mm} \eta^i_t \leftarrow \mathrm{max}(\eta^i_{t-1} \eta_{-}, + \Gamma_{min}) \\ + &\hspace{15mm} g^i_t \leftarrow 0 \\ + &\hspace{10mm} \textbf{else} \: \\ + &\hspace{15mm} \eta^i_t \leftarrow \eta^i_{t-1} \\ + &\hspace{5mm}\theta_t \leftarrow \theta_{t-1}- \eta_t \mathrm{sign}(g_t) \\ + &\hspace{5mm}g_{prev} \leftarrow g_t \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to the paper + `A Direct Adaptive Method for Faster Backpropagation Learning: The RPROP Algorithm + `_. 
+ """ + fr""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-2) + etas (Tuple[float, float], optional): pair of (etaminus, etaplus), that + are multiplicative increase and decrease factors + (default: (0.5, 1.2)) + step_sizes (Tuple[float, float], optional): a pair of minimal and + maximal allowed step sizes (default: (1e-6, 50)) + {_foreach_doc} + {_maximize_doc} + {_differentiable_doc} + + """ + +def rprop( + params: List[Tensor], + grads: List[Tensor], + prevs: List[Tensor], + step_sizes: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + foreach: Optional[bool] = None, + maximize: bool = False, + differentiable: bool = False, + has_complex: bool = False, + *, + step_size_min: float, + step_size_max: float, + etaminus: float, + etaplus: float, +): + r"""Functional API that performs rprop algorithm computation. + + See :class:`~torch.optim.Rprop` for details. + """ + + if foreach is None: + _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False) + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + + if foreach and not torch.jit.is_scripting(): + func = _multi_tensor_rprop + else: + func = _single_tensor_rprop + + func( + params, + grads, + prevs, + step_sizes, + step_size_min=step_size_min, + step_size_max=step_size_max, + etaminus=etaminus, + etaplus=etaplus, + maximize=maximize, + differentiable=differentiable, + has_complex=has_complex, + ) + + +def _single_tensor_rprop( + params: List[Tensor], + grads: List[Tensor], + prevs: List[Tensor], + step_sizes: List[Tensor], + *, + step_size_min: float, + step_size_max: float, + etaminus: float, + etaplus: float, + maximize: bool, + differentiable: bool, + has_complex: bool, +): + + for i, param in enumerate(params): + grad = grads[i] + grad = grad if not maximize else -grad + prev = prevs[i] + step_size = step_sizes[i] + + if torch.is_complex(param): + grad = torch.view_as_real(grad) + prev = torch.view_as_real(prev) + param = torch.view_as_real(param) + step_size = torch.view_as_real(step_size) + if differentiable: + sign = grad.mul(prev.clone()).sign() + else: + sign = grad.mul(prev).sign() + sign[sign.gt(0)] = etaplus + sign[sign.lt(0)] = etaminus + sign[sign.eq(0)] = 1 + + # update stepsizes with step size updates + step_size.mul_(sign).clamp_(step_size_min, step_size_max) + + # for dir<0, dfdx=0 + # for dir>=0 dfdx=dfdx + grad = grad.clone(memory_format=torch.preserve_format) + grad[sign.eq(etaminus)] = 0 + + # update parameters + param.addcmul_(grad.sign(), step_size, value=-1) + prev.copy_(grad) + + +def _multi_tensor_rprop( + params: List[Tensor], + grads: List[Tensor], + prevs: List[Tensor], + step_sizes: List[Tensor], + *, + step_size_min: float, + step_size_max: float, + etaminus: float, + etaplus: float, + maximize: bool, + differentiable: bool, + has_complex: bool, +): + + if len(params) == 0: + return + + assert not differentiable, "_foreach ops don't support autograd" + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, prevs, step_sizes]) + for ((grouped_params, grouped_grads, grouped_prevs, grouped_step_sizes), _) in grouped_tensors.values(): + # Handle complex params + if has_complex: + _view_as_real(grouped_params, grouped_grads, 
grouped_prevs, grouped_step_sizes) + + signs = torch._foreach_mul(grouped_grads, grouped_prevs) + if maximize: + torch._foreach_neg_(signs) + + # At the end of the step, grouped_prevs will contain the current grads, so we reuse + # grouped_prevs memory instead of creating a new buffer, but, for clarity, we reassign + # to keep referring to the buffer as grouped_grads. + torch._foreach_copy_(grouped_prevs, grouped_grads) + if maximize: + torch._foreach_neg_(grouped_prevs) + grouped_grads = grouped_prevs + + torch._foreach_sign_(signs) + for sign in signs: + sign[sign.gt(0)] = etaplus + sign[sign.lt(0)] = etaminus + sign[sign.eq(0)] = 1 + + # update stepsizes with step size updates + torch._foreach_mul_(grouped_step_sizes, signs) + for step_size in grouped_step_sizes: + step_size.clamp_(step_size_min, step_size_max) + + # for dir<0, dfdx=0 + # for dir>=0 dfdx=dfdx + grouped_grads = list(grouped_grads) + for i in range(len(grouped_grads)): + grouped_grads[i][signs[i].eq(etaminus)] = 0 + + # explicitly del signs as it's not used after here to save memory + del signs + + # update parameters + grad_signs = [grad.sign() for grad in grouped_grads] + torch._foreach_addcmul_(grouped_params, grad_signs, grouped_step_sizes, value=-1) + + # Logically, you may expect grouped_prevs to get updated to grouped_grads, but that's + # basically already happened since we've been using grouped_prevs' memory to store + # updated grouped_grads! diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/rprop.pyi b/env-llmeval/lib/python3.10/site-packages/torch/optim/rprop.pyi new file mode 100644 index 0000000000000000000000000000000000000000..fd0c6ba209161be0102b9b13a80e956d7d4b8f3e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/rprop.pyi @@ -0,0 +1,12 @@ +from typing import Tuple + +from .optimizer import Optimizer, ParamsT + +class Rprop(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = ..., + etas: Tuple[float, float] = ..., + step_sizes: Tuple[float, float] = ..., + ) -> None: ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/sparse_adam.py b/env-llmeval/lib/python3.10/site-packages/torch/optim/sparse_adam.py new file mode 100644 index 0000000000000000000000000000000000000000..bed0a7988d7f9ea4db1c8e2ebc1077e962dfdf17 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/sparse_adam.py @@ -0,0 +1,154 @@ +import torch +from . 
import _functional as F +from .optimizer import Optimizer, _maximize_doc + +__all__ = ['SparseAdam'] + +class SparseAdam(Optimizer): + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, maximize: bool = False): + if not 0.0 < lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 < eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + + defaults = dict(lr=lr, betas=betas, eps=eps, maximize=maximize) + super().__init__(params, defaults) + + sparse_params = [] + for index, param_group in enumerate(self.param_groups): + assert isinstance(param_group, dict), f"param_groups must be a list of dicts, but got {type(param_group)}" + # given param group, convert given params to a list first before iterating + for d_index, d_param in enumerate(param_group['params']): + if d_param.is_sparse: + sparse_params.append([index, d_index]) + if sparse_params: + raise ValueError( + f"Sparse params at indices {sparse_params}: SparseAdam requires dense parameter tensors" + ) + + + @torch.no_grad() + def step(self, closure=None): + """Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + state_steps = [] + eps = group['eps'] + lr = group['lr'] + beta1, beta2 = group['betas'] + maximize = group.get('maximize', False) + + for p in group['params']: + if p.grad is not None: + params_with_grad.append(p) + if not p.grad.is_sparse: + raise RuntimeError('SparseAdam does not support dense gradients, please consider Adam instead') + grads.append(p.grad) + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) + + exp_avgs.append(state['exp_avg']) + exp_avg_sqs.append(state['exp_avg_sq']) + + # update the steps for each param group update + state['step'] += 1 + # record the step after step update + state_steps.append(state['step']) + + F.sparse_adam(params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + state_steps, + beta1=beta1, + beta2=beta2, + lr=group['lr'], + eps=group['eps'], + maximize=maximize) + + return loss + +SparseAdam.__doc__ = fr"""SparseAdam implements a masked version of the Adam algorithm + suitable for sparse gradients. Currently, due to implementation constraints (explained + below), SparseAdam is only intended for a narrow subset of use cases, specifically + parameters of a dense layout with gradients of a sparse layout. This occurs in a + special case where the module backwards produces grads already in a sparse layout. + One example NN module that behaves as such is ``nn.Embedding(sparse=True)``. + + SparseAdam approximates the Adam algorithm by masking out the parameter and moment + updates corresponding to the zero values in the gradients. 
Whereas the Adam algorithm + will update the first moment, the second moment, and the parameters based on all values + of the gradients, SparseAdam only updates the moments and parameters corresponding + to the non-zero values of the gradients. + + A simplified way of thinking about the `intended` implementation is as such: + + 1. Create a mask of the non-zero values in the sparse gradients. For example, + if your gradient looks like [0, 5, 0, 0, 9], the mask would be [0, 1, 0, 0, 1]. + 2. Apply this mask over the running moments and do computation on only the + non-zero values. + 3. Apply this mask over the parameters and only apply an update on non-zero values. + + In actuality, we use sparse layout Tensors to optimize this approximation, which means the + more gradients that are masked by not being materialized, the more performant the optimization. + Since we rely on using sparse layout tensors, we infer that any materialized value in the + sparse layout is non-zero and we do NOT actually verify that all values are not zero! + It is important to not conflate a semantically sparse tensor (a tensor where many + of its values are zeros) with a sparse layout tensor (a tensor where ``.is_sparse`` + returns ``True``). The SparseAdam approximation is intended for `semantically` sparse + tensors and the sparse layout is only a implementation detail. A clearer implementation + would be to use MaskedTensors, but those are experimental. + + + .. note:: + + If you suspect your gradients are semantically sparse (but do not have sparse + layout), this variant may not be the best for you. Ideally, you want to avoid + materializing anything that is suspected to be sparse in the first place, since + needing to convert all your grads from dense layout to sparse layout may outweigh + the performance gain. Here, using Adam may be the best alternative, unless you + can easily rig up your module to output sparse grads similar to + ``nn.Embedding(sparse=True)``. If you insist on converting your grads, you can do + so by manually overriding your parameters' ``.grad`` fields with their sparse + equivalents before calling ``.step()``. + + + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + {_maximize_doc} + + .. _Adam\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + + """ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/optim/swa_utils.pyi b/env-llmeval/lib/python3.10/site-packages/torch/optim/swa_utils.pyi new file mode 100644 index 0000000000000000000000000000000000000000..074e7a9bbd75a6c19f7456789eedf82e6bc4e19f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/optim/swa_utils.pyi @@ -0,0 +1,32 @@ +from typing import Any, Callable, Iterable, Union + +from torch import device, Tensor +from torch.nn.modules import Module +from .lr_scheduler import _LRScheduler +from .optimizer import Optimizer + +class AveragedModel(Module): + def __init__( + self, + model: Module, + device: Union[int, device] = ..., + avg_fn: Callable[[Tensor, Tensor, int], Tensor] = ..., + use_buffers: bool = ..., + ) -> None: ... + def update_parameters(self, model: Module) -> None: ... 
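A minimal sketch of how the stubs in this file fit together (the toy model, loader, and averaging schedule are illustrative assumptions; `update_bn` and `SWALR` are declared just below):

    import torch
    from torch.optim.swa_utils import AveragedModel, SWALR, update_bn

    model = torch.nn.Linear(10, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    swa_model = AveragedModel(model)  # keeps a running average of model's weights
    swa_scheduler = SWALR(optimizer, swa_lr=0.05, anneal_epochs=5, anneal_strategy="cos")

    loader = [(torch.randn(32, 10), torch.randn(32, 1)) for _ in range(10)]
    for epoch in range(20):
        for x, y in loader:
            optimizer.zero_grad()
            torch.nn.functional.mse_loss(model(x), y).backward()
            optimizer.step()
        if epoch >= 10:  # start averaging after a warm-up phase
            swa_model.update_parameters(model)
            swa_scheduler.step()

    update_bn(loader, swa_model)  # refresh BatchNorm running stats (a no-op for this toy model)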
+ +def update_bn( + loader: Iterable[Any], + model: Module, + device: Union[int, device] = ..., +) -> None: ... + +class SWALR(_LRScheduler): + def __init__( + self, + optimizer: Optimizer, + swa_lr: float, + anneal_epochs: int, + anneal_strategy: str, + last_epoch: int = ..., + ) -> None: ... diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/package/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..66cace5931ac17c548becfddbb0e56dbbdac3d38 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/package/__init__.py @@ -0,0 +1,12 @@ +from .analyze.is_from_package import is_from_package +from .file_structure_representation import Directory +from .glob_group import GlobGroup +from .importer import ( + Importer, + ObjMismatchError, + ObjNotFoundError, + OrderedImporter, + sys_importer, +) +from .package_exporter import EmptyMatchError, PackageExporter, PackagingError +from .package_importer import PackageImporter diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37d85a45981e192f58a82675bdd61b952d178dac Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_digraph.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_digraph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fff70b7cb2b9bce7f316f28e39317703c2748933 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_digraph.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_directory_reader.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_directory_reader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba9092f1588f8b0553c6a52a106bb4741f7c3c18 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_directory_reader.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_importlib.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_importlib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..489764b5b4ab76f431dba4371b8b7cbda57e932e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_importlib.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_mangling.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_mangling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3473ce5c1fb25755878619791a9a5f04388f4326 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_mangling.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_package_pickler.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_package_pickler.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..210a3fc524c6c0434247dfe4e23022327ed91b6c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_package_pickler.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_package_unpickler.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_package_unpickler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5bfebf3fc618abbee47f635fdcf25782b495c2f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_package_unpickler.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_stdlib.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_stdlib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7223dbe8886a15dce66632d86e9b1ef489ea85f0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/_stdlib.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/file_structure_representation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/file_structure_representation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1c0bfd1e9e49554956bdac4467f5bfeff8ebe42 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/file_structure_representation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/importer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/importer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d07313991e365b9f802f5e21e17ec20dd4a03ade Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/importer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/package_exporter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/package_exporter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b60437b61b00b0fb567ccd41e17afef44c7a1b9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/package_exporter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/package_importer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/package_importer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8407b1a19feb58959a5d6b90fad7f9d59f8234be Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/package_importer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/_digraph.py b/env-llmeval/lib/python3.10/site-packages/torch/package/_digraph.py new file mode 100644 index 0000000000000000000000000000000000000000..f84a51398f005403a8e3a6e5610b5f721a0d4be7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/package/_digraph.py @@ -0,0 +1,173 @@ +from collections import deque +from typing import List, Set + + +class DiGraph: + """Really simple unweighted directed graph data structure to track dependencies. 
+ + The API is pretty much the same as networkx so if you add something just + copy their API. + """ + + def __init__(self): + # Dict of node -> dict of arbitrary attributes + self._node = {} + # Nested dict of node -> successor node -> nothing. + # (didn't implement edge data) + self._succ = {} + # Nested dict of node -> predecessor node -> nothing. + self._pred = {} + + # Keep track of the order in which nodes are added to + # the graph. + self._node_order = {} + self._insertion_idx = 0 + + def add_node(self, n, **kwargs): + """Add a node to the graph. + + Args: + n: the node. Can we any object that is a valid dict key. + **kwargs: any attributes you want to attach to the node. + """ + if n not in self._node: + self._node[n] = kwargs + self._succ[n] = {} + self._pred[n] = {} + self._node_order[n] = self._insertion_idx + self._insertion_idx += 1 + else: + self._node[n].update(kwargs) + + def add_edge(self, u, v): + """Add an edge to graph between nodes ``u`` and ``v`` + + ``u`` and ``v`` will be created if they do not already exist. + """ + # add nodes + self.add_node(u) + self.add_node(v) + + # add the edge + self._succ[u][v] = True + self._pred[v][u] = True + + def successors(self, n): + """Returns an iterator over successor nodes of n.""" + try: + return iter(self._succ[n]) + except KeyError as e: + raise ValueError(f"The node {n} is not in the digraph.") from e + + def predecessors(self, n): + """Returns an iterator over predecessors nodes of n.""" + try: + return iter(self._pred[n]) + except KeyError as e: + raise ValueError(f"The node {n} is not in the digraph.") from e + + @property + def edges(self): + """Returns an iterator over all edges (u, v) in the graph""" + for n, successors in self._succ.items(): + for succ in successors: + yield n, succ + + @property + def nodes(self): + """Returns a dictionary of all nodes to their attributes.""" + return self._node + + def __iter__(self): + """Iterate over the nodes.""" + return iter(self._node) + + def __contains__(self, n): + """Returns True if ``n`` is a node in the graph, False otherwise.""" + try: + return n in self._node + except TypeError: + return False + + def forward_transitive_closure(self, src: str) -> Set[str]: + """Returns a set of nodes that are reachable from src""" + + result = set(src) + working_set = deque(src) + while len(working_set) > 0: + cur = working_set.popleft() + for n in self.successors(cur): + if n not in result: + result.add(n) + working_set.append(n) + return result + + def backward_transitive_closure(self, src: str) -> Set[str]: + """Returns a set of nodes that are reachable from src in reverse direction""" + + result = set(src) + working_set = deque(src) + while len(working_set) > 0: + cur = working_set.popleft() + for n in self.predecessors(cur): + if n not in result: + result.add(n) + working_set.append(n) + return result + + def all_paths(self, src: str, dst: str): + """Returns a subgraph rooted at src that shows all the paths to dst.""" + + result_graph = DiGraph() + # First compute forward transitive closure of src (all things reachable from src). + forward_reachable_from_src = self.forward_transitive_closure(src) + + if dst not in forward_reachable_from_src: + return result_graph + + # Second walk the reverse dependencies of dst, adding each node to + # the output graph iff it is also present in forward_reachable_from_src. 
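A minimal usage sketch of the DiGraph API defined in this file (node names are invented for the example):

# Illustrative sketch of the DiGraph above; node names are made up.
from torch.package._digraph import DiGraph

g = DiGraph()
g.add_edge("pkg.a", "pkg.b")          # pkg.a depends on pkg.b
g.add_edge("pkg.b", "pkg.c")
g.add_node("pkg.a", provided=True)    # attach/refresh node attributes

print(list(g.successors("pkg.a")))    # ['pkg.b']
print(list(g.predecessors("pkg.c")))  # ['pkg.b']
print(sorted(g.edges))                # [('pkg.a', 'pkg.b'), ('pkg.b', 'pkg.c')]
print("pkg.c" in g)                   # True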
+ # we don't use backward_transitive_closures for optimization purposes + working_set = deque(dst) + while len(working_set) > 0: + cur = working_set.popleft() + for n in self.predecessors(cur): + if n in forward_reachable_from_src: + result_graph.add_edge(n, cur) + # only explore further if its reachable from src + working_set.append(n) + + return result_graph.to_dot() + + def first_path(self, dst: str) -> List[str]: + """Returns a list of nodes that show the first path that resulted in dst being added to the graph.""" + path = [] + + while dst: + path.append(dst) + candidates = self._pred[dst].keys() + dst, min_idx = "", None + for candidate in candidates: + idx = self._node_order.get(candidate, None) + if idx is None: + break + if min_idx is None or idx < min_idx: + min_idx = idx + dst = candidate + + return list(reversed(path)) + + def to_dot(self) -> str: + """Returns the dot representation of the graph. + + Returns: + A dot representation of the graph. + """ + edges = "\n".join(f'"{f}" -> "{t}";' for f, t in self.edges) + return f"""\ +digraph G {{ +rankdir = LR; +node [shape=box]; +{edges} +}} +""" diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/_directory_reader.py b/env-llmeval/lib/python3.10/site-packages/torch/package/_directory_reader.py new file mode 100644 index 0000000000000000000000000000000000000000..cec5333c3e3faf3268555b4d30e6072a3f5de349 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/package/_directory_reader.py @@ -0,0 +1,63 @@ +import os.path +from glob import glob +from typing import cast + +import torch +from torch.types import Storage + +__serialization_id_record_name__ = ".data/serialization_id" + + +# because get_storage_from_record returns a tensor!? +class _HasStorage: + def __init__(self, storage): + self._storage = storage + + def storage(self): + return self._storage + + +class DirectoryReader: + """ + Class to allow PackageImporter to operate on unzipped packages. Methods + copy the behavior of the internal PyTorchFileReader class (which is used for + accessing packages in all other cases). + + N.B.: ScriptObjects are not depickleable or accessible via this DirectoryReader + class due to ScriptObjects requiring an actual PyTorchFileReader instance. 
+ """ + + def __init__(self, directory): + self.directory = directory + + def get_record(self, name): + filename = f"{self.directory}/{name}" + with open(filename, "rb") as f: + return f.read() + + def get_storage_from_record(self, name, numel, dtype): + filename = f"{self.directory}/{name}" + nbytes = torch._utils._element_size(dtype) * numel + storage = cast(Storage, torch.UntypedStorage) + return _HasStorage(storage.from_file(filename=filename, nbytes=nbytes)) + + def has_record(self, path): + full_path = os.path.join(self.directory, path) + return os.path.isfile(full_path) + + def get_all_records( + self, + ): + files = [] + for filename in glob(f"{self.directory}/**", recursive=True): + if not os.path.isdir(filename): + files.append(filename[len(self.directory) + 1 :]) + return files + + def serialization_id( + self, + ): + if self.has_record(__serialization_id_record_name__): + return self.get_record(__serialization_id_record_name__) + else: + return "" diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/_importlib.py b/env-llmeval/lib/python3.10/site-packages/torch/package/_importlib.py new file mode 100644 index 0000000000000000000000000000000000000000..fd303b6141e7eeeeb891927c063ed5588927388a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/package/_importlib.py @@ -0,0 +1,93 @@ +import _warnings +import os.path + +# note: implementations +# copied from cpython's import code + + +# _zip_searchorder defines how we search for a module in the Zip +# archive: we first search for a package __init__, then for +# non-package .pyc, and .py entries. The .pyc entries +# are swapped by initzipimport() if we run in optimized mode. Also, +# '/' is replaced by path_sep there. + +_zip_searchorder = ( + ("/__init__.py", True), + (".py", False), +) + + +# Replace any occurrences of '\r\n?' in the input string with '\n'. +# This converts DOS and Mac line endings to Unix line endings. +def _normalize_line_endings(source): + source = source.replace(b"\r\n", b"\n") + source = source.replace(b"\r", b"\n") + return source + + +def _resolve_name(name, package, level): + """Resolve a relative module name to an absolute one.""" + bits = package.rsplit(".", level - 1) + if len(bits) < level: + raise ValueError("attempted relative import beyond top-level package") + base = bits[0] + return f"{base}.{name}" if name else base + + +def _sanity_check(name, package, level): + """Verify arguments are "sane".""" + if not isinstance(name, str): + raise TypeError(f"module name must be str, not {type(name)}") + if level < 0: + raise ValueError("level must be >= 0") + if level > 0: + if not isinstance(package, str): + raise TypeError("__package__ not set to a string") + elif not package: + raise ImportError("attempted relative import with no known parent package") + if not name and level == 0: + raise ValueError("Empty module name") + + +def _calc___package__(globals): + """Calculate what __package__ should be. + + __package__ is not guaranteed to be defined or could be set to None + to represent that its proper value is unknown. 
+ + """ + package = globals.get("__package__") + spec = globals.get("__spec__") + if package is not None: + if spec is not None and package != spec.parent: + _warnings.warn( # noqa: G010 + f"__package__ != __spec__.parent ({package!r} != {spec.parent!r})", # noqa: G004 + ImportWarning, + stacklevel=3, + ) + return package + elif spec is not None: + return spec.parent + else: + _warnings.warn( # noqa: G010 + "can't resolve package from __spec__ or __package__, " + "falling back on __name__ and __path__", + ImportWarning, + stacklevel=3, + ) + package = globals["__name__"] + if "__path__" not in globals: + package = package.rpartition(".")[0] + return package + + +def _normalize_path(path): + """Normalize a path by ensuring it is a string. + + If the resulting string contains path separators, an exception is raised. + """ + parent, file_name = os.path.split(path) + if parent: + raise ValueError(f"{path!r} must be only a file name") + else: + return file_name diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/_mangling.py b/env-llmeval/lib/python3.10/site-packages/torch/package/_mangling.py new file mode 100644 index 0000000000000000000000000000000000000000..0876d64664a2726bcb58d50b49326889f030f34e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/package/_mangling.py @@ -0,0 +1,62 @@ +"""Import mangling. +See mangling.md for details. +""" +import re + +_mangle_index = 0 + + +class PackageMangler: + """ + Used on import, to ensure that all modules imported have a shared mangle parent. + """ + + def __init__(self): + global _mangle_index + self._mangle_index = _mangle_index + # Increment the global index + _mangle_index += 1 + # Angle brackets are used so that there is almost no chance of + # confusing this module for a real module. Plus, it is Python's + # preferred way of denoting special modules. + self._mangle_parent = f"" + + def mangle(self, name) -> str: + assert len(name) != 0 + return self._mangle_parent + "." + name + + def demangle(self, mangled: str) -> str: + """ + Note: This only demangles names that were mangled by this specific + PackageMangler. It will pass through names created by a different + PackageMangler instance. + """ + if mangled.startswith(self._mangle_parent + "."): + return mangled.partition(".")[2] + + # wasn't a mangled name + return mangled + + def parent_name(self): + return self._mangle_parent + + +def is_mangled(name: str) -> bool: + return bool(re.match(r"", name)) + + +def demangle(name: str) -> str: + """ + Note: Unlike PackageMangler.demangle, this version works on any + mangled name, irrespective of which PackageMangler created it. + """ + if is_mangled(name): + first, sep, last = name.partition(".") + # If there is only a base mangle prefix, e.g. '', + # then return an empty string. 
+ return last if len(sep) != 0 else "" + return name + + +def get_mangle_prefix(name: str) -> str: + return name.partition(".")[0] if is_mangled(name) else name diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/_mock.py b/env-llmeval/lib/python3.10/site-packages/torch/package/_mock.py new file mode 100644 index 0000000000000000000000000000000000000000..b0bdb95cc48c4b1bfaa9edc07fceb1b16d733752 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/package/_mock.py @@ -0,0 +1,122 @@ +_magic_methods = [ + "__subclasscheck__", + "__hex__", + "__rmul__", + "__float__", + "__idiv__", + "__setattr__", + "__div__", + "__invert__", + "__nonzero__", + "__rshift__", + "__eq__", + "__pos__", + "__round__", + "__rand__", + "__or__", + "__complex__", + "__divmod__", + "__len__", + "__reversed__", + "__copy__", + "__reduce__", + "__deepcopy__", + "__rdivmod__", + "__rrshift__", + "__ifloordiv__", + "__hash__", + "__iand__", + "__xor__", + "__isub__", + "__oct__", + "__ceil__", + "__imod__", + "__add__", + "__truediv__", + "__unicode__", + "__le__", + "__delitem__", + "__sizeof__", + "__sub__", + "__ne__", + "__pow__", + "__bytes__", + "__mul__", + "__itruediv__", + "__bool__", + "__iter__", + "__abs__", + "__gt__", + "__iadd__", + "__enter__", + "__floordiv__", + "__call__", + "__neg__", + "__and__", + "__ixor__", + "__getitem__", + "__exit__", + "__cmp__", + "__getstate__", + "__index__", + "__contains__", + "__floor__", + "__lt__", + "__getattr__", + "__mod__", + "__trunc__", + "__delattr__", + "__instancecheck__", + "__setitem__", + "__ipow__", + "__ilshift__", + "__long__", + "__irshift__", + "__imul__", + "__lshift__", + "__dir__", + "__ge__", + "__int__", + "__ior__", +] + + +class MockedObject: + _name: str + + def __new__(cls, *args, **kwargs): + # _suppress_err is set by us in the mocked module impl, so that we can + # construct instances of MockedObject to hand out to people looking up + # module attributes. + + # Any other attempt to construct a MockedObject instance (say, in the + # unpickling process) should give an error. + if not kwargs.get("_suppress_err"): + raise NotImplementedError( + f"Object '{cls._name}' was mocked out during packaging " + f"but it is being used in '__new__'. If this error is " + "happening during 'load_pickle', please ensure that your " + "pickled object doesn't contain any mocked objects." + ) + # Otherwise, this is just a regular object creation + # (e.g. `x = MockedObject("foo")`), so pass it through normally. 
+ return super().__new__(cls) + + def __init__(self, name: str, _suppress_err: bool): + self.__dict__["_name"] = name + + def __repr__(self): + return f"MockedObject({self._name})" + + +def install_method(method_name): + def _not_implemented(self, *args, **kwargs): + raise NotImplementedError( + f"Object '{self._name}' was mocked out during packaging but it is being used in {method_name}" + ) + + setattr(MockedObject, method_name, _not_implemented) + + +for method_name in _magic_methods: + install_method(method_name) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/_package_pickler.py b/env-llmeval/lib/python3.10/site-packages/torch/package/_package_pickler.py new file mode 100644 index 0000000000000000000000000000000000000000..cabc6a82164fb3aaf767f14cf60bca58535fcf61 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/package/_package_pickler.py @@ -0,0 +1,118 @@ +"""isort:skip_file""" +from pickle import ( # type: ignore[attr-defined] + _compat_pickle, + _extension_registry, + _getattribute, + _Pickler, + EXT1, + EXT2, + EXT4, + GLOBAL, + Pickler, + PicklingError, + STACK_GLOBAL, +) +from struct import pack +from types import FunctionType + +from .importer import Importer, ObjMismatchError, ObjNotFoundError, sys_importer + + +class PackagePickler(_Pickler): + """Package-aware pickler. + + This behaves the same as a normal pickler, except it uses an `Importer` + to find objects and modules to save. + """ + + def __init__(self, importer: Importer, *args, **kwargs): + self.importer = importer + super().__init__(*args, **kwargs) + + # Make sure the dispatch table copied from _Pickler is up-to-date. + # Previous issues have been encountered where a library (e.g. dill) + # mutate _Pickler.dispatch, PackagePickler makes a copy when this lib + # is imported, then the offending library removes its dispatch entries, + # leaving PackagePickler with a stale dispatch table that may cause + # unwanted behavior. + self.dispatch = _Pickler.dispatch.copy() # type: ignore[misc] + self.dispatch[FunctionType] = PackagePickler.save_global # type: ignore[assignment] + + def save_global(self, obj, name=None): + # unfortunately the pickler code is factored in a way that + # forces us to copy/paste this function. The only change is marked + # CHANGED below. + write = self.write # type: ignore[attr-defined] + memo = self.memo # type: ignore[attr-defined] + + # CHANGED: import module from module environment instead of __import__ + try: + module_name, name = self.importer.get_name(obj, name) + except (ObjNotFoundError, ObjMismatchError) as err: + raise PicklingError(f"Can't pickle {obj}: {str(err)}") from None + + module = self.importer.import_module(module_name) + _, parent = _getattribute(module, name) + # END CHANGED + + if self.proto >= 2: # type: ignore[attr-defined] + code = _extension_registry.get((module_name, name)) + if code: + assert code > 0 + if code <= 0xFF: + write(EXT1 + pack("= 3. 
+ if self.proto >= 4: # type: ignore[attr-defined] + self.save(module_name) # type: ignore[attr-defined] + self.save(name) # type: ignore[attr-defined] + write(STACK_GLOBAL) + elif parent is not module: + self.save_reduce(getattr, (parent, lastname)) # type: ignore[attr-defined] + elif self.proto >= 3: # type: ignore[attr-defined] + write( + GLOBAL + + bytes(module_name, "utf-8") + + b"\n" + + bytes(name, "utf-8") + + b"\n" + ) + else: + if self.fix_imports: # type: ignore[attr-defined] + r_name_mapping = _compat_pickle.REVERSE_NAME_MAPPING + r_import_mapping = _compat_pickle.REVERSE_IMPORT_MAPPING + if (module_name, name) in r_name_mapping: + module_name, name = r_name_mapping[(module_name, name)] + elif module_name in r_import_mapping: + module_name = r_import_mapping[module_name] + try: + write( + GLOBAL + + bytes(module_name, "ascii") + + b"\n" + + bytes(name, "ascii") + + b"\n" + ) + except UnicodeEncodeError: + raise PicklingError( + "can't pickle global identifier '%s.%s' using " + "pickle protocol %i" % (module, name, self.proto) # type: ignore[attr-defined] + ) from None + + self.memoize(obj) # type: ignore[attr-defined] + + +def create_pickler(data_buf, importer, protocol=4): + if importer is sys_importer: + # if we are using the normal import library system, then + # we can use the C implementation of pickle which is faster + return Pickler(data_buf, protocol=protocol) + else: + return PackagePickler(importer, data_buf, protocol=protocol) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/_package_unpickler.py b/env-llmeval/lib/python3.10/site-packages/torch/package/_package_unpickler.py new file mode 100644 index 0000000000000000000000000000000000000000..b00210e3c191e5dfaf94170e083b56ebc44d5bf2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/package/_package_unpickler.py @@ -0,0 +1,26 @@ +import _compat_pickle +import pickle + +from .importer import Importer + + +class PackageUnpickler(pickle._Unpickler): # type: ignore[name-defined] + """Package-aware unpickler. + + This behaves the same as a normal unpickler, except it uses `importer` to + find any global names that it encounters while unpickling. + """ + + def __init__(self, importer: Importer, *args, **kwargs): + super().__init__(*args, **kwargs) + self._importer = importer + + def find_class(self, module, name): + # Subclasses may override this. + if self.proto < 3 and self.fix_imports: # type: ignore[attr-defined] + if (module, name) in _compat_pickle.NAME_MAPPING: + module, name = _compat_pickle.NAME_MAPPING[(module, name)] + elif module in _compat_pickle.IMPORT_MAPPING: + module = _compat_pickle.IMPORT_MAPPING[module] + mod = self._importer.import_module(module) + return getattr(mod, name) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/_stdlib.py b/env-llmeval/lib/python3.10/site-packages/torch/package/_stdlib.py new file mode 100644 index 0000000000000000000000000000000000000000..a810d50661cb3ded86ee42ce623ec660276a754a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/package/_stdlib.py @@ -0,0 +1,464 @@ +"""List of Python standard library modules. + +Sadly, there is no reliable way to tell whether a module is part of the +standard library except by comparing to a canonical list. + +This is taken from https://github.com/PyCQA/isort/tree/develop/isort/stdlibs, +which itself is sourced from the Python documentation. 
+""" + +import sys + + +def is_stdlib_module(module: str) -> bool: + base_module = module.partition(".")[0] + return base_module in _get_stdlib_modules() + + +def _get_stdlib_modules(): + if sys.version_info.major == 3: + if sys.version_info.minor == 8: + return stdlib3_8 + if sys.version_info.minor == 9: + return stdlib3_9 + if sys.version_info.minor >= 10: + return sys.stdlib_module_names # type: ignore[attr-defined] + elif sys.version_info.major > 3: + return sys.stdlib_module_names # type: ignore[attr-defined] + + raise RuntimeError(f"Unsupported Python version: {sys.version_info}") + + +stdlib3_8 = { + "_dummy_thread", + "_thread", + "abc", + "aifc", + "argparse", + "array", + "ast", + "asynchat", + "asyncio", + "asyncore", + "atexit", + "audioop", + "base64", + "bdb", + "binascii", + "binhex", + "bisect", + "builtins", + "bz2", + "cProfile", + "calendar", + "cgi", + "cgitb", + "chunk", + "cmath", + "cmd", + "code", + "codecs", + "codeop", + "collections", + "colorsys", + "compileall", + "concurrent", + "configparser", + "contextlib", + "contextvars", + "copy", + "copyreg", + "crypt", + "csv", + "ctypes", + "curses", + "dataclasses", + "datetime", + "dbm", + "decimal", + "difflib", + "dis", + "distutils", + "doctest", + "dummy_threading", + "email", + "encodings", + "ensurepip", + "enum", + "errno", + "faulthandler", + "fcntl", + "filecmp", + "fileinput", + "fnmatch", + "formatter", + "fractions", + "ftplib", + "functools", + "gc", + "getopt", + "getpass", + "gettext", + "glob", + "grp", + "gzip", + "hashlib", + "heapq", + "hmac", + "html", + "http", + "imaplib", + "imghdr", + "imp", + "importlib", + "inspect", + "io", + "ipaddress", + "itertools", + "json", + "keyword", + "lib2to3", + "linecache", + "locale", + "logging", + "lzma", + "mailbox", + "mailcap", + "marshal", + "math", + "mimetypes", + "mmap", + "modulefinder", + "msilib", + "msvcrt", + "multiprocessing", + "netrc", + "nis", + "nntplib", + "ntpath", + "numbers", + "operator", + "optparse", + "os", + "ossaudiodev", + "parser", + "pathlib", + "pdb", + "pickle", + "pickletools", + "pipes", + "pkgutil", + "platform", + "plistlib", + "poplib", + "posix", + "posixpath", + "pprint", + "profile", + "pstats", + "pty", + "pwd", + "py_compile", + "pyclbr", + "pydoc", + "queue", + "quopri", + "random", + "re", + "readline", + "reprlib", + "resource", + "rlcompleter", + "runpy", + "sched", + "secrets", + "select", + "selectors", + "shelve", + "shlex", + "shutil", + "signal", + "site", + "smtpd", + "smtplib", + "sndhdr", + "socket", + "socketserver", + "spwd", + "sqlite3", + "sre", + "sre_compile", + "sre_constants", + "sre_parse", + "ssl", + "stat", + "statistics", + "string", + "stringprep", + "struct", + "subprocess", + "sunau", + "symbol", + "symtable", + "sys", + "sysconfig", + "syslog", + "tabnanny", + "tarfile", + "telnetlib", + "tempfile", + "termios", + "test", + "textwrap", + "threading", + "time", + "timeit", + "tkinter", + "token", + "tokenize", + "trace", + "traceback", + "tracemalloc", + "tty", + "turtle", + "turtledemo", + "types", + "typing", + "unicodedata", + "unittest", + "urllib", + "uu", + "uuid", + "venv", + "warnings", + "wave", + "weakref", + "webbrowser", + "winreg", + "winsound", + "wsgiref", + "xdrlib", + "xml", + "xmlrpc", + "zipapp", + "zipfile", + "zipimport", + "zlib", +} + +stdlib3_9 = { + "_thread", + "abc", + "aifc", + "argparse", + "array", + "ast", + "asynchat", + "asyncio", + "asyncore", + "atexit", + "audioop", + "base64", + "bdb", + "binascii", + "binhex", + "bisect", + "builtins", + "bz2", + 
"cProfile", + "calendar", + "cgi", + "cgitb", + "chunk", + "cmath", + "cmd", + "code", + "codecs", + "codeop", + "collections", + "colorsys", + "compileall", + "concurrent", + "configparser", + "contextlib", + "contextvars", + "copy", + "copyreg", + "crypt", + "csv", + "ctypes", + "curses", + "dataclasses", + "datetime", + "dbm", + "decimal", + "difflib", + "dis", + "distutils", + "doctest", + "email", + "encodings", + "ensurepip", + "enum", + "errno", + "faulthandler", + "fcntl", + "filecmp", + "fileinput", + "fnmatch", + "formatter", + "fractions", + "ftplib", + "functools", + "gc", + "getopt", + "getpass", + "gettext", + "glob", + "graphlib", + "grp", + "gzip", + "hashlib", + "heapq", + "hmac", + "html", + "http", + "imaplib", + "imghdr", + "imp", + "importlib", + "inspect", + "io", + "ipaddress", + "itertools", + "json", + "keyword", + "lib2to3", + "linecache", + "locale", + "logging", + "lzma", + "mailbox", + "mailcap", + "marshal", + "math", + "mimetypes", + "mmap", + "modulefinder", + "msilib", + "msvcrt", + "multiprocessing", + "netrc", + "nis", + "nntplib", + "ntpath", + "numbers", + "operator", + "optparse", + "os", + "ossaudiodev", + "parser", + "pathlib", + "pdb", + "pickle", + "pickletools", + "pipes", + "pkgutil", + "platform", + "plistlib", + "poplib", + "posix", + "posixpath", + "pprint", + "profile", + "pstats", + "pty", + "pwd", + "py_compile", + "pyclbr", + "pydoc", + "queue", + "quopri", + "random", + "re", + "readline", + "reprlib", + "resource", + "rlcompleter", + "runpy", + "sched", + "secrets", + "select", + "selectors", + "shelve", + "shlex", + "shutil", + "signal", + "site", + "smtpd", + "smtplib", + "sndhdr", + "socket", + "socketserver", + "spwd", + "sqlite3", + "sre", + "sre_compile", + "sre_constants", + "sre_parse", + "ssl", + "stat", + "statistics", + "string", + "stringprep", + "struct", + "subprocess", + "sunau", + "symbol", + "symtable", + "sys", + "sysconfig", + "syslog", + "tabnanny", + "tarfile", + "telnetlib", + "tempfile", + "termios", + "test", + "textwrap", + "threading", + "time", + "timeit", + "tkinter", + "token", + "tokenize", + "trace", + "traceback", + "tracemalloc", + "tty", + "turtle", + "turtledemo", + "types", + "typing", + "unicodedata", + "unittest", + "urllib", + "uu", + "uuid", + "venv", + "warnings", + "wave", + "weakref", + "webbrowser", + "winreg", + "winsound", + "wsgiref", + "xdrlib", + "xml", + "xmlrpc", + "zipapp", + "zipfile", + "zipimport", + "zlib", + "zoneinfo", +} diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/file_structure_representation.py b/env-llmeval/lib/python3.10/site-packages/torch/package/file_structure_representation.py new file mode 100644 index 0000000000000000000000000000000000000000..cc5f055c1a20ef14a47e6a91127931b2a6ccebfe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/package/file_structure_representation.py @@ -0,0 +1,133 @@ +from typing import Dict, List + +from .glob_group import GlobGroup, GlobPattern + +__all__ = ["Directory"] + + +class Directory: + """A file structure representation. Organized as Directory nodes that have lists of + their Directory children. Directories for a package are created by calling + :meth:`PackageImporter.file_structure`.""" + + def __init__(self, name: str, is_dir: bool): + self.name = name + self.is_dir = is_dir + self.children: Dict[str, Directory] = {} + + def _get_dir(self, dirs: List[str]) -> "Directory": + """Builds path of Directories if not yet built and returns last directory + in list. 
+ + Args: + dirs (List[str]): List of directory names that are treated like a path. + + Returns: + :class:`Directory`: The last Directory specified in the dirs list. + """ + if len(dirs) == 0: + return self + dir_name = dirs[0] + if dir_name not in self.children: + self.children[dir_name] = Directory(dir_name, True) + return self.children[dir_name]._get_dir(dirs[1:]) + + def _add_file(self, file_path: str): + """Adds a file to a Directory. + + Args: + file_path (str): Path of file to add. Last element is added as a file while + other paths items are added as directories. + """ + *dirs, file = file_path.split("/") + dir = self._get_dir(dirs) + dir.children[file] = Directory(file, False) + + def has_file(self, filename: str) -> bool: + """Checks if a file is present in a :class:`Directory`. + + Args: + filename (str): Path of file to search for. + Returns: + bool: If a :class:`Directory` contains the specified file. + """ + lineage = filename.split("/", maxsplit=1) + child = lineage[0] + grandchildren = lineage[1] if len(lineage) > 1 else None + if child in self.children.keys(): + if grandchildren is None: + return True + else: + return self.children[child].has_file(grandchildren) + return False + + def __str__(self): + str_list: List[str] = [] + self._stringify_tree(str_list) + return "".join(str_list) + + def _stringify_tree( + self, str_list: List[str], preamble: str = "", dir_ptr: str = "─── " + ): + """Recursive method to generate print-friendly version of a Directory.""" + space = " " + branch = "│ " + tee = "├── " + last = "└── " + + # add this directory's representation + str_list.append(f"{preamble}{dir_ptr}{self.name}\n") + + # add directory's children representations + if dir_ptr == tee: + preamble = preamble + branch + else: + preamble = preamble + space + + file_keys: List[str] = [] + dir_keys: List[str] = [] + for key, val in self.children.items(): + if val.is_dir: + dir_keys.append(key) + else: + file_keys.append(key) + + for index, key in enumerate(sorted(dir_keys)): + if (index == len(dir_keys) - 1) and len(file_keys) == 0: + self.children[key]._stringify_tree(str_list, preamble, last) + else: + self.children[key]._stringify_tree(str_list, preamble, tee) + for index, file in enumerate(sorted(file_keys)): + pointer = last if (index == len(file_keys) - 1) else tee + str_list.append(f"{preamble}{pointer}{file}\n") + + +def _create_directory_from_file_list( + filename: str, + file_list: List[str], + include: "GlobPattern" = "**", + exclude: "GlobPattern" = (), +) -> Directory: + """Return a :class:`Directory` file structure representation created from a list of files. + + Args: + filename (str): The name given to the top-level directory that will be the + relative root for all file paths found in the file_list. + + file_list (List[str]): List of files to add to the top-level directory. + + include (Union[List[str], str]): An optional pattern that limits what is included from the file_list to + files whose name matches the pattern. + + exclude (Union[List[str], str]): An optional pattern that excludes files whose name match the pattern. + + Returns: + :class:`Directory`: a :class:`Directory` file structure representation created from a list of files. 
+ """ + glob_pattern = GlobGroup(include, exclude=exclude, separator="/") + + top_dir = Directory(filename, True) + for file in file_list: + if glob_pattern.matches(file): + top_dir._add_file(file) + return top_dir diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/find_file_dependencies.py b/env-llmeval/lib/python3.10/site-packages/torch/package/find_file_dependencies.py new file mode 100644 index 0000000000000000000000000000000000000000..af8cd9fec84deb31b853045ecd077d58d45384fb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/package/find_file_dependencies.py @@ -0,0 +1,95 @@ +import ast +from typing import List, Optional, Tuple + +from ._importlib import _resolve_name + + +class _ExtractModuleReferences(ast.NodeVisitor): + """ + Extract the list of global variables a block of code will read and write + """ + + @classmethod + def run(cls, src: str, package: str) -> List[Tuple[str, Optional[str]]]: + visitor = cls(package) + tree = ast.parse(src) + visitor.visit(tree) + return list(visitor.references.keys()) + + def __init__(self, package): + super().__init__() + self.package = package + self.references = {} + + def _absmodule(self, module_name: str, level: int) -> str: + if level > 0: + return _resolve_name(module_name, self.package, level) + return module_name + + def visit_Import(self, node): + for alias in node.names: + self.references[(alias.name, None)] = True + + def visit_ImportFrom(self, node): + name = self._absmodule(node.module, 0 if node.level is None else node.level) + for alias in node.names: + # from my_package import foo + # foo may be a module, so we have to add it to the list of + # potential references, if import of it fails, we will ignore it + if alias.name != "*": + self.references[(name, alias.name)] = True + else: + self.references[(name, None)] = True + + def _grab_node_int(self, node): + return node.value + + def _grab_node_str(self, node): + return node.value + + def visit_Call(self, node): + # __import__ calls aren't routed to the visit_Import/From nodes + if hasattr(node.func, "id") and node.func.id == "__import__": + try: + name = self._grab_node_str(node.args[0]) + fromlist = [] + level = 0 + if len(node.args) > 3: + for v in node.args[3].elts: + fromlist.append(self._grab_node_str(v)) + elif hasattr(node, "keywords"): + for keyword in node.keywords: + if keyword.arg == "fromlist": + for v in keyword.value.elts: + fromlist.append(self._grab_node_str(v)) + if len(node.args) > 4: + level = self._grab_node_int(node.args[4]) + elif hasattr(node, "keywords"): + for keyword in node.keywords: + if keyword.arg == "level": + level = self._grab_node_int(keyword.value) + if fromlist == []: + # the top-level package (the name up till the first dot) is returned + # when the fromlist argument is empty in normal import system, + # we need to include top level package to match this behavior and last + # level package to capture the intended dependency of user + self.references[(name, None)] = True + top_name = name.rsplit(".", maxsplit=1)[0] + if top_name != name: + top_name = self._absmodule(top_name, level) + self.references[(top_name, None)] = True + else: + name = self._absmodule(name, level) + for alias in fromlist: + # fromlist args may be submodules, so we have to add the fromlist args + # to the list of potential references. 
If import of an arg fails we + # will ignore it, similar to visit_ImportFrom + if alias != "*": + self.references[(name, alias)] = True + else: + self.references[(name, None)] = True + except Exception as e: + return + + +find_files_source_depends_on = _ExtractModuleReferences.run diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/glob_group.py b/env-llmeval/lib/python3.10/site-packages/torch/package/glob_group.py new file mode 100644 index 0000000000000000000000000000000000000000..a8434788d016fd64cb03e1cf5cdebea5d65d6a59 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/package/glob_group.py @@ -0,0 +1,82 @@ +import re +from typing import Iterable, Union + +GlobPattern = Union[str, Iterable[str]] + + +class GlobGroup: + """A set of patterns that candidate strings will be matched against. + + A candidate is composed of a list of segments separated by ``separator``, e.g. "foo.bar.baz". + + A pattern contains one or more segments. Segments can be: + - A literal string (e.g. "foo"), which matches exactly. + - A string containing a wildcard (e.g. "torch*", or "foo*baz*"). The wildcard matches + any string, including the empty string. + - A double wildcard ("**"). This matches against zero or more complete segments. + + Examples: + ``torch.**``: matches ``torch`` and all its submodules, e.g. ``torch.nn`` and ``torch.nn.functional``. + ``torch.*``: matches ``torch.nn`` or ``torch.functional``, but not ``torch.nn.functional``. + ``torch*.**``: matches ``torch``, ``torchvision``, and all their submodules. + + A candidates will match the ``GlobGroup`` if it matches any of the ``include`` patterns and + none of the ``exclude`` patterns. + + Args: + include (Union[str, Iterable[str]]): A string or list of strings, + each representing a pattern to be matched against. A candidate + will match if it matches *any* include pattern + exclude (Union[str, Iterable[str]]): A string or list of strings, + each representing a pattern to be matched against. A candidate + will be excluded from matching if it matches *any* exclude pattern. + separator (str): A string that delimits segments in candidates and + patterns. By default this is "." which corresponds to how modules are + named in Python. Another common value for this is "/", which is + the Unix path separator. + """ + + def __init__( + self, include: GlobPattern, *, exclude: GlobPattern = (), separator: str = "." + ): + self._dbg = f"GlobGroup(include={include}, exclude={exclude})" + self.include = GlobGroup._glob_list(include, separator) + self.exclude = GlobGroup._glob_list(exclude, separator) + self.separator = separator + + def __str__(self): + return self._dbg + + def __repr__(self): + return self._dbg + + def matches(self, candidate: str) -> bool: + candidate = self.separator + candidate + return any(p.fullmatch(candidate) for p in self.include) and all( + not p.fullmatch(candidate) for p in self.exclude + ) + + @staticmethod + def _glob_list(elems: GlobPattern, separator: str = "."): + if isinstance(elems, str): + return [GlobGroup._glob_to_re(elems, separator)] + else: + return [GlobGroup._glob_to_re(e, separator) for e in elems] + + @staticmethod + def _glob_to_re(pattern: str, separator: str = "."): + # to avoid corner cases for the first component, we prefix the candidate string + # with '.' so `import torch` will regex against `.torch`, assuming '.' 
is the separator + def component_to_re(component): + if "**" in component: + if component == "**": + return "(" + re.escape(separator) + "[^" + separator + "]+)*" + else: + raise ValueError("** can only appear as an entire path segment") + else: + return re.escape(separator) + ("[^" + separator + "]*").join( + re.escape(x) for x in component.split("*") + ) + + result = "".join(component_to_re(c) for c in pattern.split(separator)) + return re.compile(result) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/importer.py b/env-llmeval/lib/python3.10/site-packages/torch/package/importer.py new file mode 100644 index 0000000000000000000000000000000000000000..dd01d09209a8c69e8c7c03a724397c358a1c96ce --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/package/importer.py @@ -0,0 +1,237 @@ +import importlib +from abc import ABC, abstractmethod +from pickle import ( # type: ignore[attr-defined] # type: ignore[attr-defined] + _getattribute, + _Pickler, + whichmodule as _pickle_whichmodule, +) +from types import ModuleType +from typing import Any, Dict, List, Optional, Tuple + +from ._mangling import demangle, get_mangle_prefix, is_mangled + +__all__ = ["ObjNotFoundError", "ObjMismatchError", "Importer", "OrderedImporter"] + + +class ObjNotFoundError(Exception): + """Raised when an importer cannot find an object by searching for its name.""" + + pass + + +class ObjMismatchError(Exception): + """Raised when an importer found a different object with the same name as the user-provided one.""" + + pass + + +class Importer(ABC): + """Represents an environment to import modules from. + + By default, you can figure out what module an object belongs by checking + __module__ and importing the result using __import__ or importlib.import_module. + + torch.package introduces module importers other than the default one. + Each PackageImporter introduces a new namespace. Potentially a single + name (e.g. 'foo.bar') is present in multiple namespaces. + + It supports two main operations: + import_module: module_name -> module object + get_name: object -> (parent module name, name of obj within module) + + The guarantee is that following round-trip will succeed or throw an ObjNotFoundError/ObjMisMatchError. + module_name, obj_name = env.get_name(obj) + module = env.import_module(module_name) + obj2 = getattr(module, obj_name) + assert obj1 is obj2 + """ + + modules: Dict[str, ModuleType] + + @abstractmethod + def import_module(self, module_name: str) -> ModuleType: + """Import `module_name` from this environment. + + The contract is the same as for importlib.import_module. + """ + pass + + def get_name(self, obj: Any, name: Optional[str] = None) -> Tuple[str, str]: + """Given an object, return a name that can be used to retrieve the + object from this environment. + + Args: + obj: An object to get the module-environment-relative name for. + name: If set, use this name instead of looking up __name__ or __qualname__ on `obj`. + This is only here to match how Pickler handles __reduce__ functions that return a string, + don't use otherwise. + Returns: + A tuple (parent_module_name, attr_name) that can be used to retrieve `obj` from this environment. + Use it like: + mod = importer.import_module(parent_module_name) + obj = getattr(mod, attr_name) + + Raises: + ObjNotFoundError: we couldn't retrieve `obj by name. + ObjMisMatchError: we found a different object with the same name as `obj`. 
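A short sketch of the GlobGroup semantics described above (module names are invented for the example):

# Illustrative sketch of GlobGroup matching; module names are made up.
from torch.package import GlobGroup

group = GlobGroup("torch.**", exclude="torch.foo.**")
assert group.matches("torch")
assert group.matches("torch.nn.functional")
assert not group.matches("torch.foo.bar")   # excluded by the exclude pattern
assert not group.matches("torchvision")     # "torch.**" does not match other roots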
+ """ + if name is None and obj and _Pickler.dispatch.get(type(obj)) is None: + # Honor the string return variant of __reduce__, which will give us + # a global name to search for in this environment. + # TODO: I guess we should do copyreg too? + reduce = getattr(obj, "__reduce__", None) + if reduce is not None: + try: + rv = reduce() + if isinstance(rv, str): + name = rv + except Exception: + pass + if name is None: + name = getattr(obj, "__qualname__", None) + if name is None: + name = obj.__name__ + + orig_module_name = self.whichmodule(obj, name) + # Demangle the module name before importing. If this obj came out of a + # PackageImporter, `__module__` will be mangled. See mangling.md for + # details. + module_name = demangle(orig_module_name) + + # Check that this name will indeed return the correct object + try: + module = self.import_module(module_name) + obj2, _ = _getattribute(module, name) + except (ImportError, KeyError, AttributeError): + raise ObjNotFoundError( + f"{obj} was not found as {module_name}.{name}" + ) from None + + if obj is obj2: + return module_name, name + + def get_obj_info(obj): + assert name is not None + module_name = self.whichmodule(obj, name) + is_mangled_ = is_mangled(module_name) + location = ( + get_mangle_prefix(module_name) + if is_mangled_ + else "the current Python environment" + ) + importer_name = ( + f"the importer for {get_mangle_prefix(module_name)}" + if is_mangled_ + else "'sys_importer'" + ) + return module_name, location, importer_name + + obj_module_name, obj_location, obj_importer_name = get_obj_info(obj) + obj2_module_name, obj2_location, obj2_importer_name = get_obj_info(obj2) + msg = ( + f"\n\nThe object provided is from '{obj_module_name}', " + f"which is coming from {obj_location}." + f"\nHowever, when we import '{obj2_module_name}', it's coming from {obj2_location}." + "\nTo fix this, make sure this 'PackageExporter's importer lists " + f"{obj_importer_name} before {obj2_importer_name}." + ) + raise ObjMismatchError(msg) + + def whichmodule(self, obj: Any, name: str) -> str: + """Find the module name an object belongs to. + + This should be considered internal for end-users, but developers of + an importer can override it to customize the behavior. + + Taken from pickle.py, but modified to exclude the search into sys.modules + """ + module_name = getattr(obj, "__module__", None) + if module_name is not None: + return module_name + + # Protect the iteration by using a list copy of self.modules against dynamic + # modules that trigger imports of other modules upon calls to getattr. + for module_name, module in self.modules.copy().items(): + if ( + module_name == "__main__" + or module_name == "__mp_main__" # bpo-42406 + or module is None + ): + continue + try: + if _getattribute(module, name)[0] is obj: + return module_name + except AttributeError: + pass + + return "__main__" + + +class _SysImporter(Importer): + """An importer that implements the default behavior of Python.""" + + def import_module(self, module_name: str): + return importlib.import_module(module_name) + + def whichmodule(self, obj: Any, name: str) -> str: + return _pickle_whichmodule(obj, name) + + +sys_importer = _SysImporter() + + +class OrderedImporter(Importer): + """A compound importer that takes a list of importers and tries them one at a time. + + The first importer in the list that returns a result "wins". 
+ """ + + def __init__(self, *args): + self._importers: List[Importer] = list(args) + + def _is_torchpackage_dummy(self, module): + """Returns true iff this module is an empty PackageNode in a torch.package. + + If you intern `a.b` but never use `a` in your code, then `a` will be an + empty module with no source. This can break cases where we are trying to + re-package an object after adding a real dependency on `a`, since + OrderedImportere will resolve `a` to the dummy package and stop there. + + See: https://github.com/pytorch/pytorch/pull/71520#issuecomment-1029603769 + """ + if not getattr(module, "__torch_package__", False): + return False + if not hasattr(module, "__path__"): + return False + if not hasattr(module, "__file__"): + return True + return module.__file__ is None + + def import_module(self, module_name: str) -> ModuleType: + last_err = None + for importer in self._importers: + if not isinstance(importer, Importer): + raise TypeError( + f"{importer} is not a Importer. " + "All importers in OrderedImporter must inherit from Importer." + ) + try: + module = importer.import_module(module_name) + if self._is_torchpackage_dummy(module): + continue + return module + except ModuleNotFoundError as err: + last_err = err + + if last_err is not None: + raise last_err + else: + raise ModuleNotFoundError(module_name) + + def whichmodule(self, obj: Any, name: str) -> str: + for importer in self._importers: + module_name = importer.whichmodule(obj, name) + if module_name != "__main__": + return module_name + + return "__main__" diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/package_exporter.py b/env-llmeval/lib/python3.10/site-packages/torch/package/package_exporter.py new file mode 100644 index 0000000000000000000000000000000000000000..684b9f4fcbfa5d276f92956731d5b4b160b3e355 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/package/package_exporter.py @@ -0,0 +1,1199 @@ +import collections +import importlib.machinery +import io +import linecache +import pickletools +import platform +import types +from collections import defaultdict, OrderedDict +from dataclasses import dataclass +from enum import Enum +from importlib.machinery import SourceFileLoader +from pathlib import Path +from typing import ( + Any, + BinaryIO, + Callable, + cast, + DefaultDict, + Dict, + List, + Optional, + Sequence, + Set, + Union, +) + +import torch +from torch.serialization import location_tag, normalize_storage_type +from torch.types import Storage +from torch.utils.hooks import RemovableHandle + +from ._digraph import DiGraph +from ._importlib import _normalize_path +from ._mangling import demangle, is_mangled +from ._package_pickler import create_pickler +from ._stdlib import is_stdlib_module +from .find_file_dependencies import find_files_source_depends_on +from .glob_group import GlobGroup, GlobPattern +from .importer import Importer, OrderedImporter, sys_importer + +__all__ = [ + "PackagingErrorReason", + "EmptyMatchError", + "PackagingError", + "PackageExporter", +] + +_gate_torchscript_serialization = True + +ActionHook = Callable[["PackageExporter", str], None] + + +class _ModuleProviderAction(Enum): + """Represents one of the actions that :class:`PackageExporter` can take on a module. + + See :meth:`PackageExporter.extern` and friends for a description of what the actions do. + """ + + INTERN = 1 + EXTERN = 2 + MOCK = 3 + DENY = 4 + # Special case: when a module is mocked, PackageExporter writes out a + # `_mock` module that implements our mocking stubs. 
If we re-package code, + # we may encounter a `_mock` module from the original package. If we do, + # just ignore it and write a `_mock` module once. + REPACKAGED_MOCK_MODULE = 5 + # Special case: PackageImporter adds a fake module + # (`torch_package_importer`) that allows packaged code to access it. Don't + # re-export this. + SKIP = 6 + + +class PackagingErrorReason(Enum): + """Listing of different reasons a dependency may fail to package. + + This enum is used to provide good error messages when + :class:`PackagingError` is raised. + """ + + def __repr__(self): + return f"<{self.__class__.__name__}.{self.name}>" + + IS_EXTENSION_MODULE = ( + "Module is a C extension module. torch.package supports Python modules only." + ) + NO_DUNDER_FILE = "Module had no __file__ defined." + SOURCE_FILE_NOT_FOUND = ( + "Module had a __file__, but we could not find it in your filesystem." + ) + DEPENDENCY_RESOLUTION_FAILED = "Dependency resolution failed." + NO_ACTION = ( + "Module did not match against any action pattern. Extern, mock, or intern it." + ) + DENIED = "Module was denied by a pattern." + MOCKED_BUT_STILL_USED = ( + "Module was mocked out, but is still being used in the package. " + "Please intern or extern the mocked modules if objects are supposed to be in " + "the package." + ) + + +@dataclass +class _PatternInfo: + """Holds :class:`PackageExporter`-specific info about how to execute matches against""" + + # What action to take on a module that matches this pattern. + action: _ModuleProviderAction + # The value of `allow_empty` the user gave when specifying the pattern. + allow_empty: bool + # Whether this pattern has been matched during packaging. + was_matched: bool + + def __init__(self, action, allow_empty): + self.action = action + self.allow_empty = allow_empty + self.was_matched = False + + +class EmptyMatchError(Exception): + """This is an exception that is thrown when a mock or extern is marked as + ``allow_empty=False``, and is not matched with any module during packaging. + """ + + pass + + +class PackagingError(Exception): + """This exception is raised when there is an issue with exporting a package. + ``PackageExporter`` will attempt to gather up all the errors and present + them to you at once. + """ + + def __init__(self, dependency_graph: DiGraph, debug=False): + # Group errors by reason. + broken: Dict[PackagingErrorReason, List[str]] = defaultdict(list) + for module_name, attrs in dependency_graph.nodes.items(): + error = attrs.get("error") + if error is None: + continue + if error == PackagingErrorReason.NO_ACTION: + assert "action" not in attrs + broken[error].append(module_name) + + message = io.StringIO() + message.write("\n") + + for reason, module_names in broken.items(): + message.write(f"* {reason.value}\n") + for module_name in module_names: + message.write(f" {module_name}\n") + + # Print additional context if it's provided. + error_context = dependency_graph.nodes[module_name].get("error_context") + if error_context is not None: + message.write(f" Context: {error_context}\n") + if module_name in _DISALLOWED_MODULES: + message.write( + " Note: While we usually use modules in the python standard library " + f"from the local environment, `{module_name}` has a lot of system " + "level access and therefore can pose a security risk. We heavily " + f"recommend removing `{module_name}` from your packaged code. 
However, if that " + "is not possible, add it to the extern list by calling " + f'PackageExporter.extern("`{module_name}`")\n' + ) + if debug: + module_path = dependency_graph.first_path(module_name) + message.write( + f" A path to {module_name}: {' -> '.join(module_path)}" + ) + if not debug: + message.write("\n") + message.write( + "Set debug=True when invoking PackageExporter for a visualization of where " + "broken modules are coming from!\n" + ) + # Save the dependency graph so that tooling can get at it. + self.dependency_graph = dependency_graph + super().__init__(message.getvalue()) + + +class PackageExporter: + """Exporters allow you to write packages of code, pickled Python data, and + arbitrary binary and text resources into a self-contained package. + + Imports can load this code in a hermetic way, such that code is loaded + from the package rather than the normal Python import system. This allows + for the packaging of PyTorch model code and data so that it can be run + on a server or used in the future for transfer learning. + + The code contained in packages is copied file-by-file from the original + source when it is created, and the file format is a specially organized + zip file. Future users of the package can unzip the package, and edit the code + in order to perform custom modifications to it. + + The importer for packages ensures that code in the module can only be loaded from + within the package, except for modules explicitly listed as external using :meth:`extern`. + The file ``extern_modules`` in the zip archive lists all the modules that a package externally depends on. + This prevents "implicit" dependencies where the package runs locally because it is importing + a locally-installed package, but then fails when the package is copied to another machine. + + When source code is added to the package, the exporter can optionally scan it + for further code dependencies (``dependencies=True``). It looks for import statements, + resolves relative references to qualified module names, and performs an action specified by the user + (See: :meth:`extern`, :meth:`mock`, and :meth:`intern`). + """ + + """A importer that will be searched in order to find the modules referenced by other modules or by + pickled objects. The default module environment just uses sys_importer, which searches the Python environment. + """ + importer: Importer + + def __init__( + self, + f: Union[str, Path, BinaryIO], + importer: Union[Importer, Sequence[Importer]] = sys_importer, + debug: bool = False, + ): + """ + Create an exporter. + + Args: + f: The location to export to. Can be a ``string``/``Path`` object containing a filename + or a binary I/O object. + importer: If a single Importer is passed, use that to search for modules. + If a sequence of importers are passed, an ``OrderedImporter`` will be constructed out of them. + debug: If set to True, add path of broken modules to PackagingErrors. + """ + torch._C._log_api_usage_once("torch.package.PackageExporter") + self.debug = debug + if isinstance(f, (Path, str)): + f = str(f) + self.buffer: Optional[BinaryIO] = None + else: # is a byte buffer + self.buffer = f + + self.zip_file = torch._C.PyTorchFileWriter(f) + self.zip_file.set_min_version(6) + self._written_files: Set[str] = set() + + self.serialized_reduces: Dict[int, Any] = {} + + # A graph tracking all the modules and pickle objects added to this + # package and the dependencies between them. 
+ # - Each node is a module name (or a pickle name that looks like '') + # - Each directed edge (u, v) means u depends on v. + # - Nodes may contain metadata that describe how to write the thing to the zipfile. + self.dependency_graph = DiGraph() + self.script_module_serializer = torch._C.ScriptModuleSerializer(self.zip_file) + self.storage_context = self.script_module_serializer.storage_context() + + # These are OrderedDicts for compatibility with RemovableHandle. + # Generic OrderedDict type annotations are not present until 3.7. + # The real type signature is OrderedDict[int, Callable[[PackageExporter, str], None]] + self._extern_hooks: OrderedDict = OrderedDict() + self._mock_hooks: OrderedDict = OrderedDict() + self._intern_hooks: OrderedDict = OrderedDict() + + if isinstance(importer, Importer): + self.importer = importer + else: + if not isinstance(importer, collections.abc.Sequence): + raise TypeError( + "importer arg should be an Importer or a sequence of Importers, " + f"got {type(importer)} instead." + ) + self.importer = OrderedImporter(*importer) + + self.patterns: Dict[GlobGroup, _PatternInfo] = {} + self._unique_id = 0 + + def save_source_file( + self, module_name: str, file_or_directory: str, dependencies=True + ): + """Adds the local file system ``file_or_directory`` to the source package to provide the code + for ``module_name``. + + Args: + module_name (str): e.g. ``"my_package.my_subpackage"``, code will be saved to provide code for this package. + file_or_directory (str): the path to a file or directory of code. When a directory, all python files in the directory + are recursively copied using :meth:`save_source_file`. If a file is named ``"/__init__.py"`` the code is treated + as a package. + dependencies (bool, optional): If ``True``, we scan the source for dependencies. + """ + path = Path(file_or_directory) + if path.is_dir(): + to_save = [] # list of tuples with arguments to save_source_string + module_path = module_name.replace(".", "/") + for filename in path.glob("**/*.py"): + relative_path = filename.relative_to(path).as_posix() + archivename = module_path + "/" + relative_path + submodule_name = None + if filename.name == "__init__.py": + submodule_name = archivename[: -len("/__init__.py")].replace( + "/", "." + ) + is_package = True + else: + submodule_name = archivename[: -len(".py")].replace("/", ".") + is_package = False + + # we delay the call to save_source_string so that we record all the source files + # being provided by this directory structure _before_ attempting to resolve the dependencies + # on the source. This makes sure we don't try to copy over modules that will just get + # overwritten by this directory blob + to_save.append( + ( + submodule_name, + _read_file(str(filename)), + is_package, + dependencies, + ) + ) + + for item in to_save: + self.save_source_string(*item) + else: + is_package = path.name == "__init__.py" + self.save_source_string( + module_name, + _read_file(file_or_directory), + is_package, + dependencies, + ) + + def get_unique_id(self) -> str: + """Get an id. This id is guaranteed to only be handed out once for this package.""" + ret = str(self._unique_id) + self._unique_id += 1 + return ret + + def _get_dependencies( + self, src: str, module_name: str, is_package: bool + ) -> List[str]: + """Return all modules that this source code depends on. + + Dependencies are found by scanning the source code for import-like statements. + + Arguments: + src: The Python source code to analyze for dependencies. 
+ module_name: The name of the module that ``src`` corresponds to. + is_package: Whether this module should be treated as a package. + See :py:meth:`save_source_string` for more info. + + Returns: + A list containing modules detected as direct dependencies in + ``src``. The items in the list are guaranteed to be unique. + """ + package_name = ( + module_name if is_package else module_name.rsplit(".", maxsplit=1)[0] + ) + try: + dep_pairs = find_files_source_depends_on(src, package_name) + except Exception as e: + self.dependency_graph.add_node( + module_name, + error=PackagingErrorReason.DEPENDENCY_RESOLUTION_FAILED, + error_context=str(e), + ) + return [] + + # Use a dict to get uniquing but also deterministic order + dependencies = {} + for dep_module_name, dep_module_obj in dep_pairs: + # handle the case where someone did something like `from pack import sub` + # where `sub` is a submodule. In this case we don't have to save pack, just sub. + # this ensures we don't pick up additional dependencies on pack. + # However, in the case where `sub` is not a submodule but an object, then we do have + # to save pack. + if dep_module_obj is not None: + possible_submodule = f"{dep_module_name}.{dep_module_obj}" + if self._module_exists(possible_submodule): + dependencies[possible_submodule] = True + # we don't need to save `pack` + continue + if self._module_exists(dep_module_name): + dependencies[dep_module_name] = True + + return list(dependencies.keys()) + + def save_source_string( + self, + module_name: str, + src: str, + is_package: bool = False, + dependencies: bool = True, + ): + """Adds ``src`` as the source code for ``module_name`` in the exported package. + + Args: + module_name (str): e.g. ``my_package.my_subpackage``, code will be saved to provide code for this package. + src (str): The Python source code to save for this package. + is_package (bool, optional): If ``True``, this module is treated as a package. Packages are allowed to have submodules + (e.g. ``my_package.my_subpackage.my_subsubpackage``), and resources can be saved inside them. Defaults to ``False``. + dependencies (bool, optional): If ``True``, we scan the source for dependencies. + """ + self.dependency_graph.add_node( + module_name, + source=src, + is_package=is_package, + provided=True, + action=_ModuleProviderAction.INTERN, + ) + + if dependencies: + deps = self._get_dependencies(src, module_name, is_package) + + for dep in deps: + self.dependency_graph.add_edge(module_name, dep) + self.add_dependency(dep) + + def _write_source_string( + self, + module_name: str, + src: str, + is_package: bool = False, + ): + """Write ``src`` as the source code for ``module_name`` in the zip archive. + + Arguments are otherwise the same as for :meth:`save_source_string`. + """ + extension = "/__init__.py" if is_package else ".py" + filename = module_name.replace(".", "/") + extension + + self._write(filename, src) + + def _import_module(self, module_name: str): + try: + return self.importer.import_module(module_name) + except ModuleNotFoundError as e: + if not is_mangled(module_name): + raise + msg = ( + f"Module not found: '{module_name}'. 
Make sure the PackageImporter that " + "created this module is present in `self.importer`" + ) + raise ModuleNotFoundError(msg) from None + + def _module_exists(self, module_name: str) -> bool: + try: + self._import_module(module_name) + return True + except Exception: + return False + + def _get_source_of_module(self, module: types.ModuleType) -> Optional[str]: + filename = None + spec = getattr(module, "__spec__", None) + if spec is not None: + loader = getattr(spec, "loader", None) + if loader is not None and isinstance(loader, SourceFileLoader): + try: + filename = loader.get_filename(module.__name__) + except ImportError: + pass + if filename is None: + filename = getattr(module, "__file__", None) + if isinstance(filename, str) and filename.endswith(".py"): + return "".join(linecache.getlines(filename, module.__dict__)) + return None + + def add_dependency(self, module_name: str, dependencies=True): + """Given a module, add it to the dependency graph according to patterns + specified by the user. + """ + if ( + module_name in self.dependency_graph + and self.dependency_graph.nodes[module_name].get("provided") is True + ): + return + + # Special case: PackageImporter provides a special module called + # `torch_package_importer` that allows packaged modules to reference + # their PackageImporter. We don't want to re-export this. + if module_name == "torch_package_importer": + self.dependency_graph.add_node( + module_name, + action=_ModuleProviderAction.SKIP, + provided=True, + ) + return + + if module_name == "_mock": + self.dependency_graph.add_node( + module_name, + action=_ModuleProviderAction.REPACKAGED_MOCK_MODULE, + provided=True, + ) + return + + if self._can_implicitly_extern(module_name): + self.dependency_graph.add_node( + module_name, action=_ModuleProviderAction.EXTERN, provided=True + ) + return + + for pattern, pattern_info in self.patterns.items(): + if pattern.matches(module_name): + pattern_info.was_matched = True + self.dependency_graph.add_node( + module_name, action=pattern_info.action, provided=True + ) + + if pattern_info.action == _ModuleProviderAction.DENY: + # Requiring a denied module just adds an error to the graph. + self.dependency_graph.add_node( + module_name, error=PackagingErrorReason.DENIED + ) + + # If we are interning this module, we need to retrieve its + # dependencies and package those as well. + if pattern_info.action == _ModuleProviderAction.INTERN: + self._intern_module(module_name, dependencies) + return + + # No patterns have matched. Explicitly add this as an error. + self.dependency_graph.add_node( + module_name, error=PackagingErrorReason.NO_ACTION + ) + + def save_module(self, module_name: str, dependencies=True): + """Save the code for ``module`` into the package. Code for the module is resolved using the ``importers`` path to find the + module object, and then using its ``__file__`` attribute to find the source code. + + Args: + module_name (str): e.g. ``my_package.my_subpackage``, code will be saved to provide code + for this package. + dependencies (bool, optional): If ``True``, we scan the source for dependencies. + """ + if not isinstance(module_name, str): + raise TypeError( + "save_module() expects a string input, did you perhaps mean to pass `__name__`?" 
+ ) + + self._intern_module(module_name, dependencies) + + def _intern_module( + self, + module_name: str, + dependencies: bool, + ): + """Adds the module to the dependency graph as an interned module, + along with any metadata needed to write it out to the zipfile at serialization time. + """ + module_obj = self._import_module(module_name) + # Subtle: if the import above succeeded, either: + # 1. The module name is not mangled, and this was just a regular import, or + # 2. The module name is mangled, but one of the importers was able to + # recognize the mangling and import it. + # Either way, it is now safe to demangle this name so that we don't + # serialize the mangled version to the package. + module_name = demangle(module_name) + + # Find dependencies of this module and require them as well. + is_package = hasattr(module_obj, "__path__") + source = self._get_source_of_module(module_obj) + if source is None: + # Couldn't find a source! Add it to our dependency graph as broken + # and continue. + filename = getattr(module_obj, "__file__", None) + error_context = None + if filename is None: + packaging_error = PackagingErrorReason.NO_DUNDER_FILE + elif filename.endswith(tuple(importlib.machinery.EXTENSION_SUFFIXES)): + packaging_error = PackagingErrorReason.IS_EXTENSION_MODULE + else: + packaging_error = PackagingErrorReason.SOURCE_FILE_NOT_FOUND + error_context = f"filename: {filename}" + self.dependency_graph.add_node( + module_name, + action=_ModuleProviderAction.INTERN, + is_package=is_package, + error=packaging_error, + error_context=error_context, + provided=True, + ) + return + + self.dependency_graph.add_node( + module_name, + action=_ModuleProviderAction.INTERN, + is_package=is_package, + source=source, + provided=True, + ) + + if dependencies: + deps = self._get_dependencies(source, module_name, is_package) + for dep in deps: + self.dependency_graph.add_edge(module_name, dep) + self.add_dependency(dep) + + def save_pickle( + self, + package: str, + resource: str, + obj: Any, + dependencies: bool = True, + pickle_protocol: int = 3, + ): + """Save a python object to the archive using pickle. Equivalent to :func:`torch.save` but saving into + the archive rather than a stand-alone file. Standard pickle does not save the code, only the objects. + If ``dependencies`` is true, this method will also scan the pickled objects for which modules are required + to reconstruct them and save the relevant code. + + To be able to save an object where ``type(obj).__name__`` is ``my_module.MyObject``, + ``my_module.MyObject`` must resolve to the class of the object according to the ``importer`` order. When saving objects that + have previously been packaged, the importer's ``import_module`` method will need to be present in the ``importer`` list + for this to work. + + Args: + package (str): The name of module package this resource should go in (e.g. ``"my_package.my_subpackage"``). + resource (str): A unique name for the resource, used to identify it to load. + obj (Any): The object to save, must be picklable. + dependencies (bool, optional): If ``True``, we scan the source for dependencies. 
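+
+        Example (an illustrative sketch, not exhaustive; ``my_module.MyModel``
+        is a hypothetical picklable class defined in an interned module)::
+
+            with PackageExporter("package.pt") as exporter:
+                exporter.intern("my_module.**")
+                exporter.save_pickle("my_resources", "model.pkl", my_module.MyModel())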
+ """ + + assert (pickle_protocol == 4) or ( + pickle_protocol == 3 + ), "torch.package only supports pickle protocols 3 and 4" + + filename = self._filename(package, resource) + # Write the pickle data for `obj` + data_buf = io.BytesIO() + pickler = create_pickler(data_buf, self.importer, protocol=pickle_protocol) + pickler.persistent_id = self._persistent_id + pickler.dump(obj) + data_value = data_buf.getvalue() + mocked_modules = defaultdict(list) + name_in_dependency_graph = f"<{package}.{resource}>" + self.dependency_graph.add_node( + name_in_dependency_graph, + action=_ModuleProviderAction.INTERN, + provided=True, + is_pickle=True, + ) + + def _check_mocked_error(module: Optional[str], field: Optional[str]): + """ + checks if an object (field) comes from a mocked module and then adds + the pair to mocked_modules which contains mocked modules paired with their + list of mocked objects present in the pickle. + + We also hold the invariant that the first user defined rule that applies + to the module is the one we use. + """ + + assert isinstance(module, str) + assert isinstance(field, str) + if self._can_implicitly_extern(module): + return + for pattern, pattern_info in self.patterns.items(): + if pattern.matches(module): + if pattern_info.action == _ModuleProviderAction.MOCK: + mocked_modules[module].append(field) + return + + if dependencies: + all_dependencies = [] + module = None + field = None + memo: DefaultDict[int, str] = defaultdict(None) + memo_count = 0 + # pickletools.dis(data_value) + for opcode, arg, pos in pickletools.genops(data_value): + if pickle_protocol == 4: + if ( + opcode.name == "SHORT_BINUNICODE" + or opcode.name == "BINUNICODE" + or opcode.name == "BINUNICODE8" + ): + assert isinstance(arg, str) + module = field + field = arg + memo[memo_count] = arg + elif ( + opcode.name == "LONG_BINGET" + or opcode.name == "BINGET" + or opcode.name == "GET" + ): + assert isinstance(arg, int) + module = field + field = memo.get(arg, None) + elif opcode.name == "MEMOIZE": + memo_count += 1 + elif opcode.name == "STACK_GLOBAL": + if module is None: + # If not module was passed on in the entries preceeding this one, continue. + continue + assert isinstance(module, str) + if module not in all_dependencies: + all_dependencies.append(module) + _check_mocked_error(module, field) + elif ( + pickle_protocol == 3 and opcode.name == "GLOBAL" + ): # a global reference + assert isinstance(arg, str) + module, field = arg.split(" ") + if module not in all_dependencies: + all_dependencies.append(module) + _check_mocked_error(module, field) + for module_name in all_dependencies: + self.dependency_graph.add_edge(name_in_dependency_graph, module_name) + + """ If an object happens to come from a mocked module, then we collect these errors and spit them + out with the other errors found by package exporter. + """ + if module in mocked_modules: + assert isinstance(module, str) + fields = mocked_modules[module] + self.dependency_graph.add_node( + module_name, + action=_ModuleProviderAction.MOCK, + error=PackagingErrorReason.MOCKED_BUT_STILL_USED, + error_context=f"Object(s) '{fields}' from module `{module_name}` was mocked out during packaging " + f"but is being used in resource - `{resource}` in package `{package}`. ", + provided=True, + ) + else: + self.add_dependency(module_name) + + self._write(filename, data_value) + + def save_text(self, package: str, resource: str, text: str): + """Save text data to the package. 
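+
+        The text is stored UTF-8 encoded; this method is a thin wrapper around
+        :meth:`save_binary`.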
 + + Args: + package (str): The name of module package this resource should go in (e.g. ``"my_package.my_subpackage"``). + resource (str): A unique name for the resource, used to identify it to load. + text (str): The contents to save. + """ + return self.save_binary(package, resource, text.encode("utf-8")) + + def save_binary(self, package, resource, binary: bytes): + """Save raw bytes to the package. + + Args: + package (str): The name of module package this resource should go in (e.g. ``"my_package.my_subpackage"``). + resource (str): A unique name for the resource, used to identify it to load. + binary (bytes): The data to save. + """ + filename = self._filename(package, resource) + self._write(filename, binary) + + def register_extern_hook(self, hook: ActionHook) -> RemovableHandle: + """Registers an extern hook on the exporter. + + The hook will be called each time a module matches against an :meth:`extern` pattern. + It should have the following signature:: + + hook(exporter: PackageExporter, module_name: str) -> None + + Hooks will be called in order of registration. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + A handle that can be used to remove the added hook by calling + ``handle.remove()``. + """ + handle = RemovableHandle(self._extern_hooks) + self._extern_hooks[handle.id] = hook + return handle + + def register_mock_hook(self, hook: ActionHook) -> RemovableHandle: + """Registers a mock hook on the exporter. + + The hook will be called each time a module matches against a :meth:`mock` pattern. + It should have the following signature:: + + hook(exporter: PackageExporter, module_name: str) -> None + + Hooks will be called in order of registration. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + A handle that can be used to remove the added hook by calling + ``handle.remove()``. + """ + handle = RemovableHandle(self._mock_hooks) + self._mock_hooks[handle.id] = hook + return handle + + def register_intern_hook(self, hook: ActionHook) -> RemovableHandle: + """Registers an intern hook on the exporter. + + The hook will be called each time a module matches against an :meth:`intern` pattern. + It should have the following signature:: + + hook(exporter: PackageExporter, module_name: str) -> None + + Hooks will be called in order of registration. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + A handle that can be used to remove the added hook by calling + ``handle.remove()``. + """ + handle = RemovableHandle(self._intern_hooks) + self._intern_hooks[handle.id] = hook + return handle + + def intern( + self, + include: "GlobPattern", + *, + exclude: "GlobPattern" = (), + allow_empty: bool = True, + ): + """Specify modules that should be packaged. A module must match some ``intern`` pattern in order to be + included in the package and have its dependencies processed recursively. + + Args: + include (Union[List[str], str]): A string e.g. "my_package.my_subpackage", or list of strings + for the names of the modules to be interned. This can also be a glob-style pattern, as described in :meth:`mock`. + + exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the include string. + + allow_empty (bool): An optional flag that specifies whether the intern modules specified by this call + to the ``intern`` method must be matched to some module during packaging.
If an ``intern`` module glob + pattern is added with ``allow_empty=False``, and :meth:`close` is called (either explicitly or via ``__exit__``) + before any modules match that pattern, an exception is thrown. If ``allow_empty=True``, no such exception is thrown. + + """ + self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo( + _ModuleProviderAction.INTERN, allow_empty + ) + + def mock( + self, + include: "GlobPattern", + *, + exclude: "GlobPattern" = (), + allow_empty: bool = True, + ): + """Replace some required modules with a mock implementation. Mocked modules will return a fake + object for any attribute accessed from it. Because we copy file-by-file, the dependency resolution will sometimes + find files that are imported by model files but whose functionality is never used + (e.g. custom serialization code or training helpers). + Use this function to mock this functionality out without having to modify the original code. + + Args: + include (Union[List[str], str]): A string e.g. ``"my_package.my_subpackage"``, or list of strings + for the names of the modules to be mocked out. Strings can also be a glob-style pattern + string that may match multiple modules. Any required dependencies that match this pattern + string will be mocked out automatically. + + Examples : + ``'torch.**'`` -- matches ``torch`` and all submodules of torch, e.g. ``'torch.nn'`` + and ``'torch.nn.functional'`` + + ``'torch.*'`` -- matches ``'torch.nn'`` or ``'torch.functional'``, but not + ``'torch.nn.functional'`` + + exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the include string. + e.g. ``include='torch.**', exclude='torch.foo'`` will mock all torch packages except ``'torch.foo'``, + Default: is ``[]``. + + allow_empty (bool): An optional flag that specifies whether the mock implementation(s) specified by this call + to the :meth:`mock` method must be matched to some module during packaging. If a mock is added with + ``allow_empty=False``, and :meth:`close` is called (either explicitly or via ``__exit__``) and the mock has + not been matched to a module used by the package being exported, an exception is thrown. + If ``allow_empty=True``, no such exception is thrown. + + """ + self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo( + _ModuleProviderAction.MOCK, allow_empty + ) + + def extern( + self, + include: "GlobPattern", + *, + exclude: "GlobPattern" = (), + allow_empty: bool = True, + ): + """Include ``module`` in the list of external modules the package can import. + This will prevent dependency discovery from saving + it in the package. The importer will load an external module directly from the standard import system. + Code for extern modules must also exist in the process loading the package. + + Args: + include (Union[List[str], str]): A string e.g. ``"my_package.my_subpackage"``, or list of strings + for the names of the modules to be externed. This can also be a glob-style pattern, as + described in :meth:`mock`. + + exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the + include string. + + allow_empty (bool): An optional flag that specifies whether the extern modules specified by this call + to the ``extern`` method must be matched to some module during packaging. If an extern module glob + pattern is added with ``allow_empty=False``, and :meth:`close` is called (either explicitly or via + ``__exit__``) before any modules match that pattern, an exception is thrown. 
If ``allow_empty=True``, + no such exception is thrown. + + """ + self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo( + _ModuleProviderAction.EXTERN, allow_empty + ) + + def deny(self, include: "GlobPattern", *, exclude: "GlobPattern" = ()): + """Blocklist modules who names match the given glob patterns from the list of modules the package can import. + If a dependency on any matching packages is found, a :class:`PackagingError` is raised. + + Args: + include (Union[List[str], str]): A string e.g. ``"my_package.my_subpackage"``, or list of strings + for the names of the modules to be externed. This can also be a glob-style pattern, as described in :meth:`mock`. + + exclude (Union[List[str], str]): An optional pattern that excludes some patterns that match the include string. + """ + self.patterns[GlobGroup(include, exclude=exclude)] = _PatternInfo( + _ModuleProviderAction.DENY, allow_empty=True + ) + + def _persistent_id(self, obj): + if torch.is_storage(obj) or isinstance(obj, torch.storage.TypedStorage): + storage: Storage + if isinstance(obj, torch.storage.TypedStorage): + # TODO: Once we decide to break serialization FC, we can + # remove this case + untyped_storage = obj._untyped_storage + storage_type_str = obj.pickle_storage_type() + storage_type = getattr(torch, storage_type_str) + storage = cast(Storage, untyped_storage) + storage_numel = obj.size() + + elif isinstance(obj, torch.UntypedStorage): + untyped_storage = obj + storage = cast(Storage, untyped_storage) + storage_type = normalize_storage_type(type(storage)) + storage_numel = storage.nbytes() + else: + raise RuntimeError(f"storage type not recognized: {type(obj)}") + + location = location_tag(storage) + + # serialize storage if not already written + storage_present = self.storage_context.has_storage(storage) + storage_id = self.storage_context.get_or_add_storage(storage) + if not storage_present: + if storage.device.type != "cpu": + storage = storage.cpu() + num_bytes = storage.nbytes() + self.zip_file.write_record( + f".data/{storage_id}.storage", storage.data_ptr(), num_bytes + ) + return ("storage", storage_type, storage_id, location, storage_numel) + + if hasattr(obj, "__reduce_package__"): + if _gate_torchscript_serialization and isinstance( + obj, torch.jit.RecursiveScriptModule + ): + raise Exception( + "Serializing ScriptModules directly into a package is a beta feature. " + "To use, set global " + "`torch.package.package_exporter._gate_torchscript_serialization` to `False`." + ) + if self.serialized_reduces.get(id(obj)) is None: + self.serialized_reduces[id(obj)] = ( + "reduce_package", + id(obj), + *obj.__reduce_package__(self), + ) + + return self.serialized_reduces[id(obj)] + + return None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + # If __exit__ was called because an exception was raised, we do not + # attempt to finalize the package. Instead, control is returned to the + # caller to continue raising the exception. + if exc_type is not None: + # Do the bare minimum to leave the open buffer in a valid state. + self._finalize_zip() + return + + self.close() + + def _write(self, filename, str_or_bytes): + if filename in self._written_files: + raise AssertionError( + f"Tried to write file '{filename}', but it already exists in this archive. " + "Please file a bug." + ) + self._written_files.add(filename) + + if is_mangled(filename): + raise AssertionError( + f"Tried to save a torch.package'd module as '{filename}'. 
" + "Directly saving torch.package'd modules is not allowed." + ) + if isinstance(str_or_bytes, str): + str_or_bytes = str_or_bytes.encode("utf-8") + self.zip_file.write_record(filename, str_or_bytes, len(str_or_bytes)) + + def _validate_dependency_graph(self): + # 1. Check the graph for any errors inserted during dependency analysis. + for attrs in self.dependency_graph.nodes.values(): + if "error" in attrs: + raise PackagingError(self.dependency_graph, debug=self.debug) + + # 2. Check that all patterns for which allow_empty=False have been matched at least once. + for pattern, pattern_info in self.patterns.items(): + if not pattern_info.allow_empty and not pattern_info.was_matched: + raise EmptyMatchError( + f"Exporter did not match any modules to {pattern}, which was marked as allow_empty=False" + ) + + def _write_mock_file(self): + if "_mock.py" not in self._written_files: + mock_file = str(Path(__file__).parent / "_mock.py") + self._write_source_string("_mock", _read_file(mock_file), is_package=False) + + def _execute_dependency_graph(self): + """Takes a finalized dependency graph describing how to package all + modules and executes it, writing to the ZIP archive. + """ + self._validate_dependency_graph() + + extern_modules = [] + for module_name, attrs in self.dependency_graph.nodes.items(): + action = attrs["action"] + + if action == _ModuleProviderAction.EXTERN: + for hook in self._extern_hooks.values(): + hook(self, module_name) + + extern_modules.append(module_name) + + elif action == _ModuleProviderAction.MOCK: + for hook in self._mock_hooks.values(): + hook(self, module_name) + + self._write_mock_file() + + is_package = hasattr(self._import_module(module_name), "__path__") + self._write_source_string(module_name, _MOCK_IMPL, is_package) + + elif action == _ModuleProviderAction.INTERN: + for hook in self._intern_hooks.values(): + hook(self, module_name) + + # The node in the dependency graph contains metadata that tells us + # how to intern the module. + if "provided" not in attrs: + raise AssertionError( + f"Module was marked `intern` but not provided: {module_name}" + ) + + if attrs.get("is_pickle") is True: + # This node came from save_pickle, we don't need to write any source for it. + continue + + is_package = attrs["is_package"] + source = attrs["source"] + self._write_source_string(module_name, source, is_package) + + elif action == _ModuleProviderAction.REPACKAGED_MOCK_MODULE: + self._write_mock_file() + elif action == _ModuleProviderAction.SKIP: + continue + else: + raise AssertionError( + f"Invalid action: {module_name}, {action}. Please report a bug to PyTorch." + ) + + extern_file_contents = "\n".join(extern_modules) + "\n" + self._write(".data/extern_modules", extern_file_contents) + + def _write_python_version(self): + """Writes the python version that the package was created with to .data/python_version""" + self._write(".data/python_version", platform.python_version()) + + def close(self): + """Write the package to the filesystem. Any calls after :meth:`close` are now invalid. + It is preferable to use resource guard syntax instead:: + + with PackageExporter("file.zip") as e: + ... 
+ """ + self._execute_dependency_graph() + self._write_python_version() + + self.script_module_serializer.write_files() + self._finalize_zip() + + def _finalize_zip(self): + """Called at the very end of packaging to leave the zipfile in a closed but valid state.""" + del self.zip_file + if self.buffer: + self.buffer.flush() + + def _filename(self, package, resource): + package_path = package.replace(".", "/") + resource = _normalize_path(resource) + return f"{package_path}/{resource}" + + def _can_implicitly_extern(self, module_name: str): + top_level_package_name = module_name.partition(".")[0] + return top_level_package_name == "torch" or ( + top_level_package_name not in _DISALLOWED_MODULES + and is_stdlib_module(top_level_package_name) + ) + + def dependency_graph_string(self) -> str: + """Returns digraph string representation of dependencies in package. + + Returns: + A string representation of dependencies in package. + """ + return self.dependency_graph.to_dot() + + def _nodes_with_action_type( + self, action: Optional[_ModuleProviderAction] + ) -> List[str]: + result = [] + for name, node_dict in self.dependency_graph.nodes.items(): + node_action = node_dict.get("action", None) + if node_action == action and "is_pickle" not in node_dict: + result.append(name) + result.sort() + return result + + def externed_modules(self) -> List[str]: + """Return all modules that are currently externed. + + Returns: + A list containing the names of modules which will be + externed in this package. + """ + return self._nodes_with_action_type(_ModuleProviderAction.EXTERN) + + def interned_modules(self) -> List[str]: + """Return all modules that are currently interned. + + Returns: + A list containing the names of modules which will be + interned in this package. + """ + return self._nodes_with_action_type(_ModuleProviderAction.INTERN) + + def mocked_modules(self) -> List[str]: + """Return all modules that are currently mocked. + + Returns: + A list containing the names of modules which will be + mocked in this package. + """ + return self._nodes_with_action_type(_ModuleProviderAction.MOCK) + + def denied_modules(self) -> List[str]: + """Return all modules that are currently denied. + + Returns: + A list containing the names of modules which will be + denied in this package. + """ + return self._nodes_with_action_type(_ModuleProviderAction.DENY) + + def get_rdeps(self, module_name: str) -> List[str]: + """Return a list of all modules which depend on the module ``module_name``. + + Returns: + A list containing the names of modules which depend on ``module_name``. + """ + if module_name in self.dependency_graph._pred.keys(): + return list(self.dependency_graph._pred[module_name].keys()) + else: + return [] + + def all_paths(self, src: str, dst: str) -> str: + """Return a dot representation of the subgraph + that has all paths from src to dst. + + Returns: + A dot representation containing all paths from src to dst. + (https://graphviz.org/doc/info/lang.html) + """ + return self.dependency_graph.all_paths(src, dst) + + +# even though these are in the standard library, we do not allow them to be +# automatically externed since they offer a lot of system level access +_DISALLOWED_MODULES = ["sys", "io"] + +_MOCK_IMPL = """\ +from _mock import MockedObject +def __getattr__(attr: str): + return MockedObject(__name__ + '.' 
+ attr, _suppress_err=True) +""" + + +def _read_file(filename: str) -> str: + with open(filename, "rb") as f: + b = f.read() + return b.decode("utf-8") diff --git a/env-llmeval/lib/python3.10/site-packages/torch/package/package_importer.py b/env-llmeval/lib/python3.10/site-packages/torch/package/package_importer.py new file mode 100644 index 0000000000000000000000000000000000000000..cc0fe85d20cff3e89b070800bd6070229b474367 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/package/package_importer.py @@ -0,0 +1,760 @@ +import builtins +import importlib +import importlib.machinery +import inspect +import io +import linecache +import os.path +import types +from contextlib import contextmanager +from pathlib import Path +from typing import Any, BinaryIO, Callable, cast, Dict, Iterable, List, Optional, Union +from weakref import WeakValueDictionary + +import torch +from torch.serialization import _get_restore_location, _maybe_decode_ascii + +from ._directory_reader import DirectoryReader +from ._importlib import ( + _calc___package__, + _normalize_line_endings, + _normalize_path, + _resolve_name, + _sanity_check, +) +from ._mangling import demangle, PackageMangler +from ._package_unpickler import PackageUnpickler +from .file_structure_representation import _create_directory_from_file_list, Directory +from .glob_group import GlobPattern +from .importer import Importer + +__all__ = ["PackageImporter"] + + +# This is a list of imports that are implicitly allowed even if they haven't +# been marked as extern. This is to work around the fact that Torch implicitly +# depends on numpy and package can't track it. +# https://github.com/pytorch/MultiPy/issues/46 +IMPLICIT_IMPORT_ALLOWLIST: Iterable[str] = [ + "numpy", + "numpy.core", + "numpy.core._multiarray_umath", + # FX GraphModule might depend on builtins module and users usually + # don't extern builtins. Here we import it here by default. + "builtins", +] + + +class PackageImporter(Importer): + """Importers allow you to load code written to packages by :class:`PackageExporter`. + Code is loaded in a hermetic way, using files from the package + rather than the normal python import system. This allows + for the packaging of PyTorch model code and data so that it can be run + on a server or used in the future for transfer learning. + + The importer for packages ensures that code in the module can only be loaded from + within the package, except for modules explicitly listed as external during export. + The file ``extern_modules`` in the zip archive lists all the modules that a package externally depends on. + This prevents "implicit" dependencies where the package runs locally because it is importing + a locally-installed package, but then fails when the package is copied to another machine. + """ + + """The dictionary of already loaded modules from this package, equivalent to ``sys.modules`` but + local to this importer. + """ + + modules: Dict[str, types.ModuleType] + + def __init__( + self, + file_or_buffer: Union[str, torch._C.PyTorchFileReader, Path, BinaryIO], + module_allowed: Callable[[str], bool] = lambda module_name: True, + ): + """Open ``file_or_buffer`` for importing. This checks that the imported package only requires modules + allowed by ``module_allowed`` + + Args: + file_or_buffer: a file-like object (has to implement :meth:`read`, :meth:`readline`, :meth:`tell`, and :meth:`seek`), + a string, or an ``os.PathLike`` object containing a filename. 
 + module_allowed (Callable[[str], bool], optional): A method to determine if an externally provided module + should be allowed. Can be used to ensure packages loaded do not depend on modules that the server + does not support. Defaults to allowing anything. + + Raises: + ImportError: If the package will use a disallowed module. + """ + torch._C._log_api_usage_once("torch.package.PackageImporter") + + self.zip_reader: Any + if isinstance(file_or_buffer, torch._C.PyTorchFileReader): + self.filename = "" + self.zip_reader = file_or_buffer + elif isinstance(file_or_buffer, (Path, str)): + self.filename = str(file_or_buffer) + if not os.path.isdir(self.filename): + self.zip_reader = torch._C.PyTorchFileReader(self.filename) + else: + self.zip_reader = DirectoryReader(self.filename) + else: + self.filename = "" + self.zip_reader = torch._C.PyTorchFileReader(file_or_buffer) + + torch._C._log_api_usage_metadata( + "torch.package.PackageImporter.metadata", + { + "serialization_id": self.zip_reader.serialization_id(), + "file_name": self.filename, + }, + ) + + self.root = _PackageNode(None) + self.modules = {} + self.extern_modules = self._read_extern() + + for extern_module in self.extern_modules: + if not module_allowed(extern_module): + raise ImportError( + f"package '{file_or_buffer}' needs the external module '{extern_module}' " + f"but that module has been disallowed" + ) + self._add_extern(extern_module) + + for fname in self.zip_reader.get_all_records(): + self._add_file(fname) + + self.patched_builtins = builtins.__dict__.copy() + self.patched_builtins["__import__"] = self.__import__ + # Allow packaged modules to reference their PackageImporter + self.modules["torch_package_importer"] = self # type: ignore[assignment] + + self._mangler = PackageMangler() + + # used for reduce deserialization + self.storage_context: Any = None + self.last_map_location = None + + # used for torch.serialization._load + self.Unpickler = lambda *args, **kwargs: PackageUnpickler(self, *args, **kwargs) + + def import_module(self, name: str, package=None): + """Load a module from the package if it hasn't already been loaded, and then return + the module. Modules are loaded locally + to the importer and will appear in ``self.modules`` rather than ``sys.modules``. + + Args: + name (str): Fully qualified name of the module to load. + package ([type], optional): Unused, but present to match the signature of importlib.import_module. Defaults to ``None``. + + Returns: + types.ModuleType: The (possibly already) loaded module. + """ + # We should always be able to support importing modules from this package. + # This is to support something like: + # obj = importer.load_pickle(...) + # importer.import_module(obj.__module__) <- this string will be mangled + # + # Note that _mangler.demangle will not demangle any module names + # produced by a different PackageImporter instance. + name = self._mangler.demangle(name) + + return self._gcd_import(name) + + def load_binary(self, package: str, resource: str) -> bytes: + """Load raw bytes. + + Args: + package (str): The name of module package (e.g. ``"my_package.my_subpackage"``). + resource (str): The unique name for the resource. + + Returns: + bytes: The loaded data. + """ + + path = self._zipfile_path(package, resource) + return self.zip_reader.get_record(path) + + def load_text( + self, + package: str, + resource: str, + encoding: str = "utf-8", + errors: str = "strict", + ) -> str: + """Load a string. + + Args: + package (str): The name of module package (e.g.
``"my_package.my_subpackage"``). + resource (str): The unique name for the resource. + encoding (str, optional): Passed to ``decode``. Defaults to ``'utf-8'``. + errors (str, optional): Passed to ``decode``. Defaults to ``'strict'``. + + Returns: + str: The loaded text. + """ + data = self.load_binary(package, resource) + return data.decode(encoding, errors) + + def load_pickle(self, package: str, resource: str, map_location=None) -> Any: + """Unpickles the resource from the package, loading any modules that are needed to construct the objects + using :meth:`import_module`. + + Args: + package (str): The name of module package (e.g. ``"my_package.my_subpackage"``). + resource (str): The unique name for the resource. + map_location: Passed to `torch.load` to determine how tensors are mapped to devices. Defaults to ``None``. + + Returns: + Any: The unpickled object. + """ + pickle_file = self._zipfile_path(package, resource) + restore_location = _get_restore_location(map_location) + loaded_storages = {} + loaded_reduces = {} + storage_context = torch._C.DeserializationStorageContext() + + def load_tensor(dtype, size, key, location, restore_location): + name = f"{key}.storage" + + if storage_context.has_storage(name): + storage = storage_context.get_storage(name, dtype)._typed_storage() + else: + tensor = self.zip_reader.get_storage_from_record( + ".data/" + name, size, dtype + ) + if isinstance(self.zip_reader, torch._C.PyTorchFileReader): + storage_context.add_storage(name, tensor) + storage = tensor._typed_storage() + loaded_storages[key] = restore_location(storage, location) + + def persistent_load(saved_id): + assert isinstance(saved_id, tuple) + typename = _maybe_decode_ascii(saved_id[0]) + data = saved_id[1:] + + if typename == "storage": + storage_type, key, location, size = data + dtype = storage_type.dtype + + if key not in loaded_storages: + load_tensor( + dtype, + size, + key, + _maybe_decode_ascii(location), + restore_location, + ) + storage = loaded_storages[key] + # TODO: Once we decide to break serialization FC, we can + # stop wrapping with TypedStorage + return torch.storage.TypedStorage( + wrap_storage=storage._untyped_storage, dtype=dtype, _internal=True + ) + elif typename == "reduce_package": + # to fix BC breaking change, objects on this load path + # will be loaded multiple times erroneously + if len(data) == 2: + func, args = data + return func(self, *args) + reduce_id, func, args = data + if reduce_id not in loaded_reduces: + loaded_reduces[reduce_id] = func(self, *args) + return loaded_reduces[reduce_id] + else: + f"Unknown typename for persistent_load, expected 'storage' or 'reduce_package' but got '{typename}'" + + # Load the data (which may in turn use `persistent_load` to load tensors) + data_file = io.BytesIO(self.zip_reader.get_record(pickle_file)) + unpickler = self.Unpickler(data_file) + unpickler.persistent_load = persistent_load # type: ignore[assignment] + + @contextmanager + def set_deserialization_context(): + # to let reduce_package access deserializaiton context + self.storage_context = storage_context + self.last_map_location = map_location + try: + yield + finally: + self.storage_context = None + self.last_map_location = None + + with set_deserialization_context(): + result = unpickler.load() + + # TODO from zdevito: + # This stateful weird function will need to be removed in our efforts + # to unify the format. 
It has a race condition if multiple python + # threads try to read independent files + torch._utils._validate_loaded_sparse_tensors() + + return result + + def id(self): + """ + Returns internal identifier that torch.package uses to distinguish :class:`PackageImporter` instances. + Looks like:: + + + """ + return self._mangler.parent_name() + + def file_structure( + self, *, include: "GlobPattern" = "**", exclude: "GlobPattern" = () + ) -> Directory: + """Returns a file structure representation of package's zipfile. + + Args: + include (Union[List[str], str]): An optional string e.g. ``"my_package.my_subpackage"``, or optional list of strings + for the names of the files to be included in the zipfile representation. This can also be + a glob-style pattern, as described in :meth:`PackageExporter.mock` + + exclude (Union[List[str], str]): An optional pattern that excludes files whose name match the pattern. + + Returns: + :class:`Directory` + """ + return _create_directory_from_file_list( + self.filename, self.zip_reader.get_all_records(), include, exclude + ) + + def python_version(self): + """Returns the version of python that was used to create this package. + + Note: this function is experimental and not Forward Compatible. The plan is to move this into a lock + file later on. + + Returns: + :class:`Optional[str]` a python version e.g. 3.8.9 or None if no version was stored with this package + """ + python_version_path = ".data/python_version" + return ( + self.zip_reader.get_record(python_version_path).decode("utf-8").strip() + if self.zip_reader.has_record(python_version_path) + else None + ) + + def _read_extern(self): + return ( + self.zip_reader.get_record(".data/extern_modules") + .decode("utf-8") + .splitlines(keepends=False) + ) + + def _make_module( + self, name: str, filename: Optional[str], is_package: bool, parent: str + ): + mangled_filename = self._mangler.mangle(filename) if filename else None + spec = importlib.machinery.ModuleSpec( + name, + self, # type: ignore[arg-type] + origin="", + is_package=is_package, + ) + module = importlib.util.module_from_spec(spec) + self.modules[name] = module + module.__name__ = self._mangler.mangle(name) + ns = module.__dict__ + ns["__spec__"] = spec + ns["__loader__"] = self + ns["__file__"] = mangled_filename + ns["__cached__"] = None + ns["__builtins__"] = self.patched_builtins + ns["__torch_package__"] = True + + # Add this module to our private global registry. It should be unique due to mangling. + assert module.__name__ not in _package_imported_modules + _package_imported_modules[module.__name__] = module + + # pre-emptively install on the parent to prevent IMPORT_FROM from trying to + # access sys.modules + self._install_on_parent(parent, name, module) + + if filename is not None: + assert mangled_filename is not None + # pre-emptively install the source in `linecache` so that stack traces, + # `inspect`, etc. work. 
+ assert filename not in linecache.cache # type: ignore[attr-defined] + linecache.lazycache(mangled_filename, ns) + + code = self._compile_source(filename, mangled_filename) + exec(code, ns) + + return module + + def _load_module(self, name: str, parent: str): + cur: _PathNode = self.root + for atom in name.split("."): + if not isinstance(cur, _PackageNode) or atom not in cur.children: + if name in IMPLICIT_IMPORT_ALLOWLIST: + module = self.modules[name] = importlib.import_module(name) + return module + raise ModuleNotFoundError( + f'No module named "{name}" in self-contained archive "{self.filename}"' + f" and the module is also not in the list of allowed external modules: {self.extern_modules}", + name=name, + ) + cur = cur.children[atom] + if isinstance(cur, _ExternNode): + module = self.modules[name] = importlib.import_module(name) + return module + return self._make_module(name, cur.source_file, isinstance(cur, _PackageNode), parent) # type: ignore[attr-defined] + + def _compile_source(self, fullpath: str, mangled_filename: str): + source = self.zip_reader.get_record(fullpath) + source = _normalize_line_endings(source) + return compile(source, mangled_filename, "exec", dont_inherit=True) + + # note: named `get_source` so that linecache can find the source + # when this is the __loader__ of a module. + def get_source(self, module_name) -> str: + # linecache calls `get_source` with the `module.__name__` as the argument, so we must demangle it here. + module = self.import_module(demangle(module_name)) + return self.zip_reader.get_record(demangle(module.__file__)).decode("utf-8") + + # note: named `get_resource_reader` so that importlib.resources can find it. + # This is otherwise considered an internal method. + def get_resource_reader(self, fullname): + try: + package = self._get_package(fullname) + except ImportError: + return None + if package.__loader__ is not self: + return None + return _PackageResourceReader(self, fullname) + + def _install_on_parent(self, parent: str, name: str, module: types.ModuleType): + if not parent: + return + # Set the module as an attribute on its parent. + parent_module = self.modules[parent] + if parent_module.__loader__ is self: + setattr(parent_module, name.rpartition(".")[2], module) + + # note: copied from cpython's import code, with call to create module replaced with _make_module + def _do_find_and_load(self, name): + path = None + parent = name.rpartition(".")[0] + module_name_no_parent = name.rpartition(".")[-1] + if parent: + if parent not in self.modules: + self._gcd_import(parent) + # Crazy side-effects! + if name in self.modules: + return self.modules[name] + parent_module = self.modules[parent] + + try: + path = parent_module.__path__ # type: ignore[attr-defined] + + except AttributeError: + # when we attempt to import a package only containing pybinded files, + # the parent directory isn't always a package as defined by python, + # so we search if the package is actually there or not before calling the error. + if isinstance( + parent_module.__loader__, + importlib.machinery.ExtensionFileLoader, + ): + if name not in self.extern_modules: + msg = ( + _ERR_MSG + + "; {!r} is a c extension module which was not externed. C extension modules \ + need to be externed by the PackageExporter in order to be used as we do not support interning them.}." 
+ ).format(name, name) + raise ModuleNotFoundError(msg, name=name) from None + if not isinstance( + parent_module.__dict__.get(module_name_no_parent), + types.ModuleType, + ): + msg = ( + _ERR_MSG + + "; {!r} is a c extension package which does not contain {!r}." + ).format(name, parent, name) + raise ModuleNotFoundError(msg, name=name) from None + else: + msg = (_ERR_MSG + "; {!r} is not a package").format(name, parent) + raise ModuleNotFoundError(msg, name=name) from None + + module = self._load_module(name, parent) + + self._install_on_parent(parent, name, module) + + return module + + # note: copied from cpython's import code + def _find_and_load(self, name): + module = self.modules.get(name, _NEEDS_LOADING) + if module is _NEEDS_LOADING: + return self._do_find_and_load(name) + + if module is None: + message = f"import of {name} halted; None in sys.modules" + raise ModuleNotFoundError(message, name=name) + + # To handle https://github.com/pytorch/pytorch/issues/57490, where std's + # creation of fake submodules via the hacking of sys.modules is not import + # friendly + if name == "os": + self.modules["os.path"] = cast(Any, module).path + elif name == "typing": + self.modules["typing.io"] = cast(Any, module).io + self.modules["typing.re"] = cast(Any, module).re + + return module + + def _gcd_import(self, name, package=None, level=0): + """Import and return the module based on its name, the package the call is + being made from, and the level adjustment. + + This function represents the greatest common denominator of functionality + between import_module and __import__. This includes setting __package__ if + the loader did not. + + """ + _sanity_check(name, package, level) + if level > 0: + name = _resolve_name(name, package, level) + + return self._find_and_load(name) + + # note: copied from cpython's import code + def _handle_fromlist(self, module, fromlist, *, recursive=False): + """Figure out what __import__ should return. + + The import_ parameter is a callable which takes the name of module to + import. It is required to decouple the function from assuming importlib's + import implementation is desired. + + """ + module_name = demangle(module.__name__) + # The hell that is fromlist ... + # If a package was imported, try to import stuff from fromlist. + if hasattr(module, "__path__"): + for x in fromlist: + if not isinstance(x, str): + if recursive: + where = module_name + ".__all__" + else: + where = "``from list''" + raise TypeError( + f"Item in {where} must be str, " f"not {type(x).__name__}" + ) + elif x == "*": + if not recursive and hasattr(module, "__all__"): + self._handle_fromlist(module, module.__all__, recursive=True) + elif not hasattr(module, x): + from_name = f"{module_name}.{x}" + try: + self._gcd_import(from_name) + except ModuleNotFoundError as exc: + # Backwards-compatibility dictates we ignore failed + # imports triggered by fromlist for modules that don't + # exist. + if ( + exc.name == from_name + and self.modules.get(from_name, _NEEDS_LOADING) is not None + ): + continue + raise + return module + + def __import__(self, name, globals=None, locals=None, fromlist=(), level=0): + if level == 0: + module = self._gcd_import(name) + else: + globals_ = globals if globals is not None else {} + package = _calc___package__(globals_) + module = self._gcd_import(name, package, level) + if not fromlist: + # Return up to the first dot in 'name'. This is complicated by the fact + # that 'name' may be relative. 
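+            # For example, a plain ``import a.b.c`` (level 0, empty fromlist)
+            # returns the module bound to ``a``, mirroring the builtin
+            # ``__import__``; ``a.b.c`` here is only an illustrative name.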
+ if level == 0: + return self._gcd_import(name.partition(".")[0]) + elif not name: + return module + else: + # Figure out where to slice the module's name up to the first dot + # in 'name'. + cut_off = len(name) - len(name.partition(".")[0]) + # Slice end needs to be positive to alleviate need to special-case + # when ``'.' not in name``. + module_name = demangle(module.__name__) + return self.modules[module_name[: len(module_name) - cut_off]] + else: + return self._handle_fromlist(module, fromlist) + + def _get_package(self, package): + """Take a package name or module object and return the module. + + If a name, the module is imported. If the passed or imported module + object is not a package, raise an exception. + """ + if hasattr(package, "__spec__"): + if package.__spec__.submodule_search_locations is None: + raise TypeError(f"{package.__spec__.name!r} is not a package") + else: + return package + else: + module = self.import_module(package) + if module.__spec__.submodule_search_locations is None: + raise TypeError(f"{package!r} is not a package") + else: + return module + + def _zipfile_path(self, package, resource=None): + package = self._get_package(package) + assert package.__loader__ is self + name = demangle(package.__name__) + if resource is not None: + resource = _normalize_path(resource) + return f"{name.replace('.', '/')}/{resource}" + else: + return f"{name.replace('.', '/')}" + + def _get_or_create_package( + self, atoms: List[str] + ) -> "Union[_PackageNode, _ExternNode]": + cur = self.root + for i, atom in enumerate(atoms): + node = cur.children.get(atom, None) + if node is None: + node = cur.children[atom] = _PackageNode(None) + if isinstance(node, _ExternNode): + return node + if isinstance(node, _ModuleNode): + name = ".".join(atoms[:i]) + raise ImportError( + f"inconsistent module structure. module {name} is not a package, but has submodules" + ) + assert isinstance(node, _PackageNode) + cur = node + return cur + + def _add_file(self, filename: str): + """Assembles a Python module out of the given file. Will ignore files in the .data directory. + + Args: + filename (str): the name of the file inside of the package archive to be added + """ + *prefix, last = filename.split("/") + if len(prefix) > 1 and prefix[0] == ".data": + return + package = self._get_or_create_package(prefix) + if isinstance(package, _ExternNode): + raise ImportError( + f"inconsistent module structure. package contains a module file {filename}" + f" that is a subpackage of a module marked external." + ) + if last == "__init__.py": + package.source_file = filename + elif last.endswith(".py"): + package_name = last[: -len(".py")] + package.children[package_name] = _ModuleNode(filename) + + def _add_extern(self, extern_name: str): + *prefix, last = extern_name.split(".") + package = self._get_or_create_package(prefix) + if isinstance(package, _ExternNode): + return # the shorter extern covers this extern case + package.children[last] = _ExternNode() + + +_NEEDS_LOADING = object() +_ERR_MSG_PREFIX = "No module named " +_ERR_MSG = _ERR_MSG_PREFIX + "{!r}" + + +class _PathNode: + pass + + +class _PackageNode(_PathNode): + def __init__(self, source_file: Optional[str]): + self.source_file = source_file + self.children: Dict[str, _PathNode] = {} + + +class _ModuleNode(_PathNode): + __slots__ = ["source_file"] + + def __init__(self, source_file: str): + self.source_file = source_file + + +class _ExternNode(_PathNode): + pass + + +# A private global registry of all modules that have been package-imported. 
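+# A WeakValueDictionary is used so that (as far as we can tell) the registry does
+# not by itself keep packaged modules alive; entries disappear once all other
+# references to a module are gone.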
+_package_imported_modules: WeakValueDictionary = WeakValueDictionary() + +# `inspect` by default only looks in `sys.modules` to find source files for classes. +# Patch it to check our private registry of package-imported modules as well. +_orig_getfile = inspect.getfile + + +def _patched_getfile(object): + if inspect.isclass(object): + if object.__module__ in _package_imported_modules: + return _package_imported_modules[object.__module__].__file__ + return _orig_getfile(object) + + +inspect.getfile = _patched_getfile + + +class _PackageResourceReader: + """Private class used to support PackageImporter.get_resource_reader(). + + Conforms to the importlib.abc.ResourceReader interface. Allowed to access + the innards of PackageImporter. + """ + + def __init__(self, importer, fullname): + self.importer = importer + self.fullname = fullname + + def open_resource(self, resource): + from io import BytesIO + + return BytesIO(self.importer.load_binary(self.fullname, resource)) + + def resource_path(self, resource): + # The contract for resource_path is that it either returns a concrete + # file system path or raises FileNotFoundError. + if isinstance( + self.importer.zip_reader, DirectoryReader + ) and self.importer.zip_reader.has_record( + os.path.join(self.fullname, resource) + ): + return os.path.join( + self.importer.zip_reader.directory, self.fullname, resource + ) + raise FileNotFoundError + + def is_resource(self, name): + path = self.importer._zipfile_path(self.fullname, name) + return self.importer.zip_reader.has_record(path) + + def contents(self): + from pathlib import Path + + filename = self.fullname.replace(".", "/") + + fullname_path = Path(self.importer._zipfile_path(self.fullname)) + files = self.importer.zip_reader.get_all_records() + subdirs_seen = set() + for filename in files: + try: + relative = Path(filename).relative_to(fullname_path) + except ValueError: + continue + # If the path of the file (which is relative to the top of the zip + # namespace), relative to the package given when the resource + # reader was created, has a parent, then it's a name in a + # subdirectory and thus we skip it. + parent_name = relative.parent.name + if len(parent_name) == 0: + yield relative.name + elif parent_name not in subdirs_seen: + subdirs_seen.add(parent_name) + yield parent_name