Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See raw diff.
- env-llmeval/lib/python3.10/site-packages/torch/_lazy/closure.py +134 -0
- env-llmeval/lib/python3.10/site-packages/torch/_lazy/device_context.py +25 -0
- env-llmeval/lib/python3.10/site-packages/torch/_lazy/ir_cache.py +13 -0
- env-llmeval/lib/python3.10/site-packages/torch/_lazy/metrics.py +21 -0
- env-llmeval/lib/python3.10/site-packages/torch/_lazy/ts_backend.py +6 -0
- env-llmeval/lib/python3.10/site-packages/torch/cpu/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/__init__.py +1 -0
- env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/autocast_mode.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/autocast_mode.py +43 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__init__.py +39 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__init__.pyi +15 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/_functional.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adadelta.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adagrad.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adam.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adamax.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adamw.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/asgd.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/lbfgs.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/lr_scheduler.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/nadam.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/optimizer.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/radam.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/rmsprop.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/rprop.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/sgd.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/sparse_adam.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/swa_utils.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/_functional.py +79 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/_multi_tensor/__init__.py +28 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/_multi_tensor/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/adadelta.pyi +11 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/adagrad.py +383 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/adagrad.pyi +12 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/adam.py +658 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/adam.pyi +22 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/adamw.pyi +22 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/lbfgs.py +478 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/nadam.py +473 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/nadam.pyi +15 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/radam.pyi +14 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/rmsprop.pyi +13 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/rprop.py +335 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/rprop.pyi +12 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/sparse_adam.py +154 -0
- env-llmeval/lib/python3.10/site-packages/torch/optim/swa_utils.pyi +32 -0
- env-llmeval/lib/python3.10/site-packages/torch/package/__init__.py +12 -0
- env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/__init__.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/torch/_lazy/closure.py
ADDED
@@ -0,0 +1,134 @@
import os
import threading
from queue import Empty as EmptyQueue, Queue

from torch._lazy.device_context import get_device_context


class ClosureHandler:
    def __init__(self):
        pass

    def run(self, closure):
        """Run closure function

        Args:
        closure: callable function to run
        """
        closure()

    def __call__(self, closures):
        for closure in closures:
            self.run(closure)


class AsyncClosureHandler(ClosureHandler):
    """Handler for Asynchronous Step Closures
    Args:
        max_queue_size: The maximum length of the closure queue after which
        the training loop will block until closures are evaluated.
        By default, a reasonable limit of a maximum of 100 on the queue.
        This value can be set using the `XLA_MAX_ASYNC_QUEUE` environment
        variable.
    """

    def __init__(self, max_queue_size=100):
        super().__init__()
        self._closure_queue: Queue = Queue(
            int(os.environ.get("LTC_MAX_ASYNC_QUEUE", max_queue_size))
        )
        self._closure_exception: Queue = Queue()
        self._closure_lock = threading.Lock()
        self._closure_event_loop_finished = threading.Event()
        self._closure_event_loop = None

    def start_event_loop(self):
        """Start closure event loop if not started"""
        if self._closure_event_loop is None:

            def event_loop():
                # Run loop until closure event is set and closure queue is empty
                while True:
                    try:
                        closure = self._closure_queue.get(block=True, timeout=3)
                        closure()
                        self._closure_queue.task_done()
                    except EmptyQueue:
                        with self._closure_lock:
                            if self._closure_queue.empty():
                                self._closure_event_loop_finished.set()
                                return
                    except Exception as e:
                        self._closure_exception.put(e)
                        return

            self._closure_event_loop = threading.Thread(target=event_loop)
            self._closure_event_loop.start()

    def run(self, closure):
        with self._closure_lock:
            self._closure_queue.put(closure, block=True)
            if (
                self._closure_event_loop is None
                or not self._closure_event_loop.is_alive()
            ):
                try:
                    e = self._closure_exception.get(block=False)
                    raise RuntimeError(
                        "Cannot run asynchronous closure due to previously raised exception"
                    ) from e
                except EmptyQueue:
                    self._closure_event_loop = None
                    self.start_event_loop()


def add_step_closure(closure, args=(), run_async=False):
    """Adds a closure to the list of the ones to be run at the end of the step.
    Many times during model training there is the need to print/report (print to
    console, post to tensorboard, etc...) information which require the content of
    intermediary tensors to be inspected.
    Inspecting different tensors content in different points of the model code
    requires many executions and typically causes performance issues.
    Adding a step closure will ensure that it will be run after the barrier, when
    all the live tensors will be already materialized to device data.
    Live tensors which will include the ones captured by the closure arguments.
    So using `add_step_closure()` will ensure a single execution will be
    performed, even when multiple closures are queued, requiring multiple tensors
    to be inspected.
    Step closures will be run sequentially in the order they have been queued.
    Note that even though using this API the execution will be optimized, it is
    advised to throttle the printing/reporting events once every N steps.
    Args:
      closure (callable): The function to be called.
      args (tuple): The arguments to be passed to the closure.
      run_async: If True, run the closure asynchronously.
    """
    devctx = get_device_context()
    closures_type = "async_step_closures" if run_async else "step_closures"
    step_closures = getattr(devctx, closures_type, None)
    if step_closures is None:
        step_closures = []
        setattr(devctx, closures_type, step_closures)
    step_closures.append(lambda a=args: closure(*a))


def run_step_closures():
    devctx = get_device_context()
    async_step_closures = getattr(devctx, "async_step_closures", None)
    if async_step_closures is not None:
        devctx.async_step_closures = []
        async_closure_handler = getattr(devctx, "async_closure_handler", None)
        if async_closure_handler is None:
            async_closure_handler = AsyncClosureHandler()
            devctx.async_closure_handler = async_closure_handler
        async_closure_handler(async_step_closures)

    step_closures = getattr(devctx, "step_closures", None)
    if step_closures is not None:
        devctx.step_closures = []
        closure_handler = getattr(devctx, "closure_handler", None)
        if closure_handler is None:
            closure_handler = ClosureHandler()
            devctx.closure_handler = closure_handler
        closure_handler(step_closures)
    return devctx
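As a usage sketch (not part of the diff): a closure queued with `add_step_closure` runs after the step barrier, once live tensors are materialized. The `torch._lazy.mark_step()` call below is an assumption about this build's step-barrier entry point, and the whole flow assumes a torch build with the lazy-tensor backend available.

# Hypothetical wiring of the helpers above.
import torch._lazy
from torch._lazy.closure import add_step_closure, run_step_closures

def log_step(step_num):
    # Runs after the barrier, so inspecting tensor contents here is cheap.
    print(f"step {step_num} done")

add_step_closure(log_step, args=(1,))
torch._lazy.mark_step()  # step barrier (assumed entry point)
run_step_closures()      # drains queued closures in FIFO order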
env-llmeval/lib/python3.10/site-packages/torch/_lazy/device_context.py
ADDED
@@ -0,0 +1,25 @@
import threading
from typing import Any, Dict

import torch._C._lazy


class DeviceContext:
    _CONTEXTS: Dict[str, Any] = dict()
    _CONTEXTS_LOCK = threading.Lock()

    def __init__(self, device):
        self.device = device


def get_device_context(device=None):
    if device is None:
        device = torch._C._lazy._get_default_device_type()
    else:
        device = str(device)
    with DeviceContext._CONTEXTS_LOCK:
        devctx = DeviceContext._CONTEXTS.get(device, None)
        if devctx is None:
            devctx = DeviceContext(device)
            DeviceContext._CONTEXTS[device] = devctx
        return devctx
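A small sketch of the caching behavior (the "lazy" device string is an assumption for illustration; any build with this module importable behaves the same way):

# Sketch: get_device_context returns one shared, process-wide context per
# device string; callers hang arbitrary attributes off it.
from torch._lazy.device_context import get_device_context

ctx1 = get_device_context("lazy")
ctx2 = get_device_context("lazy")
assert ctx1 is ctx2        # cached under DeviceContext._CONTEXTS
ctx1.step_closures = []    # e.g. closure.py stores its queues here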
env-llmeval/lib/python3.10/site-packages/torch/_lazy/ir_cache.py
ADDED
@@ -0,0 +1,13 @@
import torch._C._lazy


def dump(dot_file_name: str):
    """Dump TrieCache in the dot format"""
    return torch._C._lazy._dump_ir_cache(dot_file_name)


def reset():
    """Clear TrieCache. This is needed in testing to avoid
    node reusing between different tests.
    """
    return torch._C._lazy._clear_ir_cache()
env-llmeval/lib/python3.10/site-packages/torch/_lazy/metrics.py
ADDED
@@ -0,0 +1,21 @@
import torch._C._lazy


def reset():
    """Resets all metric counters."""
    torch._C._lazy._reset_metrics()


def counter_names():
    """Retrieves all the currently active counter names."""
    return torch._C._lazy._counter_names()


def counter_value(name: str):
    """Return the value of the counter with the specified name"""
    return torch._C._lazy._counter_value(name)


def metrics_report():
    """Return the combined (lazy core and backend) metric report"""
    return torch._C._lazy._metrics_report()
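A usage sketch for the metrics module (assumes a build where the torch._C._lazy bindings are compiled in):

# Sketch: enumerate active counters after some lazy-tensor work.
from torch._lazy import metrics

metrics.reset()
# ... run lazy-tensor computation here ...
for name in metrics.counter_names():
    print(name, metrics.counter_value(name))
print(metrics.metrics_report())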
env-llmeval/lib/python3.10/site-packages/torch/_lazy/ts_backend.py
ADDED
@@ -0,0 +1,6 @@
import torch._C._lazy_ts_backend


def init():
    """Initializes the lazy Torchscript backend"""
    torch._C._lazy_ts_backend._init()
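For context, a minimal sketch of when init() is needed; the "lazy" device string is the conventional one for this backend, but treat the exact flow as an assumption about this build:

# Sketch: the TorchScript lazy backend must be initialized before tensors
# can be placed on the "lazy" device.
import torch
import torch._lazy.ts_backend

torch._lazy.ts_backend.init()
t = torch.randn(2, 2, device="lazy")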
env-llmeval/lib/python3.10/site-packages/torch/cpu/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (4.83 kB)
env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/__init__.py
ADDED
@@ -0,0 +1 @@
from .autocast_mode import autocast
env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (224 Bytes)
env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/__pycache__/autocast_mode.cpython-310.pyc
ADDED
Binary file (1.6 kB)
env-llmeval/lib/python3.10/site-packages/torch/cpu/amp/autocast_mode.py
ADDED
@@ -0,0 +1,43 @@
from typing import Any

import torch

__all__ = ["autocast"]


class autocast(torch.amp.autocast_mode.autocast):
    r"""
    See :class:`torch.autocast`.
    ``torch.cpu.amp.autocast(args...)`` is equivalent to ``torch.autocast("cpu", args...)``
    """

    def __init__(
        self,
        enabled: bool = True,
        dtype: torch.dtype = torch.bfloat16,
        cache_enabled: bool = True,
    ):
        if torch._jit_internal.is_scripting():
            self._enabled = enabled
            self.device = "cpu"
            self.fast_dtype = dtype
            return
        super().__init__(
            "cpu", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled
        )

    def __enter__(self):
        if torch._jit_internal.is_scripting():
            return self
        return super().__enter__()

    # TODO: discuss a unified TorchScript-friendly API for autocast
    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any):  # type: ignore[override]
        if torch._jit_internal.is_scripting():
            return
        return super().__exit__(exc_type, exc_val, exc_tb)

    def __call__(self, func):
        if torch._jit_internal.is_scripting():
            return func
        return super().__call__(func)
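A usage sketch of the public behavior this class provides (identical to `torch.autocast("cpu", ...)`, with bfloat16 as the default fast dtype):

import torch

lin = torch.nn.Linear(8, 8)
x = torch.randn(2, 8)
with torch.cpu.amp.autocast():
    y = lin(x)
print(y.dtype)  # torch.bfloat16 for autocast-eligible ops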
env-llmeval/lib/python3.10/site-packages/torch/optim/__init__.py
ADDED
@@ -0,0 +1,39 @@
"""
:mod:`torch.optim` is a package implementing various optimization algorithms.

Most commonly used methods are already supported, and the interface is general
enough, so that more sophisticated ones can also be easily integrated in the
future.
"""

from .adadelta import Adadelta
from .adagrad import Adagrad
from .adam import Adam
from .adamw import AdamW
from .sparse_adam import SparseAdam
from .adamax import Adamax
from .asgd import ASGD
from .sgd import SGD
from .radam import RAdam
from .rprop import Rprop
from .rmsprop import RMSprop
from .optimizer import Optimizer
from .nadam import NAdam
from .lbfgs import LBFGS
from . import lr_scheduler
from . import swa_utils

del adadelta
del adagrad
del adam
del adamw
del sparse_adam
del adamax
del asgd
del sgd
del radam
del rprop
del rmsprop
del optimizer
del nadam
del lbfgs
env-llmeval/lib/python3.10/site-packages/torch/optim/__init__.pyi
ADDED
@@ -0,0 +1,15 @@
from . import lr_scheduler as lr_scheduler, swa_utils as swa_utils
from .adadelta import Adadelta as Adadelta
from .adagrad import Adagrad as Adagrad
from .adam import Adam as Adam
from .adamax import Adamax as Adamax
from .adamw import AdamW as AdamW
from .asgd import ASGD as ASGD
from .lbfgs import LBFGS as LBFGS
from .nadam import NAdam as NAdam
from .optimizer import Optimizer as Optimizer
from .radam import RAdam as RAdam
from .rmsprop import RMSprop as RMSprop
from .rprop import Rprop as Rprop
from .sgd import SGD as SGD
from .sparse_adam import SparseAdam as SparseAdam
env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.08 kB)
env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/_functional.cpython-310.pyc
ADDED
Binary file (2.09 kB)
env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adadelta.cpython-310.pyc
ADDED
Binary file (8.53 kB)
env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adagrad.cpython-310.pyc
ADDED
Binary file (10.1 kB)
env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adam.cpython-310.pyc
ADDED
Binary file (16.4 kB)
env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adamax.cpython-310.pyc
ADDED
Binary file (9.68 kB)
env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/adamw.cpython-310.pyc
ADDED
Binary file (16.4 kB)
env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/asgd.cpython-310.pyc
ADDED
Binary file (8.09 kB)
env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/lbfgs.cpython-310.pyc
ADDED
Binary file (8.86 kB)
env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/lr_scheduler.cpython-310.pyc
ADDED
Binary file (67 kB)
env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/nadam.cpython-310.pyc
ADDED
Binary file (13.7 kB)
env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/optimizer.cpython-310.pyc
ADDED
Binary file (34.3 kB)
env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/radam.cpython-310.pyc
ADDED
Binary file (12.9 kB)
env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/rmsprop.cpython-310.pyc
ADDED
Binary file (10.5 kB)
env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/rprop.cpython-310.pyc
ADDED
Binary file (9.1 kB)
env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/sgd.cpython-310.pyc
ADDED
Binary file (10.2 kB)
env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/sparse_adam.cpython-310.pyc
ADDED
Binary file (6.19 kB)
env-llmeval/lib/python3.10/site-packages/torch/optim/__pycache__/swa_utils.cpython-310.pyc
ADDED
Binary file (15.2 kB)
env-llmeval/lib/python3.10/site-packages/torch/optim/_functional.py
ADDED
@@ -0,0 +1,79 @@
r"""Functional interface."""
import math
from torch import Tensor
from typing import List

from .adadelta import adadelta  # type: ignore[attr-defined]  # noqa: F401
from .adagrad import adagrad, _make_sparse  # type: ignore[attr-defined]  # noqa: F401
from .adam import adam  # type: ignore[attr-defined]  # noqa: F401
from .adamw import adamw  # type: ignore[attr-defined]  # noqa: F401
from .adamax import adamax  # type: ignore[attr-defined]  # noqa: F401
from .asgd import asgd  # type: ignore[attr-defined]  # noqa: F401
from .nadam import nadam  # type: ignore[attr-defined]  # noqa: F401
from .radam import radam  # type: ignore[attr-defined]  # noqa: F401
from .rmsprop import rmsprop  # type: ignore[attr-defined]  # noqa: F401
from .rprop import rprop  # type: ignore[attr-defined]  # noqa: F401
from .sgd import sgd  # type: ignore[attr-defined]  # noqa: F401


# TODO: use foreach API in optim._functional to do all the computation


def sparse_adam(params: List[Tensor],
                grads: List[Tensor],
                exp_avgs: List[Tensor],
                exp_avg_sqs: List[Tensor],
                state_steps: List[int],
                *,
                eps: float,
                beta1: float,
                beta2: float,
                lr: float,
                maximize: bool):
    r"""Functional API that performs Sparse Adam algorithm computation.

    See :class:`~torch.optim.SparseAdam` for details.
    """
    for i, param in enumerate(params):
        grad = grads[i]
        grad = grad if not maximize else -grad
        grad = grad.coalesce()  # the update is non-linear so indices must be unique
        grad_indices = grad._indices()
        grad_values = grad._values()
        if grad_values.numel() == 0:
            # Skip update for empty grad
            continue
        size = grad.size()

        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step = state_steps[i]

        def make_sparse(values):
            constructor = grad.new
            if grad_indices.dim() == 0 or values.dim() == 0:
                return constructor().resize_as_(grad)
            return constructor(grad_indices, values, size)

        # Decay the first and second moment running average coefficient
        #      old <- b * old + (1 - b) * new
        # <==> old += (1 - b) * (new - old)
        old_exp_avg_values = exp_avg.sparse_mask(grad)._values()
        exp_avg_update_values = grad_values.sub(old_exp_avg_values).mul_(1 - beta1)
        exp_avg.add_(make_sparse(exp_avg_update_values))
        old_exp_avg_sq_values = exp_avg_sq.sparse_mask(grad)._values()
        exp_avg_sq_update_values = grad_values.pow(2).sub_(old_exp_avg_sq_values).mul_(1 - beta2)
        exp_avg_sq.add_(make_sparse(exp_avg_sq_update_values))

        # Dense addition again is intended, avoiding another sparse_mask
        numer = exp_avg_update_values.add_(old_exp_avg_values)
        exp_avg_sq_update_values.add_(old_exp_avg_sq_values)
        denom = exp_avg_sq_update_values.sqrt_().add_(eps)
        del exp_avg_update_values, exp_avg_sq_update_values

        bias_correction1 = 1 - beta1 ** step
        bias_correction2 = 1 - beta2 ** step
        step_size = lr * math.sqrt(bias_correction2) / bias_correction1

        param.add_(make_sparse(-step_size * numer.div_(denom)))
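For orientation, a sketch of the usual caller of this functional kernel: the torch.optim.SparseAdam optimizer, driven by the sparse gradients an embedding layer produces:

import torch

emb = torch.nn.Embedding(100, 16, sparse=True)  # yields sparse grads
opt = torch.optim.SparseAdam(emb.parameters(), lr=1e-3)

loss = emb(torch.tensor([1, 2, 3])).pow(2).sum()
loss.backward()
opt.step()       # dispatches into the sparse Adam update above
opt.zero_grad()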
env-llmeval/lib/python3.10/site-packages/torch/optim/_multi_tensor/__init__.py
ADDED
@@ -0,0 +1,28 @@
"""
:mod:`torch.optim._multi_tensor` is a package implementing various optimization algorithms.
Most commonly used methods are already supported, and the interface is general
enough, so that more sophisticated ones can be also easily integrated in the
future.
"""
from functools import partialmethod
from torch import optim

def partialclass(cls, *args, **kwargs):

    class NewCls(cls):
        __init__ = partialmethod(cls.__init__, *args, **kwargs)

    return NewCls


Adam = partialclass(optim.Adam, foreach=True)
AdamW = partialclass(optim.AdamW, foreach=True)
NAdam = partialclass(optim.NAdam, foreach=True)
SGD = partialclass(optim.SGD, foreach=True)
RAdam = partialclass(optim.RAdam, foreach=True)
RMSprop = partialclass(optim.RMSprop, foreach=True)
Rprop = partialclass(optim.Rprop, foreach=True)
ASGD = partialclass(optim.ASGD, foreach=True)
Adamax = partialclass(optim.Adamax, foreach=True)
Adadelta = partialclass(optim.Adadelta, foreach=True)
Adagrad = partialclass(optim.Adagrad, foreach=True)
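A sketch of what the partialclass aliases amount to: each one is the corresponding torch.optim class with foreach=True pre-bound, so the two constructions below are configured identically:

import torch
from torch.optim import _multi_tensor

params = [torch.randn(4, requires_grad=True)]
opt_a = _multi_tensor.Adam(params, lr=1e-3)
opt_b = torch.optim.Adam(params, lr=1e-3, foreach=True)
assert opt_a.defaults["foreach"] and opt_b.defaults["foreach"]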
env-llmeval/lib/python3.10/site-packages/torch/optim/_multi_tensor/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.18 kB)
env-llmeval/lib/python3.10/site-packages/torch/optim/adadelta.pyi
ADDED
@@ -0,0 +1,11 @@
from .optimizer import Optimizer, ParamsT

class Adadelta(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: float = ...,
        rho: float = ...,
        eps: float = ...,
        weight_decay: float = ...,
    ) -> None: ...
env-llmeval/lib/python3.10/site-packages/torch/optim/adagrad.py
ADDED
@@ -0,0 +1,383 @@
import torch
from torch import Tensor

from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _view_as_real,
                        _default_to_fused_or_foreach, _differentiable_doc, _foreach_doc, _maximize_doc)
from typing import List, Optional

__all__ = ["Adagrad", "adagrad"]


class Adagrad(Optimizer):
    def __init__(
        self,
        params,
        lr=1e-2,
        lr_decay=0,
        weight_decay=0,
        initial_accumulator_value=0,
        eps=1e-10,
        foreach: Optional[bool] = None,
        *,
        maximize: bool = False,
        differentiable: bool = False,
    ):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= lr_decay:
            raise ValueError(f"Invalid lr_decay value: {lr_decay}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        if not 0.0 <= initial_accumulator_value:
            raise ValueError(
                f"Invalid initial_accumulator_value value: {initial_accumulator_value}"
            )
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")

        defaults = dict(
            lr=lr,
            lr_decay=lr_decay,
            eps=eps,
            weight_decay=weight_decay,
            initial_accumulator_value=initial_accumulator_value,
            foreach=foreach,
            maximize=maximize,
            differentiable=differentiable,
        )
        super().__init__(params, defaults)

        for group in self.param_groups:
            for p in group["params"]:
                state = self.state[p]
                state["step"] = torch.tensor(0.0, dtype=torch.float32)
                init_value = (
                    complex(initial_accumulator_value, initial_accumulator_value)
                    if torch.is_complex(p)
                    else initial_accumulator_value
                )
                state["sum"] = torch.full_like(
                    p, init_value, memory_format=torch.preserve_format
                )

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)

        state_values = list(self.state.values())
        step_is_tensor = (len(state_values) != 0) and torch.is_tensor(
            state_values[0]["step"]
        )
        if not step_is_tensor:
            for s in state_values:
                s["step"] = torch.tensor(float(s["step"]), dtype=torch.float32)

    def share_memory(self):
        for group in self.param_groups:
            for p in group["params"]:
                state = self.state[p]
                state["sum"].share_memory_()

    def _init_group(self, group, params_with_grad, grads, state_sums, state_steps):
        has_sparse_grad, has_complex = False, False
        for p in group["params"]:
            if p.grad is not None:
                has_sparse_grad |= p.grad.is_sparse
                has_complex |= torch.is_complex(p)
                params_with_grad.append(p)
                grads.append(p.grad)
                state = self.state[p]
                state_sums.append(state["sum"])
                state_steps.append(state["step"])

        return has_sparse_grad, has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None

        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            state_sums = []
            state_steps = []

            has_sparse_grad, has_complex = self._init_group(group, params_with_grad, grads, state_sums, state_steps)

            adagrad(
                params_with_grad,
                grads,
                state_sums,
                state_steps,
                lr=group["lr"],
                weight_decay=group["weight_decay"],
                lr_decay=group["lr_decay"],
                eps=group["eps"],
                has_sparse_grad=has_sparse_grad,
                foreach=group["foreach"],
                maximize=group["maximize"],
                differentiable=group["differentiable"],
                has_complex=has_complex,
            )

        return loss


Adagrad.__doc__ = r"""Implements Adagrad algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta)
                \text{ (objective)}, \: \lambda \text{ (weight decay)},                          \\
            &\hspace{12mm}    \tau \text{ (initial accumulator value)}, \: \eta\text{ (lr decay)}\\
            &\textbf{initialize} :  state\_sum_0 \leftarrow 0                             \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm} \tilde{\gamma}    \leftarrow \gamma / (1 +(t-1) \eta)                  \\
            &\hspace{5mm} \textbf{if} \: \lambda \neq 0                                          \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1}                             \\
            &\hspace{5mm}state\_sum_t  \leftarrow  state\_sum_{t-1} + g^2_t                      \\
            &\hspace{5mm}\theta_t \leftarrow
                \theta_{t-1}- \tilde{\gamma} \frac{g_t}{\sqrt{state\_sum_t}+\epsilon}            \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Adaptive Subgradient Methods for Online Learning
    and Stochastic Optimization`_.
    """ + fr"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        lr_decay (float, optional): learning rate decay (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-10)
        {_foreach_doc}
        {_maximize_doc}
        {_differentiable_doc}

    .. _Adaptive Subgradient Methods for Online Learning and Stochastic
        Optimization: http://jmlr.org/papers/v12/duchi11a.html

    """


def adagrad(
    params: List[Tensor],
    grads: List[Tensor],
    state_sums: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting these as kwargs for now as functional API is compiled by torch/distributed/optim
    has_sparse_grad: bool = None,
    foreach: Optional[bool] = None,
    differentiable: bool = False,
    has_complex: bool = False,
    *,
    lr: float,
    weight_decay: float,
    lr_decay: float,
    eps: float,
    maximize: bool,
):
    r"""Functional API that performs Adagrad algorithm computation.

    See :class:`~torch.optim.Adagrad` for details.
    """
    if not all(isinstance(t, torch.Tensor) for t in state_steps):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adagrad
    else:
        func = _single_tensor_adagrad

    func(
        params,
        grads,
        state_sums,
        state_steps,
        lr=lr,
        weight_decay=weight_decay,
        lr_decay=lr_decay,
        eps=eps,
        has_sparse_grad=has_sparse_grad,
        maximize=maximize,
        differentiable=differentiable,
        has_complex=has_complex,
    )


def _make_sparse(grad, grad_indices, values):
    size = grad.size()
    if grad_indices.numel() == 0 or values.numel() == 0:
        return torch.empty_like(grad)
    return torch.sparse_coo_tensor(grad_indices, values, size)


def _single_tensor_adagrad(
    params: List[Tensor],
    grads: List[Tensor],
    state_sums: List[Tensor],
    state_steps: List[Tensor],
    *,
    lr: float,
    weight_decay: float,
    lr_decay: float,
    eps: float,
    has_sparse_grad: bool,
    maximize: bool,
    differentiable: bool,
    has_complex: bool,
):

    for (param, grad, state_sum, step_t) in zip(params, grads, state_sums, state_steps):
        # update step
        step_t += 1
        step = _get_value(step_t)
        grad = grad if not maximize else -grad

        if weight_decay != 0:
            if grad.is_sparse:
                raise RuntimeError(
                    "weight_decay option is not compatible with sparse gradients"
                )
            grad = grad.add(param, alpha=weight_decay)

        clr = lr / (1 + (step - 1) * lr_decay)

        if grad.is_sparse:
            grad = grad.coalesce()  # the update is non-linear so indices must be unique
            grad_indices = grad._indices()
            grad_values = grad._values()

            state_sum.add_(_make_sparse(grad, grad_indices, grad_values.pow(2)))
            std = state_sum.sparse_mask(grad)
            std_values = std._values().sqrt_().add_(eps)
            param.add_(
                _make_sparse(grad, grad_indices, grad_values / std_values), alpha=-clr
            )
        else:
            is_complex = torch.is_complex(param)
            if is_complex:
                grad = torch.view_as_real(grad)
                state_sum = torch.view_as_real(state_sum)
                param = torch.view_as_real(param)
            state_sum.addcmul_(grad, grad, value=1)
            if differentiable:
                std = state_sum.sqrt() + eps
            else:
                std = state_sum.sqrt().add_(eps)
            param.addcdiv_(grad, std, value=-clr)
            if is_complex:
                param = torch.view_as_complex(param)
                state_sum = torch.view_as_complex(state_sum)


def _multi_tensor_adagrad(
    params: List[Tensor],
    grads: List[Tensor],
    state_sums: List[Tensor],
    state_steps: List[Tensor],
    *,
    lr: float,
    weight_decay: float,
    lr_decay: float,
    eps: float,
    has_sparse_grad: bool,
    maximize: bool,
    differentiable: bool,
    has_complex: bool,
):

    assert not differentiable, "_foreach ops don't support autograd"

    # Foreach functions will throw errors if given empty lists
    if len(params) == 0:
        return

    grouped_tensorlists = Optimizer._group_tensors_by_device_and_dtype([params, grads, state_sums, state_steps])
    for ((device_params, device_grads, device_state_sums, device_state_steps), _) in grouped_tensorlists.values():
        device_has_sparse_grad = has_sparse_grad and any(grad.is_sparse for grad in device_grads)

        if device_has_sparse_grad:
            _single_tensor_adagrad(
                device_params,
                device_grads,
                device_state_sums,
                device_state_steps,
                lr=lr,
                weight_decay=weight_decay,
                lr_decay=lr_decay,
                eps=eps,
                has_sparse_grad=True,
                maximize=False,
                differentiable=differentiable,
                has_complex=has_complex,
            )
            continue

        if maximize:
            device_grads = torch._foreach_neg(device_grads)

        # Handle complex parameters
        if has_complex:
            _view_as_real(device_params, device_grads, device_state_sums)

        # Update steps
        # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
        # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
        # wrapped it once now. The alpha is required to assure we go to the right overload.
        if device_state_steps[0].is_cpu:
            torch._foreach_add_(device_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0)
        else:
            torch._foreach_add_(device_state_steps, 1)

        if weight_decay != 0:
            # Re-use the intermediate memory (device_grads) already allocated for maximize
            if maximize:
                torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
            else:
                device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay)

        minus_clr = [-lr / (1 + (_get_value(step) - 1) * lr_decay) for step in device_state_steps]

        torch._foreach_addcmul_(device_state_sums, device_grads, device_grads, value=1)

        std = torch._foreach_sqrt(device_state_sums)
        torch._foreach_add_(std, eps)

        if weight_decay != 0 or maximize:
            # Again, re-use the intermediate memory (device_grads) already allocated
            torch._foreach_mul_(device_grads, minus_clr)
            numerator = device_grads
        else:
            numerator = torch._foreach_mul(device_grads, minus_clr)

        torch._foreach_addcdiv_(device_params, numerator, std)
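A usage sketch tying the code to the documented update rule: `clr` in the kernels above implements the decayed rate lr / (1 + (t - 1) * lr_decay):

import torch

w = torch.randn(3, requires_grad=True)
opt = torch.optim.Adagrad([w], lr=1e-2, lr_decay=1e-4, eps=1e-10)
for _ in range(5):
    opt.zero_grad()
    loss = (w ** 2).sum()
    loss.backward()
    opt.step()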
env-llmeval/lib/python3.10/site-packages/torch/optim/adagrad.pyi
ADDED
@@ -0,0 +1,12 @@
from .optimizer import Optimizer, ParamsT

class Adagrad(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: float = ...,
        lr_decay: float = ...,
        weight_decay: float = ...,
        initial_accumulator_value: float = ...,
        eps: float = ...,
    ) -> None: ...
env-llmeval/lib/python3.10/site-packages/torch/optim/adam.py
ADDED
@@ -0,0 +1,658 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import List, Optional, Union, Tuple
|
2 |
+
|
3 |
+
import torch
|
4 |
+
from torch import Tensor
|
5 |
+
from .optimizer import (Optimizer, ParamsT, _use_grad_for_differentiable, _get_value,
|
6 |
+
_stack_if_compiling, _dispatch_sqrt, _default_to_fused_or_foreach,
|
7 |
+
_capturable_doc, _differentiable_doc, _foreach_doc, _fused_doc,
|
8 |
+
_maximize_doc, _view_as_real)
|
9 |
+
from torch.utils._foreach_utils import _get_fused_kernels_supported_devices
|
10 |
+
|
11 |
+
__all__ = ['Adam', 'adam']
|
12 |
+
|
13 |
+
|
14 |
+
class Adam(Optimizer):
|
15 |
+
def __init__(self,
|
16 |
+
params: ParamsT,
|
17 |
+
lr: Union[float, Tensor] = 1e-3,
|
18 |
+
betas: Tuple[float, float] = (0.9, 0.999),
|
19 |
+
eps: float = 1e-8,
|
20 |
+
weight_decay: float = 0,
|
21 |
+
amsgrad: bool = False,
|
22 |
+
*,
|
23 |
+
foreach: Optional[bool] = None,
|
24 |
+
maximize: bool = False,
|
25 |
+
capturable: bool = False,
|
26 |
+
differentiable: bool = False,
|
27 |
+
fused: Optional[bool] = None):
|
28 |
+
if not 0.0 <= lr:
|
29 |
+
raise ValueError(f"Invalid learning rate: {lr}")
|
30 |
+
if isinstance(lr, Tensor) and foreach and not capturable:
|
31 |
+
raise ValueError("lr as a Tensor is not supported for capturable=False and foreach=True")
|
32 |
+
if not 0.0 <= eps:
|
33 |
+
raise ValueError(f"Invalid epsilon value: {eps}")
|
34 |
+
if not 0.0 <= betas[0] < 1.0:
|
35 |
+
raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
|
36 |
+
if not 0.0 <= betas[1] < 1.0:
|
37 |
+
raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
|
38 |
+
if not 0.0 <= weight_decay:
|
39 |
+
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
|
40 |
+
|
41 |
+
defaults = dict(lr=lr, betas=betas, eps=eps,
|
42 |
+
weight_decay=weight_decay, amsgrad=amsgrad,
|
43 |
+
maximize=maximize, foreach=foreach, capturable=capturable,
|
44 |
+
differentiable=differentiable, fused=fused)
|
45 |
+
super().__init__(params, defaults)
|
46 |
+
|
47 |
+
if fused:
|
48 |
+
if differentiable:
|
49 |
+
raise RuntimeError("`fused` does not support `differentiable`")
|
50 |
+
self._step_supports_amp_scaling = True
|
51 |
+
# TODO(crcrpar): [low prec params & their higher prec copy]
|
52 |
+
# Support AMP with FP16/BF16 model params which would need
|
53 |
+
# higher prec copy of params to do update math in higher prec to
|
54 |
+
# alleviate the loss of information.
|
55 |
+
fused_supported_devices = _get_fused_kernels_supported_devices()
|
56 |
+
if not all(
|
57 |
+
p.device.type in fused_supported_devices and
|
58 |
+
torch.is_floating_point(p) for pg in self.param_groups for p in pg['params']
|
59 |
+
):
|
60 |
+
raise RuntimeError("`fused=True` requires all the params to be floating point Tensors of "
|
61 |
+
f"supported devices: {fused_supported_devices}.")
|
62 |
+
if foreach:
|
63 |
+
raise RuntimeError("`fused` and `foreach` cannot be `True` together.")
|
64 |
+
|
65 |
+
def __setstate__(self, state):
|
66 |
+
super().__setstate__(state)
|
67 |
+
for group in self.param_groups:
|
68 |
+
group.setdefault('amsgrad', False)
|
69 |
+
group.setdefault('maximize', False)
|
70 |
+
group.setdefault('foreach', None)
|
71 |
+
group.setdefault('capturable', False)
|
72 |
+
group.setdefault('differentiable', False)
|
73 |
+
group.setdefault('fused', None)
|
74 |
+
state_values = list(self.state.values())
|
75 |
+
step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step'])
|
76 |
+
if not step_is_tensor:
|
77 |
+
for s in state_values:
|
78 |
+
s['step'] = torch.tensor(float(s['step']), dtype=torch.float32)
|
79 |
+
|
80 |
+
def _init_group(
|
81 |
+
self,
|
82 |
+
group,
|
83 |
+
params_with_grad,
|
84 |
+
grads,
|
85 |
+
exp_avgs,
|
86 |
+
exp_avg_sqs,
|
87 |
+
max_exp_avg_sqs,
|
88 |
+
state_steps
|
89 |
+
):
|
90 |
+
has_complex = False
|
91 |
+
for p in group['params']:
|
92 |
+
if p.grad is not None:
|
93 |
+
has_complex |= torch.is_complex(p)
|
94 |
+
params_with_grad.append(p)
|
95 |
+
if p.grad.is_sparse:
|
96 |
+
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
|
97 |
+
grads.append(p.grad)
|
98 |
+
|
99 |
+
state = self.state[p]
|
100 |
+
# Lazy state initialization
|
101 |
+
if len(state) == 0:
|
102 |
+
# note(crcrpar): [special device hosting for step]
|
103 |
+
# Deliberately host `step` on CPU if both capturable and fused are off.
|
104 |
+
# This is because kernel launches are costly on CUDA and XLA.
|
105 |
+
state['step'] = (
|
106 |
+
torch.zeros((), dtype=torch.float32, device=p.device)
|
107 |
+
if group['capturable'] or group['fused']
|
108 |
+
else torch.tensor(0.0, dtype=torch.float32)
|
109 |
+
)
|
110 |
+
# Exponential moving average of gradient values
|
111 |
+
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
|
112 |
+
# Exponential moving average of squared gradient values
|
113 |
+
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
|
114 |
+
if group['amsgrad']:
|
115 |
+
# Maintains max of all exp. moving avg. of sq. grad. values
|
116 |
+
state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
|
117 |
+
|
118 |
+
exp_avgs.append(state['exp_avg'])
|
119 |
+
exp_avg_sqs.append(state['exp_avg_sq'])
|
120 |
+
|
121 |
+
if group['amsgrad']:
|
122 |
+
max_exp_avg_sqs.append(state['max_exp_avg_sq'])
|
123 |
+
if group['differentiable'] and state['step'].requires_grad:
|
124 |
+
raise RuntimeError('`requires_grad` is not supported for `step` in differentiable mode')
|
125 |
+
|
126 |
+
# Foreach without capturable does not support a tensor lr
|
127 |
+
if group['foreach'] and torch.is_tensor(group['lr']) and not group['capturable']:
|
128 |
+
raise RuntimeError('lr as a Tensor is not supported for capturable=False and foreach=True')
|
129 |
+
|
130 |
+
state_steps.append(state['step'])
|
131 |
+
return has_complex
|
132 |
+
|
133 |
+
@_use_grad_for_differentiable
|
134 |
+
def step(self, closure=None):
|
135 |
+
"""Perform a single optimization step.
|
136 |
+
|
137 |
+
Args:
|
138 |
+
closure (Callable, optional): A closure that reevaluates the model
|
139 |
+
and returns the loss.
|
140 |
+
"""
|
141 |
+
self._cuda_graph_capture_health_check()
|
142 |
+
|
143 |
+
loss = None
|
144 |
+
if closure is not None:
|
145 |
+
with torch.enable_grad():
|
146 |
+
loss = closure()
|
147 |
+
|
148 |
+
for group in self.param_groups:
|
149 |
+
params_with_grad = []
|
150 |
+
grads = []
|
151 |
+
exp_avgs = []
|
152 |
+
exp_avg_sqs = []
|
153 |
+
max_exp_avg_sqs = []
|
154 |
+
state_steps = []
|
155 |
+
beta1, beta2 = group['betas']
|
156 |
+
|
157 |
+
has_complex = self._init_group(
|
158 |
+
group,
|
159 |
+
params_with_grad,
|
160 |
+
grads,
|
161 |
+
exp_avgs,
|
162 |
+
exp_avg_sqs,
|
163 |
+
max_exp_avg_sqs,
|
164 |
+
state_steps)
|
165 |
+
|
166 |
+
adam(
|
167 |
+
params_with_grad,
|
168 |
+
grads,
|
169 |
+
exp_avgs,
|
170 |
+
exp_avg_sqs,
|
171 |
+
max_exp_avg_sqs,
|
172 |
+
state_steps,
|
173 |
+
amsgrad=group['amsgrad'],
|
174 |
+
has_complex=has_complex,
|
175 |
+
beta1=beta1,
|
176 |
+
beta2=beta2,
|
177 |
+
lr=group['lr'],
|
178 |
+
weight_decay=group['weight_decay'],
|
179 |
+
eps=group['eps'],
|
180 |
+
maximize=group['maximize'],
|
181 |
+
foreach=group['foreach'],
|
182 |
+
capturable=group['capturable'],
|
183 |
+
differentiable=group['differentiable'],
|
184 |
+
fused=group['fused'],
|
185 |
+
grad_scale=getattr(self, "grad_scale", None),
|
186 |
+
found_inf=getattr(self, "found_inf", None),
|
187 |
+
)
|
188 |
+
|
189 |
+
return loss
|
190 |
+
|
191 |
+
|
192 |
+
Adam.__doc__ = r"""Implements Adam algorithm.
|
193 |
+
|
194 |
+
.. math::
|
195 |
+
\begin{aligned}
|
196 |
+
&\rule{110mm}{0.4pt} \\
|
197 |
+
&\textbf{input} : \gamma \text{ (lr)}, \beta_1, \beta_2
|
198 |
+
\text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)} \\
|
199 |
+
&\hspace{13mm} \lambda \text{ (weight decay)}, \: \textit{amsgrad},
|
200 |
+
\:\textit{maximize} \\
|
201 |
+
&\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)},
|
202 |
+
v_0\leftarrow 0 \text{ (second moment)},\: \widehat{v_0}^{max}\leftarrow 0\\[-1.ex]
|
203 |
+
&\rule{110mm}{0.4pt} \\
|
204 |
+
&\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
|
205 |
+
|
206 |
+
&\hspace{5mm}\textbf{if} \: \textit{maximize}: \\
|
207 |
+
&\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\
|
208 |
+
&\hspace{5mm}\textbf{else} \\
|
209 |
+
&\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
|
210 |
+
&\hspace{5mm}\textbf{if} \: \lambda \neq 0 \\
|
211 |
+
&\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\
|
212 |
+
&\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\
|
213 |
+
&\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\
|
214 |
+
&\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\
|
215 |
+
&\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\
|
216 |
+
&\hspace{5mm}\textbf{if} \: amsgrad \\
|
217 |
+
&\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max},
|
218 |
+
\widehat{v_t}) \\
|
219 |
+
&\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/
|
220 |
+
\big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big) \\
|
221 |
+
&\hspace{5mm}\textbf{else} \\
|
222 |
+
&\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/
|
223 |
+
\big(\sqrt{\widehat{v_t}} + \epsilon \big) \\
|
224 |
+
&\rule{110mm}{0.4pt} \\[-1.ex]
|
225 |
+
&\bf{return} \: \theta_t \\[-1.ex]
|
226 |
+
&\rule{110mm}{0.4pt} \\[-1.ex]
|
227 |
+
\end{aligned}
|
228 |
+
|
229 |
+
For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.
|
230 |
+
""" + fr"""
|
231 |
+
Args:
|
232 |
+
params (iterable): iterable of parameters to optimize or dicts defining
|
233 |
+
parameter groups
|
234 |
+
lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR
|
235 |
+
is not yet supported for all our implementations. Please use a float
|
236 |
+
LR if you are not also specifying fused=True or capturable=True.
|
237 |
+
betas (Tuple[float, float], optional): coefficients used for computing
|
238 |
+
running averages of gradient and its square (default: (0.9, 0.999))
|
239 |
+
eps (float, optional): term added to the denominator to improve
|
240 |
+
numerical stability (default: 1e-8)
|
241 |
+
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
|
242 |
+
amsgrad (bool, optional): whether to use the AMSGrad variant of this
|
243 |
+
algorithm from the paper `On the Convergence of Adam and Beyond`_
|
244 |
+
(default: False)
|
245 |
+
{_foreach_doc}
|
246 |
+
{_maximize_doc}
|
247 |
+
{_capturable_doc}
|
248 |
+
{_differentiable_doc}
|
249 |
+
{_fused_doc}
|
250 |
+
.. _Adam\: A Method for Stochastic Optimization:
|
251 |
+
https://arxiv.org/abs/1412.6980
|
252 |
+
.. _On the Convergence of Adam and Beyond:
|
253 |
+
https://openreview.net/forum?id=ryQu7f-RZ
|
254 |
+
|
255 |
+
"""
|
256 |
+
|
257 |
+
|
258 |
+
def adam(params: List[Tensor],
|
259 |
+
grads: List[Tensor],
|
260 |
+
exp_avgs: List[Tensor],
|
261 |
+
exp_avg_sqs: List[Tensor],
|
262 |
+
max_exp_avg_sqs: List[Tensor],
|
263 |
+
state_steps: List[Tensor],
|
264 |
+
# kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
|
265 |
+
# setting this as kwarg for now as functional API is compiled by torch/distributed/optim
|
266 |
+
foreach: Optional[bool] = None,
|
267 |
+
capturable: bool = False,
|
268 |
+
differentiable: bool = False,
|
269 |
+
fused: Optional[bool] = None,
|
270 |
+
grad_scale: Optional[Tensor] = None,
|
271 |
+
found_inf: Optional[Tensor] = None,
|
272 |
+
has_complex: bool = False,
|
273 |
+
*,
|
274 |
+
amsgrad: bool,
|
275 |
+
beta1: float,
|
276 |
+
beta2: float,
|
277 |
+
lr: Union[float, Tensor],
|
278 |
+
weight_decay: float,
|
279 |
+
eps: float,
|
280 |
+
maximize: bool):
|
281 |
+
r"""Functional API that performs Adam algorithm computation.
|
282 |
+
|
283 |
+
See :class:`~torch.optim.Adam` for details.
|
284 |
+
"""
|
285 |
+
# Respect when the user inputs False/True for foreach or fused. We only want to change
|
286 |
+
# the default when neither have been user-specified. Note that we default to foreach
|
287 |
+
# and pass False to use_fused. This is not a mistake--we want to give the fused impl
|
288 |
+
# bake-in time before making it the default, even if it is typically faster.
|
289 |
+
if fused is None and foreach is None:
|
290 |
+
_, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)
|
291 |
+
# Do not flip on foreach for the unsupported case where lr is a Tensor and capturable=False.
|
292 |
+
if foreach and isinstance(lr, Tensor) and not capturable:
|
293 |
+
foreach = False
|
294 |
+
if fused is None:
|
295 |
+
fused = False
|
296 |
+
if foreach is None:
|
297 |
+
foreach = False
|
298 |
+
|
299 |
+
# this check is slow during compilation, so we skip it
|
300 |
+
# if it's strictly needed we can add this check back in dynamo
|
301 |
+
if not torch._utils.is_compiling() and not all(isinstance(t, torch.Tensor) for t in state_steps):
|
302 |
+
raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")
|
303 |
+
|
304 |
+
if foreach and torch.jit.is_scripting():
|
305 |
+
raise RuntimeError('torch.jit.script not supported with foreach optimizers')
|
306 |
+
if fused and torch.jit.is_scripting():
|
307 |
+
raise RuntimeError("torch.jit.script not supported with fused optimizers")
|
308 |
+
|
309 |
+
if fused and not torch.jit.is_scripting():
|
310 |
+
func = _fused_adam
|
311 |
+
elif foreach and not torch.jit.is_scripting():
|
312 |
+
func = _multi_tensor_adam
|
313 |
+
else:
|
314 |
+
func = _single_tensor_adam
|
315 |
+
|
316 |
+
func(params,
|
317 |
+
grads,
|
318 |
+
exp_avgs,
|
319 |
+
exp_avg_sqs,
|
320 |
+
max_exp_avg_sqs,
|
321 |
+
state_steps,
|
322 |
+
amsgrad=amsgrad,
|
323 |
+
has_complex=has_complex,
|
324 |
+
beta1=beta1,
|
325 |
+
beta2=beta2,
|
326 |
+
lr=lr,
|
327 |
+
weight_decay=weight_decay,
|
328 |
+
eps=eps,
|
329 |
+
maximize=maximize,
|
330 |
+
capturable=capturable,
|
331 |
+
differentiable=differentiable,
|
332 |
+
grad_scale=grad_scale,
|
333 |
+
found_inf=found_inf)
|
334 |
+
|
335 |
+
|
336 |
+
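# Note on `state_steps` (editorial): as the runtime check above enforces, each
# entry must be a singleton tensor rather than a Python int. A hypothetical
# minimal initialization would therefore look like:
#
#     state_steps = [torch.zeros((), dtype=torch.float32) for _ in params]
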
def _single_tensor_adam(params: List[Tensor],
                        grads: List[Tensor],
                        exp_avgs: List[Tensor],
                        exp_avg_sqs: List[Tensor],
                        max_exp_avg_sqs: List[Tensor],
                        state_steps: List[Tensor],
                        grad_scale: Optional[Tensor],
                        found_inf: Optional[Tensor],
                        *,
                        amsgrad: bool,
                        has_complex: bool,
                        beta1: float,
                        beta2: float,
                        lr: Union[float, Tensor],
                        weight_decay: float,
                        eps: float,
                        maximize: bool,
                        capturable: bool,
                        differentiable: bool):

    assert grad_scale is None and found_inf is None

    if torch.jit.is_scripting():
        # this assert is due to JIT being dumb and not realizing that the ops below
        # have overloads to handle both float and Tensor lrs, so we just assert it's
        # a float since most people using JIT are using floats
        assert isinstance(lr, float)

    for i, param in enumerate(params):
        grad = grads[i] if not maximize else -grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step_t = state_steps[i]

        # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
        if not torch._utils.is_compiling() and capturable:
            assert (
                (param.is_cuda and step_t.is_cuda) or (param.is_xla and step_t.is_xla)
            ), "If capturable=True, params and state_steps must be CUDA or XLA tensors."

        # update step
        step_t += 1

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        if torch.is_complex(param):
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_avg_sq = torch.view_as_real(exp_avg_sq)
            if amsgrad:
                max_exp_avg_sqs[i] = torch.view_as_real(max_exp_avg_sqs[i])
            param = torch.view_as_real(param)

        # Decay the first and second moment running average coefficient
        exp_avg.lerp_(grad, 1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)

        if capturable or differentiable:
            step = step_t

            bias_correction1 = 1 - beta1 ** step
            bias_correction2 = 1 - beta2 ** step

            step_size = lr / bias_correction1
            step_size_neg = step_size.neg()

            bias_correction2_sqrt = bias_correction2.sqrt()

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                if differentiable:
                    max_exp_avg_sq = max_exp_avg_sqs[i].clone()
                else:
                    max_exp_avg_sq = max_exp_avg_sqs[i]

                max_exp_avg_sqs[i].copy_(torch.maximum(max_exp_avg_sq, exp_avg_sq))

                # Uses the max. for normalizing running avg. of gradient
                # Folds in (admittedly ugly) 1-elem step_size math here to avoid extra param-set-sized read+write
                # (can't fold it into addcdiv_ below because addcdiv_ requires value is a Number, not a Tensor)
                denom = (max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg)
            else:
                denom = (exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg)

            param.addcdiv_(exp_avg, denom)
        else:
            step = _get_value(step_t)

            bias_correction1 = 1 - beta1 ** step
            bias_correction2 = 1 - beta2 ** step

            step_size = lr / bias_correction1

            bias_correction2_sqrt = _dispatch_sqrt(bias_correction2)

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])

                # Use the max. for normalizing running avg. of gradient
                denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps)
            else:
                denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps)

            param.addcdiv_(exp_avg, denom, value=-step_size)

        # Lastly, switch back to complex view
        if amsgrad and torch.is_complex(params[i]):
            max_exp_avg_sqs[i] = torch.view_as_complex(max_exp_avg_sqs[i])

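# Why the "folded" denominator in the capturable branch above is equivalent to
# the textbook update (a short derivation added for readability):
#
#   denom = sqrt(v_t) / (bias_correction2_sqrt * (-step_size)) + eps / (-step_size)
#         = -(sqrt(v_hat_t) + eps) / step_size
#
# so param.addcdiv_(exp_avg, denom) computes
#
#   param += exp_avg / denom = param - step_size * exp_avg / (sqrt(v_hat_t) + eps)
#
# and since step_size = lr / bias_correction1 absorbs the first-moment bias
# correction, this matches theta_t <- theta_{t-1} - gamma * m_hat / (sqrt(v_hat) + eps)
# from the docstring, at the cost of one extra scalar op instead of a full
# parameter-sized read+write.
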
def _multi_tensor_adam(params: List[Tensor],
                       grads: List[Tensor],
                       exp_avgs: List[Tensor],
                       exp_avg_sqs: List[Tensor],
                       max_exp_avg_sqs: List[Tensor],
                       state_steps: List[Tensor],
                       grad_scale: Optional[Tensor],
                       found_inf: Optional[Tensor],
                       *,
                       amsgrad: bool,
                       has_complex: bool,
                       beta1: float,
                       beta2: float,
                       lr: Union[float, Tensor],
                       weight_decay: float,
                       eps: float,
                       maximize: bool,
                       capturable: bool,
                       differentiable: bool):
    if len(params) == 0:
        return

    if isinstance(lr, Tensor) and not capturable:
        raise RuntimeError("lr as a Tensor is not supported for capturable=False and foreach=True")

    # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
    if not torch._utils.is_compiling() and capturable:
        assert all(p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)), \
            "If capturable=True, params and state_steps must be CUDA tensors."

    assert grad_scale is None and found_inf is None

    assert not differentiable, "_foreach ops don't support autograd"

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps])
    for ((
        device_params,
        device_grads,
        device_exp_avgs,
        device_exp_avg_sqs,
        device_max_exp_avg_sqs,
        device_state_steps,
    ), _) in grouped_tensors.values():

        if maximize:
            device_grads = torch._foreach_neg(device_grads)

        # Handle complex parameters
        if has_complex:
            if amsgrad:
                _view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs, device_max_exp_avg_sqs)
            else:
                _view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs)

        # Update steps
        # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
        # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
        # wrapped it once now. The alpha is required to assure we go to the right overload.
        if device_state_steps[0].is_cpu:
            torch._foreach_add_(device_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0)
        else:
            torch._foreach_add_(device_state_steps, 1)

        if weight_decay != 0:
            # Re-use the intermediate memory (device_grads) already allocated for maximize
            if maximize:
                torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
            else:
                device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay)

        # Decay the first and second moment running average coefficient
        torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - beta1)

        torch._foreach_mul_(device_exp_avg_sqs, beta2)
        torch._foreach_addcmul_(device_exp_avg_sqs, device_grads, device_grads, 1 - beta2)

        # Delete the local intermediate since it won't be used anymore to save on peak memory
        del device_grads

        if capturable:
            bias_correction1 = torch._foreach_pow(beta1, device_state_steps)
            bias_correction2 = torch._foreach_pow(beta2, device_state_steps)
            # foreach_sub doesn't allow a scalar as the first arg
            torch._foreach_sub_(bias_correction1, 1)
            torch._foreach_sub_(bias_correction2, 1)
            # we do not negate bias_correction1 as it'll need to be negated later anyway
            torch._foreach_neg_(bias_correction2)

            # foreach_div doesn't allow a scalar as the first arg
            torch._foreach_div_(bias_correction1, lr)
            torch._foreach_reciprocal_(bias_correction1)

            torch._foreach_sqrt_(bias_correction2)

            # Re-assign for clarity as we maintain minimal intermediates: we'll have
            # step_size = - lr / (1 - beta1 ^ t) where t = num_steps
            # bias_correction2_sqrt = sqrt(1 - beta2 ^ t)
            step_size = bias_correction1
            bias_correction2_sqrt = bias_correction2

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)  # type: ignore[assignment]

                # Set intermediate to the max. for normalizing running avg. of gradient when amsgrad
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            torch._foreach_div_(exp_avg_sq_sqrt, step_size)

            # at this point, exp_avg_sq_sqrt = - (1 - beta1^t) * [sqrt(exp_avg_sq / (1 - beta2^t)) + eps] / lr
            torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt)
        else:
            bias_correction1 = [1 - beta1 ** _get_value(step) for step in device_state_steps]
            bias_correction2 = [1 - beta2 ** _get_value(step) for step in device_state_steps]

            step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1])

            bias_correction2_sqrt = [_dispatch_sqrt(bc) for bc in bias_correction2]

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)

                # Use the max. for normalizing running avg. of gradient
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size)


def _fused_adam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    has_complex: bool,  # Needed for consistency.
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,  # Needed for consistency.
    differentiable: bool,
) -> None:
    if not params:
        return
    if differentiable:
        raise RuntimeError("Adam with fused=True does not support differentiable=True")

    grad_scale_dict = {grad_scale.device: grad_scale} if grad_scale is not None else None
    found_inf_dict = {found_inf.device: found_inf} if found_inf is not None else None

    # We only shuffle around the lr when it is a Tensor and on CUDA, otherwise, we prefer
    # treating it as a scalar.
    lr_dict = {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps])
    for (device, _), ((device_params,
                       device_grads,
                       device_exp_avgs,
                       device_exp_avg_sqs,
                       device_max_exp_avg_sqs,
                       device_state_steps,), _) in grouped_tensors.items():
        device_grad_scale, device_found_inf = None, None
        if grad_scale is not None:
            if device not in grad_scale_dict:
                grad_scale_dict[device] = grad_scale.to(device, non_blocking=True)
            device_grad_scale = grad_scale_dict[device]
        if found_inf is not None:
            if device not in found_inf_dict:
                found_inf_dict[device] = found_inf.to(device, non_blocking=True)
            device_found_inf = found_inf_dict[device]
        if lr_dict is not None and device not in lr_dict:
            lr_dict[device] = lr.to(device=device, non_blocking=True)
            lr = lr_dict[device]
        torch._foreach_add_(device_state_steps, 1)
        torch._fused_adam_(
            device_params,
            device_grads,
            device_exp_avgs,
            device_exp_avg_sqs,
            device_max_exp_avg_sqs,
            device_state_steps,
            amsgrad=amsgrad,
            lr=lr,
            beta1=beta1,
            beta2=beta2,
            weight_decay=weight_decay,
            eps=eps,
            maximize=maximize,
            grad_scale=device_grad_scale,
            found_inf=device_found_inf,
        )
        if device_found_inf is not None:
            torch._foreach_sub_(device_state_steps, [device_found_inf] * len(device_state_steps))
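All three code paths above (single-tensor, foreach, fused) are driven through the same `torch.optim.Adam` front end. A minimal sketch of typical use, with a hypothetical model and data (per the dispatch logic, `foreach=True` routes to `_multi_tensor_adam`, while `fused=True` would route to `_fused_adam`):

    import torch

    model = torch.nn.Linear(4, 2)
    opt = torch.optim.Adam(model.parameters(), lr=1e-3, amsgrad=True, foreach=True)

    for _ in range(3):
        loss = model(torch.randn(8, 4)).pow(2).mean()
        opt.zero_grad()
        loss.backward()
        opt.step()
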
env-llmeval/lib/python3.10/site-packages/torch/optim/adam.pyi
ADDED
@@ -0,0 +1,22 @@
from typing import Optional, Tuple, Union

from torch import Tensor

from .optimizer import Optimizer, ParamsT

class Adam(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 0,
        amsgrad: bool = False,
        *,
        foreach: Optional[bool] = None,
        maximize: bool = False,
        capturable: bool = False,
        differentiable: bool = False,
        fused: Optional[bool] = None,
    ) -> None: ...
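The stub above documents that `lr` may be a plain float or a Tensor. Per the adam.py docstring earlier, a tensor LR should be combined with `fused=True` or `capturable=True`; a hypothetical sketch (the model is a placeholder, and `capturable=True` assumes CUDA or XLA parameters per the runtime asserts above):

    lr = torch.tensor(1e-3)  # tensor learning rate, e.g. to be mutated in place by a schedule
    opt = torch.optim.Adam(model.parameters(), lr=lr, capturable=True)
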
env-llmeval/lib/python3.10/site-packages/torch/optim/adamw.pyi
ADDED
@@ -0,0 +1,22 @@
from typing import Optional, Tuple, Union

from torch import Tensor

from .optimizer import Optimizer, ParamsT

class AdamW(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 1e-2,
        amsgrad: bool = False,
        *,
        maximize: bool = False,
        foreach: Optional[bool] = None,
        capturable: bool = False,
        differentiable: bool = False,
        fused: Optional[bool] = None,
    ) -> None: ...
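The two stubs differ only in the `weight_decay` default (0 vs 1e-2): Adam folds weight decay into the gradient as an L2 term (the `g_t <- g_t + lambda * theta_{t-1}` step in its docstring math), whereas AdamW decays the weights directly. A short illustrative sketch (hypothetical `model`):

    opt_l2 = torch.optim.Adam(model.parameters(), weight_decay=1e-2)   # coupled L2 penalty
    opt_decoupled = torch.optim.AdamW(model.parameters())              # decoupled; weight_decay defaults to 1e-2
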
env-llmeval/lib/python3.10/site-packages/torch/optim/lbfgs.py
ADDED
@@ -0,0 +1,478 @@
import torch
from functools import reduce
from .optimizer import Optimizer

__all__ = ['LBFGS']

def _cubic_interpolate(x1, f1, g1, x2, f2, g2, bounds=None):
    # ported from https://github.com/torch/optim/blob/master/polyinterp.lua
    # Compute bounds of interpolation area
    if bounds is not None:
        xmin_bound, xmax_bound = bounds
    else:
        xmin_bound, xmax_bound = (x1, x2) if x1 <= x2 else (x2, x1)

    # Code for most common case: cubic interpolation of 2 points
    #   w/ function and derivative values for both
    # Solution in this case (where x2 is the farthest point):
    #   d1 = g1 + g2 - 3*(f1-f2)/(x1-x2);
    #   d2 = sqrt(d1^2 - g1*g2);
    #   min_pos = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2));
    #   t_new = min(max(min_pos,xmin_bound),xmax_bound);
    d1 = g1 + g2 - 3 * (f1 - f2) / (x1 - x2)
    d2_square = d1**2 - g1 * g2
    if d2_square >= 0:
        d2 = d2_square.sqrt()
        if x1 <= x2:
            min_pos = x2 - (x2 - x1) * ((g2 + d2 - d1) / (g2 - g1 + 2 * d2))
        else:
            min_pos = x1 - (x1 - x2) * ((g1 + d2 - d1) / (g1 - g2 + 2 * d2))
        return min(max(min_pos, xmin_bound), xmax_bound)
    else:
        return (xmin_bound + xmax_bound) / 2.

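# The line search below enforces the strong Wolfe conditions on the step t along
# direction d (summary added for readability; c1, c2 are the constants below):
#
#   sufficient decrease (Armijo):  f(x + t*d) <= f(x) + c1 * t * g(x)^T d
#   curvature (strong):            |g(x + t*d)^T d| <= -c2 * g(x)^T d
#
# where g(x)^T d < 0 for a descent direction, matching the
# `f_new > (f + c1 * t * gtd)` and `abs(gtd_new) <= -c2 * gtd` tests in the code.
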
def _strong_wolfe(obj_func,
                  x,
                  t,
                  d,
                  f,
                  g,
                  gtd,
                  c1=1e-4,
                  c2=0.9,
                  tolerance_change=1e-9,
                  max_ls=25):
    # ported from https://github.com/torch/optim/blob/master/lswolfe.lua
    d_norm = d.abs().max()
    g = g.clone(memory_format=torch.contiguous_format)
    # evaluate objective and gradient using initial step
    f_new, g_new = obj_func(x, t, d)
    ls_func_evals = 1
    gtd_new = g_new.dot(d)

    # bracket an interval containing a point satisfying the Wolfe criteria
    t_prev, f_prev, g_prev, gtd_prev = 0, f, g, gtd
    done = False
    ls_iter = 0
    while ls_iter < max_ls:
        # check conditions
        if f_new > (f + c1 * t * gtd) or (ls_iter > 1 and f_new >= f_prev):
            bracket = [t_prev, t]
            bracket_f = [f_prev, f_new]
            bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)]
            bracket_gtd = [gtd_prev, gtd_new]
            break

        if abs(gtd_new) <= -c2 * gtd:
            bracket = [t]
            bracket_f = [f_new]
            bracket_g = [g_new]
            done = True
            break

        if gtd_new >= 0:
            bracket = [t_prev, t]
            bracket_f = [f_prev, f_new]
            bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)]
            bracket_gtd = [gtd_prev, gtd_new]
            break

        # interpolate
        min_step = t + 0.01 * (t - t_prev)
        max_step = t * 10
        tmp = t
        t = _cubic_interpolate(
            t_prev,
            f_prev,
            gtd_prev,
            t,
            f_new,
            gtd_new,
            bounds=(min_step, max_step))

        # next step
        t_prev = tmp
        f_prev = f_new
        g_prev = g_new.clone(memory_format=torch.contiguous_format)
        gtd_prev = gtd_new
        f_new, g_new = obj_func(x, t, d)
        ls_func_evals += 1
        gtd_new = g_new.dot(d)
        ls_iter += 1

    # reached max number of iterations?
    if ls_iter == max_ls:
        bracket = [0, t]
        bracket_f = [f, f_new]
        bracket_g = [g, g_new]

    # zoom phase: we now have a point satisfying the criteria, or
    # a bracket around it. We refine the bracket until we find the
    # exact point satisfying the criteria
    insuf_progress = False
    # find high and low points in bracket
    low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[-1] else (1, 0)
    while not done and ls_iter < max_ls:
        # line-search bracket is so small
        if abs(bracket[1] - bracket[0]) * d_norm < tolerance_change:
            break

        # compute new trial value
        t = _cubic_interpolate(bracket[0], bracket_f[0], bracket_gtd[0],
                               bracket[1], bracket_f[1], bracket_gtd[1])

        # test that we are making sufficient progress:
        # in case `t` is so close to boundary, we mark that we are making
        # insufficient progress, and if
        #   + we have made insufficient progress in the last step, or
        #   + `t` is at one of the boundary,
        # we will move `t` to a position which is `0.1 * len(bracket)`
        # away from the nearest boundary point.
        eps = 0.1 * (max(bracket) - min(bracket))
        if min(max(bracket) - t, t - min(bracket)) < eps:
            # interpolation close to boundary
            if insuf_progress or t >= max(bracket) or t <= min(bracket):
                # evaluate at 0.1 away from boundary
                if abs(t - max(bracket)) < abs(t - min(bracket)):
                    t = max(bracket) - eps
                else:
                    t = min(bracket) + eps
                insuf_progress = False
            else:
                insuf_progress = True
        else:
            insuf_progress = False

        # Evaluate new point
        f_new, g_new = obj_func(x, t, d)
        ls_func_evals += 1
        gtd_new = g_new.dot(d)
        ls_iter += 1

        if f_new > (f + c1 * t * gtd) or f_new >= bracket_f[low_pos]:
            # Armijo condition not satisfied or not lower than lowest point
            bracket[high_pos] = t
            bracket_f[high_pos] = f_new
            bracket_g[high_pos] = g_new.clone(memory_format=torch.contiguous_format)
            bracket_gtd[high_pos] = gtd_new
            low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[1] else (1, 0)
        else:
            if abs(gtd_new) <= -c2 * gtd:
                # Wolfe conditions satisfied
                done = True
            elif gtd_new * (bracket[high_pos] - bracket[low_pos]) >= 0:
                # old high becomes new low
                bracket[high_pos] = bracket[low_pos]
                bracket_f[high_pos] = bracket_f[low_pos]
                bracket_g[high_pos] = bracket_g[low_pos]
                bracket_gtd[high_pos] = bracket_gtd[low_pos]

            # new point becomes new low
            bracket[low_pos] = t
            bracket_f[low_pos] = f_new
            bracket_g[low_pos] = g_new.clone(memory_format=torch.contiguous_format)
            bracket_gtd[low_pos] = gtd_new

    # return stuff
    t = bracket[low_pos]
    f_new = bracket_f[low_pos]
    g_new = bracket_g[low_pos]
    return f_new, g_new, t, ls_func_evals

class LBFGS(Optimizer):
    """Implements L-BFGS algorithm.

    Heavily inspired by `minFunc
    <https://www.cs.ubc.ca/~schmidtm/Software/minFunc.html>`_.

    .. warning::
        This optimizer doesn't support per-parameter options and parameter
        groups (there can be only one).

    .. warning::
        Right now all parameters have to be on a single device. This will be
        improved in the future.

    .. note::
        This is a very memory intensive optimizer (it requires additional
        ``param_bytes * (history_size + 1)`` bytes). If it doesn't fit in memory
        try reducing the history size, or use a different algorithm.

    Args:
        lr (float): learning rate (default: 1)
        max_iter (int): maximal number of iterations per optimization step
            (default: 20)
        max_eval (int): maximal number of function evaluations per optimization
            step (default: max_iter * 1.25).
        tolerance_grad (float): termination tolerance on first order optimality
            (default: 1e-7).
        tolerance_change (float): termination tolerance on function
            value/parameter changes (default: 1e-9).
        history_size (int): update history size (default: 100).
        line_search_fn (str): either 'strong_wolfe' or None (default: None).
    """

    def __init__(self,
                 params,
                 lr=1,
                 max_iter=20,
                 max_eval=None,
                 tolerance_grad=1e-7,
                 tolerance_change=1e-9,
                 history_size=100,
                 line_search_fn=None):
        if max_eval is None:
            max_eval = max_iter * 5 // 4
        defaults = dict(
            lr=lr,
            max_iter=max_iter,
            max_eval=max_eval,
            tolerance_grad=tolerance_grad,
            tolerance_change=tolerance_change,
            history_size=history_size,
            line_search_fn=line_search_fn)
        super().__init__(params, defaults)

        if len(self.param_groups) != 1:
            raise ValueError("LBFGS doesn't support per-parameter options "
                             "(parameter groups)")

        self._params = self.param_groups[0]['params']
        self._numel_cache = None

    def _numel(self):
        if self._numel_cache is None:
            self._numel_cache = reduce(lambda total, p: total + p.numel(), self._params, 0)
        return self._numel_cache

    def _gather_flat_grad(self):
        views = []
        for p in self._params:
            if p.grad is None:
                view = p.new(p.numel()).zero_()
            elif p.grad.is_sparse:
                view = p.grad.to_dense().view(-1)
            else:
                view = p.grad.view(-1)
            views.append(view)
        return torch.cat(views, 0)

    def _add_grad(self, step_size, update):
        offset = 0
        for p in self._params:
            numel = p.numel()
            # view as to avoid deprecated pointwise semantics
            p.add_(update[offset:offset + numel].view_as(p), alpha=step_size)
            offset += numel
        assert offset == self._numel()

    def _clone_param(self):
        return [p.clone(memory_format=torch.contiguous_format) for p in self._params]

    def _set_param(self, params_data):
        for p, pdata in zip(self._params, params_data):
            p.copy_(pdata)

    def _directional_evaluate(self, closure, x, t, d):
        self._add_grad(t, d)
        loss = float(closure())
        flat_grad = self._gather_flat_grad()
        self._set_param(x)
        return loss, flat_grad

    @torch.no_grad()
    def step(self, closure):
        """Perform a single optimization step.

        Args:
            closure (Callable): A closure that reevaluates the model
                and returns the loss.
        """
        assert len(self.param_groups) == 1

        # Make sure the closure is always called with grad enabled
        closure = torch.enable_grad()(closure)

        group = self.param_groups[0]
        lr = group['lr']
        max_iter = group['max_iter']
        max_eval = group['max_eval']
        tolerance_grad = group['tolerance_grad']
        tolerance_change = group['tolerance_change']
        line_search_fn = group['line_search_fn']
        history_size = group['history_size']

        # NOTE: LBFGS has only global state, but we register it as state for
        # the first param, because this helps with casting in load_state_dict
        state = self.state[self._params[0]]
        state.setdefault('func_evals', 0)
        state.setdefault('n_iter', 0)

        # evaluate initial f(x) and df/dx
        orig_loss = closure()
        loss = float(orig_loss)
        current_evals = 1
        state['func_evals'] += 1

        flat_grad = self._gather_flat_grad()
        opt_cond = flat_grad.abs().max() <= tolerance_grad

        # optimal condition
        if opt_cond:
            return orig_loss

        # tensors cached in state (for tracing)
        d = state.get('d')
        t = state.get('t')
        old_dirs = state.get('old_dirs')
        old_stps = state.get('old_stps')
        ro = state.get('ro')
        H_diag = state.get('H_diag')
        prev_flat_grad = state.get('prev_flat_grad')
        prev_loss = state.get('prev_loss')

        n_iter = 0
        # optimize for a max of max_iter iterations
        while n_iter < max_iter:
            # keep track of nb of iterations
            n_iter += 1
            state['n_iter'] += 1

            ############################################################
            # compute gradient descent direction
            ############################################################
            if state['n_iter'] == 1:
                d = flat_grad.neg()
                old_dirs = []
                old_stps = []
                ro = []
                H_diag = 1
            else:
                # do lbfgs update (update memory)
                y = flat_grad.sub(prev_flat_grad)
                s = d.mul(t)
                ys = y.dot(s)  # y*s
                if ys > 1e-10:
                    # updating memory
                    if len(old_dirs) == history_size:
                        # shift history by one (limited-memory)
                        old_dirs.pop(0)
                        old_stps.pop(0)
                        ro.pop(0)

                    # store new direction/step
                    old_dirs.append(y)
                    old_stps.append(s)
                    ro.append(1. / ys)

                    # update scale of initial Hessian approximation
                    H_diag = ys / y.dot(y)  # (y*y)

                # compute the approximate (L-BFGS) inverse Hessian
                # multiplied by the gradient
                num_old = len(old_dirs)

                if 'al' not in state:
                    state['al'] = [None] * history_size
                al = state['al']

                # iteration in L-BFGS loop collapsed to use just one buffer
                q = flat_grad.neg()
                for i in range(num_old - 1, -1, -1):
                    al[i] = old_stps[i].dot(q) * ro[i]
                    q.add_(old_dirs[i], alpha=-al[i])

                # multiply by initial Hessian
                # r/d is the final direction
                d = r = torch.mul(q, H_diag)
                for i in range(num_old):
                    be_i = old_dirs[i].dot(r) * ro[i]
                    r.add_(old_stps[i], alpha=al[i] - be_i)

            if prev_flat_grad is None:
                prev_flat_grad = flat_grad.clone(memory_format=torch.contiguous_format)
            else:
                prev_flat_grad.copy_(flat_grad)
            prev_loss = loss

            ############################################################
            # compute step length
            ############################################################
            # reset initial guess for step size
            if state['n_iter'] == 1:
                t = min(1., 1. / flat_grad.abs().sum()) * lr
            else:
                t = lr

            # directional derivative
            gtd = flat_grad.dot(d)  # g * d

            # directional derivative is below tolerance
            if gtd > -tolerance_change:
                break

            # optional line search: user function
            ls_func_evals = 0
            if line_search_fn is not None:
                # perform line search, using user function
                if line_search_fn != "strong_wolfe":
                    raise RuntimeError("only 'strong_wolfe' is supported")
                else:
                    x_init = self._clone_param()

                    def obj_func(x, t, d):
                        return self._directional_evaluate(closure, x, t, d)

                    loss, flat_grad, t, ls_func_evals = _strong_wolfe(
                        obj_func, x_init, t, d, loss, flat_grad, gtd)
                self._add_grad(t, d)
                opt_cond = flat_grad.abs().max() <= tolerance_grad
            else:
                # no line search, simply move with fixed-step
                self._add_grad(t, d)
                if n_iter != max_iter:
                    # re-evaluate function only if not in last iteration
                    # the reason we do this: in a stochastic setting,
                    # no use to re-evaluate that function here
                    with torch.enable_grad():
                        loss = float(closure())
                    flat_grad = self._gather_flat_grad()
                    opt_cond = flat_grad.abs().max() <= tolerance_grad
                    ls_func_evals = 1

            # update func eval
            current_evals += ls_func_evals
            state['func_evals'] += ls_func_evals

            ############################################################
            # check conditions
            ############################################################
            if n_iter == max_iter:
                break

            if current_evals >= max_eval:
                break

            # optimal condition
            if opt_cond:
                break

            # lack of progress
            if d.mul(t).abs().max() <= tolerance_change:
                break

            if abs(loss - prev_loss) < tolerance_change:
                break

        state['d'] = d
        state['t'] = t
        state['old_dirs'] = old_dirs
        state['old_stps'] = old_stps
        state['ro'] = ro
        state['H_diag'] = H_diag
        state['prev_flat_grad'] = prev_flat_grad
        state['prev_loss'] = prev_loss

        return orig_loss
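Because `LBFGS.step` takes a mandatory closure (unlike the other optimizers in this package), typical usage looks like the sketch below; the model and data are illustrative placeholders:

    import torch

    model = torch.nn.Linear(3, 1)
    x, y = torch.randn(16, 3), torch.randn(16, 1)
    opt = torch.optim.LBFGS(model.parameters(), lr=1.0, line_search_fn='strong_wolfe')

    def closure():
        # re-evaluates the model and returns the loss, as step() requires
        opt.zero_grad()
        loss = torch.nn.functional.mse_loss(model(x), y)
        loss.backward()
        return loss

    for _ in range(5):
        opt.step(closure)
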
env-llmeval/lib/python3.10/site-packages/torch/optim/nadam.py
ADDED
@@ -0,0 +1,473 @@
import torch
from torch import Tensor
from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _dispatch_sqrt, _stack_if_compiling,
                        _capturable_doc, _differentiable_doc, _foreach_doc, _default_to_fused_or_foreach, _view_as_real)
from typing import List, Optional

__all__ = ['NAdam', 'nadam']

class NAdam(Optimizer):
    def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, momentum_decay=4e-3, decoupled_weight_decay: bool = False,
                 *, foreach: Optional[bool] = None, capturable: bool = False,
                 differentiable: bool = False):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        if not 0.0 <= momentum_decay:
            raise ValueError(f"Invalid momentum_decay value: {momentum_decay}")
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, momentum_decay=momentum_decay,
                        decoupled_weight_decay=decoupled_weight_decay,
                        foreach=foreach, capturable=capturable, differentiable=differentiable)
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault('foreach', None)
            group.setdefault('capturable', False)
            group.setdefault('differentiable', False)
            group.setdefault('decoupled_weight_decay', False)
        state_values = list(self.state.values())
        step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step'])
        if not step_is_tensor:
            for s in state_values:
                s['step'] = torch.tensor(float(s['step']), dtype=torch.float32)
        mu_product_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['mu_product'])
        if not mu_product_is_tensor:
            for s in state_values:
                s['mu_product'] = torch.tensor(s['mu_product'], dtype=torch.float32)

    def _init_group(self, group, params_with_grad, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps):
        has_complex = False
        for p in group['params']:
            if p.grad is not None:
                has_complex |= torch.is_complex(p)
                params_with_grad.append(p)
                if p.grad.is_sparse:
                    raise RuntimeError('NAdam does not support sparse gradients')
                grads.append(p.grad)

                state = self.state[p]
                # Lazy state initialization
                if len(state) == 0:
                    # note(crcrpar): [special device hosting for step]
                    # Deliberately host `step` and `mu_product` on CPU if capturable is False.
                    # This is because kernel launches are costly on CUDA and XLA.
                    state['step'] = (
                        torch.zeros((), dtype=torch.float32, device=p.device)
                        if group['capturable'] else torch.tensor(0.0, dtype=torch.float32)
                    )
                    state['mu_product'] = (
                        torch.ones((), dtype=torch.float32, device=p.device)
                        if group['capturable'] else torch.tensor(1.0, dtype=torch.float32)
                    )
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                exp_avgs.append(state['exp_avg'])
                exp_avg_sqs.append(state['exp_avg_sq'])
                mu_products.append(state['mu_product'])
                state_steps.append(state['step'])
        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            mu_products = []
            state_steps = []
            beta1, beta2 = group['betas']

            has_complex = self._init_group(group, params_with_grad, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps)

            nadam(params_with_grad,
                  grads,
                  exp_avgs,
                  exp_avg_sqs,
                  mu_products,
                  state_steps,
                  beta1=beta1,
                  beta2=beta2,
                  lr=group['lr'],
                  weight_decay=group['weight_decay'],
                  momentum_decay=group['momentum_decay'],
                  eps=group['eps'],
                  decoupled_weight_decay=group['decoupled_weight_decay'],
                  foreach=group['foreach'],
                  capturable=group['capturable'],
                  differentiable=group['differentiable'],
                  has_complex=has_complex)

        return loss

NAdam.__doc__ = r"""Implements NAdam algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt} \\
            &\textbf{input} : \gamma_t \text{ (lr)}, \: \beta_1,\beta_2 \text{ (betas)},
                \: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)} \\
            &\hspace{13mm} \: \lambda \text{ (weight decay)}, \:\psi \text{ (momentum decay)} \\
            &\hspace{13mm} \: \textit{decoupled\_weight\_decay} \\
            &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)},
                v_0 \leftarrow 0 \text{ ( second moment)} \\[-1.ex]
            &\rule{110mm}{0.4pt} \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
            &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
            &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} \\
            &\hspace{5mm} \textbf{if} \: \lambda \neq 0 \\
            &\hspace{10mm}\textbf{if} \: \textit{decoupled\_weight\_decay} \\
            &\hspace{15mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\
            &\hspace{10mm}\textbf{else} \\
            &\hspace{15mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\
            &\hspace{5mm} \mu_t \leftarrow \beta_1 \big(1 - \frac{1}{2} 0.96^{t \psi} \big) \\
            &\hspace{5mm} \mu_{t+1} \leftarrow \beta_1 \big(1 - \frac{1}{2} 0.96^{(t+1)\psi}\big)\\
            &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\
            &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\
            &\hspace{5mm}\widehat{m_t} \leftarrow \mu_{t+1} m_t/(1-\prod_{i=1}^{t+1}\mu_i)\\[-1.ex]
            & \hspace{11mm} + (1-\mu_t) g_t /(1-\prod_{i=1}^{t} \mu_{i}) \\
            &\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\
            &\hspace{5mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\
            &\rule{110mm}{0.4pt} \\[-1.ex]
            &\bf{return} \: \theta_t \\[-1.ex]
            &\rule{110mm}{0.4pt} \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Incorporating Nesterov Momentum into Adam`_.
    """ + fr"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 2e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        momentum_decay (float, optional): momentum decay (default: 4e-3)
        decoupled_weight_decay (bool, optional): whether to use decoupled weight
            decay as in AdamW to obtain NAdamW (default: False)
        {_foreach_doc}
        {_capturable_doc}
        {_differentiable_doc}

    .. _Incorporating Nesterov Momentum into Adam:
        https://openreview.net/forum?id=OM0jvwB8jIp57ZJjtNEZ
    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101

    """

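# A minimal usage sketch of the class above (editorial note; `model` is a
# hypothetical placeholder). Setting decoupled_weight_decay=True yields the
# "NAdamW" variant described in the docstring:
#
#     opt = torch.optim.NAdam(model.parameters(), lr=2e-3, momentum_decay=4e-3,
#                             decoupled_weight_decay=True)
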
def nadam(params: List[Tensor],
          grads: List[Tensor],
          exp_avgs: List[Tensor],
          exp_avg_sqs: List[Tensor],
          mu_products: List[Tensor],
          state_steps: List[Tensor],
          # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
          # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
          decoupled_weight_decay: bool = False,
          foreach: Optional[bool] = None,
          capturable: bool = False,
          differentiable: bool = False,
          has_complex: bool = False,
          *,
          beta1: float,
          beta2: float,
          lr: float,
          weight_decay: float,
          momentum_decay: float,
          eps: float):
    r"""Functional API that performs NAdam algorithm computation.

    See :class:`~torch.optim.NAdam` for details.
    """

    if not all(isinstance(t, torch.Tensor) for t in state_steps):
        raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")

    if not all(isinstance(t, torch.Tensor) for t in mu_products):
        raise RuntimeError("API has changed, `mu_products` argument must contain a list of singleton tensors")

    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)

    if foreach and torch.jit.is_scripting():
        raise RuntimeError('torch.jit.script not supported with foreach optimizers')

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_nadam
    else:
        func = _single_tensor_nadam

    func(params,
         grads,
         exp_avgs,
         exp_avg_sqs,
         mu_products,
         state_steps,
         beta1=beta1,
         beta2=beta2,
         lr=lr,
         weight_decay=weight_decay,
         momentum_decay=momentum_decay,
         decoupled_weight_decay=decoupled_weight_decay,
         eps=eps,
         capturable=capturable,
         differentiable=differentiable,
         has_complex=has_complex)


def _single_tensor_nadam(params: List[Tensor],
                         grads: List[Tensor],
                         exp_avgs: List[Tensor],
                         exp_avg_sqs: List[Tensor],
                         mu_products: List[Tensor],
                         state_steps: List[Tensor],
                         *,
                         beta1: float,
                         beta2: float,
                         lr: float,
                         weight_decay: float,
                         momentum_decay: float,
                         eps: float,
                         decoupled_weight_decay: bool,
                         capturable: bool,
                         differentiable: bool,
                         has_complex: bool):

    for i, param in enumerate(params):
        grad = grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        mu_product = mu_products[i]
        step_t = state_steps[i]

        if torch.is_complex(param):
            param = torch.view_as_real(param)
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_avg_sq = torch.view_as_real(exp_avg_sq)

        # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
        if not torch._utils.is_compiling() and capturable:
            assert (
                (param.is_cuda and mu_product.is_cuda and step_t.is_cuda) or (param.is_xla and mu_product.is_xla and step_t.is_xla)
            ), "If capturable=True, params, mu_products, and state_steps must be CUDA or XLA tensors."

        # update step
        step_t += 1

        if capturable:
            step = step_t
        else:
            step = _get_value(step_t)

        bias_correction2 = 1 - beta2 ** step

        if weight_decay != 0:
            if decoupled_weight_decay:
                # Perform stepweight decay
                param.mul_(1 - lr * weight_decay)
            else:
                grad = grad.add(param, alpha=weight_decay)

        # calculate the momentum cache \mu^{t} and \mu^{t+1}
        mu = beta1 * (1. - 0.5 * (0.96 ** (step * momentum_decay)))
        mu_next = beta1 * (1. - 0.5 * (0.96 ** ((step + 1) * momentum_decay)))

        # update mu_product
        mu_product *= mu

        # decay the first and second moment running average coefficient
        exp_avg.lerp_(grad, 1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
        denom = exp_avg_sq.div(bias_correction2).sqrt()

        if differentiable or capturable:
            denom = denom.add(eps)
            # Make autograd track the operations
            # by updating the grad and exp_avg directly and not using the
            # scalar "value" argument of addcdiv.
            mu_product_next = mu_product * mu_next
            grad = grad * (-lr * (1. - mu) / (1. - mu_product))
            exp_avg = exp_avg * (-lr * mu_next / (1. - mu_product_next))
            param.addcdiv_(grad, denom)
            param.addcdiv_(exp_avg, denom)
        else:
            mu_product_next = _get_value(mu_product) * mu_next
            denom.add_(eps)
            param.addcdiv_(grad, denom, value=(-lr * (1. - mu) / (1. - _get_value(mu_product))))
            param.addcdiv_(exp_avg, denom, value=(-lr * mu_next) / (1. - mu_product_next))

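# How the two addcdiv_ calls above realize the NAdam update (derivation added
# for readability; mu_product = prod_{i<=t} mu_i as maintained in `mu_products`):
#
#   theta <- theta - lr * (1 - mu_t)  / (1 - mu_product_t)              * g_t / (sqrt(v_hat) + eps)
#   theta <- theta - lr * mu_{t+1}    / (1 - mu_product_t * mu_{t+1})   * m_t / (sqrt(v_hat) + eps)
#
# which is exactly the docstring's expansion
#
#   m_hat = mu_{t+1} * m_t / (1 - prod_{i=1}^{t+1} mu_i) + (1 - mu_t) * g_t / (1 - prod_{i=1}^{t} mu_i)
#
# applied inside theta_t <- theta_t - gamma * m_hat / (sqrt(v_hat) + eps).
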
def _multi_tensor_nadam(params: List[Tensor],
                        grads: List[Tensor],
                        exp_avgs: List[Tensor],
                        exp_avg_sqs: List[Tensor],
                        mu_products: List[Tensor],
                        state_steps: List[Tensor],
                        *,
                        beta1: float,
                        beta2: float,
                        lr: float,
                        weight_decay: float,
                        momentum_decay: float,
                        eps: float,
                        decoupled_weight_decay: bool,
                        capturable: bool,
                        differentiable: bool,
                        has_complex: bool):

    if len(params) == 0:
        return

    assert not differentiable, "_foreach ops don't support autograd"

    # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
    if not torch._utils.is_compiling() and capturable:
        assert all(p.is_cuda and mp.is_cuda and step.is_cuda
                   for p, mp, step in zip(params, mu_products, state_steps)), \
            "If capturable=True, params, mu_products, and state_steps must be CUDA tensors."

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps])
    for ((grouped_params, grouped_grads, grouped_exp_avgs,
          grouped_exp_avg_sqs, grouped_mu_products, grouped_state_steps), _) in grouped_tensors.values():

        # handle complex
        if has_complex:
            _view_as_real(grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_avg_sqs)

        # Update steps
        # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
        # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
        # wrapped it once now. The alpha is required to assure we go to the right overload.
        if grouped_state_steps[0].is_cpu:
            torch._foreach_add_(grouped_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0)
        else:
            torch._foreach_add_(grouped_state_steps, 1)

        if weight_decay != 0:
            if decoupled_weight_decay:
                # Perform stepweight decay
                torch._foreach_mul_(grouped_params, 1 - lr * weight_decay)
            else:
                grouped_grads = torch._foreach_add(grouped_grads, grouped_params, alpha=weight_decay)

        # Decay the first and second moment running average coefficient
        torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1)

        torch._foreach_mul_(grouped_exp_avg_sqs, beta2)
        torch._foreach_addcmul_(grouped_exp_avg_sqs, grouped_grads, grouped_grads, 1 - beta2)

        exp_avg_sq_sqrt = torch._foreach_sqrt(grouped_exp_avg_sqs)

        if capturable:
            # mus will be beta1 * (1 - 0.5 * 0.96 ** (step * momentum_decay))
            exponent = torch._foreach_mul(grouped_state_steps, momentum_decay)
            mus = torch._foreach_pow(0.96, exponent)
            torch._foreach_mul_(mus, -0.5)
            torch._foreach_add_(mus, 1.0)
            torch._foreach_mul_(mus, beta1)

            # mu_nexts will be beta1 * (1 - 0.5 * 0.96 ** ((step + 1) * momentum_decay))
            torch._foreach_add_(exponent, momentum_decay)
            mu_nexts = torch._foreach_pow(0.96, exponent)
            torch._foreach_mul_(mu_nexts, -0.5)
            torch._foreach_add_(mu_nexts, 1.0)
            torch._foreach_mul_(mu_nexts, beta1)

            # save peak memory as we don't need exponent anymore
            del exponent

            bias_correction_sqrt = torch._foreach_pow(beta2, grouped_state_steps)
            # foreach_sub doesn't allow a scalar as the first arg
            torch._foreach_sub_(bias_correction_sqrt, 1.0)
            torch._foreach_neg_(bias_correction_sqrt)
            torch._foreach_sqrt_(bias_correction_sqrt)
        else:
            bias_correction_sqrt = [_dispatch_sqrt(1 - beta2 ** _get_value(step)) for step in grouped_state_steps]
            mus = [beta1 * (1. - 0.5 * (0.96 ** (_get_value(step) * momentum_decay))) for step in grouped_state_steps]
            mu_nexts = [beta1 * (1. - 0.5 * (0.96 ** ((_get_value(step) + 1) * momentum_decay)))
                        for step in grouped_state_steps]

        # update mu_products
        torch._foreach_mul_(grouped_mu_products, mus)

        torch._foreach_div_(exp_avg_sq_sqrt, bias_correction_sqrt)
        torch._foreach_add_(exp_avg_sq_sqrt, eps)

        # explicitly delete bias_correction refs to save memory
        del bias_correction_sqrt

        if capturable:
            # Build up the step_size multiplier for grad, reusing mus' memory
            torch._foreach_sub_(mus, 1.0)
            torch._foreach_mul_(mus, lr)
            # foreach_sub doesn't allow a scalar as the first arg
            denom = torch._foreach_sub(grouped_mu_products, 1.0)
            torch._foreach_neg_(denom)
            torch._foreach_div_(mus, denom)
            # - lr * (1 - mu) / (1 - mu_product)
            step_size_grads = mus
            # explicitly delete denom to save memory
            del denom

            # Build up the step_size multiplier for exp_avg, reusing mu_nexts' memory
            denom = torch._foreach_mul(grouped_mu_products, mu_nexts)
            torch._foreach_mul_(mu_nexts, lr)
            # foreach_sub doesn't allow a scalar as the first arg, but it's okay because
            # we need a negative here anyway
            torch._foreach_sub_(denom, 1.0)
            torch._foreach_div_(mu_nexts, denom)
            # - lr * mu_next / (1 - mu_product * mu_next)
            step_size_expavg = mu_nexts
            # explicitly delete denom to save memory
            del denom

            # we cannot write in place into step_size_grads because it is a list of scalar tensors,
            # and mul'ing with grouped_grads will result in a list of bigger Tensors
            numerator = torch._foreach_mul(step_size_grads, grouped_grads)
            torch._foreach_addcmul_(numerator, step_size_expavg, grouped_exp_avgs)

            # finally, update params
            torch._foreach_addcdiv_(grouped_params, numerator, exp_avg_sq_sqrt)
        else:
            step_size_grads = _stack_if_compiling([(lr * (1. - mu) / (1. - _get_value(mu_product))) * -1
                                                   for mu_product, mu in zip(grouped_mu_products, mus)])
            step_size_expavg = _stack_if_compiling([(lr * mu_next / (1. - _get_value(mu_product) * mu_next)) * -1
                                                    for mu_product, mu_next in zip(grouped_mu_products, mu_nexts)])
|
471 |
+
|
472 |
+
torch._foreach_addcdiv_(grouped_params, grouped_grads, exp_avg_sq_sqrt, step_size_grads)
|
473 |
+
torch._foreach_addcdiv_(grouped_params, grouped_exp_avgs, exp_avg_sq_sqrt, step_size_expavg)
|
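A minimal usage sketch of the NAdam optimizer implemented above. The model, data, and hyperparameter values are illustrative assumptions, not part of this diff; the constructor keywords mirror the nadam.pyi stub below.

# Hedged sketch: train a toy model with NAdam (all values are assumptions).
import torch
import torch.nn as nn

model = nn.Linear(10, 1)
opt = torch.optim.NAdam(model.parameters(), lr=2e-3, betas=(0.9, 0.999),
                        eps=1e-8, weight_decay=0.01, momentum_decay=4e-3,
                        decoupled_weight_decay=True)  # AdamW-style decay path above

x, y = torch.randn(32, 10), torch.randn(32, 1)
for _ in range(5):
    opt.zero_grad()
    loss = nn.functional.mse_loss(model(x), y)
    loss.backward()
    opt.step()  # dispatches to _single_tensor_nadam or _multi_tensor_nadam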
env-llmeval/lib/python3.10/site-packages/torch/optim/nadam.pyi
ADDED
@@ -0,0 +1,15 @@
from typing import Tuple

from .optimizer import Optimizer, ParamsT

class NAdam(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: float = ...,
        betas: Tuple[float, float] = ...,
        eps: float = ...,
        weight_decay: float = ...,
        momentum_decay: float = ...,
        decoupled_weight_decay: bool = ...,
    ) -> None: ...
env-llmeval/lib/python3.10/site-packages/torch/optim/radam.pyi
ADDED
@@ -0,0 +1,14 @@
from typing import Tuple

from .optimizer import Optimizer, ParamsT

class RAdam(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: float = ...,
        betas: Tuple[float, float] = ...,
        eps: float = ...,
        weight_decay: float = ...,
        decoupled_weight_decay: bool = ...,
    ) -> None: ...
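A brief sketch matching the RAdam stub above; the model and values are assumptions for illustration only.

# Hedged sketch: constructing RAdam with the keywords typed in the stub.
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
opt = torch.optim.RAdam(model.parameters(), lr=1e-3, betas=(0.9, 0.999),
                        eps=1e-8, weight_decay=0.0,
                        decoupled_weight_decay=True)  # per the stub's signature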
env-llmeval/lib/python3.10/site-packages/torch/optim/rmsprop.pyi
ADDED
@@ -0,0 +1,13 @@
from .optimizer import Optimizer, ParamsT

class RMSprop(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: float = ...,
        alpha: float = ...,
        eps: float = ...,
        weight_decay: float = ...,
        momentum: float = ...,
        centered: bool = ...,
    ) -> None: ...
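A brief sketch matching the RMSprop stub above; values are illustrative assumptions.

# Hedged sketch: constructing RMSprop with the keywords typed in the stub.
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
opt = torch.optim.RMSprop(model.parameters(), lr=1e-2, alpha=0.99, eps=1e-8,
                          weight_decay=0.0, momentum=0.9, centered=False)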
env-llmeval/lib/python3.10/site-packages/torch/optim/rprop.py
ADDED
@@ -0,0 +1,335 @@
import torch
from torch import Tensor
from .optimizer import (Optimizer, _use_grad_for_differentiable, _default_to_fused_or_foreach,
                        _differentiable_doc, _foreach_doc, _maximize_doc, _view_as_real)
from typing import List, Optional

__all__ = ["Rprop", "rprop"]


class Rprop(Optimizer):
    def __init__(
        self,
        params,
        lr=1e-2,
        etas=(0.5, 1.2),
        step_sizes=(1e-6, 50),
        *,
        foreach: Optional[bool] = None,
        maximize: bool = False,
        differentiable: bool = False,
    ):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 < etas[0] < 1.0 < etas[1]:
            raise ValueError(f"Invalid eta values: {etas[0]}, {etas[1]}")

        defaults = dict(
            lr=lr,
            etas=etas,
            step_sizes=step_sizes,
            foreach=foreach,
            maximize=maximize,
            differentiable=differentiable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)

    def _init_group(self, group, params, grads, prevs, step_sizes):
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params.append(p)
            grad = p.grad
            if grad.is_sparse:
                raise RuntimeError("Rprop does not support sparse gradients")

            grads.append(grad)
            state = self.state[p]

            # State initialization
            if len(state) == 0:
                state["step"] = 0
                state["prev"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )
                if p.dtype.is_complex:
                    # Complex numbers should be treated as two independent real numbers.
                    # Hence the step_size shouldn't be zero for the imaginary part.
                    state["step_size"] = (
                        grad.new()
                        .resize_as_(grad)
                        .fill_(complex(group["lr"], group["lr"]))
                    )
                else:
                    state["step_size"] = (
                        grad.new().resize_as_(grad).fill_(group["lr"])
                    )

            prevs.append(state["prev"])
            step_sizes.append(state["step_size"])

            state["step"] += 1
        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params = []
            grads = []
            prevs = []
            step_sizes = []
            etaminus, etaplus = group["etas"]
            step_size_min, step_size_max = group["step_sizes"]
            foreach = group["foreach"]
            maximize = group["maximize"]

            has_complex = self._init_group(group, params, grads, prevs, step_sizes)

            rprop(
                params,
                grads,
                prevs,
                step_sizes,
                step_size_min=step_size_min,
                step_size_max=step_size_max,
                etaminus=etaminus,
                etaplus=etaplus,
                foreach=foreach,
                maximize=maximize,
                differentiable=group["differentiable"],
                has_complex=has_complex,
            )

        return loss


Rprop.__doc__ = r"""Implements the resilient backpropagation algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt} \\
            &\textbf{input} : \theta_0 \in \mathbf{R}^d \text{ (params)}, f(\theta)
                \text{ (objective)}, \\
            &\hspace{13mm} \eta_{+/-} \text{ (etaplus, etaminus)}, \Gamma_{max/min}
                \text{ (step sizes)} \\
            &\textbf{initialize} : g^0_{prev} \leftarrow 0,
                \: \eta_0 \leftarrow \text{lr (learning rate)} \\
            &\rule{110mm}{0.4pt} \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\
            &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
            &\hspace{5mm} \textbf{for} \text{ } i = 0, 1, \ldots, d-1 \: \mathbf{do} \\
            &\hspace{10mm} \textbf{if} \: g^i_{prev} g^i_t > 0 \\
            &\hspace{15mm} \eta^i_t \leftarrow \mathrm{min}(\eta^i_{t-1} \eta_{+},
                \Gamma_{max}) \\
            &\hspace{10mm} \textbf{else if} \: g^i_{prev} g^i_t < 0 \\
            &\hspace{15mm} \eta^i_t \leftarrow \mathrm{max}(\eta^i_{t-1} \eta_{-},
                \Gamma_{min}) \\
            &\hspace{15mm} g^i_t \leftarrow 0 \\
            &\hspace{10mm} \textbf{else} \: \\
            &\hspace{15mm} \eta^i_t \leftarrow \eta^i_{t-1} \\
            &\hspace{5mm}\theta_t \leftarrow \theta_{t-1}- \eta_t \mathrm{sign}(g_t) \\
            &\hspace{5mm}g_{prev} \leftarrow g_t \\
            &\rule{110mm}{0.4pt} \\[-1.ex]
            &\bf{return} \: \theta_t \\[-1.ex]
            &\rule{110mm}{0.4pt} \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to the paper
    `A Direct Adaptive Method for Faster Backpropagation Learning: The RPROP Algorithm
    <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.21.1417>`_.
    """ + fr"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        etas (Tuple[float, float], optional): pair of (etaminus, etaplus), that
            are multiplicative increase and decrease factors
            (default: (0.5, 1.2))
        step_sizes (Tuple[float, float], optional): a pair of minimal and
            maximal allowed step sizes (default: (1e-6, 50))
        {_foreach_doc}
        {_maximize_doc}
        {_differentiable_doc}

    """

def rprop(
    params: List[Tensor],
    grads: List[Tensor],
    prevs: List[Tensor],
    step_sizes: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
    foreach: Optional[bool] = None,
    maximize: bool = False,
    differentiable: bool = False,
    has_complex: bool = False,
    *,
    step_size_min: float,
    step_size_max: float,
    etaminus: float,
    etaplus: float,
):
    r"""Functional API that performs rprop algorithm computation.

    See :class:`~torch.optim.Rprop` for details.
    """

    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_rprop
    else:
        func = _single_tensor_rprop

    func(
        params,
        grads,
        prevs,
        step_sizes,
        step_size_min=step_size_min,
        step_size_max=step_size_max,
        etaminus=etaminus,
        etaplus=etaplus,
        maximize=maximize,
        differentiable=differentiable,
        has_complex=has_complex,
    )


def _single_tensor_rprop(
    params: List[Tensor],
    grads: List[Tensor],
    prevs: List[Tensor],
    step_sizes: List[Tensor],
    *,
    step_size_min: float,
    step_size_max: float,
    etaminus: float,
    etaplus: float,
    maximize: bool,
    differentiable: bool,
    has_complex: bool,
):

    for i, param in enumerate(params):
        grad = grads[i]
        grad = grad if not maximize else -grad
        prev = prevs[i]
        step_size = step_sizes[i]

        if torch.is_complex(param):
            grad = torch.view_as_real(grad)
            prev = torch.view_as_real(prev)
            param = torch.view_as_real(param)
            step_size = torch.view_as_real(step_size)
        if differentiable:
            sign = grad.mul(prev.clone()).sign()
        else:
            sign = grad.mul(prev).sign()
        sign[sign.gt(0)] = etaplus
        sign[sign.lt(0)] = etaminus
        sign[sign.eq(0)] = 1

        # update stepsizes with step size updates
        step_size.mul_(sign).clamp_(step_size_min, step_size_max)

        # for dir<0, dfdx=0
        # for dir>=0 dfdx=dfdx
        grad = grad.clone(memory_format=torch.preserve_format)
        grad[sign.eq(etaminus)] = 0

        # update parameters
        param.addcmul_(grad.sign(), step_size, value=-1)
        prev.copy_(grad)


def _multi_tensor_rprop(
    params: List[Tensor],
    grads: List[Tensor],
    prevs: List[Tensor],
    step_sizes: List[Tensor],
    *,
    step_size_min: float,
    step_size_max: float,
    etaminus: float,
    etaplus: float,
    maximize: bool,
    differentiable: bool,
    has_complex: bool,
):

    if len(params) == 0:
        return

    assert not differentiable, "_foreach ops don't support autograd"

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, prevs, step_sizes])
    for ((grouped_params, grouped_grads, grouped_prevs, grouped_step_sizes), _) in grouped_tensors.values():
        # Handle complex params
        if has_complex:
            _view_as_real(grouped_params, grouped_grads, grouped_prevs, grouped_step_sizes)

        signs = torch._foreach_mul(grouped_grads, grouped_prevs)
        if maximize:
            torch._foreach_neg_(signs)

        # At the end of the step, grouped_prevs will contain the current grads, so we reuse
        # grouped_prevs memory instead of creating a new buffer, but, for clarity, we reassign
        # to keep referring to the buffer as grouped_grads.
        torch._foreach_copy_(grouped_prevs, grouped_grads)
        if maximize:
            torch._foreach_neg_(grouped_prevs)
        grouped_grads = grouped_prevs

        torch._foreach_sign_(signs)
        for sign in signs:
            sign[sign.gt(0)] = etaplus
            sign[sign.lt(0)] = etaminus
            sign[sign.eq(0)] = 1

        # update stepsizes with step size updates
        torch._foreach_mul_(grouped_step_sizes, signs)
        for step_size in grouped_step_sizes:
            step_size.clamp_(step_size_min, step_size_max)

        # for dir<0, dfdx=0
        # for dir>=0 dfdx=dfdx
        grouped_grads = list(grouped_grads)
        for i in range(len(grouped_grads)):
            grouped_grads[i][signs[i].eq(etaminus)] = 0

        # explicitly del signs as it's not used after here to save memory
        del signs

        # update parameters
        grad_signs = [grad.sign() for grad in grouped_grads]
        torch._foreach_addcmul_(grouped_params, grad_signs, grouped_step_sizes, value=-1)

        # Logically, you may expect grouped_prevs to get updated to grouped_grads, but that's
        # basically already happened since we've been using grouped_prevs' memory to store
        # updated grouped_grads!
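A short usage sketch for the Rprop implementation above. Rprop adapts a per-element step size from the sign agreement of consecutive gradients and ignores gradient magnitudes, so it is typically used with full-batch objectives; the model and data here are illustrative assumptions.

# Hedged sketch: full-batch training with Rprop (values are assumptions).
import torch
import torch.nn as nn

model = nn.Linear(8, 1)
opt = torch.optim.Rprop(model.parameters(), lr=1e-2,
                        etas=(0.5, 1.2), step_sizes=(1e-6, 50))

x, y = torch.randn(64, 8), torch.randn(64, 1)  # one deterministic "full batch"
for _ in range(10):
    opt.zero_grad()
    nn.functional.mse_loss(model(x), y).backward()
    opt.step()  # step sizes grow where grad signs agree, shrink where they flip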
env-llmeval/lib/python3.10/site-packages/torch/optim/rprop.pyi
ADDED
@@ -0,0 +1,12 @@
from typing import Tuple

from .optimizer import Optimizer, ParamsT

class Rprop(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: float = ...,
        etas: Tuple[float, float] = ...,
        step_sizes: Tuple[float, float] = ...,
    ) -> None: ...
env-llmeval/lib/python3.10/site-packages/torch/optim/sparse_adam.py
ADDED
@@ -0,0 +1,154 @@
import torch
from . import _functional as F
from .optimizer import Optimizer, _maximize_doc

__all__ = ['SparseAdam']

class SparseAdam(Optimizer):
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, maximize: bool = False):
        if not 0.0 < lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 < eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")

        defaults = dict(lr=lr, betas=betas, eps=eps, maximize=maximize)
        super().__init__(params, defaults)

        sparse_params = []
        for index, param_group in enumerate(self.param_groups):
            assert isinstance(param_group, dict), f"param_groups must be a list of dicts, but got {type(param_group)}"
            # given param group, convert given params to a list first before iterating
            for d_index, d_param in enumerate(param_group['params']):
                if d_param.is_sparse:
                    sparse_params.append([index, d_index])
        if sparse_params:
            raise ValueError(
                f"Sparse params at indices {sparse_params}: SparseAdam requires dense parameter tensors"
            )

    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            state_steps = []
            eps = group['eps']
            lr = group['lr']
            beta1, beta2 = group['betas']
            maximize = group.get('maximize', False)

            for p in group['params']:
                if p.grad is not None:
                    params_with_grad.append(p)
                    if not p.grad.is_sparse:
                        raise RuntimeError('SparseAdam does not support dense gradients, please consider Adam instead')
                    grads.append(p.grad)

                    state = self.state[p]

                    # State initialization
                    if len(state) == 0:
                        state['step'] = 0
                        # Exponential moving average of gradient values
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        # Exponential moving average of squared gradient values
                        state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                    exp_avgs.append(state['exp_avg'])
                    exp_avg_sqs.append(state['exp_avg_sq'])

                    # update the steps for each param group update
                    state['step'] += 1
                    # record the step after step update
                    state_steps.append(state['step'])

            F.sparse_adam(params_with_grad,
                          grads,
                          exp_avgs,
                          exp_avg_sqs,
                          state_steps,
                          beta1=beta1,
                          beta2=beta2,
                          lr=group['lr'],
                          eps=group['eps'],
                          maximize=maximize)

        return loss

SparseAdam.__doc__ = fr"""SparseAdam implements a masked version of the Adam algorithm
    suitable for sparse gradients. Currently, due to implementation constraints (explained
    below), SparseAdam is only intended for a narrow subset of use cases, specifically
    parameters of a dense layout with gradients of a sparse layout. This occurs in a
    special case where the module backwards produces grads already in a sparse layout.
    One example NN module that behaves as such is ``nn.Embedding(sparse=True)``.

    SparseAdam approximates the Adam algorithm by masking out the parameter and moment
    updates corresponding to the zero values in the gradients. Whereas the Adam algorithm
    will update the first moment, the second moment, and the parameters based on all values
    of the gradients, SparseAdam only updates the moments and parameters corresponding
    to the non-zero values of the gradients.

    A simplified way of thinking about the `intended` implementation is as such:

    1. Create a mask of the non-zero values in the sparse gradients. For example,
       if your gradient looks like [0, 5, 0, 0, 9], the mask would be [0, 1, 0, 0, 1].
    2. Apply this mask over the running moments and do computation on only the
       non-zero values.
    3. Apply this mask over the parameters and only apply an update on non-zero values.

    In actuality, we use sparse layout Tensors to optimize this approximation, which means the
    more gradients that are masked by not being materialized, the more performant the optimization.
    Since we rely on using sparse layout tensors, we infer that any materialized value in the
    sparse layout is non-zero and we do NOT actually verify that all values are not zero!
    It is important to not conflate a semantically sparse tensor (a tensor where many
    of its values are zeros) with a sparse layout tensor (a tensor where ``.is_sparse``
    returns ``True``). The SparseAdam approximation is intended for `semantically` sparse
    tensors and the sparse layout is only an implementation detail. A clearer implementation
    would be to use MaskedTensors, but those are experimental.


    .. note::

        If you suspect your gradients are semantically sparse (but do not have sparse
        layout), this variant may not be the best for you. Ideally, you want to avoid
        materializing anything that is suspected to be sparse in the first place, since
        needing to convert all your grads from dense layout to sparse layout may outweigh
        the performance gain. Here, using Adam may be the best alternative, unless you
        can easily rig up your module to output sparse grads similar to
        ``nn.Embedding(sparse=True)``. If you insist on converting your grads, you can do
        so by manually overriding your parameters' ``.grad`` fields with their sparse
        equivalents before calling ``.step()``.


    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        {_maximize_doc}

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980

    """
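A sketch of the intended SparseAdam use case the docstring describes: dense parameters whose backward produces sparse-layout gradients, as with ``nn.Embedding(sparse=True)``. Sizes and indices below are illustrative assumptions.

# Hedged sketch: SparseAdam with a sparse-gradient embedding table.
import torch
import torch.nn as nn

emb = nn.Embedding(1000, 16, sparse=True)  # backward yields sparse-layout grads
opt = torch.optim.SparseAdam(emb.parameters(), lr=1e-3)

idx = torch.randint(0, 1000, (32,))
loss = emb(idx).pow(2).sum()
loss.backward()   # emb.weight.grad.is_sparse is True
opt.step()        # only rows touched by `idx` have moments/params updated
opt.zero_grad()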
env-llmeval/lib/python3.10/site-packages/torch/optim/swa_utils.pyi
ADDED
@@ -0,0 +1,32 @@
from typing import Any, Callable, Iterable, Union

from torch import device, Tensor
from torch.nn.modules import Module
from .lr_scheduler import _LRScheduler
from .optimizer import Optimizer

class AveragedModel(Module):
    def __init__(
        self,
        model: Module,
        device: Union[int, device] = ...,
        avg_fn: Callable[[Tensor, Tensor, int], Tensor] = ...,
        use_buffers: bool = ...,
    ) -> None: ...
    def update_parameters(self, model: Module) -> None: ...

def update_bn(
    loader: Iterable[Any],
    model: Module,
    device: Union[int, device] = ...,
) -> None: ...

class SWALR(_LRScheduler):
    def __init__(
        self,
        optimizer: Optimizer,
        swa_lr: float,
        anneal_epochs: int,
        anneal_strategy: str,
        last_epoch: int = ...,
    ) -> None: ...
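A sketch of the stochastic weight averaging utilities typed above; the training loop, model, and schedule values are illustrative assumptions.

# Hedged sketch: SWA with AveragedModel, SWALR, and update_bn.
import torch
import torch.nn as nn
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn

model = nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
swa_model = AveragedModel(model)
swa_sched = SWALR(opt, swa_lr=0.05, anneal_epochs=5, anneal_strategy="cos")

loader = [(torch.randn(8, 4), torch.randn(8, 2)) for _ in range(10)]
for epoch in range(20):
    for x, y in loader:
        opt.zero_grad()
        nn.functional.mse_loss(model(x), y).backward()
        opt.step()
    if epoch >= 10:                      # start averaging after a warm-up phase
        swa_model.update_parameters(model)
        swa_sched.step()

# Recompute BatchNorm statistics for the averaged weights (a no-op for this
# BN-free toy model, shown for completeness).
update_bn((x for x, _ in loader), swa_model)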
env-llmeval/lib/python3.10/site-packages/torch/package/__init__.py
ADDED
@@ -0,0 +1,12 @@
from .analyze.is_from_package import is_from_package
from .file_structure_representation import Directory
from .glob_group import GlobGroup
from .importer import (
    Importer,
    ObjMismatchError,
    ObjNotFoundError,
    OrderedImporter,
    sys_importer,
)
from .package_exporter import EmptyMatchError, PackageExporter, PackagingError
from .package_importer import PackageImporter
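A sketch of the torch.package API re-exported above; the archive name and the packaged object are hypothetical, and the extern pattern is one common way to rely on the loading environment's torch installation.

# Hedged sketch: round-tripping an object through PackageExporter/PackageImporter.
import torch
from torch.package import PackageExporter, PackageImporter

model = torch.nn.Linear(3, 3)
with PackageExporter("my_package.pt") as exporter:  # hypothetical file name
    exporter.extern("torch.**")  # resolve torch from the environment at load time
    exporter.save_pickle("models", "model.pkl", model)

importer = PackageImporter("my_package.pt")
loaded = importer.load_pickle("models", "model.pkl")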
env-llmeval/lib/python3.10/site-packages/torch/package/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (655 Bytes).